author    Paul Mundt <lethal@linux-sh.org>  2008-10-20 11:17:52 +0900
committer Paul Mundt <lethal@linux-sh.org>  2008-10-20 11:17:52 +0900
commit    4cb40f795af36b3deb743f6ccf6c3fd542c61c8d
tree      db3d7519932549bf528f5b8e4cb8350356cd544d
parent    79ed2a9216dd3cc35c4f2c5dbaddadb195af83ac
parent    0cfd81031a26717fe14380d18275f8e217571615
Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6
Conflicts:
        Documentation/kernel-parameters.txt
        arch/sh/include/asm/elf.h
Diffstat (limited to 'drivers/net/sfc')
-rw-r--r-- drivers/net/sfc/bitfield.h       |  178
-rw-r--r-- drivers/net/sfc/boards.c         |   12
-rw-r--r-- drivers/net/sfc/boards.h         |    2
-rw-r--r-- drivers/net/sfc/efx.c            |  489
-rw-r--r-- drivers/net/sfc/efx.h            |   14
-rw-r--r-- drivers/net/sfc/enum.h           |    9
-rw-r--r-- drivers/net/sfc/ethtool.c        |  184
-rw-r--r-- drivers/net/sfc/falcon.c         | 1019
-rw-r--r-- drivers/net/sfc/falcon.h         |   17
-rw-r--r-- drivers/net/sfc/falcon_hwdefs.h  |   80
-rw-r--r-- drivers/net/sfc/falcon_io.h      |    1
-rw-r--r-- drivers/net/sfc/falcon_xmac.c    |  346
-rw-r--r-- drivers/net/sfc/mac.h            |    4
-rw-r--r-- drivers/net/sfc/mdio_10g.c       |   16
-rw-r--r-- drivers/net/sfc/mdio_10g.h       |   13
-rw-r--r-- drivers/net/sfc/net_driver.h     |  144
-rw-r--r-- drivers/net/sfc/phy.h            |   10
-rw-r--r-- drivers/net/sfc/rx.c             |   78
-rw-r--r-- drivers/net/sfc/rx.h             |    4
-rw-r--r-- drivers/net/sfc/selftest.c       |  391
-rw-r--r-- drivers/net/sfc/selftest.h       |   13
-rw-r--r-- drivers/net/sfc/sfe4001.c        |  248
-rw-r--r-- drivers/net/sfc/spi.h            |   89
-rw-r--r-- drivers/net/sfc/tenxpress.c      |  149
-rw-r--r-- drivers/net/sfc/tx.c             |  385
-rw-r--r-- drivers/net/sfc/tx.h             |    2
-rw-r--r-- drivers/net/sfc/workarounds.h    |    4
-rw-r--r-- drivers/net/sfc/xfp_phy.c        |   12
28 files changed, 2087 insertions(+), 1826 deletions(-)
diff --git a/drivers/net/sfc/bitfield.h b/drivers/net/sfc/bitfield.h
index 2c79d27404e..d95c2182801 100644
--- a/drivers/net/sfc/bitfield.h
+++ b/drivers/net/sfc/bitfield.h
@@ -52,9 +52,9 @@
*
* The maximum width mask that can be generated is 64 bits.
*/
-#define EFX_MASK64(field) \
- (EFX_WIDTH(field) == 64 ? ~((u64) 0) : \
- (((((u64) 1) << EFX_WIDTH(field))) - 1))
+#define EFX_MASK64(width) \
+ ((width) == 64 ? ~((u64) 0) : \
+ (((((u64) 1) << (width))) - 1))
/* Mask equal in width to the specified field.
*
@@ -63,9 +63,9 @@
* The maximum width mask that can be generated is 32 bits. Use
* EFX_MASK64 for higher width fields.
*/
-#define EFX_MASK32(field) \
- (EFX_WIDTH(field) == 32 ? ~((u32) 0) : \
- (((((u32) 1) << EFX_WIDTH(field))) - 1))
+#define EFX_MASK32(width) \
+ ((width) == 32 ? ~((u32) 0) : \
+ (((((u32) 1) << (width))) - 1))
/* A doubleword (i.e. 4 byte) datatype - little-endian in HW */
typedef union efx_dword {
@@ -138,44 +138,49 @@ typedef union efx_oword {
EFX_EXTRACT_NATIVE(le32_to_cpu(element), min, max, low, high)
#define EFX_EXTRACT_OWORD64(oword, low, high) \
- (EFX_EXTRACT64((oword).u64[0], 0, 63, low, high) | \
- EFX_EXTRACT64((oword).u64[1], 64, 127, low, high))
+ ((EFX_EXTRACT64((oword).u64[0], 0, 63, low, high) | \
+ EFX_EXTRACT64((oword).u64[1], 64, 127, low, high)) & \
+ EFX_MASK64(high + 1 - low))
#define EFX_EXTRACT_QWORD64(qword, low, high) \
- EFX_EXTRACT64((qword).u64[0], 0, 63, low, high)
+ (EFX_EXTRACT64((qword).u64[0], 0, 63, low, high) & \
+ EFX_MASK64(high + 1 - low))
#define EFX_EXTRACT_OWORD32(oword, low, high) \
- (EFX_EXTRACT32((oword).u32[0], 0, 31, low, high) | \
- EFX_EXTRACT32((oword).u32[1], 32, 63, low, high) | \
- EFX_EXTRACT32((oword).u32[2], 64, 95, low, high) | \
- EFX_EXTRACT32((oword).u32[3], 96, 127, low, high))
+ ((EFX_EXTRACT32((oword).u32[0], 0, 31, low, high) | \
+ EFX_EXTRACT32((oword).u32[1], 32, 63, low, high) | \
+ EFX_EXTRACT32((oword).u32[2], 64, 95, low, high) | \
+ EFX_EXTRACT32((oword).u32[3], 96, 127, low, high)) & \
+ EFX_MASK32(high + 1 - low))
#define EFX_EXTRACT_QWORD32(qword, low, high) \
- (EFX_EXTRACT32((qword).u32[0], 0, 31, low, high) | \
- EFX_EXTRACT32((qword).u32[1], 32, 63, low, high))
+ ((EFX_EXTRACT32((qword).u32[0], 0, 31, low, high) | \
+ EFX_EXTRACT32((qword).u32[1], 32, 63, low, high)) & \
+ EFX_MASK32(high + 1 - low))
-#define EFX_EXTRACT_DWORD(dword, low, high) \
- EFX_EXTRACT32((dword).u32[0], 0, 31, low, high)
+#define EFX_EXTRACT_DWORD(dword, low, high) \
+ (EFX_EXTRACT32((dword).u32[0], 0, 31, low, high) & \
+ EFX_MASK32(high + 1 - low))
-#define EFX_OWORD_FIELD64(oword, field) \
- (EFX_EXTRACT_OWORD64(oword, EFX_LOW_BIT(field), EFX_HIGH_BIT(field)) \
- & EFX_MASK64(field))
+#define EFX_OWORD_FIELD64(oword, field) \
+ EFX_EXTRACT_OWORD64(oword, EFX_LOW_BIT(field), \
+ EFX_HIGH_BIT(field))
-#define EFX_QWORD_FIELD64(qword, field) \
- (EFX_EXTRACT_QWORD64(qword, EFX_LOW_BIT(field), EFX_HIGH_BIT(field)) \
- & EFX_MASK64(field))
+#define EFX_QWORD_FIELD64(qword, field) \
+ EFX_EXTRACT_QWORD64(qword, EFX_LOW_BIT(field), \
+ EFX_HIGH_BIT(field))
-#define EFX_OWORD_FIELD32(oword, field) \
- (EFX_EXTRACT_OWORD32(oword, EFX_LOW_BIT(field), EFX_HIGH_BIT(field)) \
- & EFX_MASK32(field))
+#define EFX_OWORD_FIELD32(oword, field) \
+ EFX_EXTRACT_OWORD32(oword, EFX_LOW_BIT(field), \
+ EFX_HIGH_BIT(field))
-#define EFX_QWORD_FIELD32(qword, field) \
- (EFX_EXTRACT_QWORD32(qword, EFX_LOW_BIT(field), EFX_HIGH_BIT(field)) \
- & EFX_MASK32(field))
+#define EFX_QWORD_FIELD32(qword, field) \
+ EFX_EXTRACT_QWORD32(qword, EFX_LOW_BIT(field), \
+ EFX_HIGH_BIT(field))
-#define EFX_DWORD_FIELD(dword, field) \
- (EFX_EXTRACT_DWORD(dword, EFX_LOW_BIT(field), EFX_HIGH_BIT(field)) \
- & EFX_MASK32(field))
+#define EFX_DWORD_FIELD(dword, field) \
+ EFX_EXTRACT_DWORD(dword, EFX_LOW_BIT(field), \
+ EFX_HIGH_BIT(field))
#define EFX_OWORD_IS_ZERO64(oword) \
(((oword).u64[0] | (oword).u64[1]) == (__force __le64) 0)
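The masking now happens inside the EFX_EXTRACT_* macros, keyed on the field width (high + 1 - low) rather than on a named field definition. A minimal standalone sketch of the arithmetic (not the kernel macros themselves; the endian conversions and multi-word splitting are left out):

#include <stdint.h>
#include <stdio.h>

/* Width-based mask, as EFX_MASK64 now takes a width, not a field. */
static uint64_t mask64(unsigned int width)
{
        return width == 64 ? ~(uint64_t)0 : (((uint64_t)1 << width) - 1);
}

/* Single-word analogue of EFX_EXTRACT64 plus the new in-macro masking. */
static uint64_t extract64(uint64_t word, unsigned int low, unsigned int high)
{
        return (word >> low) & mask64(high + 1 - low);
}

int main(void)
{
        /* Bits 8..15 of 0xabcd1234 are 0x12. */
        printf("0x%llx\n", (unsigned long long)extract64(0xabcd1234, 8, 15));
        return 0;
}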
@@ -411,69 +416,102 @@ typedef union efx_oword {
* for read-modify-write operations.
*
*/
-
#define EFX_INVERT_OWORD(oword) do { \
(oword).u64[0] = ~((oword).u64[0]); \
(oword).u64[1] = ~((oword).u64[1]); \
} while (0)
-#define EFX_INSERT_FIELD64(...) \
- cpu_to_le64(EFX_INSERT_FIELD_NATIVE(__VA_ARGS__))
+#define EFX_AND_OWORD(oword, from, mask) \
+ do { \
+ (oword).u64[0] = (from).u64[0] & (mask).u64[0]; \
+ (oword).u64[1] = (from).u64[1] & (mask).u64[1]; \
+ } while (0)
+
+#define EFX_OR_OWORD(oword, from, mask) \
+ do { \
+ (oword).u64[0] = (from).u64[0] | (mask).u64[0]; \
+ (oword).u64[1] = (from).u64[1] | (mask).u64[1]; \
+ } while (0)
-#define EFX_INSERT_FIELD32(...) \
- cpu_to_le32(EFX_INSERT_FIELD_NATIVE(__VA_ARGS__))
+#define EFX_INSERT64(min, max, low, high, value) \
+ cpu_to_le64(EFX_INSERT_NATIVE(min, max, low, high, value))
-#define EFX_INPLACE_MASK64(min, max, field) \
- EFX_INSERT_FIELD64(min, max, field, EFX_MASK64(field))
+#define EFX_INSERT32(min, max, low, high, value) \
+ cpu_to_le32(EFX_INSERT_NATIVE(min, max, low, high, value))
-#define EFX_INPLACE_MASK32(min, max, field) \
- EFX_INSERT_FIELD32(min, max, field, EFX_MASK32(field))
+#define EFX_INPLACE_MASK64(min, max, low, high) \
+ EFX_INSERT64(min, max, low, high, EFX_MASK64(high + 1 - low))
-#define EFX_SET_OWORD_FIELD64(oword, field, value) do { \
+#define EFX_INPLACE_MASK32(min, max, low, high) \
+ EFX_INSERT32(min, max, low, high, EFX_MASK32(high + 1 - low))
+
+#define EFX_SET_OWORD64(oword, low, high, value) do { \
(oword).u64[0] = (((oword).u64[0] \
- & ~EFX_INPLACE_MASK64(0, 63, field)) \
- | EFX_INSERT_FIELD64(0, 63, field, value)); \
+ & ~EFX_INPLACE_MASK64(0, 63, low, high)) \
+ | EFX_INSERT64(0, 63, low, high, value)); \
(oword).u64[1] = (((oword).u64[1] \
- & ~EFX_INPLACE_MASK64(64, 127, field)) \
- | EFX_INSERT_FIELD64(64, 127, field, value)); \
+ & ~EFX_INPLACE_MASK64(64, 127, low, high)) \
+ | EFX_INSERT64(64, 127, low, high, value)); \
} while (0)
-#define EFX_SET_QWORD_FIELD64(qword, field, value) do { \
+#define EFX_SET_QWORD64(qword, low, high, value) do { \
(qword).u64[0] = (((qword).u64[0] \
- & ~EFX_INPLACE_MASK64(0, 63, field)) \
- | EFX_INSERT_FIELD64(0, 63, field, value)); \
+ & ~EFX_INPLACE_MASK64(0, 63, low, high)) \
+ | EFX_INSERT64(0, 63, low, high, value)); \
} while (0)
-#define EFX_SET_OWORD_FIELD32(oword, field, value) do { \
+#define EFX_SET_OWORD32(oword, low, high, value) do { \
(oword).u32[0] = (((oword).u32[0] \
- & ~EFX_INPLACE_MASK32(0, 31, field)) \
- | EFX_INSERT_FIELD32(0, 31, field, value)); \
+ & ~EFX_INPLACE_MASK32(0, 31, low, high)) \
+ | EFX_INSERT32(0, 31, low, high, value)); \
(oword).u32[1] = (((oword).u32[1] \
- & ~EFX_INPLACE_MASK32(32, 63, field)) \
- | EFX_INSERT_FIELD32(32, 63, field, value)); \
+ & ~EFX_INPLACE_MASK32(32, 63, low, high)) \
+ | EFX_INSERT32(32, 63, low, high, value)); \
(oword).u32[2] = (((oword).u32[2] \
- & ~EFX_INPLACE_MASK32(64, 95, field)) \
- | EFX_INSERT_FIELD32(64, 95, field, value)); \
+ & ~EFX_INPLACE_MASK32(64, 95, low, high)) \
+ | EFX_INSERT32(64, 95, low, high, value)); \
(oword).u32[3] = (((oword).u32[3] \
- & ~EFX_INPLACE_MASK32(96, 127, field)) \
- | EFX_INSERT_FIELD32(96, 127, field, value)); \
+ & ~EFX_INPLACE_MASK32(96, 127, low, high)) \
+ | EFX_INSERT32(96, 127, low, high, value)); \
} while (0)
-#define EFX_SET_QWORD_FIELD32(qword, field, value) do { \
+#define EFX_SET_QWORD32(qword, low, high, value) do { \
(qword).u32[0] = (((qword).u32[0] \
- & ~EFX_INPLACE_MASK32(0, 31, field)) \
- | EFX_INSERT_FIELD32(0, 31, field, value)); \
+ & ~EFX_INPLACE_MASK32(0, 31, low, high)) \
+ | EFX_INSERT32(0, 31, low, high, value)); \
(qword).u32[1] = (((qword).u32[1] \
- & ~EFX_INPLACE_MASK32(32, 63, field)) \
- | EFX_INSERT_FIELD32(32, 63, field, value)); \
+ & ~EFX_INPLACE_MASK32(32, 63, low, high)) \
+ | EFX_INSERT32(32, 63, low, high, value)); \
} while (0)
-#define EFX_SET_DWORD_FIELD(dword, field, value) do { \
- (dword).u32[0] = (((dword).u32[0] \
- & ~EFX_INPLACE_MASK32(0, 31, field)) \
- | EFX_INSERT_FIELD32(0, 31, field, value)); \
+#define EFX_SET_DWORD32(dword, low, high, value) do { \
+ (dword).u32[0] = (((dword).u32[0] \
+ & ~EFX_INPLACE_MASK32(0, 31, low, high)) \
+ | EFX_INSERT32(0, 31, low, high, value)); \
} while (0)
+#define EFX_SET_OWORD_FIELD64(oword, field, value) \
+ EFX_SET_OWORD64(oword, EFX_LOW_BIT(field), \
+ EFX_HIGH_BIT(field), value)
+
+#define EFX_SET_QWORD_FIELD64(qword, field, value) \
+ EFX_SET_QWORD64(qword, EFX_LOW_BIT(field), \
+ EFX_HIGH_BIT(field), value)
+
+#define EFX_SET_OWORD_FIELD32(oword, field, value) \
+ EFX_SET_OWORD32(oword, EFX_LOW_BIT(field), \
+ EFX_HIGH_BIT(field), value)
+
+#define EFX_SET_QWORD_FIELD32(qword, field, value) \
+ EFX_SET_QWORD32(qword, EFX_LOW_BIT(field), \
+ EFX_HIGH_BIT(field), value)
+
+#define EFX_SET_DWORD_FIELD(dword, field, value) \
+ EFX_SET_DWORD32(dword, EFX_LOW_BIT(field), \
+ EFX_HIGH_BIT(field), value)
+
+
+
#if BITS_PER_LONG == 64
#define EFX_SET_OWORD_FIELD EFX_SET_OWORD_FIELD64
#define EFX_SET_QWORD_FIELD EFX_SET_QWORD_FIELD64
@@ -502,4 +540,10 @@ typedef union efx_oword {
#define EFX_DMA_TYPE_WIDTH(width) \
(((width) < DMA_ADDR_T_WIDTH) ? (width) : DMA_ADDR_T_WIDTH)
+
+/* Static initialiser */
+#define EFX_OWORD32(a, b, c, d) \
+ { .u32 = { __constant_cpu_to_le32(a), __constant_cpu_to_le32(b), \
+ __constant_cpu_to_le32(c), __constant_cpu_to_le32(d) } }
+
#endif /* EFX_BITFIELD_H */
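The new EFX_OWORD32 initialiser lets a 128-bit register image be built at compile time from four 32-bit words. A standalone sketch of its shape, assuming a little-endian host so the __constant_cpu_to_le32() conversions collapse to no-ops:

#include <stdint.h>
#include <stdio.h>

typedef union {
        uint32_t u32[4];
        uint64_t u64[2];
} efx_oword_t;

/* Little-endian host assumed: the cpu_to_le32 wrappers are omitted. */
#define EFX_OWORD32(a, b, c, d) { .u32 = { (a), (b), (c), (d) } }

int main(void)
{
        static const efx_oword_t reg =
                EFX_OWORD32(0x11111111, 0x22222222, 0x33333333, 0x44444444);
        printf("%08x ... %08x\n", reg.u32[0], reg.u32[3]);
        return 0;
}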
diff --git a/drivers/net/sfc/boards.c b/drivers/net/sfc/boards.c
index d3d3dd0a117..99e60237326 100644
--- a/drivers/net/sfc/boards.c
+++ b/drivers/net/sfc/boards.c
@@ -31,23 +31,23 @@ static void blink_led_timer(unsigned long context)
mod_timer(&bl->timer, jiffies + BLINK_INTERVAL);
}
-static void board_blink(struct efx_nic *efx, int blink)
+static void board_blink(struct efx_nic *efx, bool blink)
{
struct efx_blinker *blinker = &efx->board_info.blinker;
/* The rtnl mutex serialises all ethtool ioctls, so
* nothing special needs doing here. */
if (blink) {
- blinker->resubmit = 1;
- blinker->state = 0;
+ blinker->resubmit = true;
+ blinker->state = false;
setup_timer(&blinker->timer, blink_led_timer,
(unsigned long)efx);
mod_timer(&blinker->timer, jiffies + BLINK_INTERVAL);
} else {
- blinker->resubmit = 0;
+ blinker->resubmit = false;
if (blinker->timer.function)
del_timer_sync(&blinker->timer);
- efx->board_info.set_fault_led(efx, 0);
+ efx->board_info.set_fault_led(efx, false);
}
}
@@ -78,7 +78,7 @@ static int sfe4002_init_leds(struct efx_nic *efx)
return 0;
}
-static void sfe4002_fault_led(struct efx_nic *efx, int state)
+static void sfe4002_fault_led(struct efx_nic *efx, bool state)
{
xfp_set_led(efx, SFE4002_FAULT_LED, state ? QUAKE_LED_ON :
QUAKE_LED_OFF);
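board_blink() arms a self-resubmitting timer; the callback (only its tail is visible in the hunk above) drives the LED from the current state, inverts it, and re-arms itself while resubmit stays true. A hedged kernel-style sketch of that pattern — the field names follow the fragment above, but the exact body may differ:

static void blink_led_timer(unsigned long context)
{
        struct efx_nic *efx = (struct efx_nic *)context;
        struct efx_blinker *bl = &efx->board_info.blinker;

        /* Drive the LED from the current state, then invert it. */
        efx->board_info.set_fault_led(efx, bl->state);
        bl->state = !bl->state;
        if (bl->resubmit)
                mod_timer(&bl->timer, jiffies + BLINK_INTERVAL);
}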
diff --git a/drivers/net/sfc/boards.h b/drivers/net/sfc/boards.h
index e5e844359ce..c6e01b64bfb 100644
--- a/drivers/net/sfc/boards.h
+++ b/drivers/net/sfc/boards.h
@@ -21,7 +21,5 @@ enum efx_board_type {
extern int efx_set_board_info(struct efx_nic *efx, u16 revision_info);
extern int sfe4001_init(struct efx_nic *efx);
-/* Are we putting the PHY into flash config mode */
-extern unsigned int sfe4001_phy_flash_cfg;
#endif
diff --git a/drivers/net/sfc/efx.c b/drivers/net/sfc/efx.c
index 45c72eebb3a..06ea71c7e34 100644
--- a/drivers/net/sfc/efx.c
+++ b/drivers/net/sfc/efx.c
@@ -28,7 +28,6 @@
#include "efx.h"
#include "mdio_10g.h"
#include "falcon.h"
-#include "workarounds.h"
#include "mac.h"
#define EFX_MAX_MTU (9 * 1024)
@@ -52,7 +51,7 @@ static struct workqueue_struct *refill_workqueue;
* This sets the default for new devices. It can be controlled later
* using ethtool.
*/
-static int lro = 1;
+static int lro = true;
module_param(lro, int, 0644);
MODULE_PARM_DESC(lro, "Large receive offload acceleration");
@@ -65,7 +64,7 @@ MODULE_PARM_DESC(lro, "Large receive offload acceleration");
* This is forced to 0 for MSI interrupt mode as the interrupt vector
* is not written
*/
-static unsigned int separate_tx_and_rx_channels = 1;
+static unsigned int separate_tx_and_rx_channels = true;
/* This is the weight assigned to each of the (per-channel) virtual
* NAPI devices.
@@ -81,7 +80,7 @@ unsigned int efx_monitor_interval = 1 * HZ;
/* This controls whether or not the hardware monitor will trigger a
* reset when it detects an error condition.
*/
-static unsigned int monitor_reset = 1;
+static unsigned int monitor_reset = true;
/* This controls whether or not the driver will initialise devices
* with invalid MAC addresses stored in the EEPROM or flash. If true,
@@ -141,8 +140,7 @@ static void efx_fini_channels(struct efx_nic *efx);
#define EFX_ASSERT_RESET_SERIALISED(efx) \
do { \
- if ((efx->state == STATE_RUNNING) || \
- (efx->state == STATE_RESETTING)) \
+ if (efx->state == STATE_RUNNING) \
ASSERT_RTNL(); \
} while (0)
@@ -159,16 +157,18 @@ static void efx_fini_channels(struct efx_nic *efx);
* never be concurrently called more than once on the same channel,
* though different channels may be being processed concurrently.
*/
-static inline int efx_process_channel(struct efx_channel *channel, int rx_quota)
+static int efx_process_channel(struct efx_channel *channel, int rx_quota)
{
- int rxdmaqs;
- struct efx_rx_queue *rx_queue;
+ struct efx_nic *efx = channel->efx;
+ int rx_packets;
- if (unlikely(channel->efx->reset_pending != RESET_TYPE_NONE ||
+ if (unlikely(efx->reset_pending != RESET_TYPE_NONE ||
!channel->enabled))
- return rx_quota;
+ return 0;
- rxdmaqs = falcon_process_eventq(channel, &rx_quota);
+ rx_packets = falcon_process_eventq(channel, rx_quota);
+ if (rx_packets == 0)
+ return 0;
/* Deliver last RX packet. */
if (channel->rx_pkt) {
@@ -180,16 +180,9 @@ static inline int efx_process_channel(struct efx_channel *channel, int rx_quota)
efx_flush_lro(channel);
efx_rx_strategy(channel);
- /* Refill descriptor rings as necessary */
- rx_queue = &channel->efx->rx_queue[0];
- while (rxdmaqs) {
- if (rxdmaqs & 0x01)
- efx_fast_push_rx_descriptors(rx_queue);
- rx_queue++;
- rxdmaqs >>= 1;
- }
+ efx_fast_push_rx_descriptors(&efx->rx_queue[channel->channel]);
- return rx_quota;
+ return rx_packets;
}
/* Mark channel as finished processing
@@ -203,7 +196,7 @@ static inline void efx_channel_processed(struct efx_channel *channel)
/* The interrupt handler for this channel may set work_pending
* as soon as we acknowledge the events we've seen. Make sure
* it's cleared before then. */
- channel->work_pending = 0;
+ channel->work_pending = false;
smp_wmb();
falcon_eventq_read_ack(channel);
@@ -219,14 +212,12 @@ static int efx_poll(struct napi_struct *napi, int budget)
struct efx_channel *channel =
container_of(napi, struct efx_channel, napi_str);
struct net_device *napi_dev = channel->napi_dev;
- int unused;
int rx_packets;
EFX_TRACE(channel->efx, "channel %d NAPI poll executing on CPU %d\n",
channel->channel, raw_smp_processor_id());
- unused = efx_process_channel(channel, budget);
- rx_packets = (budget - unused);
+ rx_packets = efx_process_channel(channel, budget);
if (rx_packets < budget) {
/* There is no race here; although napi_disable() will
@@ -260,7 +251,7 @@ void efx_process_channel_now(struct efx_channel *channel)
falcon_disable_interrupts(efx);
if (efx->legacy_irq)
synchronize_irq(efx->legacy_irq);
- if (channel->has_interrupt && channel->irq)
+ if (channel->irq)
synchronize_irq(channel->irq);
/* Wait for any NAPI processing to complete */
@@ -290,13 +281,13 @@ static int efx_probe_eventq(struct efx_channel *channel)
}
/* Prepare channel's event queue */
-static int efx_init_eventq(struct efx_channel *channel)
+static void efx_init_eventq(struct efx_channel *channel)
{
EFX_LOG(channel->efx, "chan %d init event queue\n", channel->channel);
channel->eventq_read_ptr = 0;
- return falcon_init_eventq(channel);
+ falcon_init_eventq(channel);
}
static void efx_fini_eventq(struct efx_channel *channel)
@@ -362,12 +353,11 @@ static int efx_probe_channel(struct efx_channel *channel)
* to propagate configuration changes (mtu, checksum offload), or
* to clear hardware error conditions
*/
-static int efx_init_channels(struct efx_nic *efx)
+static void efx_init_channels(struct efx_nic *efx)
{
struct efx_tx_queue *tx_queue;
struct efx_rx_queue *rx_queue;
struct efx_channel *channel;
- int rc = 0;
/* Calculate the rx buffer allocation parameters required to
* support the current MTU, including padding for header
@@ -382,36 +372,20 @@ static int efx_init_channels(struct efx_nic *efx)
efx_for_each_channel(channel, efx) {
EFX_LOG(channel->efx, "init chan %d\n", channel->channel);
- rc = efx_init_eventq(channel);
- if (rc)
- goto err;
+ efx_init_eventq(channel);
- efx_for_each_channel_tx_queue(tx_queue, channel) {
- rc = efx_init_tx_queue(tx_queue);
- if (rc)
- goto err;
- }
+ efx_for_each_channel_tx_queue(tx_queue, channel)
+ efx_init_tx_queue(tx_queue);
/* The rx buffer allocation strategy is MTU dependent */
efx_rx_strategy(channel);
- efx_for_each_channel_rx_queue(rx_queue, channel) {
- rc = efx_init_rx_queue(rx_queue);
- if (rc)
- goto err;
- }
+ efx_for_each_channel_rx_queue(rx_queue, channel)
+ efx_init_rx_queue(rx_queue);
WARN_ON(channel->rx_pkt != NULL);
efx_rx_strategy(channel);
}
-
- return 0;
-
- err:
- EFX_ERR(efx, "failed to initialise channel %d\n",
- channel ? channel->channel : -1);
- efx_fini_channels(efx);
- return rc;
}
/* This enables event queue processing and packet transmission.
@@ -432,8 +406,8 @@ static void efx_start_channel(struct efx_channel *channel)
/* The interrupt handler for this channel may set work_pending
* as soon as we enable it. Make sure it's cleared before
* then. Similarly, make sure it sees the enabled flag set. */
- channel->work_pending = 0;
- channel->enabled = 1;
+ channel->work_pending = false;
+ channel->enabled = true;
smp_wmb();
napi_enable(&channel->napi_str);
@@ -456,7 +430,7 @@ static void efx_stop_channel(struct efx_channel *channel)
EFX_LOG(channel->efx, "stop chan %d\n", channel->channel);
- channel->enabled = 0;
+ channel->enabled = false;
napi_disable(&channel->napi_str);
/* Ensure that any worker threads have exited or will be no-ops */
@@ -471,10 +445,17 @@ static void efx_fini_channels(struct efx_nic *efx)
struct efx_channel *channel;
struct efx_tx_queue *tx_queue;
struct efx_rx_queue *rx_queue;
+ int rc;
EFX_ASSERT_RESET_SERIALISED(efx);
BUG_ON(efx->port_enabled);
+ rc = falcon_flush_queues(efx);
+ if (rc)
+ EFX_ERR(efx, "failed to flush queues\n");
+ else
+ EFX_LOG(efx, "successfully flushed all queues\n");
+
efx_for_each_channel(channel, efx) {
EFX_LOG(channel->efx, "shut down chan %d\n", channel->channel);
@@ -482,13 +463,6 @@ static void efx_fini_channels(struct efx_nic *efx)
efx_fini_rx_queue(rx_queue);
efx_for_each_channel_tx_queue(tx_queue, channel)
efx_fini_tx_queue(tx_queue);
- }
-
- /* Do the event queues last so that we can handle flush events
- * for all DMA queues. */
- efx_for_each_channel(channel, efx) {
- EFX_LOG(channel->efx, "shut down evq %d\n", channel->channel);
-
efx_fini_eventq(channel);
}
}
@@ -526,8 +500,6 @@ void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue, int delay)
*/
static void efx_link_status_changed(struct efx_nic *efx)
{
- int carrier_ok;
-
/* SFC Bug 5356: A net_dev notifier is registered, so we must ensure
* that no events are triggered between unregister_netdev() and the
* driver unloading. A more general condition is that NETDEV_CHANGE
@@ -535,8 +507,12 @@ static void efx_link_status_changed(struct efx_nic *efx)
if (!netif_running(efx->net_dev))
return;
- carrier_ok = netif_carrier_ok(efx->net_dev) ? 1 : 0;
- if (efx->link_up != carrier_ok) {
+ if (efx->port_inhibited) {
+ netif_carrier_off(efx->net_dev);
+ return;
+ }
+
+ if (efx->link_up != netif_carrier_ok(efx->net_dev)) {
efx->n_link_state_changes++;
if (efx->link_up)
@@ -577,13 +553,19 @@ static void efx_link_status_changed(struct efx_nic *efx)
/* This call reinitialises the MAC to pick up new PHY settings. The
* caller must hold the mac_lock */
-static void __efx_reconfigure_port(struct efx_nic *efx)
+void __efx_reconfigure_port(struct efx_nic *efx)
{
WARN_ON(!mutex_is_locked(&efx->mac_lock));
EFX_LOG(efx, "reconfiguring MAC from PHY settings on CPU %d\n",
raw_smp_processor_id());
+ /* Serialise the promiscuous flag with efx_set_multicast_list. */
+ if (efx_dev_registered(efx)) {
+ netif_addr_lock_bh(efx->net_dev);
+ netif_addr_unlock_bh(efx->net_dev);
+ }
+
falcon_reconfigure_xmac(efx);
/* Inform kernel of loss/gain of carrier */
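The netif_addr_lock_bh()/netif_addr_unlock_bh() pair added above is a lock-as-barrier idiom: acquiring and immediately releasing the lock guarantees that any efx_set_multicast_list() call that began earlier has left its critical section, so the promiscuous flag it wrote is stable before the MAC is reconfigured. A standalone pthread sketch of the same idiom:

#include <pthread.h>

static pthread_mutex_t addr_lock = PTHREAD_MUTEX_INITIALIZER;

/* Writers update shared state while holding addr_lock. */
static void wait_for_earlier_writers(void)
{
        /* On return, no critical section that began before the
         * lock() call is still in progress. */
        pthread_mutex_lock(&addr_lock);
        pthread_mutex_unlock(&addr_lock);
}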
@@ -661,7 +643,8 @@ static int efx_init_port(struct efx_nic *efx)
if (rc)
return rc;
- efx->port_initialized = 1;
+ efx->port_initialized = true;
+ efx->stats_enabled = true;
/* Reconfigure port to program MAC registers */
falcon_reconfigure_xmac(efx);
@@ -678,7 +661,7 @@ static void efx_start_port(struct efx_nic *efx)
BUG_ON(efx->port_enabled);
mutex_lock(&efx->mac_lock);
- efx->port_enabled = 1;
+ efx->port_enabled = true;
__efx_reconfigure_port(efx);
mutex_unlock(&efx->mac_lock);
}
@@ -692,7 +675,7 @@ static void efx_stop_port(struct efx_nic *efx)
EFX_LOG(efx, "stop port\n");
mutex_lock(&efx->mac_lock);
- efx->port_enabled = 0;
+ efx->port_enabled = false;
mutex_unlock(&efx->mac_lock);
/* Serialise against efx_set_multicast_list() */
@@ -710,9 +693,9 @@ static void efx_fini_port(struct efx_nic *efx)
return;
falcon_fini_xmac(efx);
- efx->port_initialized = 0;
+ efx->port_initialized = false;
- efx->link_up = 0;
+ efx->link_up = false;
efx_link_status_changed(efx);
}
@@ -797,7 +780,7 @@ static int efx_init_io(struct efx_nic *efx)
return 0;
fail4:
- release_mem_region(efx->membase_phys, efx->type->mem_map_size);
+ pci_release_region(efx->pci_dev, efx->type->mem_bar);
fail3:
efx->membase_phys = 0;
fail2:
@@ -823,53 +806,61 @@ static void efx_fini_io(struct efx_nic *efx)
pci_disable_device(efx->pci_dev);
}
-/* Probe the number and type of interrupts we are able to obtain. */
+/* Get number of RX queues wanted. Return number of online CPU
+ * packages in the expectation that an IRQ balancer will spread
+ * interrupts across them. */
+static int efx_wanted_rx_queues(void)
+{
+ cpumask_t core_mask;
+ int count;
+ int cpu;
+
+ cpus_clear(core_mask);
+ count = 0;
+ for_each_online_cpu(cpu) {
+ if (!cpu_isset(cpu, core_mask)) {
+ ++count;
+ cpus_or(core_mask, core_mask,
+ topology_core_siblings(cpu));
+ }
+ }
+
+ return count;
+}
+
+/* Probe the number and type of interrupts we are able to obtain, and
+ * the resulting numbers of channels and RX queues.
+ */
static void efx_probe_interrupts(struct efx_nic *efx)
{
- int max_channel = efx->type->phys_addr_channels - 1;
- struct msix_entry xentries[EFX_MAX_CHANNELS];
+ int max_channels =
+ min_t(int, efx->type->phys_addr_channels, EFX_MAX_CHANNELS);
int rc, i;
if (efx->interrupt_mode == EFX_INT_MODE_MSIX) {
- BUG_ON(!pci_find_capability(efx->pci_dev, PCI_CAP_ID_MSIX));
-
- if (rss_cpus == 0) {
- cpumask_t core_mask;
- int cpu;
-
- cpus_clear(core_mask);
- efx->rss_queues = 0;
- for_each_online_cpu(cpu) {
- if (!cpu_isset(cpu, core_mask)) {
- ++efx->rss_queues;
- cpus_or(core_mask, core_mask,
- topology_core_siblings(cpu));
- }
- }
- } else {
- efx->rss_queues = rss_cpus;
- }
+ struct msix_entry xentries[EFX_MAX_CHANNELS];
+ int wanted_ints;
- efx->rss_queues = min(efx->rss_queues, max_channel + 1);
- efx->rss_queues = min(efx->rss_queues, EFX_MAX_CHANNELS);
+ /* We want one RX queue and interrupt per CPU package
+ * (or as specified by the rss_cpus module parameter).
+ * We will need one channel per interrupt.
+ */
+ wanted_ints = rss_cpus ? rss_cpus : efx_wanted_rx_queues();
+ efx->n_rx_queues = min(wanted_ints, max_channels);
- /* Request maximum number of MSI interrupts, and fill out
- * the channel interrupt information the allowed allocation */
- for (i = 0; i < efx->rss_queues; i++)
+ for (i = 0; i < efx->n_rx_queues; i++)
xentries[i].entry = i;
- rc = pci_enable_msix(efx->pci_dev, xentries, efx->rss_queues);
+ rc = pci_enable_msix(efx->pci_dev, xentries, efx->n_rx_queues);
if (rc > 0) {
- EFX_BUG_ON_PARANOID(rc >= efx->rss_queues);
- efx->rss_queues = rc;
+ EFX_BUG_ON_PARANOID(rc >= efx->n_rx_queues);
+ efx->n_rx_queues = rc;
rc = pci_enable_msix(efx->pci_dev, xentries,
- efx->rss_queues);
+ efx->n_rx_queues);
}
if (rc == 0) {
- for (i = 0; i < efx->rss_queues; i++) {
- efx->channel[i].has_interrupt = 1;
+ for (i = 0; i < efx->n_rx_queues; i++)
efx->channel[i].irq = xentries[i].vector;
- }
} else {
/* Fall back to single channel MSI */
efx->interrupt_mode = EFX_INT_MODE_MSI;
@@ -879,11 +870,10 @@ static void efx_probe_interrupts(struct efx_nic *efx)
/* Try single interrupt MSI */
if (efx->interrupt_mode == EFX_INT_MODE_MSI) {
- efx->rss_queues = 1;
+ efx->n_rx_queues = 1;
rc = pci_enable_msi(efx->pci_dev);
if (rc == 0) {
efx->channel[0].irq = efx->pci_dev->irq;
- efx->channel[0].has_interrupt = 1;
} else {
EFX_ERR(efx, "could not enable MSI\n");
efx->interrupt_mode = EFX_INT_MODE_LEGACY;
@@ -892,10 +882,7 @@ static void efx_probe_interrupts(struct efx_nic *efx)
/* Assume legacy interrupts */
if (efx->interrupt_mode == EFX_INT_MODE_LEGACY) {
- efx->rss_queues = 1;
- /* Every channel is interruptible */
- for (i = 0; i < EFX_MAX_CHANNELS; i++)
- efx->channel[i].has_interrupt = 1;
+ efx->n_rx_queues = 1;
efx->legacy_irq = efx->pci_dev->irq;
}
}
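pci_enable_msix() of this era returns 0 on success, a negative errno on failure, and a positive count when fewer vectors are available than requested — which is why the code above retries once with the returned count before falling back to MSI and then legacy interrupts. The idiom in isolation (a sketch, not the driver code; the function name is hypothetical):

static int enable_msix_or_fewer(struct pci_dev *pdev,
                                struct msix_entry *xentries, int wanted)
{
        int rc = pci_enable_msix(pdev, xentries, wanted);

        if (rc > 0) {
                /* Only rc vectors available: retry with exactly that many. */
                wanted = rc;
                rc = pci_enable_msix(pdev, xentries, wanted);
        }
        return rc ? rc : wanted;        /* <0 on error, else vectors granted */
}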
@@ -905,7 +892,7 @@ static void efx_remove_interrupts(struct efx_nic *efx)
struct efx_channel *channel;
/* Remove MSI/MSI-X interrupts */
- efx_for_each_channel_with_interrupt(channel, efx)
+ efx_for_each_channel(channel, efx)
channel->irq = 0;
pci_disable_msi(efx->pci_dev);
pci_disable_msix(efx->pci_dev);
@@ -914,45 +901,22 @@ static void efx_remove_interrupts(struct efx_nic *efx)
efx->legacy_irq = 0;
}
-/* Select number of used resources
- * Should be called after probe_interrupts()
- */
-static void efx_select_used(struct efx_nic *efx)
+static void efx_set_channels(struct efx_nic *efx)
{
struct efx_tx_queue *tx_queue;
struct efx_rx_queue *rx_queue;
- int i;
- /* TX queues. One per port per channel with TX capability
- * (more than one per port won't work on Linux, due to out
- * of order issues... but will be fine on Solaris)
- */
- tx_queue = &efx->tx_queue[0];
-
- /* Perform this for each channel with TX capabilities.
- * At the moment, we only support a single TX queue
- */
- tx_queue->used = 1;
- if ((!EFX_INT_MODE_USE_MSI(efx)) && separate_tx_and_rx_channels)
- tx_queue->channel = &efx->channel[1];
- else
- tx_queue->channel = &efx->channel[0];
- tx_queue->channel->used_flags |= EFX_USED_BY_TX;
- tx_queue++;
-
- /* RX queues. Each has a dedicated channel. */
- for (i = 0; i < EFX_MAX_RX_QUEUES; i++) {
- rx_queue = &efx->rx_queue[i];
+ efx_for_each_tx_queue(tx_queue, efx) {
+ if (!EFX_INT_MODE_USE_MSI(efx) && separate_tx_and_rx_channels)
+ tx_queue->channel = &efx->channel[1];
+ else
+ tx_queue->channel = &efx->channel[0];
+ tx_queue->channel->used_flags |= EFX_USED_BY_TX;
+ }
- if (i < efx->rss_queues) {
- rx_queue->used = 1;
- /* If we allow multiple RX queues per channel
- * we need to decide that here
- */
- rx_queue->channel = &efx->channel[rx_queue->queue];
- rx_queue->channel->used_flags |= EFX_USED_BY_RX;
- rx_queue++;
- }
+ efx_for_each_rx_queue(rx_queue, efx) {
+ rx_queue->channel = &efx->channel[rx_queue->queue];
+ rx_queue->channel->used_flags |= EFX_USED_BY_RX;
}
}
@@ -971,8 +935,7 @@ static int efx_probe_nic(struct efx_nic *efx)
* in MSI-X interrupts. */
efx_probe_interrupts(efx);
- /* Determine number of RX queues and TX queues */
- efx_select_used(efx);
+ efx_set_channels(efx);
/* Initialise the interrupt moderation settings */
efx_init_irq_moderation(efx, tx_irq_mod_usec, rx_irq_mod_usec);
@@ -1058,7 +1021,8 @@ static void efx_start_all(struct efx_nic *efx)
/* Mark the port as enabled so port reconfigurations can start, then
* restart the transmit interface early so the watchdog timer stops */
efx_start_port(efx);
- efx_wake_queue(efx);
+ if (efx_dev_registered(efx))
+ efx_wake_queue(efx);
efx_for_each_channel(channel, efx)
efx_start_channel(channel);
@@ -1109,7 +1073,7 @@ static void efx_stop_all(struct efx_nic *efx)
falcon_disable_interrupts(efx);
if (efx->legacy_irq)
synchronize_irq(efx->legacy_irq);
- efx_for_each_channel_with_interrupt(channel, efx) {
+ efx_for_each_channel(channel, efx) {
if (channel->irq)
synchronize_irq(channel->irq);
}
@@ -1128,13 +1092,12 @@ static void efx_stop_all(struct efx_nic *efx)
/* Isolate the MAC from the TX and RX engines, so that queue
* flushes will complete in a timely fashion. */
- falcon_deconfigure_mac_wrapper(efx);
falcon_drain_tx_fifo(efx);
/* Stop the kernel transmit interface late, so the watchdog
* timer isn't ticking over the flush */
- efx_stop_queue(efx);
if (efx_dev_registered(efx)) {
+ efx_stop_queue(efx);
netif_tx_lock_bh(efx->net_dev);
netif_tx_unlock_bh(efx->net_dev);
}
@@ -1151,24 +1114,16 @@ static void efx_remove_all(struct efx_nic *efx)
}
/* A convenience function to safely flush all the queues */
-int efx_flush_queues(struct efx_nic *efx)
+void efx_flush_queues(struct efx_nic *efx)
{
- int rc;
-
EFX_ASSERT_RESET_SERIALISED(efx);
efx_stop_all(efx);
efx_fini_channels(efx);
- rc = efx_init_channels(efx);
- if (rc) {
- efx_schedule_reset(efx, RESET_TYPE_DISABLE);
- return rc;
- }
+ efx_init_channels(efx);
efx_start_all(efx);
-
- return 0;
}
/**************************************************************************
@@ -1249,7 +1204,7 @@ static void efx_monitor(struct work_struct *data)
*/
static int efx_ioctl(struct net_device *net_dev, struct ifreq *ifr, int cmd)
{
- struct efx_nic *efx = net_dev->priv;
+ struct efx_nic *efx = netdev_priv(net_dev);
EFX_ASSERT_RESET_SERIALISED(efx);
@@ -1303,10 +1258,10 @@ static void efx_fini_napi(struct efx_nic *efx)
*/
static void efx_netpoll(struct net_device *net_dev)
{
- struct efx_nic *efx = net_dev->priv;
+ struct efx_nic *efx = netdev_priv(net_dev);
struct efx_channel *channel;
- efx_for_each_channel_with_interrupt(channel, efx)
+ efx_for_each_channel(channel, efx)
efx_schedule_channel(channel);
}
@@ -1321,12 +1276,15 @@ static void efx_netpoll(struct net_device *net_dev)
/* Context: process, rtnl_lock() held. */
static int efx_net_open(struct net_device *net_dev)
{
- struct efx_nic *efx = net_dev->priv;
+ struct efx_nic *efx = netdev_priv(net_dev);
EFX_ASSERT_RESET_SERIALISED(efx);
EFX_LOG(efx, "opening device %s on CPU %d\n", net_dev->name,
raw_smp_processor_id());
+ if (efx->phy_mode & PHY_MODE_SPECIAL)
+ return -EBUSY;
+
efx_start_all(efx);
return 0;
}
@@ -1337,8 +1295,7 @@ static int efx_net_open(struct net_device *net_dev)
*/
static int efx_net_stop(struct net_device *net_dev)
{
- struct efx_nic *efx = net_dev->priv;
- int rc;
+ struct efx_nic *efx = netdev_priv(net_dev);
EFX_LOG(efx, "closing %s on CPU %d\n", net_dev->name,
raw_smp_processor_id());
@@ -1346,9 +1303,7 @@ static int efx_net_stop(struct net_device *net_dev)
/* Stop the device and flush all the channels */
efx_stop_all(efx);
efx_fini_channels(efx);
- rc = efx_init_channels(efx);
- if (rc)
- efx_schedule_reset(efx, RESET_TYPE_DISABLE);
+ efx_init_channels(efx);
return 0;
}
@@ -1356,7 +1311,7 @@ static int efx_net_stop(struct net_device *net_dev)
/* Context: process, dev_base_lock or RTNL held, non-blocking. */
static struct net_device_stats *efx_net_stats(struct net_device *net_dev)
{
- struct efx_nic *efx = net_dev->priv;
+ struct efx_nic *efx = netdev_priv(net_dev);
struct efx_mac_stats *mac_stats = &efx->mac_stats;
struct net_device_stats *stats = &net_dev->stats;
@@ -1366,7 +1321,7 @@ static struct net_device_stats *efx_net_stats(struct net_device *net_dev)
*/
if (!spin_trylock(&efx->stats_lock))
return stats;
- if (efx->state == STATE_RUNNING) {
+ if (efx->stats_enabled) {
falcon_update_stats_xmac(efx);
falcon_update_nic_stats(efx);
}
@@ -1403,7 +1358,7 @@ static struct net_device_stats *efx_net_stats(struct net_device *net_dev)
/* Context: netif_tx_lock held, BHs disabled. */
static void efx_watchdog(struct net_device *net_dev)
{
- struct efx_nic *efx = net_dev->priv;
+ struct efx_nic *efx = netdev_priv(net_dev);
EFX_ERR(efx, "TX stuck with stop_count=%d port_enabled=%d: %s\n",
atomic_read(&efx->netif_stop_count), efx->port_enabled,
@@ -1417,7 +1372,7 @@ static void efx_watchdog(struct net_device *net_dev)
/* Context: process, rtnl_lock() held. */
static int efx_change_mtu(struct net_device *net_dev, int new_mtu)
{
- struct efx_nic *efx = net_dev->priv;
+ struct efx_nic *efx = netdev_priv(net_dev);
int rc = 0;
EFX_ASSERT_RESET_SERIALISED(efx);
@@ -1431,21 +1386,15 @@ static int efx_change_mtu(struct net_device *net_dev, int new_mtu)
efx_fini_channels(efx);
net_dev->mtu = new_mtu;
- rc = efx_init_channels(efx);
- if (rc)
- goto fail;
+ efx_init_channels(efx);
efx_start_all(efx);
return rc;
-
- fail:
- efx_schedule_reset(efx, RESET_TYPE_DISABLE);
- return rc;
}
static int efx_set_mac_address(struct net_device *net_dev, void *data)
{
- struct efx_nic *efx = net_dev->priv;
+ struct efx_nic *efx = netdev_priv(net_dev);
struct sockaddr *addr = data;
char *new_addr = addr->sa_data;
@@ -1466,26 +1415,19 @@ static int efx_set_mac_address(struct net_device *net_dev, void *data)
return 0;
}
-/* Context: netif_tx_lock held, BHs disabled. */
+/* Context: netif_addr_lock held, BHs disabled. */
static void efx_set_multicast_list(struct net_device *net_dev)
{
- struct efx_nic *efx = net_dev->priv;
+ struct efx_nic *efx = netdev_priv(net_dev);
struct dev_mc_list *mc_list = net_dev->mc_list;
union efx_multicast_hash *mc_hash = &efx->multicast_hash;
- int promiscuous;
+ bool promiscuous = !!(net_dev->flags & IFF_PROMISC);
+ bool changed = (efx->promiscuous != promiscuous);
u32 crc;
int bit;
int i;
- /* Set per-MAC promiscuity flag and reconfigure MAC if necessary */
- promiscuous = (net_dev->flags & IFF_PROMISC) ? 1 : 0;
- if (efx->promiscuous != promiscuous) {
- efx->promiscuous = promiscuous;
- /* Close the window between efx_stop_port() and efx_flush_all()
- * by only queuing work when the port is enabled. */
- if (efx->port_enabled)
- queue_work(efx->workqueue, &efx->reconfigure_work);
- }
+ efx->promiscuous = promiscuous;
/* Build multicast hash table */
if (promiscuous || (net_dev->flags & IFF_ALLMULTI)) {
@@ -1500,6 +1442,13 @@ static void efx_set_multicast_list(struct net_device *net_dev)
}
}
+ if (!efx->port_enabled)
+ /* Delay pushing settings until efx_start_port() */
+ return;
+
+ if (changed)
+ queue_work(efx->workqueue, &efx->reconfigure_work);
+
/* Create and activate new global multicast hash table */
falcon_set_multicast_hash(efx);
}
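The hash-building loop itself is unchanged and therefore outside the hunk above. For orientation, a hedged sketch of what such a loop typically looks like in this driver family — ether_crc_le() over the MAC address, with the low-order bits indexing the hash; the helper and constant names here are assumptions, not taken from this diff:

        struct dev_mc_list *mc = net_dev->mc_list;
        u32 crc;
        int bit;

        while (mc) {
                crc = ether_crc_le(ETH_ALEN, mc->dmi_addr);
                bit = crc & (EFX_MCAST_HASH_ENTRIES - 1);  /* assumed width */
                set_bit_le(bit, mc_hash->byte);            /* assumed helper */
                mc = mc->next;
        }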
@@ -1510,7 +1459,7 @@ static int efx_netdev_event(struct notifier_block *this,
struct net_device *net_dev = ptr;
if (net_dev->open == efx_net_open && event == NETDEV_CHANGENAME) {
- struct efx_nic *efx = net_dev->priv;
+ struct efx_nic *efx = netdev_priv(net_dev);
strcpy(efx->name, net_dev->name);
}
@@ -1568,7 +1517,7 @@ static void efx_unregister_netdev(struct efx_nic *efx)
if (!efx->net_dev)
return;
- BUG_ON(efx->net_dev->priv != efx);
+ BUG_ON(netdev_priv(efx->net_dev) != efx);
/* Free up any skbs still remaining. This has to happen before
* we try to unregister the netdev as running their destructors
@@ -1588,49 +1537,60 @@ static void efx_unregister_netdev(struct efx_nic *efx)
*
**************************************************************************/
-/* The final hardware and software finalisation before reset. */
-static int efx_reset_down(struct efx_nic *efx, struct ethtool_cmd *ecmd)
+/* Tears down the entire software state and most of the hardware state
+ * before reset. */
+void efx_reset_down(struct efx_nic *efx, struct ethtool_cmd *ecmd)
{
int rc;
EFX_ASSERT_RESET_SERIALISED(efx);
+ /* The net_dev->get_stats handler is quite slow, and will fail
+ * if a fetch is pending over reset. Serialise against it. */
+ spin_lock(&efx->stats_lock);
+ efx->stats_enabled = false;
+ spin_unlock(&efx->stats_lock);
+
+ efx_stop_all(efx);
+ mutex_lock(&efx->mac_lock);
+
rc = falcon_xmac_get_settings(efx, ecmd);
- if (rc) {
+ if (rc)
EFX_ERR(efx, "could not back up PHY settings\n");
- goto fail;
- }
efx_fini_channels(efx);
- return 0;
-
- fail:
- return rc;
}
-/* The first part of software initialisation after a hardware reset
- * This function does not handle serialisation with the kernel, it
- * assumes the caller has done this */
-static int efx_reset_up(struct efx_nic *efx, struct ethtool_cmd *ecmd)
+/* This function will always ensure that the locks acquired in
+ * efx_reset_down() are released. A failure return code indicates
+ * that we were unable to reinitialise the hardware, and the
+ * driver should be disabled. If ok is false, then the rx and tx
+ * engines are not restarted, pending a RESET_DISABLE. */
+int efx_reset_up(struct efx_nic *efx, struct ethtool_cmd *ecmd, bool ok)
{
int rc;
- rc = efx_init_channels(efx);
- if (rc)
- goto fail1;
+ EFX_ASSERT_RESET_SERIALISED(efx);
- /* Restore MAC and PHY settings. */
- rc = falcon_xmac_set_settings(efx, ecmd);
+ rc = falcon_init_nic(efx);
if (rc) {
- EFX_ERR(efx, "could not restore PHY settings\n");
- goto fail2;
+ EFX_ERR(efx, "failed to initialise NIC\n");
+ ok = false;
}
- return 0;
+ if (ok) {
+ efx_init_channels(efx);
- fail2:
- efx_fini_channels(efx);
- fail1:
+ if (falcon_xmac_set_settings(efx, ecmd))
+ EFX_ERR(efx, "could not restore PHY settings\n");
+ }
+
+ mutex_unlock(&efx->mac_lock);
+
+ if (ok) {
+ efx_start_all(efx);
+ efx->stats_enabled = true;
+ }
return rc;
}
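The comment above fixes the contract: efx_reset_down() stops traffic and leaves mac_lock held; efx_reset_up() always drops it, even when reinitialisation fails. The caller's shape, as a sketch of the sequence that efx_reset() below adopts:

        efx_reset_down(efx, &ecmd);        /* stops traffic, takes mac_lock */
        rc = falcon_reset_hw(efx, method);
        /* ok=false reinitialises the NIC but leaves the datapath down,
         * pending a RESET_DISABLE; mac_lock is released either way. */
        rc = efx_reset_up(efx, &ecmd, rc == 0);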
@@ -1659,25 +1619,14 @@ static int efx_reset(struct efx_nic *efx)
goto unlock_rtnl;
}
- efx->state = STATE_RESETTING;
EFX_INFO(efx, "resetting (%d)\n", method);
- /* The net_dev->get_stats handler is quite slow, and will fail
- * if a fetch is pending over reset. Serialise against it. */
- spin_lock(&efx->stats_lock);
- spin_unlock(&efx->stats_lock);
-
- efx_stop_all(efx);
- mutex_lock(&efx->mac_lock);
-
- rc = efx_reset_down(efx, &ecmd);
- if (rc)
- goto fail1;
+ efx_reset_down(efx, &ecmd);
rc = falcon_reset_hw(efx, method);
if (rc) {
EFX_ERR(efx, "failed to reset hardware\n");
- goto fail2;
+ goto fail;
}
/* Allow resets to be rescheduled. */
@@ -1689,46 +1638,27 @@ static int efx_reset(struct efx_nic *efx)
* can respond to requests. */
pci_set_master(efx->pci_dev);
- /* Reinitialise device. This is appropriate in the RESET_TYPE_DISABLE
- * case so the driver can talk to external SRAM */
- rc = falcon_init_nic(efx);
- if (rc) {
- EFX_ERR(efx, "failed to initialise NIC\n");
- goto fail3;
- }
-
/* Leave device stopped if necessary */
if (method == RESET_TYPE_DISABLE) {
- /* Reinitialise the device anyway so the driver unload sequence
- * can talk to the external SRAM */
- falcon_init_nic(efx);
rc = -EIO;
- goto fail4;
+ goto fail;
}
- rc = efx_reset_up(efx, &ecmd);
+ rc = efx_reset_up(efx, &ecmd, true);
if (rc)
- goto fail5;
+ goto disable;
- mutex_unlock(&efx->mac_lock);
EFX_LOG(efx, "reset complete\n");
-
- efx->state = STATE_RUNNING;
- efx_start_all(efx);
-
unlock_rtnl:
rtnl_unlock();
return 0;
- fail5:
- fail4:
- fail3:
- fail2:
- fail1:
+ fail:
+ efx_reset_up(efx, &ecmd, false);
+ disable:
EFX_ERR(efx, "has been disabled\n");
efx->state = STATE_DISABLED;
- mutex_unlock(&efx->mac_lock);
rtnl_unlock();
efx_unregister_netdev(efx);
efx_fini_port(efx);
@@ -1801,7 +1731,7 @@ static struct pci_device_id efx_pci_table[] __devinitdata = {
*
* Dummy PHY/MAC/Board operations
*
- * Can be used where the MAC does not implement this operation
+ * Can be used for some unimplemented operations
* Needed so all function pointers are valid and do not have to be tested
* before use
*
@@ -1811,7 +1741,7 @@ int efx_port_dummy_op_int(struct efx_nic *efx)
return 0;
}
void efx_port_dummy_op_void(struct efx_nic *efx) {}
-void efx_port_dummy_op_blink(struct efx_nic *efx, int blink) {}
+void efx_port_dummy_op_blink(struct efx_nic *efx, bool blink) {}
static struct efx_phy_operations efx_dummy_phy_operations = {
.init = efx_port_dummy_op_int,
@@ -1819,20 +1749,14 @@ static struct efx_phy_operations efx_dummy_phy_operations = {
.check_hw = efx_port_dummy_op_int,
.fini = efx_port_dummy_op_void,
.clear_interrupt = efx_port_dummy_op_void,
- .reset_xaui = efx_port_dummy_op_void,
};
-/* Dummy board operations */
-static int efx_nic_dummy_op_int(struct efx_nic *nic)
-{
- return 0;
-}
-
static struct efx_board efx_dummy_board_info = {
- .init = efx_nic_dummy_op_int,
- .init_leds = efx_port_dummy_op_int,
- .set_fault_led = efx_port_dummy_op_blink,
- .fini = efx_port_dummy_op_void,
+ .init = efx_port_dummy_op_int,
+ .init_leds = efx_port_dummy_op_int,
+ .set_fault_led = efx_port_dummy_op_blink,
+ .blink = efx_port_dummy_op_blink,
+ .fini = efx_port_dummy_op_void,
};
/**************************************************************************
@@ -1865,7 +1789,7 @@ static int efx_init_struct(struct efx_nic *efx, struct efx_nic_type *type,
efx->board_info = efx_dummy_board_info;
efx->net_dev = net_dev;
- efx->rx_checksum_enabled = 1;
+ efx->rx_checksum_enabled = true;
spin_lock_init(&efx->netif_stop_lock);
spin_lock_init(&efx->stats_lock);
mutex_init(&efx->mac_lock);
@@ -1878,10 +1802,9 @@ static int efx_init_struct(struct efx_nic *efx, struct efx_nic_type *type,
channel = &efx->channel[i];
channel->efx = efx;
channel->channel = i;
- channel->evqnum = i;
- channel->work_pending = 0;
+ channel->work_pending = false;
}
- for (i = 0; i < EFX_MAX_TX_QUEUES; i++) {
+ for (i = 0; i < EFX_TX_QUEUE_COUNT; i++) {
tx_queue = &efx->tx_queue[i];
tx_queue->efx = efx;
tx_queue->queue = i;
@@ -2056,19 +1979,16 @@ static int efx_pci_probe_main(struct efx_nic *efx)
goto fail5;
}
- rc = efx_init_channels(efx);
- if (rc)
- goto fail6;
+ efx_init_channels(efx);
rc = falcon_init_interrupt(efx);
if (rc)
- goto fail7;
+ goto fail6;
return 0;
- fail7:
- efx_fini_channels(efx);
fail6:
+ efx_fini_channels(efx);
efx_fini_port(efx);
fail5:
fail4:
@@ -2105,7 +2025,10 @@ static int __devinit efx_pci_probe(struct pci_dev *pci_dev,
NETIF_F_HIGHDMA | NETIF_F_TSO);
if (lro)
net_dev->features |= NETIF_F_LRO;
- efx = net_dev->priv;
+ /* Mask for features that also apply to VLAN devices */
+ net_dev->vlan_features |= (NETIF_F_ALL_CSUM | NETIF_F_SG |
+ NETIF_F_HIGHDMA | NETIF_F_TSO);
+ efx = netdev_priv(net_dev);
pci_set_drvdata(pci_dev, efx);
rc = efx_init_struct(efx, type, pci_dev, net_dev);
if (rc)
diff --git a/drivers/net/sfc/efx.h b/drivers/net/sfc/efx.h
index 3b2f69f4a9a..d02937b70ee 100644
--- a/drivers/net/sfc/efx.h
+++ b/drivers/net/sfc/efx.h
@@ -28,15 +28,21 @@ extern void efx_wake_queue(struct efx_nic *efx);
/* RX */
extern void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index);
extern void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
- unsigned int len, int checksummed, int discard);
+ unsigned int len, bool checksummed, bool discard);
extern void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue, int delay);
/* Channels */
extern void efx_process_channel_now(struct efx_channel *channel);
-extern int efx_flush_queues(struct efx_nic *efx);
+extern void efx_flush_queues(struct efx_nic *efx);
/* Ports */
extern void efx_reconfigure_port(struct efx_nic *efx);
+extern void __efx_reconfigure_port(struct efx_nic *efx);
+
+/* Reset handling */
+extern void efx_reset_down(struct efx_nic *efx, struct ethtool_cmd *ecmd);
+extern int efx_reset_up(struct efx_nic *efx, struct ethtool_cmd *ecmd,
+ bool ok);
/* Global */
extern void efx_schedule_reset(struct efx_nic *efx, enum reset_type type);
@@ -50,7 +56,7 @@ extern void efx_hex_dump(const u8 *, unsigned int, const char *);
/* Dummy PHY ops for PHY drivers */
extern int efx_port_dummy_op_int(struct efx_nic *efx);
extern void efx_port_dummy_op_void(struct efx_nic *efx);
-extern void efx_port_dummy_op_blink(struct efx_nic *efx, int blink);
+extern void efx_port_dummy_op_blink(struct efx_nic *efx, bool blink);
extern unsigned int efx_monitor_interval;
@@ -59,7 +65,7 @@ static inline void efx_schedule_channel(struct efx_channel *channel)
{
EFX_TRACE(channel->efx, "channel %d scheduling NAPI poll on CPU%d\n",
channel->channel, raw_smp_processor_id());
- channel->work_pending = 1;
+ channel->work_pending = true;
netif_rx_schedule(channel->napi_dev, &channel->napi_str);
}
diff --git a/drivers/net/sfc/enum.h b/drivers/net/sfc/enum.h
index c53290d08e2..cec15dbb88e 100644
--- a/drivers/net/sfc/enum.h
+++ b/drivers/net/sfc/enum.h
@@ -52,12 +52,11 @@ extern const char *efx_loopback_mode_names[];
#define LOOPBACK_MASK(_efx) \
(1 << (_efx)->loopback_mode)
-#define LOOPBACK_INTERNAL(_efx) \
- ((LOOPBACKS_10G_INTERNAL & LOOPBACK_MASK(_efx)) ? 1 : 0)
+#define LOOPBACK_INTERNAL(_efx) \
+ (!!(LOOPBACKS_10G_INTERNAL & LOOPBACK_MASK(_efx)))
-#define LOOPBACK_OUT_OF(_from, _to, _mask) \
- (((LOOPBACK_MASK(_from) & (_mask)) && \
- ((LOOPBACK_MASK(_to) & (_mask)) == 0)) ? 1 : 0)
+#define LOOPBACK_OUT_OF(_from, _to, _mask) \
+ ((LOOPBACK_MASK(_from) & (_mask)) && !(LOOPBACK_MASK(_to) & (_mask)))
/*****************************************************************************/
diff --git a/drivers/net/sfc/ethtool.c b/drivers/net/sfc/ethtool.c
index e2c75d10161..fa98af58223 100644
--- a/drivers/net/sfc/ethtool.c
+++ b/drivers/net/sfc/ethtool.c
@@ -17,6 +17,7 @@
#include "ethtool.h"
#include "falcon.h"
#include "gmii.h"
+#include "spi.h"
#include "mac.h"
const char *efx_loopback_mode_names[] = {
@@ -32,8 +33,6 @@ const char *efx_loopback_mode_names[] = {
[LOOPBACK_NETWORK] = "NETWORK",
};
-static int efx_ethtool_set_tx_csum(struct net_device *net_dev, u32 enable);
-
struct ethtool_string {
char name[ETH_GSTRING_LEN];
};
@@ -173,6 +172,11 @@ static struct efx_ethtool_stat efx_ethtool_stats[] = {
/* Number of ethtool statistics */
#define EFX_ETHTOOL_NUM_STATS ARRAY_SIZE(efx_ethtool_stats)
+/* EEPROM range with gPXE configuration */
+#define EFX_ETHTOOL_EEPROM_MAGIC 0xEFAB
+#define EFX_ETHTOOL_EEPROM_MIN 0x100U
+#define EFX_ETHTOOL_EEPROM_MAX 0x400U
+
/**************************************************************************
*
* Ethtool operations
@@ -183,7 +187,7 @@ static struct efx_ethtool_stat efx_ethtool_stats[] = {
/* Identify device by flashing LEDs */
static int efx_ethtool_phys_id(struct net_device *net_dev, u32 seconds)
{
- struct efx_nic *efx = net_dev->priv;
+ struct efx_nic *efx = netdev_priv(net_dev);
efx->board_info.blink(efx, 1);
schedule_timeout_interruptible(seconds * HZ);
@@ -195,7 +199,7 @@ static int efx_ethtool_phys_id(struct net_device *net_dev, u32 seconds)
int efx_ethtool_get_settings(struct net_device *net_dev,
struct ethtool_cmd *ecmd)
{
- struct efx_nic *efx = net_dev->priv;
+ struct efx_nic *efx = netdev_priv(net_dev);
int rc;
mutex_lock(&efx->mac_lock);
@@ -209,7 +213,7 @@ int efx_ethtool_get_settings(struct net_device *net_dev,
int efx_ethtool_set_settings(struct net_device *net_dev,
struct ethtool_cmd *ecmd)
{
- struct efx_nic *efx = net_dev->priv;
+ struct efx_nic *efx = netdev_priv(net_dev);
int rc;
mutex_lock(&efx->mac_lock);
@@ -224,7 +228,7 @@ int efx_ethtool_set_settings(struct net_device *net_dev,
static void efx_ethtool_get_drvinfo(struct net_device *net_dev,
struct ethtool_drvinfo *info)
{
- struct efx_nic *efx = net_dev->priv;
+ struct efx_nic *efx = netdev_priv(net_dev);
strlcpy(info->driver, EFX_DRIVER_NAME, sizeof(info->driver));
strlcpy(info->version, EFX_DRIVER_VERSION, sizeof(info->version));
@@ -329,7 +333,10 @@ static int efx_ethtool_fill_self_tests(struct efx_nic *efx,
unsigned int n = 0;
enum efx_loopback_mode mode;
- /* Interrupt */
+ efx_fill_test(n++, strings, data, &tests->mii,
+ "core", 0, "mii", NULL);
+ efx_fill_test(n++, strings, data, &tests->nvram,
+ "core", 0, "nvram", NULL);
efx_fill_test(n++, strings, data, &tests->interrupt,
"core", 0, "interrupt", NULL);
@@ -349,16 +356,17 @@ static int efx_ethtool_fill_self_tests(struct efx_nic *efx,
"eventq.poll", NULL);
}
- /* PHY presence */
- efx_fill_test(n++, strings, data, &tests->phy_ok,
- EFX_PORT_NAME, "phy_ok", NULL);
+ efx_fill_test(n++, strings, data, &tests->registers,
+ "core", 0, "registers", NULL);
+ efx_fill_test(n++, strings, data, &tests->phy,
+ EFX_PORT_NAME, "phy", NULL);
/* Loopback tests */
efx_fill_test(n++, strings, data, &tests->loopback_speed,
EFX_PORT_NAME, "loopback.speed", NULL);
efx_fill_test(n++, strings, data, &tests->loopback_full_duplex,
EFX_PORT_NAME, "loopback.full_duplex", NULL);
- for (mode = LOOPBACK_NONE; mode < LOOPBACK_TEST_MAX; mode++) {
+ for (mode = LOOPBACK_NONE; mode <= LOOPBACK_TEST_MAX; mode++) {
if (!(efx->loopback_modes & (1 << mode)))
continue;
n = efx_fill_loopback_test(efx,
@@ -369,22 +377,24 @@ static int efx_ethtool_fill_self_tests(struct efx_nic *efx,
return n;
}
-static int efx_ethtool_get_stats_count(struct net_device *net_dev)
+static int efx_ethtool_get_sset_count(struct net_device *net_dev,
+ int string_set)
{
- return EFX_ETHTOOL_NUM_STATS;
-}
-
-static int efx_ethtool_self_test_count(struct net_device *net_dev)
-{
- struct efx_nic *efx = net_dev->priv;
-
- return efx_ethtool_fill_self_tests(efx, NULL, NULL, NULL);
+ switch (string_set) {
+ case ETH_SS_STATS:
+ return EFX_ETHTOOL_NUM_STATS;
+ case ETH_SS_TEST:
+ return efx_ethtool_fill_self_tests(netdev_priv(net_dev),
+ NULL, NULL, NULL);
+ default:
+ return -EINVAL;
+ }
}
static void efx_ethtool_get_strings(struct net_device *net_dev,
u32 string_set, u8 *strings)
{
- struct efx_nic *efx = net_dev->priv;
+ struct efx_nic *efx = netdev_priv(net_dev);
struct ethtool_string *ethtool_strings =
(struct ethtool_string *)strings;
int i;
@@ -410,7 +420,7 @@ static void efx_ethtool_get_stats(struct net_device *net_dev,
struct ethtool_stats *stats,
u64 *data)
{
- struct efx_nic *efx = net_dev->priv;
+ struct efx_nic *efx = netdev_priv(net_dev);
struct efx_mac_stats *mac_stats = &efx->mac_stats;
struct efx_ethtool_stat *stat;
struct efx_channel *channel;
@@ -442,60 +452,21 @@ static void efx_ethtool_get_stats(struct net_device *net_dev,
}
}
-static int efx_ethtool_set_tso(struct net_device *net_dev, u32 enable)
-{
- int rc;
-
- /* Our TSO requires TX checksumming, so force TX checksumming
- * on when TSO is enabled.
- */
- if (enable) {
- rc = efx_ethtool_set_tx_csum(net_dev, 1);
- if (rc)
- return rc;
- }
-
- return ethtool_op_set_tso(net_dev, enable);
-}
-
-static int efx_ethtool_set_tx_csum(struct net_device *net_dev, u32 enable)
-{
- struct efx_nic *efx = net_dev->priv;
- int rc;
-
- rc = ethtool_op_set_tx_csum(net_dev, enable);
- if (rc)
- return rc;
-
- efx_flush_queues(efx);
-
- /* Our TSO requires TX checksumming, so disable TSO when
- * checksumming is disabled
- */
- if (!enable) {
- rc = efx_ethtool_set_tso(net_dev, 0);
- if (rc)
- return rc;
- }
-
- return 0;
-}
-
static int efx_ethtool_set_rx_csum(struct net_device *net_dev, u32 enable)
{
- struct efx_nic *efx = net_dev->priv;
+ struct efx_nic *efx = netdev_priv(net_dev);
/* No way to stop the hardware doing the checks; we just
* ignore the result.
*/
- efx->rx_checksum_enabled = (enable ? 1 : 0);
+ efx->rx_checksum_enabled = !!enable;
return 0;
}
static u32 efx_ethtool_get_rx_csum(struct net_device *net_dev)
{
- struct efx_nic *efx = net_dev->priv;
+ struct efx_nic *efx = netdev_priv(net_dev);
return efx->rx_checksum_enabled;
}
@@ -503,7 +474,7 @@ static u32 efx_ethtool_get_rx_csum(struct net_device *net_dev)
static void efx_ethtool_self_test(struct net_device *net_dev,
struct ethtool_test *test, u64 *data)
{
- struct efx_nic *efx = net_dev->priv;
+ struct efx_nic *efx = netdev_priv(net_dev);
struct efx_self_tests efx_tests;
int offline, already_up;
int rc;
@@ -533,15 +504,9 @@ static void efx_ethtool_self_test(struct net_device *net_dev,
goto out;
/* Perform offline tests only if online tests passed */
- if (offline) {
- /* Stop the kernel from sending packets during the test. */
- efx_stop_queue(efx);
- rc = efx_flush_queues(efx);
- if (!rc)
- rc = efx_offline_test(efx, &efx_tests,
- efx->loopback_modes);
- efx_wake_queue(efx);
- }
+ if (offline)
+ rc = efx_offline_test(efx, &efx_tests,
+ efx->loopback_modes);
out:
if (!already_up)
@@ -561,22 +526,65 @@ static void efx_ethtool_self_test(struct net_device *net_dev,
/* Restart autonegotiation */
static int efx_ethtool_nway_reset(struct net_device *net_dev)
{
- struct efx_nic *efx = net_dev->priv;
+ struct efx_nic *efx = netdev_priv(net_dev);
return mii_nway_restart(&efx->mii);
}
static u32 efx_ethtool_get_link(struct net_device *net_dev)
{
- struct efx_nic *efx = net_dev->priv;
+ struct efx_nic *efx = netdev_priv(net_dev);
return efx->link_up;
}
+static int efx_ethtool_get_eeprom_len(struct net_device *net_dev)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_spi_device *spi = efx->spi_eeprom;
+
+ if (!spi)
+ return 0;
+ return min(spi->size, EFX_ETHTOOL_EEPROM_MAX) -
+ min(spi->size, EFX_ETHTOOL_EEPROM_MIN);
+}
+
+static int efx_ethtool_get_eeprom(struct net_device *net_dev,
+ struct ethtool_eeprom *eeprom, u8 *buf)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_spi_device *spi = efx->spi_eeprom;
+ size_t len;
+ int rc;
+
+ rc = falcon_spi_read(spi, eeprom->offset + EFX_ETHTOOL_EEPROM_MIN,
+ eeprom->len, &len, buf);
+ eeprom->magic = EFX_ETHTOOL_EEPROM_MAGIC;
+ eeprom->len = len;
+ return rc;
+}
+
+static int efx_ethtool_set_eeprom(struct net_device *net_dev,
+ struct ethtool_eeprom *eeprom, u8 *buf)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_spi_device *spi = efx->spi_eeprom;
+ size_t len;
+ int rc;
+
+ if (eeprom->magic != EFX_ETHTOOL_EEPROM_MAGIC)
+ return -EINVAL;
+
+ rc = falcon_spi_write(spi, eeprom->offset + EFX_ETHTOOL_EEPROM_MIN,
+ eeprom->len, &len, buf);
+ eeprom->len = len;
+ return rc;
+}
+
static int efx_ethtool_get_coalesce(struct net_device *net_dev,
struct ethtool_coalesce *coalesce)
{
- struct efx_nic *efx = net_dev->priv;
+ struct efx_nic *efx = netdev_priv(net_dev);
struct efx_tx_queue *tx_queue;
struct efx_rx_queue *rx_queue;
struct efx_channel *channel;
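efx_ethtool_get_eeprom_len() above exposes only the gPXE window of the SPI part: min(size, MAX) - min(size, MIN). A standalone worked example with hypothetical part sizes — a 2 KB (0x800) EEPROM exposes 0x300 bytes, a 512-byte (0x200) one exposes 0x100:

#include <stdio.h>

#define EFX_ETHTOOL_EEPROM_MIN 0x100U
#define EFX_ETHTOOL_EEPROM_MAX 0x400U

static unsigned int eeprom_len(unsigned int spi_size)
{
        unsigned int max = spi_size < EFX_ETHTOOL_EEPROM_MAX ?
                           spi_size : EFX_ETHTOOL_EEPROM_MAX;
        unsigned int min = spi_size < EFX_ETHTOOL_EEPROM_MIN ?
                           spi_size : EFX_ETHTOOL_EEPROM_MIN;
        return max - min;
}

int main(void)
{
        printf("0x%x 0x%x\n", eeprom_len(0x800), eeprom_len(0x200));
        return 0;
}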
@@ -614,7 +622,7 @@ static int efx_ethtool_get_coalesce(struct net_device *net_dev,
static int efx_ethtool_set_coalesce(struct net_device *net_dev,
struct ethtool_coalesce *coalesce)
{
- struct efx_nic *efx = net_dev->priv;
+ struct efx_nic *efx = netdev_priv(net_dev);
struct efx_channel *channel;
struct efx_tx_queue *tx_queue;
unsigned tx_usecs, rx_usecs;
@@ -657,7 +665,7 @@ static int efx_ethtool_set_coalesce(struct net_device *net_dev,
static int efx_ethtool_set_pauseparam(struct net_device *net_dev,
struct ethtool_pauseparam *pause)
{
- struct efx_nic *efx = net_dev->priv;
+ struct efx_nic *efx = netdev_priv(net_dev);
enum efx_fc_type flow_control = efx->flow_control;
int rc;
@@ -680,11 +688,11 @@ static int efx_ethtool_set_pauseparam(struct net_device *net_dev,
static void efx_ethtool_get_pauseparam(struct net_device *net_dev,
struct ethtool_pauseparam *pause)
{
- struct efx_nic *efx = net_dev->priv;
+ struct efx_nic *efx = netdev_priv(net_dev);
- pause->rx_pause = (efx->flow_control & EFX_FC_RX) ? 1 : 0;
- pause->tx_pause = (efx->flow_control & EFX_FC_TX) ? 1 : 0;
- pause->autoneg = (efx->flow_control & EFX_FC_AUTO) ? 1 : 0;
+ pause->rx_pause = !!(efx->flow_control & EFX_FC_RX);
+ pause->tx_pause = !!(efx->flow_control & EFX_FC_TX);
+ pause->autoneg = !!(efx->flow_control & EFX_FC_AUTO);
}
@@ -694,6 +702,9 @@ struct ethtool_ops efx_ethtool_ops = {
.get_drvinfo = efx_ethtool_get_drvinfo,
.nway_reset = efx_ethtool_nway_reset,
.get_link = efx_ethtool_get_link,
+ .get_eeprom_len = efx_ethtool_get_eeprom_len,
+ .get_eeprom = efx_ethtool_get_eeprom,
+ .set_eeprom = efx_ethtool_set_eeprom,
.get_coalesce = efx_ethtool_get_coalesce,
.set_coalesce = efx_ethtool_set_coalesce,
.get_pauseparam = efx_ethtool_get_pauseparam,
@@ -701,17 +712,16 @@ struct ethtool_ops efx_ethtool_ops = {
.get_rx_csum = efx_ethtool_get_rx_csum,
.set_rx_csum = efx_ethtool_set_rx_csum,
.get_tx_csum = ethtool_op_get_tx_csum,
- .set_tx_csum = efx_ethtool_set_tx_csum,
+ .set_tx_csum = ethtool_op_set_tx_csum,
.get_sg = ethtool_op_get_sg,
.set_sg = ethtool_op_set_sg,
.get_tso = ethtool_op_get_tso,
- .set_tso = efx_ethtool_set_tso,
+ .set_tso = ethtool_op_set_tso,
.get_flags = ethtool_op_get_flags,
.set_flags = ethtool_op_set_flags,
- .self_test_count = efx_ethtool_self_test_count,
+ .get_sset_count = efx_ethtool_get_sset_count,
.self_test = efx_ethtool_self_test,
.get_strings = efx_ethtool_get_strings,
.phys_id = efx_ethtool_phys_id,
- .get_stats_count = efx_ethtool_get_stats_count,
.get_ethtool_stats = efx_ethtool_get_stats,
};
diff --git a/drivers/net/sfc/falcon.c b/drivers/net/sfc/falcon.c
index 9138ee5b7b7..31ed1f49de0 100644
--- a/drivers/net/sfc/falcon.c
+++ b/drivers/net/sfc/falcon.c
@@ -108,10 +108,10 @@ MODULE_PARM_DESC(rx_xon_thresh_bytes, "RX fifo XON threshold");
/* Max number of internal errors. After this resets will not be performed */
#define FALCON_MAX_INT_ERRORS 4
-/* Maximum period that we wait for flush events. If the flush event
- * doesn't arrive in this period of time then we check if the queue
- * was disabled anyway. */
-#define FALCON_FLUSH_TIMEOUT 10 /* 10ms */
+/* We poll for flush events every FALCON_FLUSH_INTERVAL ms, checking
+ * up to FALCON_FLUSH_POLL_COUNT times.
+ */
+#define FALCON_FLUSH_INTERVAL 10
+#define FALCON_FLUSH_POLL_COUNT 100
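+/* Worst case the flush wait is therefore bounded at
+ * FALCON_FLUSH_INTERVAL * FALCON_FLUSH_POLL_COUNT = 10 ms * 100 = 1 s.
+ */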
/**************************************************************************
*
@@ -242,7 +242,7 @@ static struct i2c_algo_bit_data falcon_i2c_bit_operations = {
* falcon_alloc_special_buffer()) in Falcon's buffer table, allowing
* it to be used for event queues, descriptor rings etc.
*/
-static int
+static void
falcon_init_special_buffer(struct efx_nic *efx,
struct efx_special_buffer *buffer)
{
@@ -266,8 +266,6 @@ falcon_init_special_buffer(struct efx_nic *efx,
BUF_OWNER_ID_FBUF, 0);
falcon_write_sram(efx, &buf_desc, index);
}
-
- return 0;
}
/* Unmaps a buffer from Falcon and clears the buffer table entries */
@@ -449,16 +447,15 @@ int falcon_probe_tx(struct efx_tx_queue *tx_queue)
sizeof(efx_qword_t));
}
-int falcon_init_tx(struct efx_tx_queue *tx_queue)
+void falcon_init_tx(struct efx_tx_queue *tx_queue)
{
efx_oword_t tx_desc_ptr;
struct efx_nic *efx = tx_queue->efx;
- int rc;
+
+ tx_queue->flushed = false;
/* Pin TX descriptor ring */
- rc = falcon_init_special_buffer(efx, &tx_queue->txd);
- if (rc)
- return rc;
+ falcon_init_special_buffer(efx, &tx_queue->txd);
/* Push TX descriptor ring to card */
EFX_POPULATE_OWORD_10(tx_desc_ptr,
@@ -466,7 +463,7 @@ int falcon_init_tx(struct efx_tx_queue *tx_queue)
TX_ISCSI_DDIG_EN, 0,
TX_ISCSI_HDIG_EN, 0,
TX_DESCQ_BUF_BASE_ID, tx_queue->txd.index,
- TX_DESCQ_EVQ_ID, tx_queue->channel->evqnum,
+ TX_DESCQ_EVQ_ID, tx_queue->channel->channel,
TX_DESCQ_OWNER_ID, 0,
TX_DESCQ_LABEL, tx_queue->queue,
TX_DESCQ_SIZE, FALCON_TXD_RING_ORDER,
@@ -474,9 +471,9 @@ int falcon_init_tx(struct efx_tx_queue *tx_queue)
TX_NON_IP_DROP_DIS_B0, 1);
if (falcon_rev(efx) >= FALCON_REV_B0) {
- int csum = !(efx->net_dev->features & NETIF_F_IP_CSUM);
- EFX_SET_OWORD_FIELD(tx_desc_ptr, TX_IP_CHKSM_DIS_B0, csum);
- EFX_SET_OWORD_FIELD(tx_desc_ptr, TX_TCP_CHKSM_DIS_B0, csum);
+ int csum = tx_queue->queue == EFX_TX_QUEUE_OFFLOAD_CSUM;
+ EFX_SET_OWORD_FIELD(tx_desc_ptr, TX_IP_CHKSM_DIS_B0, !csum);
+ EFX_SET_OWORD_FIELD(tx_desc_ptr, TX_TCP_CHKSM_DIS_B0, !csum);
}
falcon_write_table(efx, &tx_desc_ptr, efx->type->txd_ptr_tbl_base,
@@ -485,73 +482,28 @@ int falcon_init_tx(struct efx_tx_queue *tx_queue)
if (falcon_rev(efx) < FALCON_REV_B0) {
efx_oword_t reg;
- BUG_ON(tx_queue->queue >= 128); /* HW limit */
+ /* Only 128 bits in this register */
+ BUILD_BUG_ON(EFX_TX_QUEUE_COUNT >= 128);
falcon_read(efx, &reg, TX_CHKSM_CFG_REG_KER_A1);
- if (efx->net_dev->features & NETIF_F_IP_CSUM)
+ if (tx_queue->queue == EFX_TX_QUEUE_OFFLOAD_CSUM)
clear_bit_le(tx_queue->queue, (void *)&reg);
else
set_bit_le(tx_queue->queue, (void *)&reg);
falcon_write(efx, &reg, TX_CHKSM_CFG_REG_KER_A1);
}
-
- return 0;
}
-static int falcon_flush_tx_queue(struct efx_tx_queue *tx_queue)
+static void falcon_flush_tx_queue(struct efx_tx_queue *tx_queue)
{
struct efx_nic *efx = tx_queue->efx;
- struct efx_channel *channel = &efx->channel[0];
efx_oword_t tx_flush_descq;
- unsigned int read_ptr, i;
/* Post a flush command */
EFX_POPULATE_OWORD_2(tx_flush_descq,
TX_FLUSH_DESCQ_CMD, 1,
TX_FLUSH_DESCQ, tx_queue->queue);
falcon_write(efx, &tx_flush_descq, TX_FLUSH_DESCQ_REG_KER);
- msleep(FALCON_FLUSH_TIMEOUT);
-
- if (EFX_WORKAROUND_7803(efx))
- return 0;
-
- /* Look for a flush completed event */
- read_ptr = channel->eventq_read_ptr;
- for (i = 0; i < FALCON_EVQ_SIZE; ++i) {
- efx_qword_t *event = falcon_event(channel, read_ptr);
- int ev_code, ev_sub_code, ev_queue;
- if (!falcon_event_present(event))
- break;
-
- ev_code = EFX_QWORD_FIELD(*event, EV_CODE);
- ev_sub_code = EFX_QWORD_FIELD(*event, DRIVER_EV_SUB_CODE);
- ev_queue = EFX_QWORD_FIELD(*event, DRIVER_EV_TX_DESCQ_ID);
- if ((ev_sub_code == TX_DESCQ_FLS_DONE_EV_DECODE) &&
- (ev_queue == tx_queue->queue)) {
- EFX_LOG(efx, "tx queue %d flush command succesful\n",
- tx_queue->queue);
- return 0;
- }
-
- read_ptr = (read_ptr + 1) & FALCON_EVQ_MASK;
- }
-
- if (EFX_WORKAROUND_11557(efx)) {
- efx_oword_t reg;
- int enabled;
-
- falcon_read_table(efx, &reg, efx->type->txd_ptr_tbl_base,
- tx_queue->queue);
- enabled = EFX_OWORD_FIELD(reg, TX_DESCQ_EN);
- if (!enabled) {
- EFX_LOG(efx, "tx queue %d disabled without a "
- "flush event seen\n", tx_queue->queue);
- return 0;
- }
- }
-
- EFX_ERR(efx, "tx queue %d flush command timed out\n", tx_queue->queue);
- return -ETIMEDOUT;
}
void falcon_fini_tx(struct efx_tx_queue *tx_queue)
@@ -559,9 +511,8 @@ void falcon_fini_tx(struct efx_tx_queue *tx_queue)
struct efx_nic *efx = tx_queue->efx;
efx_oword_t tx_desc_ptr;
- /* Stop the hardware using the queue */
- if (falcon_flush_tx_queue(tx_queue))
- EFX_ERR(efx, "failed to flush tx queue %d\n", tx_queue->queue);
+ /* The queue should have been flushed */
+ WARN_ON(!tx_queue->flushed);
/* Remove TX descriptor ring from card */
EFX_ZERO_OWORD(tx_desc_ptr);
@@ -638,29 +589,28 @@ int falcon_probe_rx(struct efx_rx_queue *rx_queue)
sizeof(efx_qword_t));
}
-int falcon_init_rx(struct efx_rx_queue *rx_queue)
+void falcon_init_rx(struct efx_rx_queue *rx_queue)
{
efx_oword_t rx_desc_ptr;
struct efx_nic *efx = rx_queue->efx;
- int rc;
- int is_b0 = falcon_rev(efx) >= FALCON_REV_B0;
- int iscsi_digest_en = is_b0;
+ bool is_b0 = falcon_rev(efx) >= FALCON_REV_B0;
+ bool iscsi_digest_en = is_b0;
EFX_LOG(efx, "RX queue %d ring in special buffers %d-%d\n",
rx_queue->queue, rx_queue->rxd.index,
rx_queue->rxd.index + rx_queue->rxd.entries - 1);
+ rx_queue->flushed = false;
+
/* Pin RX descriptor ring */
- rc = falcon_init_special_buffer(efx, &rx_queue->rxd);
- if (rc)
- return rc;
+ falcon_init_special_buffer(efx, &rx_queue->rxd);
/* Push RX descriptor ring to card */
EFX_POPULATE_OWORD_10(rx_desc_ptr,
RX_ISCSI_DDIG_EN, iscsi_digest_en,
RX_ISCSI_HDIG_EN, iscsi_digest_en,
RX_DESCQ_BUF_BASE_ID, rx_queue->rxd.index,
- RX_DESCQ_EVQ_ID, rx_queue->channel->evqnum,
+ RX_DESCQ_EVQ_ID, rx_queue->channel->channel,
RX_DESCQ_OWNER_ID, 0,
RX_DESCQ_LABEL, rx_queue->queue,
RX_DESCQ_SIZE, FALCON_RXD_RING_ORDER,
@@ -670,14 +620,11 @@ int falcon_init_rx(struct efx_rx_queue *rx_queue)
RX_DESCQ_EN, 1);
falcon_write_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base,
rx_queue->queue);
- return 0;
}
-static int falcon_flush_rx_queue(struct efx_rx_queue *rx_queue)
+static void falcon_flush_rx_queue(struct efx_rx_queue *rx_queue)
{
struct efx_nic *efx = rx_queue->efx;
- struct efx_channel *channel = &efx->channel[0];
- unsigned int read_ptr, i;
efx_oword_t rx_flush_descq;
/* Post a flush command */
@@ -685,75 +632,15 @@ static int falcon_flush_rx_queue(struct efx_rx_queue *rx_queue)
RX_FLUSH_DESCQ_CMD, 1,
RX_FLUSH_DESCQ, rx_queue->queue);
falcon_write(efx, &rx_flush_descq, RX_FLUSH_DESCQ_REG_KER);
- msleep(FALCON_FLUSH_TIMEOUT);
-
- if (EFX_WORKAROUND_7803(efx))
- return 0;
-
- /* Look for a flush completed event */
- read_ptr = channel->eventq_read_ptr;
- for (i = 0; i < FALCON_EVQ_SIZE; ++i) {
- efx_qword_t *event = falcon_event(channel, read_ptr);
- int ev_code, ev_sub_code, ev_queue, ev_failed;
- if (!falcon_event_present(event))
- break;
-
- ev_code = EFX_QWORD_FIELD(*event, EV_CODE);
- ev_sub_code = EFX_QWORD_FIELD(*event, DRIVER_EV_SUB_CODE);
- ev_queue = EFX_QWORD_FIELD(*event, DRIVER_EV_RX_DESCQ_ID);
- ev_failed = EFX_QWORD_FIELD(*event, DRIVER_EV_RX_FLUSH_FAIL);
-
- if ((ev_sub_code == RX_DESCQ_FLS_DONE_EV_DECODE) &&
- (ev_queue == rx_queue->queue)) {
- if (ev_failed) {
- EFX_INFO(efx, "rx queue %d flush command "
- "failed\n", rx_queue->queue);
- return -EAGAIN;
- } else {
- EFX_LOG(efx, "rx queue %d flush command "
- "succesful\n", rx_queue->queue);
- return 0;
- }
- }
-
- read_ptr = (read_ptr + 1) & FALCON_EVQ_MASK;
- }
-
- if (EFX_WORKAROUND_11557(efx)) {
- efx_oword_t reg;
- int enabled;
-
- falcon_read_table(efx, &reg, efx->type->rxd_ptr_tbl_base,
- rx_queue->queue);
- enabled = EFX_OWORD_FIELD(reg, RX_DESCQ_EN);
- if (!enabled) {
- EFX_LOG(efx, "rx queue %d disabled without a "
- "flush event seen\n", rx_queue->queue);
- return 0;
- }
- }
-
- EFX_ERR(efx, "rx queue %d flush command timed out\n", rx_queue->queue);
- return -ETIMEDOUT;
}
void falcon_fini_rx(struct efx_rx_queue *rx_queue)
{
efx_oword_t rx_desc_ptr;
struct efx_nic *efx = rx_queue->efx;
- int i, rc;
- /* Try and flush the rx queue. This may need to be repeated */
- for (i = 0; i < 5; i++) {
- rc = falcon_flush_rx_queue(rx_queue);
- if (rc == -EAGAIN)
- continue;
- break;
- }
- if (rc) {
- EFX_ERR(efx, "failed to flush rx queue %d\n", rx_queue->queue);
- efx_schedule_reset(efx, RESET_TYPE_INVISIBLE);
- }
+ /* The queue should already have been flushed */
+ WARN_ON(!rx_queue->flushed);
/* Remove RX descriptor ring from card */
EFX_ZERO_OWORD(rx_desc_ptr);
@@ -793,7 +680,7 @@ void falcon_eventq_read_ack(struct efx_channel *channel)
EFX_POPULATE_DWORD_1(reg, EVQ_RPTR_DWORD, channel->eventq_read_ptr);
falcon_writel_table(efx, &reg, efx->type->evq_rptr_tbl_base,
- channel->evqnum);
+ channel->channel);
}
/* Use HW to insert a SW defined event */
@@ -802,7 +689,7 @@ void falcon_generate_event(struct efx_channel *channel, efx_qword_t *event)
efx_oword_t drv_ev_reg;
EFX_POPULATE_OWORD_2(drv_ev_reg,
- DRV_EV_QID, channel->evqnum,
+ DRV_EV_QID, channel->channel,
DRV_EV_DATA,
EFX_QWORD_FIELD64(*event, WHOLE_EVENT));
falcon_write(channel->efx, &drv_ev_reg, DRV_EV_REG_KER);
@@ -813,8 +700,8 @@ void falcon_generate_event(struct efx_channel *channel, efx_qword_t *event)
* Falcon batches TX completion events; the message we receive is of
* the form "complete all TX events up to this index".
*/
-static inline void falcon_handle_tx_event(struct efx_channel *channel,
- efx_qword_t *event)
+static void falcon_handle_tx_event(struct efx_channel *channel,
+ efx_qword_t *event)
{
unsigned int tx_ev_desc_ptr;
unsigned int tx_ev_q_label;
@@ -847,39 +734,19 @@ static inline void falcon_handle_tx_event(struct efx_channel *channel,
}
}
-/* Check received packet's destination MAC address. */
-static int check_dest_mac(struct efx_rx_queue *rx_queue,
- const efx_qword_t *event)
-{
- struct efx_rx_buffer *rx_buf;
- struct efx_nic *efx = rx_queue->efx;
- int rx_ev_desc_ptr;
- struct ethhdr *eh;
-
- if (efx->promiscuous)
- return 1;
-
- rx_ev_desc_ptr = EFX_QWORD_FIELD(*event, RX_EV_DESC_PTR);
- rx_buf = efx_rx_buffer(rx_queue, rx_ev_desc_ptr);
- eh = (struct ethhdr *)rx_buf->data;
- if (memcmp(eh->h_dest, efx->net_dev->dev_addr, ETH_ALEN))
- return 0;
- return 1;
-}
-
/* Detect errors included in the rx_evt_pkt_ok bit. */
static void falcon_handle_rx_not_ok(struct efx_rx_queue *rx_queue,
const efx_qword_t *event,
- unsigned *rx_ev_pkt_ok,
- int *discard, int byte_count)
+ bool *rx_ev_pkt_ok,
+ bool *discard)
{
struct efx_nic *efx = rx_queue->efx;
- unsigned rx_ev_buf_owner_id_err, rx_ev_ip_hdr_chksum_err;
- unsigned rx_ev_tcp_udp_chksum_err, rx_ev_eth_crc_err;
- unsigned rx_ev_frm_trunc, rx_ev_drib_nib, rx_ev_tobe_disc;
- unsigned rx_ev_pkt_type, rx_ev_other_err, rx_ev_pause_frm;
- unsigned rx_ev_ip_frag_err, rx_ev_hdr_type, rx_ev_mcast_pkt;
- int snap, non_ip;
+ bool rx_ev_buf_owner_id_err, rx_ev_ip_hdr_chksum_err;
+ bool rx_ev_tcp_udp_chksum_err, rx_ev_eth_crc_err;
+ bool rx_ev_frm_trunc, rx_ev_drib_nib, rx_ev_tobe_disc;
+ bool rx_ev_other_err, rx_ev_pause_frm;
+ bool rx_ev_ip_frag_err, rx_ev_hdr_type, rx_ev_mcast_pkt;
+ unsigned rx_ev_pkt_type;
rx_ev_hdr_type = EFX_QWORD_FIELD(*event, RX_EV_HDR_TYPE);
rx_ev_mcast_pkt = EFX_QWORD_FIELD(*event, RX_EV_MCAST_PKT);
@@ -903,41 +770,6 @@ static void falcon_handle_rx_not_ok(struct efx_rx_queue *rx_queue,
rx_ev_buf_owner_id_err | rx_ev_eth_crc_err |
rx_ev_frm_trunc | rx_ev_ip_hdr_chksum_err);
- snap = (rx_ev_pkt_type == RX_EV_PKT_TYPE_LLC_DECODE) ||
- (rx_ev_pkt_type == RX_EV_PKT_TYPE_VLAN_LLC_DECODE);
- non_ip = (rx_ev_hdr_type == RX_EV_HDR_TYPE_NON_IP_DECODE);
-
- /* SFC bug 5475/8970: The Falcon XMAC incorrectly calculates the
- * length field of an LLC frame, which sets TOBE_DISC. We could set
- * PASS_LEN_ERR, but we want the MAC to filter out short frames (to
- * protect the RX block).
- *
- * bug5475 - LLC/SNAP: Falcon identifies SNAP packets.
- * bug8970 - LLC/noSNAP: Falcon does not provide an LLC flag.
- * LLC can't encapsulate IP, so by definition
- * these packets are NON_IP.
- *
- * Unicast mismatch will also cause TOBE_DISC, so the driver needs
- * to check this.
- */
- if (EFX_WORKAROUND_5475(efx) && rx_ev_tobe_disc && (snap || non_ip)) {
- /* If all the other flags are zero then we can state the
- * entire packet is ok, which will flag to the kernel not
- * to recalculate checksums.
- */
- if (!(non_ip | rx_ev_other_err | rx_ev_pause_frm))
- *rx_ev_pkt_ok = 1;
-
- rx_ev_tobe_disc = 0;
-
- /* TOBE_DISC is set for unicast mismatch. But given that
- * we can't trust TOBE_DISC here, we must validate the dest
- * MAC address ourselves.
- */
- if (!rx_ev_mcast_pkt && !check_dest_mac(rx_queue, event))
- rx_ev_tobe_disc = 1;
- }
-
/* Count errors that are not in MAC stats. */
if (rx_ev_frm_trunc)
++rx_queue->channel->n_rx_frm_trunc;
@@ -961,7 +793,7 @@ static void falcon_handle_rx_not_ok(struct efx_rx_queue *rx_queue,
#ifdef EFX_ENABLE_DEBUG
if (rx_ev_other_err) {
EFX_INFO_RL(efx, " RX queue %d unexpected RX event "
- EFX_QWORD_FMT "%s%s%s%s%s%s%s%s%s\n",
+ EFX_QWORD_FMT "%s%s%s%s%s%s%s%s\n",
rx_queue->queue, EFX_QWORD_VAL(*event),
rx_ev_buf_owner_id_err ? " [OWNER_ID_ERR]" : "",
rx_ev_ip_hdr_chksum_err ?
@@ -972,8 +804,7 @@ static void falcon_handle_rx_not_ok(struct efx_rx_queue *rx_queue,
rx_ev_frm_trunc ? " [FRM_TRUNC]" : "",
rx_ev_drib_nib ? " [DRIB_NIB]" : "",
rx_ev_tobe_disc ? " [TOBE_DISC]" : "",
- rx_ev_pause_frm ? " [PAUSE]" : "",
- snap ? " [SNAP/LLC]" : "");
+ rx_ev_pause_frm ? " [PAUSE]" : "");
}
#endif
@@ -1006,13 +837,13 @@ static void falcon_handle_rx_bad_index(struct efx_rx_queue *rx_queue,
* Also "is multicast" and "matches multicast filter" flags can be used to
* discard non-matching multicast packets.
*/
-static inline int falcon_handle_rx_event(struct efx_channel *channel,
- const efx_qword_t *event)
+static void falcon_handle_rx_event(struct efx_channel *channel,
+ const efx_qword_t *event)
{
- unsigned int rx_ev_q_label, rx_ev_desc_ptr, rx_ev_byte_cnt;
- unsigned int rx_ev_pkt_ok, rx_ev_hdr_type, rx_ev_mcast_pkt;
+ unsigned int rx_ev_desc_ptr, rx_ev_byte_cnt;
+ unsigned int rx_ev_hdr_type, rx_ev_mcast_pkt;
unsigned expected_ptr;
- int discard = 0, checksummed;
+ bool rx_ev_pkt_ok, discard = false, checksummed;
struct efx_rx_queue *rx_queue;
struct efx_nic *efx = channel->efx;
@@ -1022,16 +853,14 @@ static inline int falcon_handle_rx_event(struct efx_channel *channel,
rx_ev_hdr_type = EFX_QWORD_FIELD(*event, RX_EV_HDR_TYPE);
WARN_ON(EFX_QWORD_FIELD(*event, RX_EV_JUMBO_CONT));
WARN_ON(EFX_QWORD_FIELD(*event, RX_EV_SOP) != 1);
+ WARN_ON(EFX_QWORD_FIELD(*event, RX_EV_Q_LABEL) != channel->channel);
- rx_ev_q_label = EFX_QWORD_FIELD(*event, RX_EV_Q_LABEL);
- rx_queue = &efx->rx_queue[rx_ev_q_label];
+ rx_queue = &efx->rx_queue[channel->channel];
rx_ev_desc_ptr = EFX_QWORD_FIELD(*event, RX_EV_DESC_PTR);
expected_ptr = rx_queue->removed_count & FALCON_RXD_RING_MASK;
- if (unlikely(rx_ev_desc_ptr != expected_ptr)) {
+ if (unlikely(rx_ev_desc_ptr != expected_ptr))
falcon_handle_rx_bad_index(rx_queue, rx_ev_desc_ptr);
- return rx_ev_q_label;
- }
if (likely(rx_ev_pkt_ok)) {
/* If packet is marked as OK and packet type is TCP/IPv4 or
@@ -1040,8 +869,8 @@ static inline int falcon_handle_rx_event(struct efx_channel *channel,
checksummed = RX_EV_HDR_TYPE_HAS_CHECKSUMS(rx_ev_hdr_type);
} else {
falcon_handle_rx_not_ok(rx_queue, event, &rx_ev_pkt_ok,
- &discard, rx_ev_byte_cnt);
- checksummed = 0;
+ &discard);
+ checksummed = false;
}
/* Detect multicast packets that didn't match the filter */
@@ -1051,14 +880,12 @@ static inline int falcon_handle_rx_event(struct efx_channel *channel,
EFX_QWORD_FIELD(*event, RX_EV_MCAST_HASH_MATCH);
if (unlikely(!rx_ev_mcast_hash_match))
- discard = 1;
+ discard = true;
}
/* Handle received packet */
efx_rx_packet(rx_queue, rx_ev_desc_ptr, rx_ev_byte_cnt,
checksummed, discard);
-
- return rx_ev_q_label;
}
/* Global events are basically PHY events */
@@ -1066,23 +893,23 @@ static void falcon_handle_global_event(struct efx_channel *channel,
efx_qword_t *event)
{
struct efx_nic *efx = channel->efx;
- int is_phy_event = 0, handled = 0;
+ bool is_phy_event = false, handled = false;
/* Check for interrupt on either port. Some boards have a
* single PHY wired to the interrupt line for port 1. */
if (EFX_QWORD_FIELD(*event, G_PHY0_INTR) ||
EFX_QWORD_FIELD(*event, G_PHY1_INTR) ||
EFX_QWORD_FIELD(*event, XG_PHY_INTR))
- is_phy_event = 1;
+ is_phy_event = true;
if ((falcon_rev(efx) >= FALCON_REV_B0) &&
- EFX_OWORD_FIELD(*event, XG_MNT_INTR_B0))
- is_phy_event = 1;
+ EFX_QWORD_FIELD(*event, XG_MNT_INTR_B0))
+ is_phy_event = true;
if (is_phy_event) {
efx->phy_op->clear_interrupt(efx);
queue_work(efx->workqueue, &efx->reconfigure_work);
- handled = 1;
+ handled = true;
}
if (EFX_QWORD_FIELD_VER(efx, *event, RX_RECOVERY)) {
@@ -1092,7 +919,7 @@ static void falcon_handle_global_event(struct efx_channel *channel,
atomic_inc(&efx->rx_reset);
efx_schedule_reset(efx, EFX_WORKAROUND_6555(efx) ?
RESET_TYPE_RX_RECOVERY : RESET_TYPE_DISABLE);
- handled = 1;
+ handled = true;
}
if (!handled)
@@ -1163,13 +990,12 @@ static void falcon_handle_driver_event(struct efx_channel *channel,
}
}
-int falcon_process_eventq(struct efx_channel *channel, int *rx_quota)
+int falcon_process_eventq(struct efx_channel *channel, int rx_quota)
{
unsigned int read_ptr;
efx_qword_t event, *p_event;
int ev_code;
- int rxq;
- int rxdmaqs = 0;
+ int rx_packets = 0;
read_ptr = channel->eventq_read_ptr;
@@ -1191,9 +1017,8 @@ int falcon_process_eventq(struct efx_channel *channel, int *rx_quota)
switch (ev_code) {
case RX_IP_EV_DECODE:
- rxq = falcon_handle_rx_event(channel, &event);
- rxdmaqs |= (1 << rxq);
- (*rx_quota)--;
+ falcon_handle_rx_event(channel, &event);
+ ++rx_packets;
break;
case TX_IP_EV_DECODE:
falcon_handle_tx_event(channel, &event);
@@ -1220,10 +1045,10 @@ int falcon_process_eventq(struct efx_channel *channel, int *rx_quota)
/* Increment read pointer */
read_ptr = (read_ptr + 1) & FALCON_EVQ_MASK;
- } while (*rx_quota);
+ } while (rx_packets < rx_quota);
channel->eventq_read_ptr = read_ptr;
- return rxdmaqs;
+ return rx_packets;
}
void falcon_set_int_moderation(struct efx_channel *channel)
@@ -1251,7 +1076,7 @@ void falcon_set_int_moderation(struct efx_channel *channel)
TIMER_VAL, 0);
}
falcon_writel_page_locked(efx, &timer_cmd, TIMER_CMD_REG_KER,
- channel->evqnum);
+ channel->channel);
}
@@ -1265,20 +1090,17 @@ int falcon_probe_eventq(struct efx_channel *channel)
return falcon_alloc_special_buffer(efx, &channel->eventq, evq_size);
}
-int falcon_init_eventq(struct efx_channel *channel)
+void falcon_init_eventq(struct efx_channel *channel)
{
efx_oword_t evq_ptr;
struct efx_nic *efx = channel->efx;
- int rc;
EFX_LOG(efx, "channel %d event queue in special buffers %d-%d\n",
channel->channel, channel->eventq.index,
channel->eventq.index + channel->eventq.entries - 1);
/* Pin event queue buffer */
- rc = falcon_init_special_buffer(efx, &channel->eventq);
- if (rc)
- return rc;
+ falcon_init_special_buffer(efx, &channel->eventq);
/* Fill event queue with all ones (i.e. empty events) */
memset(channel->eventq.addr, 0xff, channel->eventq.len);
@@ -1289,11 +1111,9 @@ int falcon_init_eventq(struct efx_channel *channel)
EVQ_SIZE, FALCON_EVQ_ORDER,
EVQ_BUF_BASE_ID, channel->eventq.index);
falcon_write_table(efx, &evq_ptr, efx->type->evq_ptr_tbl_base,
- channel->evqnum);
+ channel->channel);
falcon_set_int_moderation(channel);
-
- return 0;
}
void falcon_fini_eventq(struct efx_channel *channel)
@@ -1304,7 +1124,7 @@ void falcon_fini_eventq(struct efx_channel *channel)
/* Remove event queue from card */
EFX_ZERO_OWORD(eventq_ptr);
falcon_write_table(efx, &eventq_ptr, efx->type->evq_ptr_tbl_base,
- channel->evqnum);
+ channel->channel);
/* Unpin event queue */
falcon_fini_special_buffer(efx, &channel->eventq);
@@ -1331,6 +1151,121 @@ void falcon_generate_test_event(struct efx_channel *channel, unsigned int magic)
falcon_generate_event(channel, &test_event);
}
+/**************************************************************************
+ *
+ * Flush handling
+ *
+ **************************************************************************/
+
+static void falcon_poll_flush_events(struct efx_nic *efx)
+{
+ struct efx_channel *channel = &efx->channel[0];
+ struct efx_tx_queue *tx_queue;
+ struct efx_rx_queue *rx_queue;
+ unsigned int read_ptr, i;
+
+ read_ptr = channel->eventq_read_ptr;
+ for (i = 0; i < FALCON_EVQ_SIZE; ++i) {
+ efx_qword_t *event = falcon_event(channel, read_ptr);
+ int ev_code, ev_sub_code, ev_queue;
+ bool ev_failed;
+ if (!falcon_event_present(event))
+ break;
+
+ ev_code = EFX_QWORD_FIELD(*event, EV_CODE);
+ if (ev_code != DRIVER_EV_DECODE)
+ continue;
+
+ ev_sub_code = EFX_QWORD_FIELD(*event, DRIVER_EV_SUB_CODE);
+ switch (ev_sub_code) {
+ case TX_DESCQ_FLS_DONE_EV_DECODE:
+ ev_queue = EFX_QWORD_FIELD(*event,
+ DRIVER_EV_TX_DESCQ_ID);
+ if (ev_queue < EFX_TX_QUEUE_COUNT) {
+ tx_queue = efx->tx_queue + ev_queue;
+ tx_queue->flushed = true;
+ }
+ break;
+ case RX_DESCQ_FLS_DONE_EV_DECODE:
+ ev_queue = EFX_QWORD_FIELD(*event,
+ DRIVER_EV_RX_DESCQ_ID);
+ ev_failed = EFX_QWORD_FIELD(*event,
+ DRIVER_EV_RX_FLUSH_FAIL);
+ if (ev_queue < efx->n_rx_queues) {
+ rx_queue = efx->rx_queue + ev_queue;
+
+ /* retry the rx flush */
+ if (ev_failed)
+ falcon_flush_rx_queue(rx_queue);
+ else
+ rx_queue->flushed = true;
+ }
+ break;
+ }
+
+ read_ptr = (read_ptr + 1) & FALCON_EVQ_MASK;
+ }
+}
+
+/* Handle tx and rx flushes at the same time, since they run in
+ * parallel in the hardware and there's no reason for us to
+ * serialise them */
+int falcon_flush_queues(struct efx_nic *efx)
+{
+ struct efx_rx_queue *rx_queue;
+ struct efx_tx_queue *tx_queue;
+ int i;
+ bool outstanding;
+
+ /* Issue flush requests */
+ efx_for_each_tx_queue(tx_queue, efx) {
+ tx_queue->flushed = false;
+ falcon_flush_tx_queue(tx_queue);
+ }
+ efx_for_each_rx_queue(rx_queue, efx) {
+ rx_queue->flushed = false;
+ falcon_flush_rx_queue(rx_queue);
+ }
+
+ /* Poll the evq looking for flush completions. Since we're not pushing
+ * any more rx or tx descriptors at this point, we're in no danger of
+ * overflowing the evq whilst we wait */
+ for (i = 0; i < FALCON_FLUSH_POLL_COUNT; ++i) {
+ msleep(FALCON_FLUSH_INTERVAL);
+ falcon_poll_flush_events(efx);
+
+ /* Check if every queue has been successfully flushed */
+ outstanding = false;
+ efx_for_each_tx_queue(tx_queue, efx)
+ outstanding |= !tx_queue->flushed;
+ efx_for_each_rx_queue(rx_queue, efx)
+ outstanding |= !rx_queue->flushed;
+ if (!outstanding)
+ return 0;
+ }
+
+ /* Mark the queues as all flushed. We're going to return failure
+ * leading to a reset, or fake up success anyway. "flushed" now
+ * indicates that we tried to flush. */
+ efx_for_each_tx_queue(tx_queue, efx) {
+ if (!tx_queue->flushed)
+ EFX_ERR(efx, "tx queue %d flush command timed out\n",
+ tx_queue->queue);
+ tx_queue->flushed = true;
+ }
+ efx_for_each_rx_queue(rx_queue, efx) {
+ if (!rx_queue->flushed)
+ EFX_ERR(efx, "rx queue %d flush command timed out\n",
+ rx_queue->queue);
+ rx_queue->flushed = true;
+ }
+
+ if (EFX_WORKAROUND_7803(efx))
+ return 0;
+
+ return -ETIMEDOUT;
+}
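+/* A sketch of the intended calling order (the efx.c caller is not
+ * part of this hunk): flush all queues once, then tear them down;
+ * falcon_fini_tx()/falcon_fini_rx() now merely WARN if a flush was
+ * never attempted:
+ *
+ *	if (falcon_flush_queues(efx))
+ *		EFX_ERR(efx, "failed to flush queues\n");
+ *	efx_for_each_tx_queue(tx_queue, efx)
+ *		falcon_fini_tx(tx_queue);
+ *	efx_for_each_rx_queue(rx_queue, efx)
+ *		falcon_fini_rx(rx_queue);
+ */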
/**************************************************************************
*
@@ -1371,7 +1306,7 @@ void falcon_enable_interrupts(struct efx_nic *efx)
/* Force processing of all the channels to get the EVQ RPTRs up to
date */
- efx_for_each_channel_with_interrupt(channel, efx)
+ efx_for_each_channel(channel, efx)
efx_schedule_channel(channel);
}
@@ -1439,10 +1374,11 @@ static irqreturn_t falcon_fatal_interrupt(struct efx_nic *efx)
EFX_OWORD_FMT "\n", EFX_OWORD_VAL(reg));
}
- /* Disable DMA bus mastering on both devices */
+ /* Disable both devices */
pci_disable_device(efx->pci_dev);
if (FALCON_IS_DUAL_FUNC(efx))
pci_disable_device(nic_data->pci_dev2);
+ falcon_disable_interrupts(efx);
if (++n_int_errors < FALCON_MAX_INT_ERRORS) {
EFX_ERR(efx, "SYSTEM ERROR - reset scheduled\n");
@@ -1589,7 +1525,7 @@ static void falcon_setup_rss_indir_table(struct efx_nic *efx)
offset < RX_RSS_INDIR_TBL_B0 + 0x800;
offset += 0x10) {
EFX_POPULATE_DWORD_1(dword, RX_RSS_INDIR_ENT_B0,
- i % efx->rss_queues);
+ i % efx->n_rx_queues);
falcon_writel(efx, &dword, offset);
i++;
}
@@ -1621,7 +1557,7 @@ int falcon_init_interrupt(struct efx_nic *efx)
}
/* Hook MSI or MSI-X interrupt */
- efx_for_each_channel_with_interrupt(channel, efx) {
+ efx_for_each_channel(channel, efx) {
rc = request_irq(channel->irq, falcon_msi_interrupt,
IRQF_PROBE_SHARED, /* Not shared */
efx->name, channel);
@@ -1634,7 +1570,7 @@ int falcon_init_interrupt(struct efx_nic *efx)
return 0;
fail2:
- efx_for_each_channel_with_interrupt(channel, efx)
+ efx_for_each_channel(channel, efx)
free_irq(channel->irq, channel);
fail1:
return rc;
@@ -1646,7 +1582,7 @@ void falcon_fini_interrupt(struct efx_nic *efx)
efx_oword_t reg;
/* Disable MSI/MSI-X interrupts */
- efx_for_each_channel_with_interrupt(channel, efx) {
+ efx_for_each_channel(channel, efx) {
if (channel->irq)
free_irq(channel->irq, channel);
}
@@ -1669,69 +1605,200 @@ void falcon_fini_interrupt(struct efx_nic *efx)
**************************************************************************
*/
-#define FALCON_SPI_MAX_LEN sizeof(efx_oword_t)
+#define FALCON_SPI_MAX_LEN ((unsigned) sizeof(efx_oword_t))
/* Wait for SPI command completion */
static int falcon_spi_wait(struct efx_nic *efx)
{
+ unsigned long timeout = jiffies + DIV_ROUND_UP(HZ, 10);
efx_oword_t reg;
- int cmd_en, timer_active;
- int count;
+ bool cmd_en, timer_active;
- count = 0;
- do {
+ for (;;) {
falcon_read(efx, &reg, EE_SPI_HCMD_REG_KER);
cmd_en = EFX_OWORD_FIELD(reg, EE_SPI_HCMD_CMD_EN);
timer_active = EFX_OWORD_FIELD(reg, EE_WR_TIMER_ACTIVE);
if (!cmd_en && !timer_active)
return 0;
- udelay(10);
- } while (++count < 10000); /* wait upto 100msec */
- EFX_ERR(efx, "timed out waiting for SPI\n");
- return -ETIMEDOUT;
+ if (time_after_eq(jiffies, timeout)) {
+ EFX_ERR(efx, "timed out waiting for SPI\n");
+ return -ETIMEDOUT;
+ }
+ cpu_relax();
+ }
}
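+/* The loop above polls against a jiffies deadline rather than a fixed
+ * iteration count: DIV_ROUND_UP(HZ, 10) is ~100 ms whatever HZ is
+ * (25 jiffies at HZ=250, 100 at HZ=1000), and time_after_eq() is safe
+ * across jiffies wraparound.
+ */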
-static int
-falcon_spi_read(struct efx_nic *efx, int device_id, unsigned int command,
- unsigned int address, unsigned int addr_len,
- void *data, unsigned int len)
+static int falcon_spi_cmd(const struct efx_spi_device *spi,
+ unsigned int command, int address,
+ const void *in, void *out, unsigned int len)
{
+ struct efx_nic *efx = spi->efx;
+ bool addressed = (address >= 0);
+ bool reading = (out != NULL);
efx_oword_t reg;
int rc;
- BUG_ON(len > FALCON_SPI_MAX_LEN);
+ /* Input validation */
+ if (len > FALCON_SPI_MAX_LEN)
+ return -EINVAL;
/* Check SPI not currently being accessed */
rc = falcon_spi_wait(efx);
if (rc)
return rc;
- /* Program address register */
- EFX_POPULATE_OWORD_1(reg, EE_SPI_HADR_ADR, address);
- falcon_write(efx, &reg, EE_SPI_HADR_REG_KER);
+ /* Program address register, if we have an address */
+ if (addressed) {
+ EFX_POPULATE_OWORD_1(reg, EE_SPI_HADR_ADR, address);
+ falcon_write(efx, &reg, EE_SPI_HADR_REG_KER);
+ }
+
+ /* Program data register, if we have data */
+ if (in != NULL) {
+ memcpy(&reg, in, len);
+ falcon_write(efx, &reg, EE_SPI_HDATA_REG_KER);
+ }
- /* Issue read command */
+ /* Issue read/write command */
EFX_POPULATE_OWORD_7(reg,
EE_SPI_HCMD_CMD_EN, 1,
- EE_SPI_HCMD_SF_SEL, device_id,
+ EE_SPI_HCMD_SF_SEL, spi->device_id,
EE_SPI_HCMD_DABCNT, len,
- EE_SPI_HCMD_READ, EE_SPI_READ,
+ EE_SPI_HCMD_READ, reading,
EE_SPI_HCMD_DUBCNT, 0,
- EE_SPI_HCMD_ADBCNT, addr_len,
+ EE_SPI_HCMD_ADBCNT,
+ (addressed ? spi->addr_len : 0),
EE_SPI_HCMD_ENC, command);
falcon_write(efx, &reg, EE_SPI_HCMD_REG_KER);
- /* Wait for read to complete */
+ /* Wait for read/write to complete */
rc = falcon_spi_wait(efx);
if (rc)
return rc;
/* Read data */
- falcon_read(efx, &reg, EE_SPI_HDATA_REG_KER);
- memcpy(data, &reg, len);
+ if (out != NULL) {
+ falcon_read(efx, &reg, EE_SPI_HDATA_REG_KER);
+ memcpy(out, &reg, len);
+ }
+
return 0;
}
+static unsigned int
+falcon_spi_write_limit(const struct efx_spi_device *spi, unsigned int start)
+{
+ return min(FALCON_SPI_MAX_LEN,
+ (spi->block_size - (start & (spi->block_size - 1))));
+}
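+/* Worked example: with an 8 byte write block (block_size = 8) and
+ * start = 0x1d, start & (block_size - 1) = 5, so only 3 bytes fit
+ * before the block boundary; the min() against FALCON_SPI_MAX_LEN
+ * (sizeof(efx_oword_t) = 16) also caps any single SPI command.
+ */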
+
+static inline u8
+efx_spi_munge_command(const struct efx_spi_device *spi,
+ const u8 command, const unsigned int address)
+{
+ return command | (((address >> 8) & spi->munge_address) << 3);
+}
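+/* Worked example: small AT25-style parts with 512 bytes and a one
+ * byte address (munge_address = 1) carry address bit 8 in bit 3 of
+ * the opcode, so with the standard 0x03 read command and address
+ * 0x123:
+ *	0x03 | (((0x123 >> 8) & 1) << 3) = 0x0b
+ * With munge_address = 0 the command is returned unchanged.
+ */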
+
+static int falcon_spi_fast_wait(const struct efx_spi_device *spi)
+{
+ u8 status;
+ int i, rc;
+
+ /* Wait up to 1000us for flash/EEPROM to finish a fast operation. */
+ for (i = 0; i < 50; i++) {
+ udelay(20);
+
+ rc = falcon_spi_cmd(spi, SPI_RDSR, -1, NULL,
+ &status, sizeof(status));
+ if (rc)
+ return rc;
+ if (!(status & SPI_STATUS_NRDY))
+ return 0;
+ }
+ EFX_ERR(spi->efx,
+ "timed out waiting for device %d last status=0x%02x\n",
+ spi->device_id, status);
+ return -ETIMEDOUT;
+}
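+/* Polling budget: 50 iterations * 20 us = the 1000 us quoted above.
+ * SPI_RDSR reads the device status register; SPI_STATUS_NRDY (the
+ * usual write-in-progress bit) stays set until the internal write
+ * cycle finishes.
+ */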
+
+int falcon_spi_read(const struct efx_spi_device *spi, loff_t start,
+ size_t len, size_t *retlen, u8 *buffer)
+{
+ unsigned int command, block_len, pos = 0;
+ int rc = 0;
+
+ while (pos < len) {
+ block_len = min((unsigned int)len - pos,
+ FALCON_SPI_MAX_LEN);
+
+ command = efx_spi_munge_command(spi, SPI_READ, start + pos);
+ rc = falcon_spi_cmd(spi, command, start + pos, NULL,
+ buffer + pos, block_len);
+ if (rc)
+ break;
+ pos += block_len;
+
+ /* Avoid locking up the system */
+ cond_resched();
+ if (signal_pending(current)) {
+ rc = -EINTR;
+ break;
+ }
+ }
+
+ if (retlen)
+ *retlen = pos;
+ return rc;
+}
+
+int falcon_spi_write(const struct efx_spi_device *spi, loff_t start,
+ size_t len, size_t *retlen, const u8 *buffer)
+{
+ u8 verify_buffer[FALCON_SPI_MAX_LEN];
+ unsigned int command, block_len, pos = 0;
+ int rc = 0;
+
+ while (pos < len) {
+ rc = falcon_spi_cmd(spi, SPI_WREN, -1, NULL, NULL, 0);
+ if (rc)
+ break;
+
+ block_len = min((unsigned int)len - pos,
+ falcon_spi_write_limit(spi, start + pos));
+ command = efx_spi_munge_command(spi, SPI_WRITE, start + pos);
+ rc = falcon_spi_cmd(spi, command, start + pos,
+ buffer + pos, NULL, block_len);
+ if (rc)
+ break;
+
+ rc = falcon_spi_fast_wait(spi);
+ if (rc)
+ break;
+
+ command = efx_spi_munge_command(spi, SPI_READ, start + pos);
+ rc = falcon_spi_cmd(spi, command, start + pos,
+ NULL, verify_buffer, block_len);
+ if (memcmp(verify_buffer, buffer + pos, block_len)) {
+ rc = -EIO;
+ break;
+ }
+
+ pos += block_len;
+
+ /* Avoid locking up the system */
+ cond_resched();
+ if (signal_pending(current)) {
+ rc = -EINTR;
+ break;
+ }
+ }
+
+ if (retlen)
+ *retlen = pos;
+ return rc;
+}
+
/**************************************************************************
*
* MAC wrapper
@@ -1812,7 +1879,7 @@ void falcon_reconfigure_mac_wrapper(struct efx_nic *efx)
{
efx_oword_t reg;
int link_speed;
- unsigned int tx_fc;
+ bool tx_fc;
if (efx->link_options & GM_LPA_10000)
link_speed = 0x3;
@@ -1847,7 +1914,7 @@ void falcon_reconfigure_mac_wrapper(struct efx_nic *efx)
/* Transmission of pause frames when RX crosses the threshold is
* covered by RX_XOFF_MAC_EN and XM_TX_CFG_REG:XM_FCNTL.
* Action on receipt of pause frames is controlled by XM_DIS_FCNTL */
- tx_fc = (efx->flow_control & EFX_FC_TX) ? 1 : 0;
+ tx_fc = !!(efx->flow_control & EFX_FC_TX);
falcon_read(efx, &reg, RX_CFG_REG_KER);
EFX_SET_OWORD_FIELD_VER(efx, reg, RX_XOFF_MAC_EN, tx_fc);
@@ -1887,8 +1954,10 @@ int falcon_dma_stats(struct efx_nic *efx, unsigned int done_offset)
/* Wait for transfer to complete */
for (i = 0; i < 400; i++) {
- if (*(volatile u32 *)dma_done == FALCON_STATS_DONE)
+ if (*(volatile u32 *)dma_done == FALCON_STATS_DONE) {
+ rmb(); /* Ensure the stats are valid. */
return 0;
+ }
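+ /* The rmb() orders the read of the DMA-done flag before any
+ * subsequent reads of the statistics buffer, so a CPU that has
+ * seen FALCON_STATS_DONE cannot then read stale stats data.
+ */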
udelay(10);
}
@@ -1951,7 +2020,7 @@ static int falcon_gmii_wait(struct efx_nic *efx)
static void falcon_mdio_write(struct net_device *net_dev, int phy_id,
int addr, int value)
{
- struct efx_nic *efx = net_dev->priv;
+ struct efx_nic *efx = netdev_priv(net_dev);
unsigned int phy_id2 = phy_id & FALCON_PHY_ID_ID_MASK;
efx_oword_t reg;
@@ -2019,7 +2088,7 @@ static void falcon_mdio_write(struct net_device *net_dev, int phy_id,
* could be read, -1 will be returned. */
static int falcon_mdio_read(struct net_device *net_dev, int phy_id, int addr)
{
- struct efx_nic *efx = net_dev->priv;
+ struct efx_nic *efx = netdev_priv(net_dev);
unsigned int phy_addr = phy_id & FALCON_PHY_ID_ID_MASK;
efx_oword_t reg;
int value = -1;
@@ -2120,7 +2189,7 @@ int falcon_probe_port(struct efx_nic *efx)
return rc;
/* Set up GMII structure for PHY */
- efx->mii.supports_gmii = 1;
+ efx->mii.supports_gmii = true;
falcon_init_mdio(&efx->mii);
/* Hardware flow ctrl. FalconA RX FIFO too small for pause generation */
@@ -2168,6 +2237,170 @@ void falcon_set_multicast_hash(struct efx_nic *efx)
falcon_write(efx, &mc_hash->oword[1], MAC_MCAST_HASH_REG1_KER);
}
+
+/**************************************************************************
+ *
+ * Falcon test code
+ *
+ **************************************************************************/
+
+int falcon_read_nvram(struct efx_nic *efx, struct falcon_nvconfig *nvconfig_out)
+{
+ struct falcon_nvconfig *nvconfig;
+ struct efx_spi_device *spi;
+ void *region;
+ int rc, magic_num, struct_ver;
+ __le16 *word, *limit;
+ u32 csum;
+
+ region = kmalloc(NVCONFIG_END, GFP_KERNEL);
+ if (!region)
+ return -ENOMEM;
+ nvconfig = region + NVCONFIG_OFFSET;
+
+ spi = efx->spi_flash ? efx->spi_flash : efx->spi_eeprom;
+ rc = falcon_spi_read(spi, 0, NVCONFIG_END, NULL, region);
+ if (rc) {
+ EFX_ERR(efx, "Failed to read %s\n",
+ efx->spi_flash ? "flash" : "EEPROM");
+ rc = -EIO;
+ goto out;
+ }
+
+ magic_num = le16_to_cpu(nvconfig->board_magic_num);
+ struct_ver = le16_to_cpu(nvconfig->board_struct_ver);
+
+ rc = -EINVAL;
+ if (magic_num != NVCONFIG_BOARD_MAGIC_NUM) {
+ EFX_ERR(efx, "NVRAM bad magic 0x%x\n", magic_num);
+ goto out;
+ }
+ if (struct_ver < 2) {
+ EFX_ERR(efx, "NVRAM has ancient version 0x%x\n", struct_ver);
+ goto out;
+ } else if (struct_ver < 4) {
+ word = &nvconfig->board_magic_num;
+ limit = (__le16 *) (nvconfig + 1);
+ } else {
+ word = region;
+ limit = region + NVCONFIG_END;
+ }
+ for (csum = 0; word < limit; ++word)
+ csum += le16_to_cpu(*word);
+
+ if (~csum & 0xffff) {
+ EFX_ERR(efx, "NVRAM has incorrect checksum\n");
+ goto out;
+ }
+
+ rc = 0;
+ if (nvconfig_out)
+ memcpy(nvconfig_out, nvconfig, sizeof(*nvconfig));
+
+ out:
+ kfree(region);
+ return rc;
+}
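+/* Checksum convention: the 16-bit sum of every little-endian word in
+ * the region, including board_checksum itself, must be 0xffff; only
+ * the low 16 bits of the u32 accumulator are checked. For example,
+ * words {0x1234, 0xedcb} sum to 0xffff and pass, while {0x1234,
+ * 0xedcc} sum to 0x10000, whose low 16 bits are 0, and fail.
+ */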
+
+/* Registers tested in the falcon register test */
+static struct {
+ unsigned address;
+ efx_oword_t mask;
+} efx_test_registers[] = {
+ { ADR_REGION_REG_KER,
+ EFX_OWORD32(0x0001FFFF, 0x0001FFFF, 0x0001FFFF, 0x0001FFFF) },
+ { RX_CFG_REG_KER,
+ EFX_OWORD32(0xFFFFFFFE, 0x00017FFF, 0x00000000, 0x00000000) },
+ { TX_CFG_REG_KER,
+ EFX_OWORD32(0x7FFF0037, 0x00000000, 0x00000000, 0x00000000) },
+ { TX_CFG2_REG_KER,
+ EFX_OWORD32(0xFFFEFE80, 0x1FFFFFFF, 0x020000FE, 0x007FFFFF) },
+ { MAC0_CTRL_REG_KER,
+ EFX_OWORD32(0xFFFF0000, 0x00000000, 0x00000000, 0x00000000) },
+ { SRM_TX_DC_CFG_REG_KER,
+ EFX_OWORD32(0x001FFFFF, 0x00000000, 0x00000000, 0x00000000) },
+ { RX_DC_CFG_REG_KER,
+ EFX_OWORD32(0x0000000F, 0x00000000, 0x00000000, 0x00000000) },
+ { RX_DC_PF_WM_REG_KER,
+ EFX_OWORD32(0x000003FF, 0x00000000, 0x00000000, 0x00000000) },
+ { DP_CTRL_REG,
+ EFX_OWORD32(0x00000FFF, 0x00000000, 0x00000000, 0x00000000) },
+ { XM_GLB_CFG_REG,
+ EFX_OWORD32(0x00000C68, 0x00000000, 0x00000000, 0x00000000) },
+ { XM_TX_CFG_REG,
+ EFX_OWORD32(0x00080164, 0x00000000, 0x00000000, 0x00000000) },
+ { XM_RX_CFG_REG,
+ EFX_OWORD32(0x07100A0C, 0x00000000, 0x00000000, 0x00000000) },
+ { XM_RX_PARAM_REG,
+ EFX_OWORD32(0x00001FF8, 0x00000000, 0x00000000, 0x00000000) },
+ { XM_FC_REG,
+ EFX_OWORD32(0xFFFF0001, 0x00000000, 0x00000000, 0x00000000) },
+ { XM_ADR_LO_REG,
+ EFX_OWORD32(0xFFFFFFFF, 0x00000000, 0x00000000, 0x00000000) },
+ { XX_SD_CTL_REG,
+ EFX_OWORD32(0x0003FF0F, 0x00000000, 0x00000000, 0x00000000) },
+};
+
+static bool efx_masked_compare_oword(const efx_oword_t *a, const efx_oword_t *b,
+ const efx_oword_t *mask)
+{
+ return ((a->u64[0] ^ b->u64[0]) & mask->u64[0]) ||
+ ((a->u64[1] ^ b->u64[1]) & mask->u64[1]);
+}
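+/* (a ^ b) has a 1 in every bit position where a and b differ, and the
+ * mask keeps only the testable bits. For example a = 0xff00,
+ * b = 0xf000, mask = 0x0f00: (a ^ b) & mask = 0x0f00, i.e. "differs".
+ */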
+
+int falcon_test_registers(struct efx_nic *efx)
+{
+ unsigned address = 0, i, j;
+ efx_oword_t mask, imask, original, reg, buf;
+
+ /* Falcon should be in loopback to isolate the XMAC from the PHY */
+ WARN_ON(!LOOPBACK_INTERNAL(efx));
+
+ for (i = 0; i < ARRAY_SIZE(efx_test_registers); ++i) {
+ address = efx_test_registers[i].address;
+ mask = imask = efx_test_registers[i].mask;
+ EFX_INVERT_OWORD(imask);
+
+ falcon_read(efx, &original, address);
+
+ /* bit sweep on and off */
+ for (j = 0; j < 128; j++) {
+ if (!EFX_EXTRACT_OWORD32(mask, j, j))
+ continue;
+
+ /* Test this testable bit can be set in isolation */
+ EFX_AND_OWORD(reg, original, mask);
+ EFX_SET_OWORD32(reg, j, j, 1);
+
+ falcon_write(efx, &reg, address);
+ falcon_read(efx, &buf, address);
+
+ if (efx_masked_compare_oword(&reg, &buf, &mask))
+ goto fail;
+
+ /* Test this testable bit can be cleared in isolation */
+ EFX_OR_OWORD(reg, original, mask);
+ EFX_SET_OWORD32(reg, j, j, 0);
+
+ falcon_write(efx, &reg, address);
+ falcon_read(efx, &buf, address);
+
+ if (efx_masked_compare_oword(&reg, &buf, &mask))
+ goto fail;
+ }
+
+ falcon_write(efx, &original, address);
+ }
+
+ return 0;
+
+fail:
+ EFX_ERR(efx, "wrote "EFX_OWORD_FMT" read "EFX_OWORD_FMT
+ " at address 0x%x mask "EFX_OWORD_FMT"\n", EFX_OWORD_VAL(reg),
+ EFX_OWORD_VAL(buf), address, EFX_OWORD_VAL(mask));
+ return -EIO;
+}
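+/* This is a conventional walking-ones/walking-zeros test: every bit
+ * the mask marks as testable is set, then cleared, in isolation, and
+ * the read-back is compared under the same mask so reserved or
+ * read-only bits never produce false failures. The original register
+ * value is restored before moving on to the next register.
+ */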
+
/**************************************************************************
*
* Device reset
@@ -2305,68 +2538,103 @@ static int falcon_reset_sram(struct efx_nic *efx)
return -ETIMEDOUT;
}
+static int falcon_spi_device_init(struct efx_nic *efx,
+ struct efx_spi_device **spi_device_ret,
+ unsigned int device_id, u32 device_type)
+{
+ struct efx_spi_device *spi_device;
+
+ if (device_type != 0) {
+ spi_device = kmalloc(sizeof(*spi_device), GFP_KERNEL);
+ if (!spi_device)
+ return -ENOMEM;
+ spi_device->device_id = device_id;
+ spi_device->size =
+ 1 << SPI_DEV_TYPE_FIELD(device_type, SPI_DEV_TYPE_SIZE);
+ spi_device->addr_len =
+ SPI_DEV_TYPE_FIELD(device_type, SPI_DEV_TYPE_ADDR_LEN);
+ spi_device->munge_address = (spi_device->size == 1 << 9 &&
+ spi_device->addr_len == 1);
+ spi_device->block_size =
+ 1 << SPI_DEV_TYPE_FIELD(device_type,
+ SPI_DEV_TYPE_BLOCK_SIZE);
+
+ spi_device->efx = efx;
+ } else {
+ spi_device = NULL;
+ }
+
+ kfree(*spi_device_ret);
+ *spi_device_ret = spi_device;
+ return 0;
+}
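+/* Worked decode: the default small-EEPROM type used by
+ * falcon_probe_spi_devices() below,
+ * (9 << SPI_DEV_TYPE_SIZE_LBN) | (1 << SPI_DEV_TYPE_ADDR_LEN_LBN) |
+ * (3 << SPI_DEV_TYPE_BLOCK_SIZE_LBN), yields size = 1 << 9 = 512
+ * bytes, addr_len = 1 and block_size = 1 << 3 = 8 bytes, so
+ * munge_address is set and 9-bit addressing applies.
+ */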
+
+static void falcon_remove_spi_devices(struct efx_nic *efx)
+{
+ kfree(efx->spi_eeprom);
+ efx->spi_eeprom = NULL;
+ kfree(efx->spi_flash);
+ efx->spi_flash = NULL;
+}
+
/* Extract non-volatile configuration */
static int falcon_probe_nvconfig(struct efx_nic *efx)
{
struct falcon_nvconfig *nvconfig;
- efx_oword_t nic_stat;
- int device_id;
- unsigned addr_len;
- size_t offset, len;
- int magic_num, struct_ver, board_rev;
+ int board_rev;
int rc;
- /* Find the boot device. */
- falcon_read(efx, &nic_stat, NIC_STAT_REG);
- if (EFX_OWORD_FIELD(nic_stat, SF_PRST)) {
- device_id = EE_SPI_FLASH;
- addr_len = 3;
- } else if (EFX_OWORD_FIELD(nic_stat, EE_PRST)) {
- device_id = EE_SPI_EEPROM;
- addr_len = 2;
- } else {
- return -ENODEV;
- }
-
nvconfig = kmalloc(sizeof(*nvconfig), GFP_KERNEL);
+ if (!nvconfig)
+ return -ENOMEM;
- /* Read the whole configuration structure into memory. */
- for (offset = 0; offset < sizeof(*nvconfig); offset += len) {
- len = min(sizeof(*nvconfig) - offset,
- (size_t) FALCON_SPI_MAX_LEN);
- rc = falcon_spi_read(efx, device_id, SPI_READ,
- NVCONFIG_BASE + offset, addr_len,
- (char *)nvconfig + offset, len);
- if (rc)
- goto out;
- }
-
- /* Read the MAC addresses */
- memcpy(efx->mac_address, nvconfig->mac_address[0], ETH_ALEN);
-
- /* Read the board configuration. */
- magic_num = le16_to_cpu(nvconfig->board_magic_num);
- struct_ver = le16_to_cpu(nvconfig->board_struct_ver);
-
- if (magic_num != NVCONFIG_BOARD_MAGIC_NUM || struct_ver < 2) {
- EFX_ERR(efx, "Non volatile memory bad magic=%x ver=%x "
- "therefore using defaults\n", magic_num, struct_ver);
+ rc = falcon_read_nvram(efx, nvconfig);
+ if (rc == -EINVAL) {
+ EFX_ERR(efx, "NVRAM is invalid therefore using defaults\n");
efx->phy_type = PHY_TYPE_NONE;
efx->mii.phy_id = PHY_ADDR_INVALID;
board_rev = 0;
+ rc = 0;
+ } else if (rc) {
+ goto fail1;
} else {
struct falcon_nvconfig_board_v2 *v2 = &nvconfig->board_v2;
+ struct falcon_nvconfig_board_v3 *v3 = &nvconfig->board_v3;
efx->phy_type = v2->port0_phy_type;
efx->mii.phy_id = v2->port0_phy_addr;
board_rev = le16_to_cpu(v2->board_revision);
+
+ if (le16_to_cpu(nvconfig->board_struct_ver) >= 3) {
+ __le32 fl = v3->spi_device_type[EE_SPI_FLASH];
+ __le32 ee = v3->spi_device_type[EE_SPI_EEPROM];
+ rc = falcon_spi_device_init(efx, &efx->spi_flash,
+ EE_SPI_FLASH,
+ le32_to_cpu(fl));
+ if (rc)
+ goto fail2;
+ rc = falcon_spi_device_init(efx, &efx->spi_eeprom,
+ EE_SPI_EEPROM,
+ le32_to_cpu(ee));
+ if (rc)
+ goto fail2;
+ }
}
+ /* Read the MAC addresses */
+ memcpy(efx->mac_address, nvconfig->mac_address[0], ETH_ALEN);
+
EFX_LOG(efx, "PHY is %d phy_id %d\n", efx->phy_type, efx->mii.phy_id);
efx_set_board_info(efx, board_rev);
- out:
+ kfree(nvconfig);
+ return 0;
+
+ fail2:
+ falcon_remove_spi_devices(efx);
+ fail1:
kfree(nvconfig);
return rc;
}
@@ -2417,6 +2685,86 @@ static int falcon_probe_nic_variant(struct efx_nic *efx)
return 0;
}
+/* Probe all SPI devices on the NIC */
+static void falcon_probe_spi_devices(struct efx_nic *efx)
+{
+ efx_oword_t nic_stat, gpio_ctl, ee_vpd_cfg;
+ bool has_flash, has_eeprom, boot_is_external;
+
+ falcon_read(efx, &gpio_ctl, GPIO_CTL_REG_KER);
+ falcon_read(efx, &nic_stat, NIC_STAT_REG);
+ falcon_read(efx, &ee_vpd_cfg, EE_VPD_CFG_REG_KER);
+
+ has_flash = EFX_OWORD_FIELD(nic_stat, SF_PRST);
+ has_eeprom = EFX_OWORD_FIELD(nic_stat, EE_PRST);
+ boot_is_external = EFX_OWORD_FIELD(gpio_ctl, BOOTED_USING_NVDEVICE);
+
+ if (has_flash) {
+ /* Default flash SPI device: Atmel AT25F1024
+ * 128 KB, 24-bit address, 32 KB erase block,
+ * 256 B write block
+ */
+ u32 flash_device_type =
+ (17 << SPI_DEV_TYPE_SIZE_LBN)
+ | (3 << SPI_DEV_TYPE_ADDR_LEN_LBN)
+ | (0x52 << SPI_DEV_TYPE_ERASE_CMD_LBN)
+ | (15 << SPI_DEV_TYPE_ERASE_SIZE_LBN)
+ | (8 << SPI_DEV_TYPE_BLOCK_SIZE_LBN);
+
+ falcon_spi_device_init(efx, &efx->spi_flash,
+ EE_SPI_FLASH, flash_device_type);
+
+ if (!boot_is_external) {
+ /* Disable VPD and set clock dividers to safe
+ * values for initial programming.
+ */
+ EFX_LOG(efx, "Booted from internal ASIC settings;"
+ " setting SPI config\n");
+ EFX_POPULATE_OWORD_3(ee_vpd_cfg, EE_VPD_EN, 0,
+ /* 125 MHz / 7 ~= 20 MHz */
+ EE_SF_CLOCK_DIV, 7,
+ /* 125 MHz / 63 ~= 2 MHz */
+ EE_EE_CLOCK_DIV, 63);
+ falcon_write(efx, &ee_vpd_cfg, EE_VPD_CFG_REG_KER);
+ }
+ }
+
+ if (has_eeprom) {
+ u32 eeprom_device_type;
+
+ /* If it has no flash, it must have a large EEPROM
+ * for chip config; otherwise check whether 9-bit
+ * addressing is used for VPD configuration
+ */
+ if (has_flash &&
+ (!boot_is_external ||
+ EFX_OWORD_FIELD(ee_vpd_cfg, EE_VPD_EN_AD9_MODE))) {
+ /* Default SPI device: Atmel AT25040 or similar
+ * 512 B, 9-bit address, 8 B write block
+ */
+ eeprom_device_type =
+ (9 << SPI_DEV_TYPE_SIZE_LBN)
+ | (1 << SPI_DEV_TYPE_ADDR_LEN_LBN)
+ | (3 << SPI_DEV_TYPE_BLOCK_SIZE_LBN);
+ } else {
+ /* "Large" SPI device: Atmel AT25640 or similar
+ * 8 KB, 16-bit address, 32 B write block
+ */
+ eeprom_device_type =
+ (13 << SPI_DEV_TYPE_SIZE_LBN)
+ | (2 << SPI_DEV_TYPE_ADDR_LEN_LBN)
+ | (5 << SPI_DEV_TYPE_BLOCK_SIZE_LBN);
+ }
+
+ falcon_spi_device_init(efx, &efx->spi_eeprom,
+ EE_SPI_EEPROM, eeprom_device_type);
+ }
+
+ EFX_LOG(efx, "flash is %s, EEPROM is %s\n",
+ (has_flash ? "present" : "absent"),
+ (has_eeprom ? "present" : "absent"));
+}
+
int falcon_probe_nic(struct efx_nic *efx)
{
struct falcon_nic_data *nic_data;
@@ -2424,6 +2772,8 @@ int falcon_probe_nic(struct efx_nic *efx)
/* Allocate storage for hardware specific data */
nic_data = kzalloc(sizeof(*nic_data), GFP_KERNEL);
+ if (!nic_data)
+ return -ENOMEM;
efx->nic_data = nic_data;
/* Determine number of ports etc. */
@@ -2467,6 +2817,8 @@ int falcon_probe_nic(struct efx_nic *efx)
(unsigned long long)efx->irq_status.dma_addr,
efx->irq_status.addr, virt_to_phys(efx->irq_status.addr));
+ falcon_probe_spi_devices(efx);
+
/* Read in the non-volatile configuration */
rc = falcon_probe_nvconfig(efx);
if (rc)
@@ -2486,6 +2838,7 @@ int falcon_probe_nic(struct efx_nic *efx)
return 0;
fail5:
+ falcon_remove_spi_devices(efx);
falcon_free_buffer(efx, &efx->irq_status);
fail4:
fail3:
@@ -2573,19 +2926,14 @@ int falcon_init_nic(struct efx_nic *efx)
EFX_INVERT_OWORD(temp);
falcon_write(efx, &temp, FATAL_INTR_REG_KER);
- /* Set number of RSS queues for receive path. */
- falcon_read(efx, &temp, RX_FILTER_CTL_REG);
- if (falcon_rev(efx) >= FALCON_REV_B0)
- EFX_SET_OWORD_FIELD(temp, NUM_KER, 0);
- else
- EFX_SET_OWORD_FIELD(temp, NUM_KER, efx->rss_queues - 1);
if (EFX_WORKAROUND_7244(efx)) {
+ falcon_read(efx, &temp, RX_FILTER_CTL_REG);
EFX_SET_OWORD_FIELD(temp, UDP_FULL_SRCH_LIMIT, 8);
EFX_SET_OWORD_FIELD(temp, UDP_WILD_SRCH_LIMIT, 8);
EFX_SET_OWORD_FIELD(temp, TCP_FULL_SRCH_LIMIT, 8);
EFX_SET_OWORD_FIELD(temp, TCP_WILD_SRCH_LIMIT, 8);
+ falcon_write(efx, &temp, RX_FILTER_CTL_REG);
}
- falcon_write(efx, &temp, RX_FILTER_CTL_REG);
falcon_setup_rss_indir_table(efx);
@@ -2641,8 +2989,8 @@ int falcon_init_nic(struct efx_nic *efx)
rx_xoff_thresh_bytes : efx->type->rx_xoff_thresh);
EFX_SET_OWORD_FIELD_VER(efx, temp, RX_XOFF_MAC_TH, thresh / 256);
/* RX control FIFO thresholds [32 entries] */
- EFX_SET_OWORD_FIELD_VER(efx, temp, RX_XON_TX_TH, 25);
- EFX_SET_OWORD_FIELD_VER(efx, temp, RX_XOFF_TX_TH, 20);
+ EFX_SET_OWORD_FIELD_VER(efx, temp, RX_XON_TX_TH, 20);
+ EFX_SET_OWORD_FIELD_VER(efx, temp, RX_XOFF_TX_TH, 25);
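+ /* With 32 control FIFO entries this also swaps the thresholds so
+ * that XOFF now asserts at the higher fill level (25) and XON at
+ * the lower one (20), which appears to restore the hysteresis the
+ * previous, reversed values lacked.
+ */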
falcon_write(efx, &temp, RX_CFG_REG_KER);
/* Set destination of both TX and RX Flush events */
@@ -2662,6 +3010,7 @@ void falcon_remove_nic(struct efx_nic *efx)
rc = i2c_del_adapter(&efx->i2c_adap);
BUG_ON(rc);
+ falcon_remove_spi_devices(efx);
falcon_free_buffer(efx, &efx->irq_status);
falcon_reset_hw(efx, RESET_TYPE_ALL);
diff --git a/drivers/net/sfc/falcon.h b/drivers/net/sfc/falcon.h
index 492f9bc2884..be025ba7a6c 100644
--- a/drivers/net/sfc/falcon.h
+++ b/drivers/net/sfc/falcon.h
@@ -40,24 +40,24 @@ extern struct efx_nic_type falcon_b_nic_type;
/* TX data path */
extern int falcon_probe_tx(struct efx_tx_queue *tx_queue);
-extern int falcon_init_tx(struct efx_tx_queue *tx_queue);
+extern void falcon_init_tx(struct efx_tx_queue *tx_queue);
extern void falcon_fini_tx(struct efx_tx_queue *tx_queue);
extern void falcon_remove_tx(struct efx_tx_queue *tx_queue);
extern void falcon_push_buffers(struct efx_tx_queue *tx_queue);
/* RX data path */
extern int falcon_probe_rx(struct efx_rx_queue *rx_queue);
-extern int falcon_init_rx(struct efx_rx_queue *rx_queue);
+extern void falcon_init_rx(struct efx_rx_queue *rx_queue);
extern void falcon_fini_rx(struct efx_rx_queue *rx_queue);
extern void falcon_remove_rx(struct efx_rx_queue *rx_queue);
extern void falcon_notify_rx_desc(struct efx_rx_queue *rx_queue);
/* Event data path */
extern int falcon_probe_eventq(struct efx_channel *channel);
-extern int falcon_init_eventq(struct efx_channel *channel);
+extern void falcon_init_eventq(struct efx_channel *channel);
extern void falcon_fini_eventq(struct efx_channel *channel);
extern void falcon_remove_eventq(struct efx_channel *channel);
-extern int falcon_process_eventq(struct efx_channel *channel, int *rx_quota);
+extern int falcon_process_eventq(struct efx_channel *channel, int rx_quota);
extern void falcon_eventq_read_ack(struct efx_channel *channel);
/* Ports */
@@ -65,7 +65,7 @@ extern int falcon_probe_port(struct efx_nic *efx);
extern void falcon_remove_port(struct efx_nic *efx);
/* MAC/PHY */
-extern int falcon_xaui_link_ok(struct efx_nic *efx);
+extern bool falcon_xaui_link_ok(struct efx_nic *efx);
extern int falcon_dma_stats(struct efx_nic *efx,
unsigned int done_offset);
extern void falcon_drain_tx_fifo(struct efx_nic *efx);
@@ -86,6 +86,7 @@ extern void falcon_fini_interrupt(struct efx_nic *efx);
extern int falcon_probe_nic(struct efx_nic *efx);
extern int falcon_probe_resources(struct efx_nic *efx);
extern int falcon_init_nic(struct efx_nic *efx);
+extern int falcon_flush_queues(struct efx_nic *efx);
extern int falcon_reset_hw(struct efx_nic *efx, enum reset_type method);
extern void falcon_remove_resources(struct efx_nic *efx);
extern void falcon_remove_nic(struct efx_nic *efx);
@@ -93,6 +94,12 @@ extern void falcon_update_nic_stats(struct efx_nic *efx);
extern void falcon_set_multicast_hash(struct efx_nic *efx);
extern int falcon_reset_xaui(struct efx_nic *efx);
+/* Tests */
+struct falcon_nvconfig;
+extern int falcon_read_nvram(struct efx_nic *efx,
+ struct falcon_nvconfig *nvconfig);
+extern int falcon_test_registers(struct efx_nic *efx);
+
/**************************************************************************
*
* Falcon MAC stats
diff --git a/drivers/net/sfc/falcon_hwdefs.h b/drivers/net/sfc/falcon_hwdefs.h
index 6d003114eea..5d584b0dbb5 100644
--- a/drivers/net/sfc/falcon_hwdefs.h
+++ b/drivers/net/sfc/falcon_hwdefs.h
@@ -92,6 +92,17 @@
/* SPI host data register */
#define EE_SPI_HDATA_REG_KER 0x0120
+/* SPI/VPD config register */
+#define EE_VPD_CFG_REG_KER 0x0140
+#define EE_VPD_EN_LBN 0
+#define EE_VPD_EN_WIDTH 1
+#define EE_VPD_EN_AD9_MODE_LBN 1
+#define EE_VPD_EN_AD9_MODE_WIDTH 1
+#define EE_EE_CLOCK_DIV_LBN 112
+#define EE_EE_CLOCK_DIV_WIDTH 7
+#define EE_SF_CLOCK_DIV_LBN 120
+#define EE_SF_CLOCK_DIV_WIDTH 7
+
/* PCIE CORE ACCESS REG */
#define PCIE_CORE_ADDR_PCIE_DEVICE_CTRL_STAT 0x68
#define PCIE_CORE_ADDR_PCIE_LINK_CTRL_STAT 0x70
@@ -106,7 +117,6 @@
#define SF_PRST_WIDTH 1
#define EE_PRST_LBN 8
#define EE_PRST_WIDTH 1
-/* See pic_mode_t for decoding of this field */
/* These bit definitions are extrapolated from the list of numerical
* values for STRAP_PINS.
*/
@@ -115,6 +125,9 @@
#define STRAP_PCIE_LBN 0
#define STRAP_PCIE_WIDTH 1
+#define BOOTED_USING_NVDEVICE_LBN 3
+#define BOOTED_USING_NVDEVICE_WIDTH 1
+
/* GPIO control register */
#define GPIO_CTL_REG_KER 0x0210
#define GPIO_OUTPUTS_LBN (16)
@@ -479,18 +492,8 @@
#define MAC_MCAST_HASH_REG0_KER 0xca0
#define MAC_MCAST_HASH_REG1_KER 0xcb0
-/* GMAC registers */
-#define FALCON_GMAC_REGBANK 0xe00
-#define FALCON_GMAC_REGBANK_SIZE 0x200
-#define FALCON_GMAC_REG_SIZE 0x10
-
-/* XMAC registers */
-#define FALCON_XMAC_REGBANK 0x1200
-#define FALCON_XMAC_REGBANK_SIZE 0x200
-#define FALCON_XMAC_REG_SIZE 0x10
-
/* XGMAC address register low */
-#define XM_ADR_LO_REG_MAC 0x00
+#define XM_ADR_LO_REG 0x1200
#define XM_ADR_3_LBN 24
#define XM_ADR_3_WIDTH 8
#define XM_ADR_2_LBN 16
@@ -501,14 +504,14 @@
#define XM_ADR_0_WIDTH 8
/* XGMAC address register high */
-#define XM_ADR_HI_REG_MAC 0x01
+#define XM_ADR_HI_REG 0x1210
#define XM_ADR_5_LBN 8
#define XM_ADR_5_WIDTH 8
#define XM_ADR_4_LBN 0
#define XM_ADR_4_WIDTH 8
/* XGMAC global configuration */
-#define XM_GLB_CFG_REG_MAC 0x02
+#define XM_GLB_CFG_REG 0x1220
#define XM_RX_STAT_EN_LBN 11
#define XM_RX_STAT_EN_WIDTH 1
#define XM_TX_STAT_EN_LBN 10
@@ -521,7 +524,7 @@
#define XM_CORE_RST_WIDTH 1
/* XGMAC transmit configuration */
-#define XM_TX_CFG_REG_MAC 0x03
+#define XM_TX_CFG_REG 0x1230
#define XM_IPG_LBN 16
#define XM_IPG_WIDTH 4
#define XM_FCNTL_LBN 10
@@ -536,7 +539,7 @@
#define XM_TXEN_WIDTH 1
/* XGMAC receive configuration */
-#define XM_RX_CFG_REG_MAC 0x04
+#define XM_RX_CFG_REG 0x1240
#define XM_PASS_CRC_ERR_LBN 25
#define XM_PASS_CRC_ERR_WIDTH 1
#define XM_ACPT_ALL_MCAST_LBN 11
@@ -549,7 +552,7 @@
#define XM_RXEN_WIDTH 1
/* XGMAC management interrupt mask register */
-#define XM_MGT_INT_MSK_REG_MAC_B0 0x5
+#define XM_MGT_INT_MSK_REG_B0 0x1250
#define XM_MSK_PRMBLE_ERR_LBN 2
#define XM_MSK_PRMBLE_ERR_WIDTH 1
#define XM_MSK_RMTFLT_LBN 1
@@ -558,29 +561,29 @@
#define XM_MSK_LCLFLT_WIDTH 1
/* XGMAC flow control register */
-#define XM_FC_REG_MAC 0x7
+#define XM_FC_REG 0x1270
#define XM_PAUSE_TIME_LBN 16
#define XM_PAUSE_TIME_WIDTH 16
#define XM_DIS_FCNTL_LBN 0
#define XM_DIS_FCNTL_WIDTH 1
/* XGMAC pause time count register */
-#define XM_PAUSE_TIME_REG_MAC 0x9
+#define XM_PAUSE_TIME_REG 0x1290
/* XGMAC transmit parameter register */
-#define XM_TX_PARAM_REG_MAC 0x0d
+#define XM_TX_PARAM_REG 0x12d0
#define XM_TX_JUMBO_MODE_LBN 31
#define XM_TX_JUMBO_MODE_WIDTH 1
#define XM_MAX_TX_FRM_SIZE_LBN 16
#define XM_MAX_TX_FRM_SIZE_WIDTH 14
/* XGMAC receive parameter register */
-#define XM_RX_PARAM_REG_MAC 0x0e
+#define XM_RX_PARAM_REG 0x12e0
#define XM_MAX_RX_FRM_SIZE_LBN 0
#define XM_MAX_RX_FRM_SIZE_WIDTH 14
/* XGMAC management interrupt status register */
-#define XM_MGT_INT_REG_MAC_B0 0x0f
+#define XM_MGT_INT_REG_B0 0x12f0
#define XM_PRMBLE_ERR 2
#define XM_PRMBLE_WIDTH 1
#define XM_RMTFLT_LBN 1
@@ -589,7 +592,7 @@
#define XM_LCLFLT_WIDTH 1
/* XGXS/XAUI powerdown/reset register */
-#define XX_PWR_RST_REG_MAC 0x10
+#define XX_PWR_RST_REG 0x1300
#define XX_PWRDND_EN_LBN 15
#define XX_PWRDND_EN_WIDTH 1
@@ -619,7 +622,7 @@
#define XX_RST_XX_EN_WIDTH 1
/* XGXS/XAUI powerdown/reset control register */
-#define XX_SD_CTL_REG_MAC 0x11
+#define XX_SD_CTL_REG 0x1310
#define XX_HIDRVD_LBN 15
#define XX_HIDRVD_WIDTH 1
#define XX_LODRVD_LBN 14
@@ -645,7 +648,7 @@
#define XX_LPBKA_LBN 0
#define XX_LPBKA_WIDTH 1
-#define XX_TXDRV_CTL_REG_MAC 0x12
+#define XX_TXDRV_CTL_REG 0x1320
#define XX_DEQD_LBN 28
#define XX_DEQD_WIDTH 4
#define XX_DEQC_LBN 24
@@ -664,7 +667,7 @@
#define XX_DTXA_WIDTH 4
/* XAUI XGXS core status register */
-#define XX_CORE_STAT_REG_MAC 0x16
+#define XX_CORE_STAT_REG 0x1360
#define XX_FORCE_SIG_LBN 24
#define XX_FORCE_SIG_WIDTH 8
#define XX_FORCE_SIG_DECODE_FORCED 0xff
@@ -1127,7 +1130,28 @@ struct falcon_nvconfig_board_v2 {
__le16 board_revision;
} __packed;
-#define NVCONFIG_BASE 0x300
+/* Board configuration v3 extra information */
+struct falcon_nvconfig_board_v3 {
+ __le32 spi_device_type[2];
+} __packed;
+
+/* Bit numbers for spi_device_type */
+#define SPI_DEV_TYPE_SIZE_LBN 0
+#define SPI_DEV_TYPE_SIZE_WIDTH 5
+#define SPI_DEV_TYPE_ADDR_LEN_LBN 6
+#define SPI_DEV_TYPE_ADDR_LEN_WIDTH 2
+#define SPI_DEV_TYPE_ERASE_CMD_LBN 8
+#define SPI_DEV_TYPE_ERASE_CMD_WIDTH 8
+#define SPI_DEV_TYPE_ERASE_SIZE_LBN 16
+#define SPI_DEV_TYPE_ERASE_SIZE_WIDTH 5
+#define SPI_DEV_TYPE_BLOCK_SIZE_LBN 24
+#define SPI_DEV_TYPE_BLOCK_SIZE_WIDTH 5
+#define SPI_DEV_TYPE_FIELD(type, field) \
+ (((type) >> EFX_LOW_BIT(field)) & EFX_MASK32(EFX_WIDTH(field)))
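+/* EFX_LOW_BIT() and EFX_WIDTH() token-paste the _LBN and _WIDTH
+ * suffixes (see bitfield.h), so SPI_DEV_TYPE_FIELD(type,
+ * SPI_DEV_TYPE_SIZE) expands to ((type) >> 0) & EFX_MASK32(5),
+ * extracting the 5-bit size exponent.
+ */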
+
+#define NVCONFIG_OFFSET 0x300
+#define NVCONFIG_END 0x400
+
#define NVCONFIG_BOARD_MAGIC_NUM 0xFA1C
struct falcon_nvconfig {
efx_oword_t ee_vpd_cfg_reg; /* 0x300 */
@@ -1144,6 +1168,8 @@ struct falcon_nvconfig {
__le16 board_struct_ver;
__le16 board_checksum;
struct falcon_nvconfig_board_v2 board_v2;
+ efx_oword_t ee_base_page_reg; /* 0x3B0 */
+ struct falcon_nvconfig_board_v3 board_v3;
} __packed;
#endif /* EFX_FALCON_HWDEFS_H */
diff --git a/drivers/net/sfc/falcon_io.h b/drivers/net/sfc/falcon_io.h
index 6670cdfc41a..c16da3149fa 100644
--- a/drivers/net/sfc/falcon_io.h
+++ b/drivers/net/sfc/falcon_io.h
@@ -13,7 +13,6 @@
#include <linux/io.h>
#include <linux/spinlock.h>
-#include "net_driver.h"
/**************************************************************************
*
diff --git a/drivers/net/sfc/falcon_xmac.c b/drivers/net/sfc/falcon_xmac.c
index 55c0d9760be..d4012314dd0 100644
--- a/drivers/net/sfc/falcon_xmac.c
+++ b/drivers/net/sfc/falcon_xmac.c
@@ -23,56 +23,24 @@
/**************************************************************************
*
- * MAC register access
- *
- **************************************************************************/
-
-/* Offset of an XMAC register within Falcon */
-#define FALCON_XMAC_REG(mac_reg) \
- (FALCON_XMAC_REGBANK + ((mac_reg) * FALCON_XMAC_REG_SIZE))
-
-void falcon_xmac_writel(struct efx_nic *efx,
- efx_dword_t *value, unsigned int mac_reg)
-{
- efx_oword_t temp;
-
- EFX_POPULATE_OWORD_1(temp, MAC_DATA, EFX_DWORD_FIELD(*value, MAC_DATA));
- falcon_write(efx, &temp, FALCON_XMAC_REG(mac_reg));
-}
-
-void falcon_xmac_readl(struct efx_nic *efx,
- efx_dword_t *value, unsigned int mac_reg)
-{
- efx_oword_t temp;
-
- falcon_read(efx, &temp, FALCON_XMAC_REG(mac_reg));
- EFX_POPULATE_DWORD_1(*value, MAC_DATA, EFX_OWORD_FIELD(temp, MAC_DATA));
-}
-
-/**************************************************************************
- *
* MAC operations
*
*************************************************************************/
static int falcon_reset_xmac(struct efx_nic *efx)
{
- efx_dword_t reg;
+ efx_oword_t reg;
int count;
- EFX_POPULATE_DWORD_1(reg, XM_CORE_RST, 1);
- falcon_xmac_writel(efx, &reg, XM_GLB_CFG_REG_MAC);
+ EFX_POPULATE_OWORD_1(reg, XM_CORE_RST, 1);
+ falcon_write(efx, &reg, XM_GLB_CFG_REG);
	for (count = 0; count < 10000; count++) {	/* wait up to 100ms */
- falcon_xmac_readl(efx, &reg, XM_GLB_CFG_REG_MAC);
- if (EFX_DWORD_FIELD(reg, XM_CORE_RST) == 0)
+ falcon_read(efx, &reg, XM_GLB_CFG_REG);
+ if (EFX_OWORD_FIELD(reg, XM_CORE_RST) == 0)
return 0;
udelay(10);
}
- /* This often fails when DSP is disabled, ignore it */
- if (sfe4001_phy_flash_cfg != 0)
- return 0;
-
EFX_ERR(efx, "timed out waiting for XMAC core reset\n");
return -ETIMEDOUT;
}
@@ -80,25 +48,25 @@ static int falcon_reset_xmac(struct efx_nic *efx)
/* Configure the XAUI driver that is an output from Falcon */
static void falcon_setup_xaui(struct efx_nic *efx)
{
- efx_dword_t sdctl, txdrv;
+ efx_oword_t sdctl, txdrv;
/* Move the XAUI into low power, unless there is no PHY, in
* which case the XAUI will have to drive a cable. */
if (efx->phy_type == PHY_TYPE_NONE)
return;
- falcon_xmac_readl(efx, &sdctl, XX_SD_CTL_REG_MAC);
- EFX_SET_DWORD_FIELD(sdctl, XX_HIDRVD, XX_SD_CTL_DRV_DEFAULT);
- EFX_SET_DWORD_FIELD(sdctl, XX_LODRVD, XX_SD_CTL_DRV_DEFAULT);
- EFX_SET_DWORD_FIELD(sdctl, XX_HIDRVC, XX_SD_CTL_DRV_DEFAULT);
- EFX_SET_DWORD_FIELD(sdctl, XX_LODRVC, XX_SD_CTL_DRV_DEFAULT);
- EFX_SET_DWORD_FIELD(sdctl, XX_HIDRVB, XX_SD_CTL_DRV_DEFAULT);
- EFX_SET_DWORD_FIELD(sdctl, XX_LODRVB, XX_SD_CTL_DRV_DEFAULT);
- EFX_SET_DWORD_FIELD(sdctl, XX_HIDRVA, XX_SD_CTL_DRV_DEFAULT);
- EFX_SET_DWORD_FIELD(sdctl, XX_LODRVA, XX_SD_CTL_DRV_DEFAULT);
- falcon_xmac_writel(efx, &sdctl, XX_SD_CTL_REG_MAC);
-
- EFX_POPULATE_DWORD_8(txdrv,
+ falcon_read(efx, &sdctl, XX_SD_CTL_REG);
+ EFX_SET_OWORD_FIELD(sdctl, XX_HIDRVD, XX_SD_CTL_DRV_DEFAULT);
+ EFX_SET_OWORD_FIELD(sdctl, XX_LODRVD, XX_SD_CTL_DRV_DEFAULT);
+ EFX_SET_OWORD_FIELD(sdctl, XX_HIDRVC, XX_SD_CTL_DRV_DEFAULT);
+ EFX_SET_OWORD_FIELD(sdctl, XX_LODRVC, XX_SD_CTL_DRV_DEFAULT);
+ EFX_SET_OWORD_FIELD(sdctl, XX_HIDRVB, XX_SD_CTL_DRV_DEFAULT);
+ EFX_SET_OWORD_FIELD(sdctl, XX_LODRVB, XX_SD_CTL_DRV_DEFAULT);
+ EFX_SET_OWORD_FIELD(sdctl, XX_HIDRVA, XX_SD_CTL_DRV_DEFAULT);
+ EFX_SET_OWORD_FIELD(sdctl, XX_LODRVA, XX_SD_CTL_DRV_DEFAULT);
+ falcon_write(efx, &sdctl, XX_SD_CTL_REG);
+
+ EFX_POPULATE_OWORD_8(txdrv,
XX_DEQD, XX_TXDRV_DEQ_DEFAULT,
XX_DEQC, XX_TXDRV_DEQ_DEFAULT,
XX_DEQB, XX_TXDRV_DEQ_DEFAULT,
@@ -107,93 +75,21 @@ static void falcon_setup_xaui(struct efx_nic *efx)
XX_DTXC, XX_TXDRV_DTX_DEFAULT,
XX_DTXB, XX_TXDRV_DTX_DEFAULT,
XX_DTXA, XX_TXDRV_DTX_DEFAULT);
- falcon_xmac_writel(efx, &txdrv, XX_TXDRV_CTL_REG_MAC);
+ falcon_write(efx, &txdrv, XX_TXDRV_CTL_REG);
}
-static void falcon_hold_xaui_in_rst(struct efx_nic *efx)
-{
- efx_dword_t reg;
-
- EFX_ZERO_DWORD(reg);
- EFX_SET_DWORD_FIELD(reg, XX_PWRDNA_EN, 1);
- EFX_SET_DWORD_FIELD(reg, XX_PWRDNB_EN, 1);
- EFX_SET_DWORD_FIELD(reg, XX_PWRDNC_EN, 1);
- EFX_SET_DWORD_FIELD(reg, XX_PWRDND_EN, 1);
- EFX_SET_DWORD_FIELD(reg, XX_RSTPLLAB_EN, 1);
- EFX_SET_DWORD_FIELD(reg, XX_RSTPLLCD_EN, 1);
- EFX_SET_DWORD_FIELD(reg, XX_RESETA_EN, 1);
- EFX_SET_DWORD_FIELD(reg, XX_RESETB_EN, 1);
- EFX_SET_DWORD_FIELD(reg, XX_RESETC_EN, 1);
- EFX_SET_DWORD_FIELD(reg, XX_RESETD_EN, 1);
- EFX_SET_DWORD_FIELD(reg, XX_RSTXGXSRX_EN, 1);
- EFX_SET_DWORD_FIELD(reg, XX_RSTXGXSTX_EN, 1);
- falcon_xmac_writel(efx, &reg, XX_PWR_RST_REG_MAC);
- udelay(10);
-}
-
-static int _falcon_reset_xaui_a(struct efx_nic *efx)
-{
- efx_dword_t reg;
-
- falcon_hold_xaui_in_rst(efx);
- falcon_xmac_readl(efx, &reg, XX_PWR_RST_REG_MAC);
-
- /* Follow the RAMBUS XAUI data reset sequencing
- * Channels A and B first: power down, reset PLL, reset, clear
- */
- EFX_SET_DWORD_FIELD(reg, XX_PWRDNA_EN, 0);
- EFX_SET_DWORD_FIELD(reg, XX_PWRDNB_EN, 0);
- falcon_xmac_writel(efx, &reg, XX_PWR_RST_REG_MAC);
- udelay(10);
-
- EFX_SET_DWORD_FIELD(reg, XX_RSTPLLAB_EN, 0);
- falcon_xmac_writel(efx, &reg, XX_PWR_RST_REG_MAC);
- udelay(10);
-
- EFX_SET_DWORD_FIELD(reg, XX_RESETA_EN, 0);
- EFX_SET_DWORD_FIELD(reg, XX_RESETB_EN, 0);
- falcon_xmac_writel(efx, &reg, XX_PWR_RST_REG_MAC);
- udelay(10);
-
- /* Channels C and D: power down, reset PLL, reset, clear */
- EFX_SET_DWORD_FIELD(reg, XX_PWRDNC_EN, 0);
- EFX_SET_DWORD_FIELD(reg, XX_PWRDND_EN, 0);
- falcon_xmac_writel(efx, &reg, XX_PWR_RST_REG_MAC);
- udelay(10);
-
- EFX_SET_DWORD_FIELD(reg, XX_RSTPLLCD_EN, 0);
- falcon_xmac_writel(efx, &reg, XX_PWR_RST_REG_MAC);
- udelay(10);
-
- EFX_SET_DWORD_FIELD(reg, XX_RESETC_EN, 0);
- EFX_SET_DWORD_FIELD(reg, XX_RESETD_EN, 0);
- falcon_xmac_writel(efx, &reg, XX_PWR_RST_REG_MAC);
- udelay(10);
-
- /* Setup XAUI */
- falcon_setup_xaui(efx);
- udelay(10);
-
- /* Take XGXS out of reset */
- EFX_ZERO_DWORD(reg);
- falcon_xmac_writel(efx, &reg, XX_PWR_RST_REG_MAC);
- udelay(10);
-
- return 0;
-}
-
-static int _falcon_reset_xaui_b(struct efx_nic *efx)
+int falcon_reset_xaui(struct efx_nic *efx)
{
- efx_dword_t reg;
+ efx_oword_t reg;
int count;
EFX_POPULATE_DWORD_1(reg, XX_RST_XX_EN, 1);
- falcon_xmac_writel(efx, &reg, XX_PWR_RST_REG_MAC);
+ falcon_write(efx, &reg, XX_PWR_RST_REG);
/* Give some time for the link to establish */
	for (count = 0; count < 1000; count++) {	/* wait up to 10ms */
- falcon_xmac_readl(efx, &reg, XX_PWR_RST_REG_MAC);
- if (EFX_DWORD_FIELD(reg, XX_RST_XX_EN) == 0) {
+ falcon_read(efx, &reg, XX_PWR_RST_REG);
+ if (EFX_OWORD_FIELD(reg, XX_RST_XX_EN) == 0) {
falcon_setup_xaui(efx);
return 0;
}
@@ -203,55 +99,41 @@ static int _falcon_reset_xaui_b(struct efx_nic *efx)
return -ETIMEDOUT;
}
-int falcon_reset_xaui(struct efx_nic *efx)
+static bool falcon_xgmii_status(struct efx_nic *efx)
{
- int rc;
-
- if (EFX_WORKAROUND_9388(efx)) {
- falcon_hold_xaui_in_rst(efx);
- efx->phy_op->reset_xaui(efx);
- rc = _falcon_reset_xaui_a(efx);
- } else {
- rc = _falcon_reset_xaui_b(efx);
- }
- return rc;
-}
-
-static int falcon_xgmii_status(struct efx_nic *efx)
-{
- efx_dword_t reg;
+ efx_oword_t reg;
if (falcon_rev(efx) < FALCON_REV_B0)
- return 1;
+ return true;
/* The ISR latches, so clear it and re-read */
- falcon_xmac_readl(efx, &reg, XM_MGT_INT_REG_MAC_B0);
- falcon_xmac_readl(efx, &reg, XM_MGT_INT_REG_MAC_B0);
+ falcon_read(efx, &reg, XM_MGT_INT_REG_B0);
+ falcon_read(efx, &reg, XM_MGT_INT_REG_B0);
- if (EFX_DWORD_FIELD(reg, XM_LCLFLT) ||
- EFX_DWORD_FIELD(reg, XM_RMTFLT)) {
+ if (EFX_OWORD_FIELD(reg, XM_LCLFLT) ||
+ EFX_OWORD_FIELD(reg, XM_RMTFLT)) {
EFX_INFO(efx, "MGT_INT: "EFX_DWORD_FMT"\n", EFX_DWORD_VAL(reg));
- return 0;
+ return false;
}
- return 1;
+ return true;
}
-static void falcon_mask_status_intr(struct efx_nic *efx, int enable)
+static void falcon_mask_status_intr(struct efx_nic *efx, bool enable)
{
- efx_dword_t reg;
+ efx_oword_t reg;
if ((falcon_rev(efx) < FALCON_REV_B0) || LOOPBACK_INTERNAL(efx))
return;
/* Flush the ISR */
if (enable)
- falcon_xmac_readl(efx, &reg, XM_MGT_INT_REG_MAC_B0);
+ falcon_read(efx, &reg, XM_MGT_INT_REG_B0);
- EFX_POPULATE_DWORD_2(reg,
+ EFX_POPULATE_OWORD_2(reg,
XM_MSK_RMTFLT, !enable,
XM_MSK_LCLFLT, !enable);
- falcon_xmac_writel(efx, &reg, XM_MGT_INT_MSK_REG_MAC_B0);
+ falcon_write(efx, &reg, XM_MGT_INT_MSK_REG_B0);
}
int falcon_init_xmac(struct efx_nic *efx)
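
falcon_xgmii_status() above leans on the management ISR being read-to-clear: the first falcon_read() returns and clears whatever was latched since the last read, so only the second read reflects the current fault state. A stand-alone sketch of that idiom, with read_reg() as an invented stand-in for the register access:

#include <stdbool.h>
#include <stdint.h>

static uint32_t latched;	/* bits latched by the "hardware" */
static uint32_t live;		/* current fault state */

static uint32_t read_reg(void)
{
	uint32_t value = latched | live;
	latched = 0;		/* reading clears the latch */
	return value;
}

static bool fault_present(void)
{
	(void) read_reg();	/* discard stale latched bits */
	return read_reg() != 0;	/* sample only the live state */
}

int main(void)
{
	latched = 1;		/* stale fault from some time ago */
	live = 0;		/* the fault has since cleared */
	return fault_present();	/* 0: no current fault */
}
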
@@ -274,7 +156,7 @@ int falcon_init_xmac(struct efx_nic *efx)
if (rc)
goto fail2;
- falcon_mask_status_intr(efx, 1);
+ falcon_mask_status_intr(efx, true);
return 0;
fail2:
@@ -283,34 +165,34 @@ int falcon_init_xmac(struct efx_nic *efx)
return rc;
}
-int falcon_xaui_link_ok(struct efx_nic *efx)
+bool falcon_xaui_link_ok(struct efx_nic *efx)
{
- efx_dword_t reg;
- int align_done, sync_status, link_ok = 0;
+ efx_oword_t reg;
+ bool align_done, link_ok = false;
+ int sync_status;
if (LOOPBACK_INTERNAL(efx))
- return 1;
+ return true;
/* Read link status */
- falcon_xmac_readl(efx, &reg, XX_CORE_STAT_REG_MAC);
+ falcon_read(efx, &reg, XX_CORE_STAT_REG);
- align_done = EFX_DWORD_FIELD(reg, XX_ALIGN_DONE);
- sync_status = EFX_DWORD_FIELD(reg, XX_SYNC_STAT);
+ align_done = EFX_OWORD_FIELD(reg, XX_ALIGN_DONE);
+ sync_status = EFX_OWORD_FIELD(reg, XX_SYNC_STAT);
if (align_done && (sync_status == XX_SYNC_STAT_DECODE_SYNCED))
- link_ok = 1;
+ link_ok = true;
/* Clear link status ready for next read */
- EFX_SET_DWORD_FIELD(reg, XX_COMMA_DET, XX_COMMA_DET_RESET);
- EFX_SET_DWORD_FIELD(reg, XX_CHARERR, XX_CHARERR_RESET);
- EFX_SET_DWORD_FIELD(reg, XX_DISPERR, XX_DISPERR_RESET);
- falcon_xmac_writel(efx, &reg, XX_CORE_STAT_REG_MAC);
+ EFX_SET_OWORD_FIELD(reg, XX_COMMA_DET, XX_COMMA_DET_RESET);
+ EFX_SET_OWORD_FIELD(reg, XX_CHARERR, XX_CHARERR_RESET);
+ EFX_SET_OWORD_FIELD(reg, XX_DISPERR, XX_DISPERR_RESET);
+ falcon_write(efx, &reg, XX_CORE_STAT_REG);
/* If the link is up, then check the phy side of the xaui link
	 * (error conditions from the wire side propagate back through
* the phy to the xaui side). */
if (efx->link_up && link_ok) {
- int has_phyxs = efx->phy_op->mmds & (1 << MDIO_MMD_PHYXS);
- if (has_phyxs)
+ if (efx->phy_op->mmds & (1 << MDIO_MMD_PHYXS))
link_ok = mdio_clause45_phyxgxs_lane_sync(efx);
}
@@ -325,15 +207,15 @@ int falcon_xaui_link_ok(struct efx_nic *efx)
static void falcon_reconfigure_xmac_core(struct efx_nic *efx)
{
unsigned int max_frame_len;
- efx_dword_t reg;
- int rx_fc = (efx->flow_control & EFX_FC_RX) ? 1 : 0;
+ efx_oword_t reg;
+ bool rx_fc = !!(efx->flow_control & EFX_FC_RX);
	/* Configure MAC - cut-through mode is hard-wired on */
EFX_POPULATE_DWORD_3(reg,
XM_RX_JUMBO_MODE, 1,
XM_TX_STAT_EN, 1,
XM_RX_STAT_EN, 1);
- falcon_xmac_writel(efx, &reg, XM_GLB_CFG_REG_MAC);
+ falcon_write(efx, &reg, XM_GLB_CFG_REG);
/* Configure TX */
EFX_POPULATE_DWORD_6(reg,
@@ -343,7 +225,7 @@ static void falcon_reconfigure_xmac_core(struct efx_nic *efx)
XM_TXCRC, 1,
XM_FCNTL, 1,
XM_IPG, 0x3);
- falcon_xmac_writel(efx, &reg, XM_TX_CFG_REG_MAC);
+ falcon_write(efx, &reg, XM_TX_CFG_REG);
/* Configure RX */
EFX_POPULATE_DWORD_5(reg,
@@ -352,21 +234,21 @@ static void falcon_reconfigure_xmac_core(struct efx_nic *efx)
XM_ACPT_ALL_MCAST, 1,
XM_ACPT_ALL_UCAST, efx->promiscuous,
XM_PASS_CRC_ERR, 1);
- falcon_xmac_writel(efx, &reg, XM_RX_CFG_REG_MAC);
+ falcon_write(efx, &reg, XM_RX_CFG_REG);
/* Set frame length */
max_frame_len = EFX_MAX_FRAME_LEN(efx->net_dev->mtu);
EFX_POPULATE_DWORD_1(reg, XM_MAX_RX_FRM_SIZE, max_frame_len);
- falcon_xmac_writel(efx, &reg, XM_RX_PARAM_REG_MAC);
+ falcon_write(efx, &reg, XM_RX_PARAM_REG);
EFX_POPULATE_DWORD_2(reg,
XM_MAX_TX_FRM_SIZE, max_frame_len,
XM_TX_JUMBO_MODE, 1);
- falcon_xmac_writel(efx, &reg, XM_TX_PARAM_REG_MAC);
+ falcon_write(efx, &reg, XM_TX_PARAM_REG);
EFX_POPULATE_DWORD_2(reg,
XM_PAUSE_TIME, 0xfffe, /* MAX PAUSE TIME */
- XM_DIS_FCNTL, rx_fc ? 0 : 1);
- falcon_xmac_writel(efx, &reg, XM_FC_REG_MAC);
+ XM_DIS_FCNTL, !rx_fc);
+ falcon_write(efx, &reg, XM_FC_REG);
/* Set MAC address */
EFX_POPULATE_DWORD_4(reg,
@@ -374,83 +256,75 @@ static void falcon_reconfigure_xmac_core(struct efx_nic *efx)
XM_ADR_1, efx->net_dev->dev_addr[1],
XM_ADR_2, efx->net_dev->dev_addr[2],
XM_ADR_3, efx->net_dev->dev_addr[3]);
- falcon_xmac_writel(efx, &reg, XM_ADR_LO_REG_MAC);
+ falcon_write(efx, &reg, XM_ADR_LO_REG);
EFX_POPULATE_DWORD_2(reg,
XM_ADR_4, efx->net_dev->dev_addr[4],
XM_ADR_5, efx->net_dev->dev_addr[5]);
- falcon_xmac_writel(efx, &reg, XM_ADR_HI_REG_MAC);
+ falcon_write(efx, &reg, XM_ADR_HI_REG);
}
static void falcon_reconfigure_xgxs_core(struct efx_nic *efx)
{
- efx_dword_t reg;
- int xgxs_loopback = (efx->loopback_mode == LOOPBACK_XGXS) ? 1 : 0;
- int xaui_loopback = (efx->loopback_mode == LOOPBACK_XAUI) ? 1 : 0;
- int xgmii_loopback =
- (efx->loopback_mode == LOOPBACK_XGMII) ? 1 : 0;
+ efx_oword_t reg;
+ bool xgxs_loopback = (efx->loopback_mode == LOOPBACK_XGXS);
+ bool xaui_loopback = (efx->loopback_mode == LOOPBACK_XAUI);
+ bool xgmii_loopback = (efx->loopback_mode == LOOPBACK_XGMII);
/* XGXS block is flaky and will need to be reset if moving
	 * into or out of XGMII, XGXS or XAUI loopbacks. */
if (EFX_WORKAROUND_5147(efx)) {
- int old_xgmii_loopback, old_xgxs_loopback, old_xaui_loopback;
- int reset_xgxs;
+ bool old_xgmii_loopback, old_xgxs_loopback, old_xaui_loopback;
+ bool reset_xgxs;
- falcon_xmac_readl(efx, &reg, XX_CORE_STAT_REG_MAC);
- old_xgxs_loopback = EFX_DWORD_FIELD(reg, XX_XGXS_LB_EN);
- old_xgmii_loopback = EFX_DWORD_FIELD(reg, XX_XGMII_LB_EN);
+ falcon_read(efx, &reg, XX_CORE_STAT_REG);
+ old_xgxs_loopback = EFX_OWORD_FIELD(reg, XX_XGXS_LB_EN);
+ old_xgmii_loopback = EFX_OWORD_FIELD(reg, XX_XGMII_LB_EN);
- falcon_xmac_readl(efx, &reg, XX_SD_CTL_REG_MAC);
- old_xaui_loopback = EFX_DWORD_FIELD(reg, XX_LPBKA);
+ falcon_read(efx, &reg, XX_SD_CTL_REG);
+ old_xaui_loopback = EFX_OWORD_FIELD(reg, XX_LPBKA);
/* The PHY driver may have turned XAUI off */
reset_xgxs = ((xgxs_loopback != old_xgxs_loopback) ||
(xaui_loopback != old_xaui_loopback) ||
(xgmii_loopback != old_xgmii_loopback));
- if (reset_xgxs) {
- falcon_xmac_readl(efx, &reg, XX_PWR_RST_REG_MAC);
- EFX_SET_DWORD_FIELD(reg, XX_RSTXGXSTX_EN, 1);
- EFX_SET_DWORD_FIELD(reg, XX_RSTXGXSRX_EN, 1);
- falcon_xmac_writel(efx, &reg, XX_PWR_RST_REG_MAC);
- udelay(1);
- EFX_SET_DWORD_FIELD(reg, XX_RSTXGXSTX_EN, 0);
- EFX_SET_DWORD_FIELD(reg, XX_RSTXGXSRX_EN, 0);
- falcon_xmac_writel(efx, &reg, XX_PWR_RST_REG_MAC);
- udelay(1);
- }
+
+ if (reset_xgxs)
+ falcon_reset_xaui(efx);
}
- falcon_xmac_readl(efx, &reg, XX_CORE_STAT_REG_MAC);
- EFX_SET_DWORD_FIELD(reg, XX_FORCE_SIG,
+ falcon_read(efx, &reg, XX_CORE_STAT_REG);
+ EFX_SET_OWORD_FIELD(reg, XX_FORCE_SIG,
(xgxs_loopback || xaui_loopback) ?
XX_FORCE_SIG_DECODE_FORCED : 0);
- EFX_SET_DWORD_FIELD(reg, XX_XGXS_LB_EN, xgxs_loopback);
- EFX_SET_DWORD_FIELD(reg, XX_XGMII_LB_EN, xgmii_loopback);
- falcon_xmac_writel(efx, &reg, XX_CORE_STAT_REG_MAC);
-
- falcon_xmac_readl(efx, &reg, XX_SD_CTL_REG_MAC);
- EFX_SET_DWORD_FIELD(reg, XX_LPBKD, xaui_loopback);
- EFX_SET_DWORD_FIELD(reg, XX_LPBKC, xaui_loopback);
- EFX_SET_DWORD_FIELD(reg, XX_LPBKB, xaui_loopback);
- EFX_SET_DWORD_FIELD(reg, XX_LPBKA, xaui_loopback);
- falcon_xmac_writel(efx, &reg, XX_SD_CTL_REG_MAC);
+ EFX_SET_OWORD_FIELD(reg, XX_XGXS_LB_EN, xgxs_loopback);
+ EFX_SET_OWORD_FIELD(reg, XX_XGMII_LB_EN, xgmii_loopback);
+ falcon_write(efx, &reg, XX_CORE_STAT_REG);
+
+ falcon_read(efx, &reg, XX_SD_CTL_REG);
+ EFX_SET_OWORD_FIELD(reg, XX_LPBKD, xaui_loopback);
+ EFX_SET_OWORD_FIELD(reg, XX_LPBKC, xaui_loopback);
+ EFX_SET_OWORD_FIELD(reg, XX_LPBKB, xaui_loopback);
+ EFX_SET_OWORD_FIELD(reg, XX_LPBKA, xaui_loopback);
+ falcon_write(efx, &reg, XX_SD_CTL_REG);
}
/* If the Falcon side of the Falcon-PHY XAUI link fails to come
 * back up, bash it until it does */
-static int falcon_check_xaui_link_up(struct efx_nic *efx)
+static bool falcon_check_xaui_link_up(struct efx_nic *efx)
{
int max_tries, tries;
tries = EFX_WORKAROUND_5147(efx) ? 5 : 1;
max_tries = tries;
if ((efx->loopback_mode == LOOPBACK_NETWORK) ||
- (efx->phy_type == PHY_TYPE_NONE))
- return 0;
+ (efx->phy_type == PHY_TYPE_NONE) ||
+ efx_phy_mode_disabled(efx->phy_mode))
+ return false;
while (tries) {
if (falcon_xaui_link_ok(efx))
- return 1;
+ return true;
EFX_LOG(efx, "%s Clobbering XAUI (%d tries left).\n",
__func__, tries);
@@ -461,18 +335,22 @@ static int falcon_check_xaui_link_up(struct efx_nic *efx)
EFX_LOG(efx, "Failed to bring XAUI link back up in %d tries!\n",
max_tries);
- return 0;
+ return false;
}
void falcon_reconfigure_xmac(struct efx_nic *efx)
{
- int xaui_link_ok;
+ bool xaui_link_ok;
- falcon_mask_status_intr(efx, 0);
+ falcon_mask_status_intr(efx, false);
falcon_deconfigure_mac_wrapper(efx);
- efx->tx_disabled = LOOPBACK_INTERNAL(efx);
+ /* Reconfigure the PHY, disabling transmit in mac level loopback. */
+ if (LOOPBACK_INTERNAL(efx))
+ efx->phy_mode |= PHY_MODE_TX_DISABLED;
+ else
+ efx->phy_mode &= ~PHY_MODE_TX_DISABLED;
efx->phy_op->reconfigure(efx);
falcon_reconfigure_xgxs_core(efx);
@@ -484,7 +362,7 @@ void falcon_reconfigure_xmac(struct efx_nic *efx)
xaui_link_ok = falcon_check_xaui_link_up(efx);
if (xaui_link_ok && efx->link_up)
- falcon_mask_status_intr(efx, 1);
+ falcon_mask_status_intr(efx, true);
}
void falcon_fini_xmac(struct efx_nic *efx)
@@ -554,21 +432,23 @@ void falcon_update_stats_xmac(struct efx_nic *efx)
/* Update derived statistics */
mac_stats->tx_good_bytes =
- (mac_stats->tx_bytes - mac_stats->tx_bad_bytes);
+ (mac_stats->tx_bytes - mac_stats->tx_bad_bytes -
+ mac_stats->tx_control * 64);
mac_stats->rx_bad_bytes =
- (mac_stats->rx_bytes - mac_stats->rx_good_bytes);
+ (mac_stats->rx_bytes - mac_stats->rx_good_bytes -
+ mac_stats->rx_control * 64);
}
int falcon_check_xmac(struct efx_nic *efx)
{
- unsigned xaui_link_ok;
+ bool xaui_link_ok;
int rc;
if ((efx->loopback_mode == LOOPBACK_NETWORK) ||
- (efx->phy_type == PHY_TYPE_NONE))
+ efx_phy_mode_disabled(efx->phy_mode))
return 0;
- falcon_mask_status_intr(efx, 0);
+ falcon_mask_status_intr(efx, false);
xaui_link_ok = falcon_xaui_link_ok(efx);
if (EFX_WORKAROUND_5147(efx) && !xaui_link_ok)
@@ -579,7 +459,7 @@ int falcon_check_xmac(struct efx_nic *efx)
/* Unmask interrupt if everything was (and still is) ok */
if (xaui_link_ok && efx->link_up)
- falcon_mask_status_intr(efx, 1);
+ falcon_mask_status_intr(efx, true);
return rc;
}
@@ -620,7 +500,7 @@ int falcon_xmac_set_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd)
int falcon_xmac_set_pause(struct efx_nic *efx, enum efx_fc_type flow_control)
{
- int reset;
+ bool reset;
if (flow_control & EFX_FC_AUTO) {
EFX_LOG(efx, "10G does not support flow control "
diff --git a/drivers/net/sfc/mac.h b/drivers/net/sfc/mac.h
index edd07d4dee1..a31571c6913 100644
--- a/drivers/net/sfc/mac.h
+++ b/drivers/net/sfc/mac.h
@@ -13,10 +13,6 @@
#include "net_driver.h"
-extern void falcon_xmac_writel(struct efx_nic *efx,
- efx_dword_t *value, unsigned int mac_reg);
-extern void falcon_xmac_readl(struct efx_nic *efx,
- efx_dword_t *value, unsigned int mac_reg);
extern int falcon_init_xmac(struct efx_nic *efx);
extern void falcon_reconfigure_xmac(struct efx_nic *efx);
extern void falcon_update_stats_xmac(struct efx_nic *efx);
diff --git a/drivers/net/sfc/mdio_10g.c b/drivers/net/sfc/mdio_10g.c
index c4f540e93b7..003e48dcb2f 100644
--- a/drivers/net/sfc/mdio_10g.c
+++ b/drivers/net/sfc/mdio_10g.c
@@ -159,20 +159,21 @@ int mdio_clause45_check_mmds(struct efx_nic *efx,
return 0;
}
-int mdio_clause45_links_ok(struct efx_nic *efx, unsigned int mmd_mask)
+bool mdio_clause45_links_ok(struct efx_nic *efx, unsigned int mmd_mask)
{
int phy_id = efx->mii.phy_id;
int status;
- int ok = 1;
+ bool ok = true;
int mmd = 0;
- int good;
/* If the port is in loopback, then we should only consider a subset
* of mmd's */
if (LOOPBACK_INTERNAL(efx))
- return 1;
+ return true;
else if (efx->loopback_mode == LOOPBACK_NETWORK)
- return 0;
+ return false;
+ else if (efx_phy_mode_disabled(efx->phy_mode))
+ return false;
else if (efx->loopback_mode == LOOPBACK_PHYXS)
mmd_mask &= ~(MDIO_MMDREG_DEVS0_PHYXS |
MDIO_MMDREG_DEVS0_PCS |
@@ -192,8 +193,7 @@ int mdio_clause45_links_ok(struct efx_nic *efx, unsigned int mmd_mask)
status = mdio_clause45_read(efx, phy_id,
mmd, MDIO_MMDREG_STAT1);
- good = status & (1 << MDIO_MMDREG_STAT1_LINK_LBN);
- ok = ok && good;
+ ok = ok && (status & (1 << MDIO_MMDREG_STAT1_LINK_LBN));
}
mmd_mask = (mmd_mask >> 1);
mmd++;
@@ -208,7 +208,7 @@ void mdio_clause45_transmit_disable(struct efx_nic *efx)
ctrl1 = ctrl2 = mdio_clause45_read(efx, phy_id, MDIO_MMD_PMAPMD,
MDIO_MMDREG_TXDIS);
- if (efx->tx_disabled)
+ if (efx->phy_mode & PHY_MODE_TX_DISABLED)
ctrl2 |= (1 << MDIO_MMDREG_TXDIS_GLOBAL_LBN);
else
ctrl1 &= ~(1 << MDIO_MMDREG_TXDIS_GLOBAL_LBN);
diff --git a/drivers/net/sfc/mdio_10g.h b/drivers/net/sfc/mdio_10g.h
index cb99f3f4491..19c42eaf7fb 100644
--- a/drivers/net/sfc/mdio_10g.h
+++ b/drivers/net/sfc/mdio_10g.h
@@ -199,18 +199,19 @@ static inline u32 mdio_clause45_read_id(struct efx_nic *efx, int mmd)
return (id_hi << 16) | (id_low);
}
-static inline int mdio_clause45_phyxgxs_lane_sync(struct efx_nic *efx)
+static inline bool mdio_clause45_phyxgxs_lane_sync(struct efx_nic *efx)
{
- int i, sync, lane_status;
+ int i, lane_status;
+ bool sync;
for (i = 0; i < 2; ++i)
lane_status = mdio_clause45_read(efx, efx->mii.phy_id,
MDIO_MMD_PHYXS,
MDIO_PHYXS_LANE_STATE);
- sync = (lane_status & (1 << MDIO_PHYXS_LANE_ALIGNED_LBN)) != 0;
+ sync = !!(lane_status & (1 << MDIO_PHYXS_LANE_ALIGNED_LBN));
if (!sync)
- EFX_INFO(efx, "XGXS lane status: %x\n", lane_status);
+ EFX_LOG(efx, "XGXS lane status: %x\n", lane_status);
return sync;
}
@@ -230,8 +231,8 @@ int mdio_clause45_check_mmds(struct efx_nic *efx,
unsigned int mmd_mask, unsigned int fatal_mask);
/* Check the link status of specified mmds in bit mask */
-extern int mdio_clause45_links_ok(struct efx_nic *efx,
- unsigned int mmd_mask);
+extern bool mdio_clause45_links_ok(struct efx_nic *efx,
+ unsigned int mmd_mask);
/* Generic transmit disable support though PMAPMD */
extern void mdio_clause45_transmit_disable(struct efx_nic *efx);
diff --git a/drivers/net/sfc/net_driver.h b/drivers/net/sfc/net_driver.h
index 219c74a772c..cdb11fad605 100644
--- a/drivers/net/sfc/net_driver.h
+++ b/drivers/net/sfc/net_driver.h
@@ -88,9 +88,12 @@ do {if (net_ratelimit()) EFX_LOG(efx, fmt, ##args); } while (0)
**************************************************************************/
#define EFX_MAX_CHANNELS 32
-#define EFX_MAX_TX_QUEUES 1
#define EFX_MAX_RX_QUEUES EFX_MAX_CHANNELS
+#define EFX_TX_QUEUE_OFFLOAD_CSUM 0
+#define EFX_TX_QUEUE_NO_CSUM 1
+#define EFX_TX_QUEUE_COUNT 2
+
/**
* struct efx_special_buffer - An Efx special buffer
* @addr: CPU base address of the buffer
@@ -127,7 +130,6 @@ struct efx_special_buffer {
* This field is zero when the queue slot is empty.
* @continuation: True if this fragment is not the end of a packet.
* @unmap_single: True if pci_unmap_single should be used.
- * @unmap_addr: DMA address to unmap
* @unmap_len: Length of this fragment to unmap
*/
struct efx_tx_buffer {
@@ -135,9 +137,8 @@ struct efx_tx_buffer {
struct efx_tso_header *tsoh;
dma_addr_t dma_addr;
unsigned short len;
- unsigned char continuation;
- unsigned char unmap_single;
- dma_addr_t unmap_addr;
+ bool continuation;
+ bool unmap_single;
unsigned short unmap_len;
};
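
With the per-queue "used" flags gone, each port has exactly EFX_TX_QUEUE_COUNT TX queues, split by checksum-offload requirement (see the EFX_TX_QUEUE_* constants above). An illustrative selection helper, not the driver's literal xmit path:

#include <stdbool.h>

#define EFX_TX_QUEUE_OFFLOAD_CSUM 0
#define EFX_TX_QUEUE_NO_CSUM 1

/* Pick a queue index: packets wanting hardware checksum offload use
 * the offload queue, everything else the no-checksum queue. */
static inline int efx_pick_tx_queue(bool want_csum_offload)
{
	return want_csum_offload ? EFX_TX_QUEUE_OFFLOAD_CSUM
				 : EFX_TX_QUEUE_NO_CSUM;
}

int main(void)
{
	return efx_pick_tx_queue(true);	/* 0: the csum-offload queue */
}
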
@@ -156,13 +157,13 @@ struct efx_tx_buffer {
*
* @efx: The associated Efx NIC
* @queue: DMA queue number
- * @used: Queue is used by net driver
* @channel: The associated channel
* @buffer: The software buffer ring
* @txd: The hardware descriptor ring
+ * @flushed: Used when handling queue flushing
* @read_count: Current read pointer.
* This is the number of buffers that have been removed from both rings.
- * @stopped: Stopped flag.
+ * @stopped: Stopped count.
* Set if this TX queue is currently stopping its port.
* @insert_count: Current insert pointer
* This is the number of buffers that have been added to the
@@ -188,11 +189,11 @@ struct efx_tx_queue {
/* Members which don't change on the fast path */
struct efx_nic *efx ____cacheline_aligned_in_smp;
int queue;
- int used;
struct efx_channel *channel;
struct efx_nic *nic;
struct efx_tx_buffer *buffer;
struct efx_special_buffer txd;
+ bool flushed;
/* Members used mainly on the completion path */
unsigned int read_count ____cacheline_aligned_in_smp;
@@ -232,7 +233,6 @@ struct efx_rx_buffer {
* struct efx_rx_queue - An Efx RX queue
* @efx: The associated Efx NIC
* @queue: DMA queue number
- * @used: Queue is used by net driver
* @channel: The associated channel
* @buffer: The software buffer ring
* @rxd: The hardware descriptor ring
@@ -262,11 +262,11 @@ struct efx_rx_buffer {
* the remaining space in the allocation.
* @buf_dma_addr: Page's DMA address.
* @buf_data: Page's host address.
+ * @flushed: Used when handling queue flushing
*/
struct efx_rx_queue {
struct efx_nic *efx;
int queue;
- int used;
struct efx_channel *channel;
struct efx_rx_buffer *buffer;
struct efx_special_buffer rxd;
@@ -288,6 +288,7 @@ struct efx_rx_queue {
struct page *buf_page;
dma_addr_t buf_dma_addr;
char *buf_data;
+ bool flushed;
};
/**
@@ -325,12 +326,10 @@ enum efx_rx_alloc_method {
* queue.
*
* @efx: Associated Efx NIC
- * @evqnum: Event queue number
* @channel: Channel instance number
* @used_flags: Channel is used by net driver
* @enabled: Channel enabled indicator
* @irq: IRQ number (MSI and MSI-X only)
- * @has_interrupt: Channel has an interrupt
* @irq_moderation: IRQ moderation value (in us)
* @napi_dev: Net device used with NAPI
* @napi_str: NAPI control structure
@@ -357,17 +356,14 @@ enum efx_rx_alloc_method {
*/
struct efx_channel {
struct efx_nic *efx;
- int evqnum;
int channel;
int used_flags;
- int enabled;
+ bool enabled;
int irq;
- unsigned int has_interrupt;
unsigned int irq_moderation;
struct net_device *napi_dev;
struct napi_struct napi_str;
- struct work_struct reset_work;
- int work_pending;
+ bool work_pending;
struct efx_special_buffer eventq;
unsigned int eventq_read_ptr;
unsigned int last_eventq_read_ptr;
@@ -390,7 +386,7 @@ struct efx_channel {
* access with prefetches.
*/
struct efx_rx_buffer *rx_pkt;
- int rx_pkt_csummed;
+ bool rx_pkt_csummed;
};
@@ -403,8 +399,8 @@ struct efx_channel {
*/
struct efx_blinker {
int led_num;
- int state;
- int resubmit;
+ bool state;
+ bool resubmit;
struct timer_list timer;
};
@@ -432,8 +428,8 @@ struct efx_board {
* have a separate init callback that happens later than
* board init. */
int (*init_leds)(struct efx_nic *efx);
- void (*set_fault_led) (struct efx_nic *efx, int state);
- void (*blink) (struct efx_nic *efx, int start);
+ void (*set_fault_led) (struct efx_nic *efx, bool state);
+ void (*blink) (struct efx_nic *efx, bool start);
void (*fini) (struct efx_nic *nic);
struct efx_blinker blinker;
struct i2c_client *hwmon_client, *ioexp_client;
@@ -467,8 +463,7 @@ enum nic_state {
STATE_INIT = 0,
STATE_RUNNING = 1,
STATE_FINI = 2,
- STATE_RESETTING = 3, /* rtnl_lock always held */
- STATE_DISABLED = 4,
+ STATE_DISABLED = 3,
STATE_MAX,
};
@@ -479,7 +474,7 @@ enum nic_state {
* This is the equivalent of NET_IP_ALIGN [which controls the alignment
* of the skb->head for hardware DMA].
*/
-#if defined(__i386__) || defined(__x86_64__)
+#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
#define EFX_PAGE_IP_ALIGN 0
#else
#define EFX_PAGE_IP_ALIGN NET_IP_ALIGN
@@ -512,7 +507,6 @@ enum efx_fc_type {
* @clear_interrupt: Clear down interrupt
* @blink: Blink LEDs
* @check_hw: Check hardware
- * @reset_xaui: Reset XAUI side of PHY for (software sequenced reset)
* @mmds: MMD presence mask
* @loopbacks: Supported loopback modes mask
*/
@@ -522,11 +516,28 @@ struct efx_phy_operations {
void (*reconfigure) (struct efx_nic *efx);
void (*clear_interrupt) (struct efx_nic *efx);
int (*check_hw) (struct efx_nic *efx);
- void (*reset_xaui) (struct efx_nic *efx);
+ int (*test) (struct efx_nic *efx);
int mmds;
unsigned loopbacks;
};
+/**
+ * enum efx_phy_mode - PHY operating mode flags
+ * @PHY_MODE_NORMAL: on and should pass traffic
+ * @PHY_MODE_TX_DISABLED: on with TX disabled
+ * @PHY_MODE_SPECIAL: on but will not pass traffic
+ */
+enum efx_phy_mode {
+ PHY_MODE_NORMAL = 0,
+ PHY_MODE_TX_DISABLED = 1,
+ PHY_MODE_SPECIAL = 8,
+};
+
+static inline bool efx_phy_mode_disabled(enum efx_phy_mode mode)
+{
+ return !!(mode & ~PHY_MODE_TX_DISABLED);
+}
+
/*
* Efx extended statistics
*
@@ -632,7 +643,7 @@ union efx_multicast_hash {
* @tx_queue: TX DMA queues
* @rx_queue: RX DMA queues
* @channel: Channels
- * @rss_queues: Number of RSS queues
+ * @n_rx_queues: Number of RX queues
* @rx_buffer_len: RX buffer length
* @rx_buffer_order: Order (log2) of number of pages for each RX buffer
* @irq_status: Interrupt status buffer
@@ -640,15 +651,20 @@ union efx_multicast_hash {
* This register is written with the SMP processor ID whenever an
* interrupt is handled. It is used by falcon_test_interrupt()
* to verify that an interrupt has occurred.
+ * @spi_flash: SPI flash device
+ * This field will be %NULL if no flash device is present.
+ * @spi_eeprom: SPI EEPROM device
+ * This field will be %NULL if no EEPROM device is present.
* @n_rx_nodesc_drop_cnt: RX no descriptor drop count
 * @nic_data: Hardware dependent state
- * @mac_lock: MAC access lock. Protects @port_enabled, efx_monitor() and
- * efx_reconfigure_port()
+ * @mac_lock: MAC access lock. Protects @port_enabled, @phy_mode,
+ * @port_inhibited, efx_monitor() and efx_reconfigure_port()
* @port_enabled: Port enabled indicator.
* Serialises efx_stop_all(), efx_start_all() and efx_monitor() and
* efx_reconfigure_work with kernel interfaces. Safe to read under any
* one of the rtnl_lock, mac_lock, or netif_tx_lock, but all three must
* be held to modify it.
+ * @port_inhibited: If set, the netif_carrier is always off. Serialised by @mac_lock
* @port_initialized: Port initialized?
* @net_dev: Operating system network device. Consider holding the rtnl lock
* @rx_checksum_enabled: RX checksumming enabled
@@ -658,14 +674,16 @@ union efx_multicast_hash {
* can provide. Generic code converts these into a standard
* &struct net_device_stats.
* @stats_buffer: DMA buffer for statistics
- * @stats_lock: Statistics update lock
+ * @stats_lock: Statistics update lock. Serialises statistics fetches
+ * @stats_enabled: Clear to temporarily disable statistics fetches.
+ * Serialised by @stats_lock
* @mac_address: Permanent MAC address
* @phy_type: PHY type
* @phy_lock: PHY access lock
* @phy_op: PHY interface
* @phy_data: PHY private data (including PHY-specific stats)
* @mii: PHY interface
- * @tx_disabled: PHY transmitter turned off
+ * @phy_mode: PHY operating mode. Serialised by @mac_lock.
* @link_up: Link status
* @link_options: Link options (MII/GMII format)
* @n_link_state_changes: Number of times the link has changed state
@@ -700,27 +718,31 @@ struct efx_nic {
enum nic_state state;
enum reset_type reset_pending;
- struct efx_tx_queue tx_queue[EFX_MAX_TX_QUEUES];
+ struct efx_tx_queue tx_queue[EFX_TX_QUEUE_COUNT];
struct efx_rx_queue rx_queue[EFX_MAX_RX_QUEUES];
struct efx_channel channel[EFX_MAX_CHANNELS];
- int rss_queues;
+ int n_rx_queues;
unsigned int rx_buffer_len;
unsigned int rx_buffer_order;
struct efx_buffer irq_status;
volatile signed int last_irq_cpu;
+ struct efx_spi_device *spi_flash;
+ struct efx_spi_device *spi_eeprom;
+
unsigned n_rx_nodesc_drop_cnt;
struct falcon_nic_data *nic_data;
struct mutex mac_lock;
- int port_enabled;
+ bool port_enabled;
+ bool port_inhibited;
- int port_initialized;
+ bool port_initialized;
struct net_device *net_dev;
- int rx_checksum_enabled;
+ bool rx_checksum_enabled;
atomic_t netif_stop_count;
spinlock_t netif_stop_lock;
@@ -728,6 +750,7 @@ struct efx_nic {
struct efx_mac_stats mac_stats;
struct efx_buffer stats_buffer;
spinlock_t stats_lock;
+ bool stats_enabled;
unsigned char mac_address[ETH_ALEN];
@@ -736,13 +759,13 @@ struct efx_nic {
struct efx_phy_operations *phy_op;
void *phy_data;
struct mii_if_info mii;
- unsigned tx_disabled;
+ enum efx_phy_mode phy_mode;
- int link_up;
+ bool link_up;
unsigned int link_options;
unsigned int n_link_state_changes;
- int promiscuous;
+ bool promiscuous;
union efx_multicast_hash multicast_hash;
enum efx_fc_type flow_control;
struct work_struct reconfigure_work;
@@ -829,50 +852,33 @@ struct efx_nic_type {
continue; \
else
-/* Iterate over all used channels with interrupts */
-#define efx_for_each_channel_with_interrupt(_channel, _efx) \
- for (_channel = &_efx->channel[0]; \
- _channel < &_efx->channel[EFX_MAX_CHANNELS]; \
- _channel++) \
- if (!(_channel->used_flags && _channel->has_interrupt)) \
- continue; \
- else
-
/* Iterate over all used TX queues */
#define efx_for_each_tx_queue(_tx_queue, _efx) \
for (_tx_queue = &_efx->tx_queue[0]; \
- _tx_queue < &_efx->tx_queue[EFX_MAX_TX_QUEUES]; \
- _tx_queue++) \
- if (!_tx_queue->used) \
- continue; \
- else
+ _tx_queue < &_efx->tx_queue[EFX_TX_QUEUE_COUNT]; \
+ _tx_queue++)
/* Iterate over all TX queues belonging to a channel */
#define efx_for_each_channel_tx_queue(_tx_queue, _channel) \
for (_tx_queue = &_channel->efx->tx_queue[0]; \
- _tx_queue < &_channel->efx->tx_queue[EFX_MAX_TX_QUEUES]; \
+ _tx_queue < &_channel->efx->tx_queue[EFX_TX_QUEUE_COUNT]; \
_tx_queue++) \
- if ((!_tx_queue->used) || \
- (_tx_queue->channel != _channel)) \
+ if (_tx_queue->channel != _channel) \
continue; \
else
/* Iterate over all used RX queues */
#define efx_for_each_rx_queue(_rx_queue, _efx) \
for (_rx_queue = &_efx->rx_queue[0]; \
- _rx_queue < &_efx->rx_queue[EFX_MAX_RX_QUEUES]; \
- _rx_queue++) \
- if (!_rx_queue->used) \
- continue; \
- else
+ _rx_queue < &_efx->rx_queue[_efx->n_rx_queues]; \
+ _rx_queue++)
/* Iterate over all RX queues belonging to a channel */
#define efx_for_each_channel_rx_queue(_rx_queue, _channel) \
- for (_rx_queue = &_channel->efx->rx_queue[0]; \
- _rx_queue < &_channel->efx->rx_queue[EFX_MAX_RX_QUEUES]; \
- _rx_queue++) \
- if ((!_rx_queue->used) || \
- (_rx_queue->channel != _channel)) \
+ for (_rx_queue = &_channel->efx->rx_queue[_channel->channel]; \
+ _rx_queue; \
+ _rx_queue = NULL) \
+ if (_rx_queue->channel != _channel) \
continue; \
else
@@ -886,13 +892,13 @@ static inline struct efx_rx_buffer *efx_rx_buffer(struct efx_rx_queue *rx_queue,
}
/* Set bit in a little-endian bitfield */
-static inline void set_bit_le(int nr, unsigned char *addr)
+static inline void set_bit_le(unsigned nr, unsigned char *addr)
{
addr[nr / 8] |= (1 << (nr % 8));
}
/* Clear bit in a little-endian bitfield */
-static inline void clear_bit_le(int nr, unsigned char *addr)
+static inline void clear_bit_le(unsigned nr, unsigned char *addr)
{
addr[nr / 8] &= ~(1 << (nr % 8));
}
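
The helpers above now take an unsigned bit number; bit n always lands in byte n/8 at bit n%8, independent of host endianness, which is what a little-endian hardware bitmap such as the multicast hash needs. A small stand-alone usage sketch:

#include <stdio.h>

static inline void set_bit_le(unsigned nr, unsigned char *addr)
{
	addr[nr / 8] |= (1 << (nr % 8));
}

int main(void)
{
	unsigned char hash[32] = { 0 };	/* e.g. a 256-bit multicast hash */

	set_bit_le(9, hash);		/* bit 9 -> byte 1, bit 1 */
	printf("byte 1 = 0x%02x\n", hash[1]);	/* prints 0x02 */
	return 0;
}
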
diff --git a/drivers/net/sfc/phy.h b/drivers/net/sfc/phy.h
index 9d02c84e6b2..f746536f4ff 100644
--- a/drivers/net/sfc/phy.h
+++ b/drivers/net/sfc/phy.h
@@ -15,15 +15,7 @@
*/
extern struct efx_phy_operations falcon_tenxpress_phy_ops;
-enum tenxpress_state {
- TENXPRESS_STATUS_OFF = 0,
- TENXPRESS_STATUS_OTEMP = 1,
- TENXPRESS_STATUS_NORMAL = 2,
-};
-
-extern void tenxpress_set_state(struct efx_nic *efx,
- enum tenxpress_state state);
-extern void tenxpress_phy_blink(struct efx_nic *efx, int blink);
+extern void tenxpress_phy_blink(struct efx_nic *efx, bool blink);
extern void tenxpress_crc_err(struct efx_nic *efx);
/****************************************************************************
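
One subtlety of efx_phy_mode_disabled() from net_driver.h above: a PHY counts as disabled when any flag other than PHY_MODE_TX_DISABLED is set, so a TX-disabled PHY is still considered "on". A stand-alone sketch:

#include <stdbool.h>
#include <assert.h>

enum efx_phy_mode {
	PHY_MODE_NORMAL = 0,
	PHY_MODE_TX_DISABLED = 1,
	PHY_MODE_SPECIAL = 8,
};

static inline bool efx_phy_mode_disabled(enum efx_phy_mode mode)
{
	return !!(mode & ~PHY_MODE_TX_DISABLED);
}

int main(void)
{
	assert(!efx_phy_mode_disabled(PHY_MODE_NORMAL));
	assert(!efx_phy_mode_disabled(PHY_MODE_TX_DISABLED));
	assert(efx_phy_mode_disabled(PHY_MODE_SPECIAL));
	return 0;
}
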
diff --git a/drivers/net/sfc/rx.c b/drivers/net/sfc/rx.c
index 0d27dd39bc0..0f805da4ce5 100644
--- a/drivers/net/sfc/rx.c
+++ b/drivers/net/sfc/rx.c
@@ -212,8 +212,8 @@ void efx_lro_fini(struct net_lro_mgr *lro_mgr)
* and populates a struct efx_rx_buffer with the relevant
* information. Return a negative error code or 0 on success.
*/
-static inline int efx_init_rx_buffer_skb(struct efx_rx_queue *rx_queue,
- struct efx_rx_buffer *rx_buf)
+static int efx_init_rx_buffer_skb(struct efx_rx_queue *rx_queue,
+ struct efx_rx_buffer *rx_buf)
{
struct efx_nic *efx = rx_queue->efx;
struct net_device *net_dev = efx->net_dev;
@@ -252,8 +252,8 @@ static inline int efx_init_rx_buffer_skb(struct efx_rx_queue *rx_queue,
* and populates a struct efx_rx_buffer with the relevant
* information. Return a negative error code or 0 on success.
*/
-static inline int efx_init_rx_buffer_page(struct efx_rx_queue *rx_queue,
- struct efx_rx_buffer *rx_buf)
+static int efx_init_rx_buffer_page(struct efx_rx_queue *rx_queue,
+ struct efx_rx_buffer *rx_buf)
{
struct efx_nic *efx = rx_queue->efx;
int bytes, space, offset;
@@ -319,8 +319,8 @@ static inline int efx_init_rx_buffer_page(struct efx_rx_queue *rx_queue,
* and populates a struct efx_rx_buffer with the relevant
* information.
*/
-static inline int efx_init_rx_buffer(struct efx_rx_queue *rx_queue,
- struct efx_rx_buffer *new_rx_buf)
+static int efx_init_rx_buffer(struct efx_rx_queue *rx_queue,
+ struct efx_rx_buffer *new_rx_buf)
{
int rc = 0;
@@ -340,8 +340,8 @@ static inline int efx_init_rx_buffer(struct efx_rx_queue *rx_queue,
return rc;
}
-static inline void efx_unmap_rx_buffer(struct efx_nic *efx,
- struct efx_rx_buffer *rx_buf)
+static void efx_unmap_rx_buffer(struct efx_nic *efx,
+ struct efx_rx_buffer *rx_buf)
{
if (rx_buf->page) {
EFX_BUG_ON_PARANOID(rx_buf->skb);
@@ -357,8 +357,8 @@ static inline void efx_unmap_rx_buffer(struct efx_nic *efx,
}
}
-static inline void efx_free_rx_buffer(struct efx_nic *efx,
- struct efx_rx_buffer *rx_buf)
+static void efx_free_rx_buffer(struct efx_nic *efx,
+ struct efx_rx_buffer *rx_buf)
{
if (rx_buf->page) {
__free_pages(rx_buf->page, efx->rx_buffer_order);
@@ -369,8 +369,8 @@ static inline void efx_free_rx_buffer(struct efx_nic *efx,
}
}
-static inline void efx_fini_rx_buffer(struct efx_rx_queue *rx_queue,
- struct efx_rx_buffer *rx_buf)
+static void efx_fini_rx_buffer(struct efx_rx_queue *rx_queue,
+ struct efx_rx_buffer *rx_buf)
{
efx_unmap_rx_buffer(rx_queue->efx, rx_buf);
efx_free_rx_buffer(rx_queue->efx, rx_buf);
@@ -506,10 +506,10 @@ void efx_rx_work(struct work_struct *data)
efx_schedule_slow_fill(rx_queue, 1);
}
-static inline void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue,
- struct efx_rx_buffer *rx_buf,
- int len, int *discard,
- int *leak_packet)
+static void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue,
+ struct efx_rx_buffer *rx_buf,
+ int len, bool *discard,
+ bool *leak_packet)
{
struct efx_nic *efx = rx_queue->efx;
unsigned max_len = rx_buf->len - efx->type->rx_buffer_padding;
@@ -520,7 +520,7 @@ static inline void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue,
/* The packet must be discarded, but this is only a fatal error
* if the caller indicated it was
*/
- *discard = 1;
+ *discard = true;
if ((len > rx_buf->len) && EFX_WORKAROUND_8071(efx)) {
EFX_ERR_RL(efx, " RX queue %d seriously overlength "
@@ -546,8 +546,8 @@ static inline void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue,
* Handles driverlink veto, and passes the fragment up via
* the appropriate LRO method
*/
-static inline void efx_rx_packet_lro(struct efx_channel *channel,
- struct efx_rx_buffer *rx_buf)
+static void efx_rx_packet_lro(struct efx_channel *channel,
+ struct efx_rx_buffer *rx_buf)
{
struct net_lro_mgr *lro_mgr = &channel->lro_mgr;
void *priv = channel;
@@ -574,9 +574,9 @@ static inline void efx_rx_packet_lro(struct efx_channel *channel,
}
/* Allocate and construct an SKB around a struct page.*/
-static inline struct sk_buff *efx_rx_mk_skb(struct efx_rx_buffer *rx_buf,
- struct efx_nic *efx,
- int hdr_len)
+static struct sk_buff *efx_rx_mk_skb(struct efx_rx_buffer *rx_buf,
+ struct efx_nic *efx,
+ int hdr_len)
{
struct sk_buff *skb;
@@ -621,11 +621,11 @@ static inline struct sk_buff *efx_rx_mk_skb(struct efx_rx_buffer *rx_buf,
}
void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
- unsigned int len, int checksummed, int discard)
+ unsigned int len, bool checksummed, bool discard)
{
struct efx_nic *efx = rx_queue->efx;
struct efx_rx_buffer *rx_buf;
- int leak_packet = 0;
+ bool leak_packet = false;
rx_buf = efx_rx_buffer(rx_queue, index);
EFX_BUG_ON_PARANOID(!rx_buf->data);
@@ -683,11 +683,11 @@ void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
/* Handle a received packet. Second half: Touches packet payload. */
void __efx_rx_packet(struct efx_channel *channel,
- struct efx_rx_buffer *rx_buf, int checksummed)
+ struct efx_rx_buffer *rx_buf, bool checksummed)
{
struct efx_nic *efx = channel->efx;
struct sk_buff *skb;
- int lro = efx->net_dev->features & NETIF_F_LRO;
+ bool lro = !!(efx->net_dev->features & NETIF_F_LRO);
/* If we're in loopback test, then pass the packet directly to the
* loopback layer, and free the rx_buf here
@@ -789,27 +789,18 @@ int efx_probe_rx_queue(struct efx_rx_queue *rx_queue)
/* Allocate RX buffers */
rxq_size = (efx->type->rxd_ring_mask + 1) * sizeof(*rx_queue->buffer);
rx_queue->buffer = kzalloc(rxq_size, GFP_KERNEL);
- if (!rx_queue->buffer) {
- rc = -ENOMEM;
- goto fail1;
- }
+ if (!rx_queue->buffer)
+ return -ENOMEM;
rc = falcon_probe_rx(rx_queue);
- if (rc)
- goto fail2;
-
- return 0;
-
- fail2:
- kfree(rx_queue->buffer);
- rx_queue->buffer = NULL;
- fail1:
- rx_queue->used = 0;
-
+ if (rc) {
+ kfree(rx_queue->buffer);
+ rx_queue->buffer = NULL;
+ }
return rc;
}
-int efx_init_rx_queue(struct efx_rx_queue *rx_queue)
+void efx_init_rx_queue(struct efx_rx_queue *rx_queue)
{
struct efx_nic *efx = rx_queue->efx;
unsigned int max_fill, trigger, limit;
@@ -833,7 +824,7 @@ int efx_init_rx_queue(struct efx_rx_queue *rx_queue)
rx_queue->fast_fill_limit = limit;
/* Set up RX descriptor ring */
- return falcon_init_rx(rx_queue);
+ falcon_init_rx(rx_queue);
}
void efx_fini_rx_queue(struct efx_rx_queue *rx_queue)
@@ -872,7 +863,6 @@ void efx_remove_rx_queue(struct efx_rx_queue *rx_queue)
kfree(rx_queue->buffer);
rx_queue->buffer = NULL;
- rx_queue->used = 0;
}
void efx_flush_lro(struct efx_channel *channel)
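
efx_probe_rx_queue() above drops its goto-based unwinding: with one allocation followed by one fallible step, freeing in place is simpler. A generic sketch of the pattern, with invented names:

#include <stdlib.h>

struct ring {
	void *buffer;
};

static int probe_hw(struct ring *r)	/* invented stub for the HW probe */
{
	(void) r;
	return 0;
}

static int probe_ring(struct ring *r, size_t size)
{
	int rc;

	r->buffer = calloc(1, size);
	if (!r->buffer)
		return -1;	/* -ENOMEM in the driver */

	rc = probe_hw(r);
	if (rc) {		/* undo the one earlier step and bail */
		free(r->buffer);
		r->buffer = NULL;
	}
	return rc;
}

int main(void)
{
	struct ring r;
	int rc = probe_ring(&r, 4096);

	free(r.buffer);
	return rc;
}
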
diff --git a/drivers/net/sfc/rx.h b/drivers/net/sfc/rx.h
index f35e377bfc5..0e88a9ddc1c 100644
--- a/drivers/net/sfc/rx.h
+++ b/drivers/net/sfc/rx.h
@@ -14,7 +14,7 @@
int efx_probe_rx_queue(struct efx_rx_queue *rx_queue);
void efx_remove_rx_queue(struct efx_rx_queue *rx_queue);
-int efx_init_rx_queue(struct efx_rx_queue *rx_queue);
+void efx_init_rx_queue(struct efx_rx_queue *rx_queue);
void efx_fini_rx_queue(struct efx_rx_queue *rx_queue);
int efx_lro_init(struct net_lro_mgr *lro_mgr, struct efx_nic *efx);
@@ -24,6 +24,6 @@ void efx_rx_strategy(struct efx_channel *channel);
void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue);
void efx_rx_work(struct work_struct *data);
void __efx_rx_packet(struct efx_channel *channel,
- struct efx_rx_buffer *rx_buf, int checksummed);
+ struct efx_rx_buffer *rx_buf, bool checksummed);
#endif /* EFX_RX_H */
diff --git a/drivers/net/sfc/selftest.c b/drivers/net/sfc/selftest.c
index 3b2de9fe7f2..362956e3fe1 100644
--- a/drivers/net/sfc/selftest.c
+++ b/drivers/net/sfc/selftest.c
@@ -27,6 +27,9 @@
#include "boards.h"
#include "workarounds.h"
#include "mac.h"
+#include "spi.h"
+#include "falcon_io.h"
+#include "mdio_10g.h"
/*
* Loopback test packet structure
@@ -51,7 +54,7 @@ static const char *payload_msg =
"Hello world! This is an Efx loopback test in progress!";
/**
- * efx_selftest_state - persistent state during a selftest
+ * efx_loopback_state - persistent state during a loopback selftest
* @flush: Drop all packets in efx_loopback_rx_packet
* @packet_count: Number of packets being used in this test
* @skbs: An array of skbs transmitted
@@ -59,10 +62,14 @@ static const char *payload_msg =
* @rx_bad: RX bad packet count
* @payload: Payload used in tests
*/
-struct efx_selftest_state {
- int flush;
+struct efx_loopback_state {
+ bool flush;
int packet_count;
struct sk_buff **skbs;
+
+ /* Checksums are being offloaded */
+ bool offload_csum;
+
atomic_t rx_good;
atomic_t rx_bad;
struct efx_loopback_payload payload;
@@ -70,21 +77,65 @@ struct efx_selftest_state {
/**************************************************************************
*
- * Configurable values
+ * MII, NVRAM and register tests
*
**************************************************************************/
-/* Level of loopback testing
- *
- * The maximum packet burst length is 16**(n-1), i.e.
- *
- * - Level 0 : no packets
- * - Level 1 : 1 packet
- * - Level 2 : 17 packets (1 * 1 packet, 1 * 16 packets)
- * - Level 3 : 273 packets (1 * 1 packet, 1 * 16 packet, 1 * 256 packets)
- *
- */
-static unsigned int loopback_test_level = 3;
+static int efx_test_mii(struct efx_nic *efx, struct efx_self_tests *tests)
+{
+ int rc = 0;
+ u16 physid1, physid2;
+ struct mii_if_info *mii = &efx->mii;
+ struct net_device *net_dev = efx->net_dev;
+
+ if (efx->phy_type == PHY_TYPE_NONE)
+ return 0;
+
+ mutex_lock(&efx->mac_lock);
+ tests->mii = -1;
+
+ physid1 = mii->mdio_read(net_dev, mii->phy_id, MII_PHYSID1);
+ physid2 = mii->mdio_read(net_dev, mii->phy_id, MII_PHYSID2);
+
+ if ((physid1 == 0x0000) || (physid1 == 0xffff) ||
+ (physid2 == 0x0000) || (physid2 == 0xffff)) {
+ EFX_ERR(efx, "no MII PHY present with ID %d\n",
+ mii->phy_id);
+ rc = -EINVAL;
+ goto out;
+ }
+
+ rc = mdio_clause45_check_mmds(efx, efx->phy_op->mmds, 0);
+ if (rc)
+ goto out;
+
+out:
+ mutex_unlock(&efx->mac_lock);
+ tests->mii = rc ? -1 : 1;
+ return rc;
+}
+
+static int efx_test_nvram(struct efx_nic *efx, struct efx_self_tests *tests)
+{
+ int rc;
+
+ rc = falcon_read_nvram(efx, NULL);
+ tests->nvram = rc ? -1 : 1;
+ return rc;
+}
+
+static int efx_test_chip(struct efx_nic *efx, struct efx_self_tests *tests)
+{
+ int rc;
+
+ /* Not supported on A-series silicon */
+ if (falcon_rev(efx) < FALCON_REV_B0)
+ return 0;
+
+ rc = falcon_test_registers(efx);
+ tests->registers = rc ? -1 : 1;
+ return rc;
+}
/**************************************************************************
*
@@ -107,7 +158,7 @@ static int efx_test_interrupts(struct efx_nic *efx,
/* ACK each interrupting event queue. Receiving an interrupt due to
* traffic before a test event is raised is considered a pass */
- efx_for_each_channel_with_interrupt(channel, efx) {
+ efx_for_each_channel(channel, efx) {
if (channel->work_pending)
efx_process_channel_now(channel);
if (efx->last_irq_cpu >= 0)
@@ -132,41 +183,6 @@ static int efx_test_interrupts(struct efx_nic *efx,
return 0;
}
-/* Test generation and receipt of non-interrupting events */
-static int efx_test_eventq(struct efx_channel *channel,
- struct efx_self_tests *tests)
-{
- unsigned int magic;
-
- /* Channel specific code, limited to 20 bits */
- magic = (0x00010150 + channel->channel);
- EFX_LOG(channel->efx, "channel %d testing event queue with code %x\n",
- channel->channel, magic);
-
- tests->eventq_dma[channel->channel] = -1;
- tests->eventq_int[channel->channel] = 1; /* fake pass */
- tests->eventq_poll[channel->channel] = 1; /* fake pass */
-
- /* Reset flag and zero magic word */
- channel->efx->last_irq_cpu = -1;
- channel->eventq_magic = 0;
- smp_wmb();
-
- falcon_generate_test_event(channel, magic);
- udelay(1);
-
- efx_process_channel_now(channel);
- if (channel->eventq_magic != magic) {
- EFX_ERR(channel->efx, "channel %d failed to see test event\n",
- channel->channel);
- return -ETIMEDOUT;
- } else {
- tests->eventq_dma[channel->channel] = 1;
- }
-
- return 0;
-}
-
/* Test generation and receipt of interrupting events */
static int efx_test_eventq_irq(struct efx_channel *channel,
struct efx_self_tests *tests)
@@ -230,39 +246,18 @@ static int efx_test_eventq_irq(struct efx_channel *channel,
return 0;
}
-/**************************************************************************
- *
- * PHY testing
- *
- **************************************************************************/
-
-/* Check PHY presence by reading the PHY ID registers */
-static int efx_test_phy(struct efx_nic *efx,
- struct efx_self_tests *tests)
+static int efx_test_phy(struct efx_nic *efx, struct efx_self_tests *tests)
{
- u16 physid1, physid2;
- struct mii_if_info *mii = &efx->mii;
- struct net_device *net_dev = efx->net_dev;
+ int rc;
- if (efx->phy_type == PHY_TYPE_NONE)
+ if (!efx->phy_op->test)
return 0;
- EFX_LOG(efx, "testing PHY presence\n");
- tests->phy_ok = -1;
-
- physid1 = mii->mdio_read(net_dev, mii->phy_id, MII_PHYSID1);
- physid2 = mii->mdio_read(net_dev, mii->phy_id, MII_PHYSID2);
-
- if ((physid1 != 0x0000) && (physid1 != 0xffff) &&
- (physid2 != 0x0000) && (physid2 != 0xffff)) {
- EFX_LOG(efx, "found MII PHY %d ID 0x%x:%x\n",
- mii->phy_id, physid1, physid2);
- tests->phy_ok = 1;
- return 0;
- }
-
- EFX_ERR(efx, "no MII PHY present with ID %d\n", mii->phy_id);
- return -ENODEV;
+ mutex_lock(&efx->mac_lock);
+ rc = efx->phy_op->test(efx);
+ mutex_unlock(&efx->mac_lock);
+ tests->phy = rc ? -1 : 1;
+ return rc;
}
/**************************************************************************
@@ -278,7 +273,7 @@ static int efx_test_phy(struct efx_nic *efx,
void efx_loopback_rx_packet(struct efx_nic *efx,
const char *buf_ptr, int pkt_len)
{
- struct efx_selftest_state *state = efx->loopback_selftest;
+ struct efx_loopback_state *state = efx->loopback_selftest;
struct efx_loopback_payload *received;
struct efx_loopback_payload *payload;
@@ -289,11 +284,12 @@ void efx_loopback_rx_packet(struct efx_nic *efx,
return;
payload = &state->payload;
-
+
received = (struct efx_loopback_payload *) buf_ptr;
received->ip.saddr = payload->ip.saddr;
- received->ip.check = payload->ip.check;
-
+ if (state->offload_csum)
+ received->ip.check = payload->ip.check;
+
/* Check that header exists */
if (pkt_len < sizeof(received->header)) {
EFX_ERR(efx, "saw runt RX packet (length %d) in %s loopback "
@@ -362,7 +358,7 @@ void efx_loopback_rx_packet(struct efx_nic *efx,
/* Initialise an efx_selftest_state for a new iteration */
static void efx_iterate_state(struct efx_nic *efx)
{
- struct efx_selftest_state *state = efx->loopback_selftest;
+ struct efx_loopback_state *state = efx->loopback_selftest;
struct net_device *net_dev = efx->net_dev;
struct efx_loopback_payload *payload = &state->payload;
@@ -395,17 +391,17 @@ static void efx_iterate_state(struct efx_nic *efx)
smp_wmb();
}
-static int efx_tx_loopback(struct efx_tx_queue *tx_queue)
+static int efx_begin_loopback(struct efx_tx_queue *tx_queue)
{
struct efx_nic *efx = tx_queue->efx;
- struct efx_selftest_state *state = efx->loopback_selftest;
+ struct efx_loopback_state *state = efx->loopback_selftest;
struct efx_loopback_payload *payload;
struct sk_buff *skb;
int i, rc;
/* Transmit N copies of buffer */
for (i = 0; i < state->packet_count; i++) {
- /* Allocate an skb, holding an extra reference for
+ /* Allocate an skb, holding an extra reference for
* transmit completion counting */
skb = alloc_skb(sizeof(state->payload), GFP_KERNEL);
if (!skb)
@@ -444,11 +440,25 @@ static int efx_tx_loopback(struct efx_tx_queue *tx_queue)
return 0;
}
-static int efx_rx_loopback(struct efx_tx_queue *tx_queue,
- struct efx_loopback_self_tests *lb_tests)
+static int efx_poll_loopback(struct efx_nic *efx)
+{
+ struct efx_loopback_state *state = efx->loopback_selftest;
+ struct efx_channel *channel;
+
+ /* NAPI polling is not enabled, so process channels
+ * synchronously */
+ efx_for_each_channel(channel, efx) {
+ if (channel->work_pending)
+ efx_process_channel_now(channel);
+ }
+ return atomic_read(&state->rx_good) == state->packet_count;
+}
+
+static int efx_end_loopback(struct efx_tx_queue *tx_queue,
+ struct efx_loopback_self_tests *lb_tests)
{
struct efx_nic *efx = tx_queue->efx;
- struct efx_selftest_state *state = efx->loopback_selftest;
+ struct efx_loopback_state *state = efx->loopback_selftest;
struct sk_buff *skb;
int tx_done = 0, rx_good, rx_bad;
int i, rc = 0;
@@ -507,11 +517,10 @@ efx_test_loopback(struct efx_tx_queue *tx_queue,
struct efx_loopback_self_tests *lb_tests)
{
struct efx_nic *efx = tx_queue->efx;
- struct efx_selftest_state *state = efx->loopback_selftest;
- struct efx_channel *channel;
- int i, rc = 0;
+ struct efx_loopback_state *state = efx->loopback_selftest;
+ int i, begin_rc, end_rc;
- for (i = 0; i < loopback_test_level; i++) {
+ for (i = 0; i < 3; i++) {
/* Determine how many packets to send */
state->packet_count = (efx->type->txd_ring_mask + 1) / 3;
state->packet_count = min(1 << (i << 2), state->packet_count);
@@ -519,30 +528,31 @@ efx_test_loopback(struct efx_tx_queue *tx_queue,
state->packet_count, GFP_KERNEL);
if (!state->skbs)
return -ENOMEM;
- state->flush = 0;
+ state->flush = false;
EFX_LOG(efx, "TX queue %d testing %s loopback with %d "
"packets\n", tx_queue->queue, LOOPBACK_MODE(efx),
state->packet_count);
efx_iterate_state(efx);
- rc = efx_tx_loopback(tx_queue);
-
- /* NAPI polling is not enabled, so process channels synchronously */
- schedule_timeout_uninterruptible(HZ / 50);
- efx_for_each_channel_with_interrupt(channel, efx) {
- if (channel->work_pending)
- efx_process_channel_now(channel);
+ begin_rc = efx_begin_loopback(tx_queue);
+
+ /* This will normally complete very quickly, but be
+ * prepared to wait up to 100 ms. */
+ msleep(1);
+ if (!efx_poll_loopback(efx)) {
+ msleep(100);
+ efx_poll_loopback(efx);
}
- rc |= efx_rx_loopback(tx_queue, lb_tests);
+ end_rc = efx_end_loopback(tx_queue, lb_tests);
kfree(state->skbs);
- if (rc) {
+ if (begin_rc || end_rc) {
/* Wait a while to ensure there are no packets
* floating around after a failure. */
schedule_timeout_uninterruptible(HZ / 10);
- return rc;
+ return begin_rc ? begin_rc : end_rc;
}
}
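
The wait above is two-staged: sleep 1 ms for the common fast case, then one patient 100 ms retry before letting efx_end_loopback() count whatever actually arrived. A stand-alone sketch; poll_done() is an invented stand-in for efx_poll_loopback():

#include <stdbool.h>
#include <unistd.h>

static bool poll_done(void)	/* stand-in for efx_poll_loopback() */
{
	return true;
}

static void wait_for_completion(void)
{
	usleep(1000);		/* normally completes this quickly */
	if (!poll_done()) {
		usleep(100000);	/* be prepared to wait up to 100 ms */
		poll_done();
	}
}

int main(void)
{
	wait_for_completion();
	return 0;
}
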
@@ -550,49 +560,36 @@ efx_test_loopback(struct efx_tx_queue *tx_queue,
"of %d packets\n", tx_queue->queue, LOOPBACK_MODE(efx),
state->packet_count);
- return rc;
+ return 0;
}
-static int efx_test_loopbacks(struct efx_nic *efx,
+static int efx_test_loopbacks(struct efx_nic *efx, struct ethtool_cmd ecmd,
struct efx_self_tests *tests,
unsigned int loopback_modes)
{
- struct efx_selftest_state *state = efx->loopback_selftest;
- struct ethtool_cmd ecmd, ecmd_loopback;
+ enum efx_loopback_mode mode;
+ struct efx_loopback_state *state;
struct efx_tx_queue *tx_queue;
- enum efx_loopback_mode old_mode, mode;
- int count, rc = 0, link_up;
-
- rc = efx_ethtool_get_settings(efx->net_dev, &ecmd);
- if (rc) {
- EFX_ERR(efx, "could not get GMII settings\n");
- return rc;
- }
- old_mode = efx->loopback_mode;
-
- /* Disable autonegotiation for the purposes of loopback */
- memcpy(&ecmd_loopback, &ecmd, sizeof(ecmd_loopback));
- if (ecmd_loopback.autoneg == AUTONEG_ENABLE) {
- ecmd_loopback.autoneg = AUTONEG_DISABLE;
- ecmd_loopback.duplex = DUPLEX_FULL;
- ecmd_loopback.speed = SPEED_10000;
- }
+ bool link_up;
+ int count, rc = 0;
- rc = efx_ethtool_set_settings(efx->net_dev, &ecmd_loopback);
- if (rc) {
- EFX_ERR(efx, "could not disable autonegotiation\n");
- goto out;
- }
- tests->loopback_speed = ecmd_loopback.speed;
- tests->loopback_full_duplex = ecmd_loopback.duplex;
+ /* Set the port loopback_selftest member. From this point on
+ * all received packets will be dropped. Mark the state as
+ * "flushing" so all inflight packets are dropped */
+ state = kzalloc(sizeof(*state), GFP_KERNEL);
+ if (state == NULL)
+ return -ENOMEM;
+ BUG_ON(efx->loopback_selftest);
+ state->flush = true;
+ efx->loopback_selftest = state;
/* Test all supported loopback modes */
- for (mode = LOOPBACK_NONE; mode < LOOPBACK_TEST_MAX; mode++) {
+ for (mode = LOOPBACK_NONE; mode <= LOOPBACK_TEST_MAX; mode++) {
if (!(loopback_modes & (1 << mode)))
continue;
/* Move the port into the specified loopback mode. */
- state->flush = 1;
+ state->flush = true;
efx->loopback_mode = mode;
efx_reconfigure_port(efx);
@@ -616,7 +613,7 @@ static int efx_test_loopbacks(struct efx_nic *efx,
*/
link_up = efx->link_up;
if (!falcon_xaui_link_ok(efx))
- link_up = 0;
+ link_up = false;
} while ((++count < 20) && !link_up);
@@ -634,18 +631,21 @@ static int efx_test_loopbacks(struct efx_nic *efx,
/* Test every TX queue */
efx_for_each_tx_queue(tx_queue, efx) {
- rc |= efx_test_loopback(tx_queue,
- &tests->loopback[mode]);
+ state->offload_csum = (tx_queue->queue ==
+ EFX_TX_QUEUE_OFFLOAD_CSUM);
+ rc = efx_test_loopback(tx_queue,
+ &tests->loopback[mode]);
if (rc)
goto out;
}
}
out:
- /* Take out of loopback and restore PHY settings */
- state->flush = 1;
- efx->loopback_mode = old_mode;
- efx_ethtool_set_settings(efx->net_dev, &ecmd);
+ /* Remove the flush. The caller will remove the loopback setting */
+ state->flush = true;
+ efx->loopback_selftest = NULL;
+ wmb();
+ kfree(state);
return rc;
}
@@ -661,23 +661,27 @@ static int efx_test_loopbacks(struct efx_nic *efx,
int efx_online_test(struct efx_nic *efx, struct efx_self_tests *tests)
{
struct efx_channel *channel;
- int rc = 0;
+ int rc, rc2 = 0;
+
+ rc = efx_test_mii(efx, tests);
+ if (rc && !rc2)
+ rc2 = rc;
- EFX_LOG(efx, "performing online self-tests\n");
+ rc = efx_test_nvram(efx, tests);
+ if (rc && !rc2)
+ rc2 = rc;
+
+ rc = efx_test_interrupts(efx, tests);
+ if (rc && !rc2)
+ rc2 = rc;
- rc |= efx_test_interrupts(efx, tests);
efx_for_each_channel(channel, efx) {
- if (channel->has_interrupt)
- rc |= efx_test_eventq_irq(channel, tests);
- else
- rc |= efx_test_eventq(channel, tests);
+ rc = efx_test_eventq_irq(channel, tests);
+ if (rc && !rc2)
+ rc2 = rc;
}
- rc |= efx_test_phy(efx, tests);
-
- if (rc)
- EFX_ERR(efx, "failed online self-tests\n");
- return rc;
+ return rc2;
}
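
The rc/rc2 pattern above recurs throughout this patch: every test runs, but the first failure's code is the one reported. As a standalone sketch (illustrative, not driver code):

	/* Keep the first error seen while still running later tests;
	 * equivalent to: if (rc && !rc2) rc2 = rc; */
	static inline int efx_first_error(int rc, int rc2)
	{
		return rc2 ? rc2 : rc;
	}
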
/* Offline (i.e. disruptive) testing
@@ -685,35 +689,66 @@ int efx_online_test(struct efx_nic *efx, struct efx_self_tests *tests)
int efx_offline_test(struct efx_nic *efx,
struct efx_self_tests *tests, unsigned int loopback_modes)
{
- struct efx_selftest_state *state;
- int rc = 0;
-
- EFX_LOG(efx, "performing offline self-tests\n");
+ enum efx_loopback_mode loopback_mode = efx->loopback_mode;
+ int phy_mode = efx->phy_mode;
+ struct ethtool_cmd ecmd, ecmd_test;
+ int rc, rc2 = 0;
+
+	/* Force the carrier state off so the kernel doesn't transmit during
+ * the loopback test, and the watchdog timeout doesn't fire. Also put
+ * falcon into loopback for the register test.
+ */
+ mutex_lock(&efx->mac_lock);
+ efx->port_inhibited = true;
+ if (efx->loopback_modes)
+ efx->loopback_mode = __ffs(efx->loopback_modes);
+ __efx_reconfigure_port(efx);
+ mutex_unlock(&efx->mac_lock);
+
+ /* free up all consumers of SRAM (including all the queues) */
+ efx_reset_down(efx, &ecmd);
+
+ rc = efx_test_chip(efx, tests);
+ if (rc && !rc2)
+ rc2 = rc;
+
+ /* reset the chip to recover from the register test */
+ rc = falcon_reset_hw(efx, RESET_TYPE_ALL);
+
+ /* Modify the saved ecmd so that when efx_reset_up() restores the phy
+	 * state, AN is disabled, and the phy is powered and out of loopback */
+ memcpy(&ecmd_test, &ecmd, sizeof(ecmd_test));
+ if (ecmd_test.autoneg == AUTONEG_ENABLE) {
+ ecmd_test.autoneg = AUTONEG_DISABLE;
+ ecmd_test.duplex = DUPLEX_FULL;
+ ecmd_test.speed = SPEED_10000;
+ }
+ efx->loopback_mode = LOOPBACK_NONE;
- /* Create a selftest_state structure to hold state for the test */
- state = kzalloc(sizeof(*state), GFP_KERNEL);
- if (state == NULL) {
- rc = -ENOMEM;
- goto out;
+ rc = efx_reset_up(efx, &ecmd_test, rc == 0);
+ if (rc) {
+ EFX_ERR(efx, "Unable to recover from chip test\n");
+ efx_schedule_reset(efx, RESET_TYPE_DISABLE);
+ return rc;
}
- /* Set the port loopback_selftest member. From this point on
- * all received packets will be dropped. Mark the state as
- * "flushing" so all inflight packets are dropped */
- BUG_ON(efx->loopback_selftest);
- state->flush = 1;
- efx->loopback_selftest = state;
+ tests->loopback_speed = ecmd_test.speed;
+ tests->loopback_full_duplex = ecmd_test.duplex;
- rc = efx_test_loopbacks(efx, tests, loopback_modes);
+ rc = efx_test_phy(efx, tests);
+ if (rc && !rc2)
+ rc2 = rc;
- efx->loopback_selftest = NULL;
- wmb();
- kfree(state);
+ rc = efx_test_loopbacks(efx, ecmd_test, tests, loopback_modes);
+ if (rc && !rc2)
+ rc2 = rc;
- out:
- if (rc)
- EFX_ERR(efx, "failed offline self-tests\n");
+ /* restore the PHY to the previous state */
+ efx->loopback_mode = loopback_mode;
+ efx->phy_mode = phy_mode;
+ efx->port_inhibited = false;
+ efx_ethtool_set_settings(efx->net_dev, &ecmd);
- return rc;
+ return rc2;
}
diff --git a/drivers/net/sfc/selftest.h b/drivers/net/sfc/selftest.h
index f6999c2b622..fc15df15d76 100644
--- a/drivers/net/sfc/selftest.h
+++ b/drivers/net/sfc/selftest.h
@@ -18,8 +18,8 @@
*/
struct efx_loopback_self_tests {
- int tx_sent[EFX_MAX_TX_QUEUES];
- int tx_done[EFX_MAX_TX_QUEUES];
+ int tx_sent[EFX_TX_QUEUE_COUNT];
+ int tx_done[EFX_TX_QUEUE_COUNT];
int rx_good;
int rx_bad;
};
@@ -29,14 +29,19 @@ struct efx_loopback_self_tests {
* indicates failure.
*/
struct efx_self_tests {
+ /* online tests */
+ int mii;
+ int nvram;
int interrupt;
int eventq_dma[EFX_MAX_CHANNELS];
int eventq_int[EFX_MAX_CHANNELS];
int eventq_poll[EFX_MAX_CHANNELS];
- int phy_ok;
+ /* offline tests */
+ int registers;
+ int phy;
int loopback_speed;
int loopback_full_duplex;
- struct efx_loopback_self_tests loopback[LOOPBACK_TEST_MAX];
+ struct efx_loopback_self_tests loopback[LOOPBACK_TEST_MAX + 1];
};
extern void efx_loopback_rx_packet(struct efx_nic *efx,
diff --git a/drivers/net/sfc/sfe4001.c b/drivers/net/sfc/sfe4001.c
index b2784952399..fe4e3fd2233 100644
--- a/drivers/net/sfc/sfe4001.c
+++ b/drivers/net/sfc/sfe4001.c
@@ -13,11 +13,13 @@
* the PHY
*/
#include <linux/delay.h>
+#include "net_driver.h"
#include "efx.h"
#include "phy.h"
#include "boards.h"
#include "falcon.h"
#include "falcon_hwdefs.h"
+#include "falcon_io.h"
#include "mac.h"
/**************************************************************************
@@ -120,23 +122,144 @@ static void sfe4001_poweroff(struct efx_nic *efx)
i2c_smbus_read_byte_data(hwmon_client, RSL);
}
-static void sfe4001_fini(struct efx_nic *efx)
+static int sfe4001_poweron(struct efx_nic *efx)
{
- EFX_INFO(efx, "%s\n", __func__);
+ struct i2c_client *hwmon_client = efx->board_info.hwmon_client;
+ struct i2c_client *ioexp_client = efx->board_info.ioexp_client;
+ unsigned int i, j;
+ int rc;
+ u8 out;
+
+ /* Clear any previous over-temperature alert */
+ rc = i2c_smbus_read_byte_data(hwmon_client, RSL);
+ if (rc < 0)
+ return rc;
+
+ /* Enable port 0 and port 1 outputs on IO expander */
+ rc = i2c_smbus_write_byte_data(ioexp_client, P0_CONFIG, 0x00);
+ if (rc)
+ return rc;
+ rc = i2c_smbus_write_byte_data(ioexp_client, P1_CONFIG,
+ 0xff & ~(1 << P1_SPARE_LBN));
+ if (rc)
+ goto fail_on;
+
+ /* If PHY power is on, turn it all off and wait 1 second to
+ * ensure a full reset.
+ */
+ rc = i2c_smbus_read_byte_data(ioexp_client, P0_OUT);
+ if (rc < 0)
+ goto fail_on;
+ out = 0xff & ~((0 << P0_EN_1V2_LBN) | (0 << P0_EN_2V5_LBN) |
+ (0 << P0_EN_3V3X_LBN) | (0 << P0_EN_5V_LBN) |
+ (0 << P0_EN_1V0X_LBN));
+ if (rc != out) {
+ EFX_INFO(efx, "power-cycling PHY\n");
+ rc = i2c_smbus_write_byte_data(ioexp_client, P0_OUT, out);
+ if (rc)
+ goto fail_on;
+ schedule_timeout_uninterruptible(HZ);
+ }
+ for (i = 0; i < 20; ++i) {
+ /* Turn on 1.2V, 2.5V, 3.3V and 5V power rails */
+ out = 0xff & ~((1 << P0_EN_1V2_LBN) | (1 << P0_EN_2V5_LBN) |
+ (1 << P0_EN_3V3X_LBN) | (1 << P0_EN_5V_LBN) |
+ (1 << P0_X_TRST_LBN));
+ if (efx->phy_mode & PHY_MODE_SPECIAL)
+ out |= 1 << P0_EN_3V3X_LBN;
+
+ rc = i2c_smbus_write_byte_data(ioexp_client, P0_OUT, out);
+ if (rc)
+ goto fail_on;
+ msleep(10);
+
+ /* Turn on 1V power rail */
+ out &= ~(1 << P0_EN_1V0X_LBN);
+ rc = i2c_smbus_write_byte_data(ioexp_client, P0_OUT, out);
+ if (rc)
+ goto fail_on;
+
+ EFX_INFO(efx, "waiting for DSP boot (attempt %d)...\n", i);
+
+ /* In flash config mode, DSP does not turn on AFE, so
+ * just wait 1 second.
+ */
+ if (efx->phy_mode & PHY_MODE_SPECIAL) {
+ schedule_timeout_uninterruptible(HZ);
+ return 0;
+ }
+
+ for (j = 0; j < 10; ++j) {
+ msleep(100);
+
+ /* Check DSP has asserted AFE power line */
+ rc = i2c_smbus_read_byte_data(ioexp_client, P1_IN);
+ if (rc < 0)
+ goto fail_on;
+ if (rc & (1 << P1_AFE_PWD_LBN))
+ return 0;
+ }
+ }
+
+ EFX_INFO(efx, "timed out waiting for DSP boot\n");
+ rc = -ETIMEDOUT;
+fail_on:
sfe4001_poweroff(efx);
- i2c_unregister_device(efx->board_info.ioexp_client);
- i2c_unregister_device(efx->board_info.hwmon_client);
+ return rc;
}
-/* The P0_EN_3V3X line on SFE4001 boards (from A2 onward) is connected
- * to the FLASH_CFG_1 input on the DSP. We must keep it high at power-
- * up to allow writing the flash (done through MDIO from userland).
+/* On SFE4001 rev A2 and later, we can control the FLASH_CFG_1 pin
+ * using the 3V3X output of the IO-expander. Allow the user to set
+ * this while the device is stopped, and keep the device stopped while
+ * it is set.
*/
-unsigned int sfe4001_phy_flash_cfg;
-module_param_named(phy_flash_cfg, sfe4001_phy_flash_cfg, uint, 0444);
-MODULE_PARM_DESC(phy_flash_cfg,
- "Force PHY to enter flash configuration mode");
+
+static ssize_t show_phy_flash_cfg(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));
+ return sprintf(buf, "%d\n", !!(efx->phy_mode & PHY_MODE_SPECIAL));
+}
+
+static ssize_t set_phy_flash_cfg(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));
+ enum efx_phy_mode old_mode, new_mode;
+ int err;
+
+ rtnl_lock();
+ old_mode = efx->phy_mode;
+ if (count == 0 || *buf == '0')
+ new_mode = old_mode & ~PHY_MODE_SPECIAL;
+ else
+ new_mode = PHY_MODE_SPECIAL;
+ if (old_mode == new_mode) {
+ err = 0;
+ } else if (efx->state != STATE_RUNNING || netif_running(efx->net_dev)) {
+ err = -EBUSY;
+ } else {
+ efx->phy_mode = new_mode;
+ err = sfe4001_poweron(efx);
+ efx_reconfigure_port(efx);
+ }
+ rtnl_unlock();
+
+ return err ? err : count;
+}
+
+static DEVICE_ATTR(phy_flash_cfg, 0644, show_phy_flash_cfg, set_phy_flash_cfg);
+
+static void sfe4001_fini(struct efx_nic *efx)
+{
+ EFX_INFO(efx, "%s\n", __func__);
+
+ device_remove_file(&efx->pci_dev->dev, &dev_attr_phy_flash_cfg);
+ sfe4001_poweroff(efx);
+ i2c_unregister_device(efx->board_info.ioexp_client);
+ i2c_unregister_device(efx->board_info.hwmon_client);
+}
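
The sysfs attribute above replaces the old phy_flash_cfg module parameter. From userspace it might be driven as in this hedged sketch: the attribute sits under the NIC's PCI device directory (e.g. /sys/bus/pci/devices/<bdf>/phy_flash_cfg, path illustrative), and the interface must be down for the write to succeed.

	#include <stdio.h>

	/* Write "1" to enter flash-config mode, "0" to leave it */
	int set_phy_flash_cfg(const char *attr_path, int on)
	{
		FILE *f = fopen(attr_path, "w");

		if (f == NULL)
			return -1;
		fprintf(f, "%d\n", on ? 1 : 0);
		return fclose(f);
	}
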
/* This board uses an I2C expander to provide power to the PHY, which needs to
* be turned on before the PHY can be used.
@@ -144,41 +267,14 @@ MODULE_PARM_DESC(phy_flash_cfg,
*/
int sfe4001_init(struct efx_nic *efx)
{
- struct i2c_client *hwmon_client, *ioexp_client;
- unsigned int count;
+ struct i2c_client *hwmon_client;
int rc;
- u8 out;
- efx_dword_t reg;
hwmon_client = i2c_new_dummy(&efx->i2c_adap, MAX6647);
if (!hwmon_client)
return -EIO;
efx->board_info.hwmon_client = hwmon_client;
- ioexp_client = i2c_new_dummy(&efx->i2c_adap, PCA9539);
- if (!ioexp_client) {
- rc = -EIO;
- goto fail_hwmon;
- }
- efx->board_info.ioexp_client = ioexp_client;
-
- /* 10Xpress has fixed-function LED pins, so there is no board-specific
- * blink code. */
- efx->board_info.blink = tenxpress_phy_blink;
-
- /* Ensure that XGXS and XAUI SerDes are held in reset */
- EFX_POPULATE_DWORD_7(reg, XX_PWRDNA_EN, 1,
- XX_PWRDNB_EN, 1,
- XX_RSTPLLAB_EN, 1,
- XX_RESETA_EN, 1,
- XX_RESETB_EN, 1,
- XX_RSTXGXSRX_EN, 1,
- XX_RSTXGXSTX_EN, 1);
- falcon_xmac_writel(efx, &reg, XX_PWR_RST_REG_MAC);
- udelay(10);
-
- efx->board_info.fini = sfe4001_fini;
-
/* Set DSP over-temperature alert threshold */
EFX_INFO(efx, "DSP cut-out at %dC\n", xgphy_max_temperature);
rc = i2c_smbus_write_byte_data(hwmon_client, WLHO,
@@ -195,78 +291,34 @@ int sfe4001_init(struct efx_nic *efx)
goto fail_ioexp;
}
- /* Clear any previous over-temperature alert */
- rc = i2c_smbus_read_byte_data(hwmon_client, RSL);
- if (rc < 0)
- goto fail_ioexp;
+ efx->board_info.ioexp_client = i2c_new_dummy(&efx->i2c_adap, PCA9539);
+ if (!efx->board_info.ioexp_client) {
+ rc = -EIO;
+ goto fail_hwmon;
+ }
- /* Enable port 0 and port 1 outputs on IO expander */
- rc = i2c_smbus_write_byte_data(ioexp_client, P0_CONFIG, 0x00);
+ /* 10Xpress has fixed-function LED pins, so there is no board-specific
+ * blink code. */
+ efx->board_info.blink = tenxpress_phy_blink;
+
+ efx->board_info.fini = sfe4001_fini;
+
+ rc = sfe4001_poweron(efx);
if (rc)
goto fail_ioexp;
- rc = i2c_smbus_write_byte_data(ioexp_client, P1_CONFIG,
- 0xff & ~(1 << P1_SPARE_LBN));
- if (rc)
- goto fail_on;
- /* Turn all power off then wait 1 sec. This ensures PHY is reset */
- out = 0xff & ~((0 << P0_EN_1V2_LBN) | (0 << P0_EN_2V5_LBN) |
- (0 << P0_EN_3V3X_LBN) | (0 << P0_EN_5V_LBN) |
- (0 << P0_EN_1V0X_LBN));
- rc = i2c_smbus_write_byte_data(ioexp_client, P0_OUT, out);
+ rc = device_create_file(&efx->pci_dev->dev, &dev_attr_phy_flash_cfg);
if (rc)
goto fail_on;
- schedule_timeout_uninterruptible(HZ);
- count = 0;
- do {
- /* Turn on 1.2V, 2.5V, 3.3V and 5V power rails */
- out = 0xff & ~((1 << P0_EN_1V2_LBN) | (1 << P0_EN_2V5_LBN) |
- (1 << P0_EN_3V3X_LBN) | (1 << P0_EN_5V_LBN) |
- (1 << P0_X_TRST_LBN));
- if (sfe4001_phy_flash_cfg)
- out |= 1 << P0_EN_3V3X_LBN;
-
- rc = i2c_smbus_write_byte_data(ioexp_client, P0_OUT, out);
- if (rc)
- goto fail_on;
- msleep(10);
-
- /* Turn on 1V power rail */
- out &= ~(1 << P0_EN_1V0X_LBN);
- rc = i2c_smbus_write_byte_data(ioexp_client, P0_OUT, out);
- if (rc)
- goto fail_on;
-
- EFX_INFO(efx, "waiting for power (attempt %d)...\n", count);
-
- schedule_timeout_uninterruptible(HZ);
-
- /* Check DSP is powered */
- rc = i2c_smbus_read_byte_data(ioexp_client, P1_IN);
- if (rc < 0)
- goto fail_on;
- if (rc & (1 << P1_AFE_PWD_LBN))
- goto done;
-
- /* DSP doesn't look powered in flash config mode */
- if (sfe4001_phy_flash_cfg)
- goto done;
- } while (++count < 20);
-
- EFX_INFO(efx, "timed out waiting for power\n");
- rc = -ETIMEDOUT;
- goto fail_on;
-
-done:
EFX_INFO(efx, "PHY is powered on\n");
return 0;
fail_on:
sfe4001_poweroff(efx);
fail_ioexp:
- i2c_unregister_device(ioexp_client);
+ i2c_unregister_device(efx->board_info.ioexp_client);
fail_hwmon:
- i2c_unregister_device(hwmon_client);
+ i2c_unregister_device(hwmon_client);
return rc;
}
diff --git a/drivers/net/sfc/spi.h b/drivers/net/sfc/spi.h
index 34412f3d41c..feef6194237 100644
--- a/drivers/net/sfc/spi.h
+++ b/drivers/net/sfc/spi.h
@@ -19,53 +19,48 @@
*
*************************************************************************/
-/*
- * Commands common to all known devices.
- *
+#define SPI_WRSR 0x01 /* Write status register */
+#define SPI_WRITE 0x02 /* Write data to memory array */
+#define SPI_READ 0x03 /* Read data from memory array */
+#define SPI_WRDI 0x04 /* Reset write enable latch */
+#define SPI_RDSR 0x05 /* Read status register */
+#define SPI_WREN 0x06 /* Set write enable latch */
+
+#define SPI_STATUS_WPEN 0x80 /* Write-protect pin enabled */
+#define SPI_STATUS_BP2 0x10 /* Block protection bit 2 */
+#define SPI_STATUS_BP1 0x08 /* Block protection bit 1 */
+#define SPI_STATUS_BP0 0x04 /* Block protection bit 0 */
+#define SPI_STATUS_WEN 0x02 /* State of the write enable latch */
+#define SPI_STATUS_NRDY 0x01 /* Device busy flag */
+
+/**
+ * struct efx_spi_device - an Efx SPI (Serial Peripheral Interface) device
+ * @efx: The Efx controller that owns this device
+ * @device_id: Controller's id for the device
+ * @size: Size (in bytes)
+ * @addr_len: Number of address bytes in read/write commands
+ * @munge_address: Flag whether addresses should be munged.
+ * Some devices with 9-bit addresses (e.g. AT25040A EEPROM)
+ * use bit 3 of the command byte as address bit A8, rather
+ * than having a two-byte address. If this flag is set, then
+ * commands should be munged in this way.
+ * @block_size: Write block size (in bytes).
+ * Write commands are limited to blocks with this size and alignment.
+ * @read: Read function for the device
+ * @write: Write function for the device
*/
-
-/* Write status register */
-#define SPI_WRSR 0x01
-
-/* Write data to memory array */
-#define SPI_WRITE 0x02
-
-/* Read data from memory array */
-#define SPI_READ 0x03
-
-/* Reset write enable latch */
-#define SPI_WRDI 0x04
-
-/* Read status register */
-#define SPI_RDSR 0x05
-
-/* Set write enable latch */
-#define SPI_WREN 0x06
-
-/* SST: Enable write to status register */
-#define SPI_SST_EWSR 0x50
-
-/*
- * Status register bits. Not all bits are supported on all devices.
- *
- */
-
-/* Write-protect pin enabled */
-#define SPI_STATUS_WPEN 0x80
-
-/* Block protection bit 2 */
-#define SPI_STATUS_BP2 0x10
-
-/* Block protection bit 1 */
-#define SPI_STATUS_BP1 0x08
-
-/* Block protection bit 0 */
-#define SPI_STATUS_BP0 0x04
-
-/* State of the write enable latch */
-#define SPI_STATUS_WEN 0x02
-
-/* Device busy flag */
-#define SPI_STATUS_NRDY 0x01
+struct efx_spi_device {
+ struct efx_nic *efx;
+ int device_id;
+ unsigned int size;
+ unsigned int addr_len;
+ unsigned int munge_address:1;
+ unsigned int block_size;
+};
+
+int falcon_spi_read(const struct efx_spi_device *spi, loff_t start,
+ size_t len, size_t *retlen, u8 *buffer);
+int falcon_spi_write(const struct efx_spi_device *spi, loff_t start,
+ size_t len, size_t *retlen, const u8 *buffer);
#endif /* EFX_SPI_H */
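
The @munge_address convention documented above is easiest to see as code. A minimal sketch, with the helper name and signature assumed rather than taken from this header:

	/* Fold address bit A8 into bit 3 of the command byte for 9-bit
	 * parts; munge_address is 0 or 1, so this is a no-op otherwise */
	static inline u8 efx_spi_munge_command(const struct efx_spi_device *spi,
					       u8 command, unsigned int address)
	{
		return command | (((address >> 8) & spi->munge_address) << 3);
	}
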
diff --git a/drivers/net/sfc/tenxpress.c b/drivers/net/sfc/tenxpress.c
index c0146061c32..d507c93d666 100644
--- a/drivers/net/sfc/tenxpress.c
+++ b/drivers/net/sfc/tenxpress.c
@@ -65,25 +65,10 @@
#define PMA_PMD_LED_DEFAULT (PMA_PMD_LED_OFF << PMA_PMD_LED_RX_LBN)
-/* Self test (BIST) control register */
-#define PMA_PMD_BIST_CTRL_REG (0xc014)
-#define PMA_PMD_BIST_BER_LBN (2) /* Run BER test */
-#define PMA_PMD_BIST_CONT_LBN (1) /* Run continuous BIST until cleared */
-#define PMA_PMD_BIST_SINGLE_LBN (0) /* Run 1 BIST iteration (self clears) */
-/* Self test status register */
-#define PMA_PMD_BIST_STAT_REG (0xc015)
-#define PMA_PMD_BIST_ENX_LBN (3)
-#define PMA_PMD_BIST_PMA_LBN (2)
-#define PMA_PMD_BIST_RXD_LBN (1)
-#define PMA_PMD_BIST_AFE_LBN (0)
-
/* Special Software reset register */
#define PMA_PMD_EXT_CTRL_REG 49152
#define PMA_PMD_EXT_SSR_LBN 15
-#define BIST_MAX_DELAY (1000)
-#define BIST_POLL_DELAY (10)
-
/* Misc register defines */
#define PCS_CLOCK_CTRL_REG 0xd801
#define PLL312_RST_N_LBN 2
@@ -119,27 +104,12 @@ MODULE_PARM_DESC(crc_error_reset_threshold,
"Max number of CRC errors before XAUI reset");
struct tenxpress_phy_data {
- enum tenxpress_state state;
enum efx_loopback_mode loopback_mode;
atomic_t bad_crc_count;
- int tx_disabled;
+ enum efx_phy_mode phy_mode;
int bad_lp_tries;
};
-static int tenxpress_state_is(struct efx_nic *efx, int state)
-{
- struct tenxpress_phy_data *phy_data = efx->phy_data;
- return (phy_data != NULL) && (state == phy_data->state);
-}
-
-void tenxpress_set_state(struct efx_nic *efx,
- enum tenxpress_state state)
-{
- struct tenxpress_phy_data *phy_data = efx->phy_data;
- if (phy_data != NULL)
- phy_data->state = state;
-}
-
void tenxpress_crc_err(struct efx_nic *efx)
{
struct tenxpress_phy_data *phy_data = efx->phy_data;
@@ -176,8 +146,6 @@ static int tenxpress_phy_check(struct efx_nic *efx)
return 0;
}
-static void tenxpress_reset_xaui(struct efx_nic *efx);
-
static int tenxpress_init(struct efx_nic *efx)
{
int rc, reg;
@@ -214,15 +182,12 @@ static int tenxpress_phy_init(struct efx_nic *efx)
if (!phy_data)
return -ENOMEM;
efx->phy_data = phy_data;
+ phy_data->phy_mode = efx->phy_mode;
- tenxpress_set_state(efx, TENXPRESS_STATUS_NORMAL);
-
- if (!sfe4001_phy_flash_cfg) {
- rc = mdio_clause45_wait_reset_mmds(efx,
- TENXPRESS_REQUIRED_DEVS);
- if (rc < 0)
- goto fail;
- }
+ rc = mdio_clause45_wait_reset_mmds(efx,
+ TENXPRESS_REQUIRED_DEVS);
+ if (rc < 0)
+ goto fail;
rc = mdio_clause45_check_mmds(efx, TENXPRESS_REQUIRED_DEVS, 0);
if (rc < 0)
@@ -249,7 +214,10 @@ static int tenxpress_special_reset(struct efx_nic *efx)
{
int rc, reg;
- EFX_TRACE(efx, "%s\n", __func__);
+ /* The XGMAC clock is driven from the SFC7101/SFT9001 312MHz clock, so
+ * a special software reset can glitch the XGMAC sufficiently for stats
+	 * requests to fail. Since we don't often special_reset, just lock. */
+ spin_lock(&efx->stats_lock);
/* Initiate reset */
reg = mdio_clause45_read(efx, efx->mii.phy_id,
@@ -258,23 +226,25 @@ static int tenxpress_special_reset(struct efx_nic *efx)
mdio_clause45_write(efx, efx->mii.phy_id, MDIO_MMD_PMAPMD,
PMA_PMD_EXT_CTRL_REG, reg);
- msleep(200);
+ mdelay(200);
/* Wait for the blocks to come out of reset */
rc = mdio_clause45_wait_reset_mmds(efx,
TENXPRESS_REQUIRED_DEVS);
if (rc < 0)
- return rc;
+ goto unlock;
/* Try and reconfigure the device */
rc = tenxpress_init(efx);
if (rc < 0)
- return rc;
+ goto unlock;
- return 0;
+unlock:
+ spin_unlock(&efx->stats_lock);
+ return rc;
}
-static void tenxpress_set_bad_lp(struct efx_nic *efx, int bad_lp)
+static void tenxpress_set_bad_lp(struct efx_nic *efx, bool bad_lp)
{
struct tenxpress_phy_data *pd = efx->phy_data;
int reg;
@@ -311,15 +281,15 @@ static void tenxpress_set_bad_lp(struct efx_nic *efx, int bad_lp)
* into a non-10GBT port and if so warn the user that they won't get
* link any time soon as we are 10GBT only, unless caller specified
* not to do this check (it isn't useful in loopback) */
-static int tenxpress_link_ok(struct efx_nic *efx, int check_lp)
+static bool tenxpress_link_ok(struct efx_nic *efx, bool check_lp)
{
- int ok = mdio_clause45_links_ok(efx, TENXPRESS_REQUIRED_DEVS);
+ bool ok = mdio_clause45_links_ok(efx, TENXPRESS_REQUIRED_DEVS);
if (ok) {
- tenxpress_set_bad_lp(efx, 0);
+ tenxpress_set_bad_lp(efx, false);
} else if (check_lp) {
/* Are we plugged into the wrong sort of link? */
- int bad_lp = 0;
+ bool bad_lp = false;
int phy_id = efx->mii.phy_id;
int an_stat = mdio_clause45_read(efx, phy_id, MDIO_MMD_AN,
MDIO_AN_STATUS);
@@ -332,7 +302,7 @@ static int tenxpress_link_ok(struct efx_nic *efx, int check_lp)
* bit has the advantage of not clearing when autoneg
* restarts. */
if (!(xphy_stat & (1 << PMA_PMD_XSTAT_FLP_LBN))) {
- tenxpress_set_bad_lp(efx, 0);
+ tenxpress_set_bad_lp(efx, false);
return ok;
}
@@ -367,16 +337,19 @@ static void tenxpress_phyxs_loopback(struct efx_nic *efx)
static void tenxpress_phy_reconfigure(struct efx_nic *efx)
{
struct tenxpress_phy_data *phy_data = efx->phy_data;
- int loop_change = LOOPBACK_OUT_OF(phy_data, efx,
- TENXPRESS_LOOPBACKS);
+ bool loop_change = LOOPBACK_OUT_OF(phy_data, efx,
+ TENXPRESS_LOOPBACKS);
- if (!tenxpress_state_is(efx, TENXPRESS_STATUS_NORMAL))
+ if (efx->phy_mode & PHY_MODE_SPECIAL) {
+ phy_data->phy_mode = efx->phy_mode;
return;
+ }
/* When coming out of transmit disable, coming out of low power
* mode, or moving out of any PHY internal loopback mode,
* perform a special software reset */
- if ((phy_data->tx_disabled && !efx->tx_disabled) ||
+ if ((efx->phy_mode == PHY_MODE_NORMAL &&
+ phy_data->phy_mode != PHY_MODE_NORMAL) ||
loop_change) {
tenxpress_special_reset(efx);
falcon_reset_xaui(efx);
@@ -386,9 +359,9 @@ static void tenxpress_phy_reconfigure(struct efx_nic *efx)
mdio_clause45_phy_reconfigure(efx);
tenxpress_phyxs_loopback(efx);
- phy_data->tx_disabled = efx->tx_disabled;
phy_data->loopback_mode = efx->loopback_mode;
- efx->link_up = tenxpress_link_ok(efx, 0);
+ phy_data->phy_mode = efx->phy_mode;
+ efx->link_up = tenxpress_link_ok(efx, false);
efx->link_options = GM_LPA_10000FULL;
}
@@ -402,16 +375,14 @@ static void tenxpress_phy_clear_interrupt(struct efx_nic *efx)
static int tenxpress_phy_check_hw(struct efx_nic *efx)
{
struct tenxpress_phy_data *phy_data = efx->phy_data;
- int phy_up = tenxpress_state_is(efx, TENXPRESS_STATUS_NORMAL);
- int link_ok;
+ bool link_ok;
- link_ok = phy_up && tenxpress_link_ok(efx, 1);
+ link_ok = tenxpress_link_ok(efx, true);
if (link_ok != efx->link_up)
falcon_xmac_sim_phy_event(efx);
- /* Nothing to check if we've already shut down the PHY */
- if (!phy_up)
+ if (phy_data->phy_mode != PHY_MODE_NORMAL)
return 0;
if (atomic_read(&phy_data->bad_crc_count) > crc_error_reset_threshold) {
@@ -444,7 +415,7 @@ static void tenxpress_phy_fini(struct efx_nic *efx)
/* Set the RX and TX LEDs and Link LED flashing. The other LEDs
* (which probably aren't wired anyway) are left in AUTO mode */
-void tenxpress_phy_blink(struct efx_nic *efx, int blink)
+void tenxpress_phy_blink(struct efx_nic *efx, bool blink)
{
int reg;
@@ -459,52 +430,10 @@ void tenxpress_phy_blink(struct efx_nic *efx, int blink)
PMA_PMD_LED_OVERR_REG, reg);
}
-static void tenxpress_reset_xaui(struct efx_nic *efx)
+static int tenxpress_phy_test(struct efx_nic *efx)
{
- int phy = efx->mii.phy_id;
- int clk_ctrl, test_select, soft_rst2;
-
- /* Real work is done on clock_ctrl other resets are thought to be
- * optional but make the reset more reliable
- */
-
- /* Read */
- clk_ctrl = mdio_clause45_read(efx, phy, MDIO_MMD_PCS,
- PCS_CLOCK_CTRL_REG);
- test_select = mdio_clause45_read(efx, phy, MDIO_MMD_PCS,
- PCS_TEST_SELECT_REG);
- soft_rst2 = mdio_clause45_read(efx, phy, MDIO_MMD_PCS,
- PCS_SOFT_RST2_REG);
-
- /* Put in reset */
- test_select &= ~(1 << CLK312_EN_LBN);
- mdio_clause45_write(efx, phy, MDIO_MMD_PCS,
- PCS_TEST_SELECT_REG, test_select);
-
- soft_rst2 &= ~((1 << XGXS_RST_N_LBN) | (1 << SERDES_RST_N_LBN));
- mdio_clause45_write(efx, phy, MDIO_MMD_PCS,
- PCS_SOFT_RST2_REG, soft_rst2);
-
- clk_ctrl &= ~(1 << PLL312_RST_N_LBN);
- mdio_clause45_write(efx, phy, MDIO_MMD_PCS,
- PCS_CLOCK_CTRL_REG, clk_ctrl);
- udelay(10);
-
- /* Remove reset */
- clk_ctrl |= (1 << PLL312_RST_N_LBN);
- mdio_clause45_write(efx, phy, MDIO_MMD_PCS,
- PCS_CLOCK_CTRL_REG, clk_ctrl);
- udelay(10);
-
- soft_rst2 |= ((1 << XGXS_RST_N_LBN) | (1 << SERDES_RST_N_LBN));
- mdio_clause45_write(efx, phy, MDIO_MMD_PCS,
- PCS_SOFT_RST2_REG, soft_rst2);
- udelay(10);
-
- test_select |= (1 << CLK312_EN_LBN);
- mdio_clause45_write(efx, phy, MDIO_MMD_PCS,
- PCS_TEST_SELECT_REG, test_select);
- udelay(10);
+ /* BIST is automatically run after a special software reset */
+ return tenxpress_special_reset(efx);
}
struct efx_phy_operations falcon_tenxpress_phy_ops = {
@@ -513,7 +442,7 @@ struct efx_phy_operations falcon_tenxpress_phy_ops = {
.check_hw = tenxpress_phy_check_hw,
.fini = tenxpress_phy_fini,
.clear_interrupt = tenxpress_phy_clear_interrupt,
- .reset_xaui = tenxpress_reset_xaui,
+ .test = tenxpress_phy_test,
.mmds = TENXPRESS_REQUIRED_DEVS,
.loopbacks = TENXPRESS_LOOPBACKS,
};
diff --git a/drivers/net/sfc/tx.c b/drivers/net/sfc/tx.c
index 5e8374ab28e..da3e9ff339f 100644
--- a/drivers/net/sfc/tx.c
+++ b/drivers/net/sfc/tx.c
@@ -47,7 +47,7 @@ void efx_stop_queue(struct efx_nic *efx)
* We want to be able to nest calls to netif_stop_queue(), since each
* channel can have an individual stop on the queue.
*/
-inline void efx_wake_queue(struct efx_nic *efx)
+void efx_wake_queue(struct efx_nic *efx)
{
local_bh_disable();
if (atomic_dec_and_lock(&efx->netif_stop_count,
@@ -59,19 +59,21 @@ inline void efx_wake_queue(struct efx_nic *efx)
local_bh_enable();
}
-static inline void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
- struct efx_tx_buffer *buffer)
+static void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
+ struct efx_tx_buffer *buffer)
{
if (buffer->unmap_len) {
struct pci_dev *pci_dev = tx_queue->efx->pci_dev;
+ dma_addr_t unmap_addr = (buffer->dma_addr + buffer->len -
+ buffer->unmap_len);
if (buffer->unmap_single)
- pci_unmap_single(pci_dev, buffer->unmap_addr,
- buffer->unmap_len, PCI_DMA_TODEVICE);
+ pci_unmap_single(pci_dev, unmap_addr, buffer->unmap_len,
+ PCI_DMA_TODEVICE);
else
- pci_unmap_page(pci_dev, buffer->unmap_addr,
- buffer->unmap_len, PCI_DMA_TODEVICE);
+ pci_unmap_page(pci_dev, unmap_addr, buffer->unmap_len,
+ PCI_DMA_TODEVICE);
buffer->unmap_len = 0;
- buffer->unmap_single = 0;
+ buffer->unmap_single = false;
}
if (buffer->skb) {
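
Worth spelling out why the per-buffer unmap_addr field can be dropped: a buffer's descriptor always covers the tail of its DMA mapping, so the mapping's start is recoverable from fields that are kept anyway. A sketch of the invariant both unmap sites now compute inline (helper name illustrative):

	/* Start of the mapped region: the mapping ends where the buffer ends */
	static inline dma_addr_t efx_tx_buffer_unmap_addr(
		const struct efx_tx_buffer *buffer)
	{
		return buffer->dma_addr + buffer->len - buffer->unmap_len;
	}
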
@@ -103,13 +105,13 @@ struct efx_tso_header {
};
static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
- const struct sk_buff *skb);
+ struct sk_buff *skb);
static void efx_fini_tso(struct efx_tx_queue *tx_queue);
static void efx_tsoh_heap_free(struct efx_tx_queue *tx_queue,
struct efx_tso_header *tsoh);
-static inline void efx_tsoh_free(struct efx_tx_queue *tx_queue,
- struct efx_tx_buffer *buffer)
+static void efx_tsoh_free(struct efx_tx_queue *tx_queue,
+ struct efx_tx_buffer *buffer)
{
if (buffer->tsoh) {
if (likely(!buffer->tsoh->unmap_len)) {
@@ -136,8 +138,8 @@ static inline void efx_tsoh_free(struct efx_tx_queue *tx_queue,
* Returns NETDEV_TX_OK or NETDEV_TX_BUSY
* You must hold netif_tx_lock() to call this function.
*/
-static inline int efx_enqueue_skb(struct efx_tx_queue *tx_queue,
- const struct sk_buff *skb)
+static int efx_enqueue_skb(struct efx_tx_queue *tx_queue,
+ struct sk_buff *skb)
{
struct efx_nic *efx = tx_queue->efx;
struct pci_dev *pci_dev = efx->pci_dev;
@@ -148,7 +150,7 @@ static inline int efx_enqueue_skb(struct efx_tx_queue *tx_queue,
unsigned int len, unmap_len = 0, fill_level, insert_ptr, misalign;
dma_addr_t dma_addr, unmap_addr = 0;
unsigned int dma_len;
- unsigned unmap_single;
+ bool unmap_single;
int q_space, i = 0;
int rc = NETDEV_TX_OK;
@@ -167,7 +169,7 @@ static inline int efx_enqueue_skb(struct efx_tx_queue *tx_queue,
* since this is more efficient on machines with sparse
* memory.
*/
- unmap_single = 1;
+ unmap_single = true;
dma_addr = pci_map_single(pci_dev, skb->data, len, PCI_DMA_TODEVICE);
/* Process all fragments */
@@ -213,7 +215,7 @@ static inline int efx_enqueue_skb(struct efx_tx_queue *tx_queue,
EFX_BUG_ON_PARANOID(buffer->tsoh);
EFX_BUG_ON_PARANOID(buffer->skb);
EFX_BUG_ON_PARANOID(buffer->len);
- EFX_BUG_ON_PARANOID(buffer->continuation != 1);
+ EFX_BUG_ON_PARANOID(!buffer->continuation);
EFX_BUG_ON_PARANOID(buffer->unmap_len);
dma_len = (((~dma_addr) & efx->type->tx_dma_mask) + 1);
@@ -233,7 +235,6 @@ static inline int efx_enqueue_skb(struct efx_tx_queue *tx_queue,
} while (len);
/* Transfer ownership of the unmapping to the final buffer */
- buffer->unmap_addr = unmap_addr;
buffer->unmap_single = unmap_single;
buffer->unmap_len = unmap_len;
unmap_len = 0;
@@ -247,14 +248,14 @@ static inline int efx_enqueue_skb(struct efx_tx_queue *tx_queue,
page_offset = fragment->page_offset;
i++;
/* Map for DMA */
- unmap_single = 0;
+ unmap_single = false;
dma_addr = pci_map_page(pci_dev, page, page_offset, len,
PCI_DMA_TODEVICE);
}
/* Transfer ownership of the skb to the final buffer */
buffer->skb = skb;
- buffer->continuation = 0;
+ buffer->continuation = false;
/* Pass off to hardware */
falcon_push_buffers(tx_queue);
@@ -287,9 +288,14 @@ static inline int efx_enqueue_skb(struct efx_tx_queue *tx_queue,
}
/* Free the fragment we were mid-way through pushing */
- if (unmap_len)
- pci_unmap_page(pci_dev, unmap_addr, unmap_len,
- PCI_DMA_TODEVICE);
+ if (unmap_len) {
+ if (unmap_single)
+ pci_unmap_single(pci_dev, unmap_addr, unmap_len,
+ PCI_DMA_TODEVICE);
+ else
+ pci_unmap_page(pci_dev, unmap_addr, unmap_len,
+ PCI_DMA_TODEVICE);
+ }
return rc;
}
@@ -299,8 +305,8 @@ static inline int efx_enqueue_skb(struct efx_tx_queue *tx_queue,
* This removes packets from the TX queue, up to and including the
* specified index.
*/
-static inline void efx_dequeue_buffers(struct efx_tx_queue *tx_queue,
- unsigned int index)
+static void efx_dequeue_buffers(struct efx_tx_queue *tx_queue,
+ unsigned int index)
{
struct efx_nic *efx = tx_queue->efx;
unsigned int stop_index, read_ptr;
@@ -320,7 +326,7 @@ static inline void efx_dequeue_buffers(struct efx_tx_queue *tx_queue,
}
efx_dequeue_buffer(tx_queue, buffer);
- buffer->continuation = 1;
+ buffer->continuation = true;
buffer->len = 0;
++tx_queue->read_count;
@@ -367,8 +373,15 @@ inline int efx_xmit(struct efx_nic *efx,
*/
int efx_hard_start_xmit(struct sk_buff *skb, struct net_device *net_dev)
{
- struct efx_nic *efx = net_dev->priv;
- return efx_xmit(efx, &efx->tx_queue[0], skb);
+ struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_tx_queue *tx_queue;
+
+ if (likely(skb->ip_summed == CHECKSUM_PARTIAL))
+ tx_queue = &efx->tx_queue[EFX_TX_QUEUE_OFFLOAD_CSUM];
+ else
+ tx_queue = &efx->tx_queue[EFX_TX_QUEUE_NO_CSUM];
+
+ return efx_xmit(efx, tx_queue, skb);
}
void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
@@ -412,30 +425,25 @@ int efx_probe_tx_queue(struct efx_tx_queue *tx_queue)
/* Allocate software ring */
txq_size = (efx->type->txd_ring_mask + 1) * sizeof(*tx_queue->buffer);
tx_queue->buffer = kzalloc(txq_size, GFP_KERNEL);
- if (!tx_queue->buffer) {
- rc = -ENOMEM;
- goto fail1;
- }
+ if (!tx_queue->buffer)
+ return -ENOMEM;
for (i = 0; i <= efx->type->txd_ring_mask; ++i)
- tx_queue->buffer[i].continuation = 1;
+ tx_queue->buffer[i].continuation = true;
/* Allocate hardware ring */
rc = falcon_probe_tx(tx_queue);
if (rc)
- goto fail2;
+ goto fail;
return 0;
- fail2:
+ fail:
kfree(tx_queue->buffer);
tx_queue->buffer = NULL;
- fail1:
- tx_queue->used = 0;
-
return rc;
}
-int efx_init_tx_queue(struct efx_tx_queue *tx_queue)
+void efx_init_tx_queue(struct efx_tx_queue *tx_queue)
{
EFX_LOG(tx_queue->efx, "initialising TX queue %d\n", tx_queue->queue);
@@ -446,7 +454,7 @@ int efx_init_tx_queue(struct efx_tx_queue *tx_queue)
BUG_ON(tx_queue->stopped);
/* Set up TX descriptor ring */
- return falcon_init_tx(tx_queue);
+ falcon_init_tx(tx_queue);
}
void efx_release_tx_buffers(struct efx_tx_queue *tx_queue)
@@ -461,7 +469,7 @@ void efx_release_tx_buffers(struct efx_tx_queue *tx_queue)
buffer = &tx_queue->buffer[tx_queue->read_count &
tx_queue->efx->type->txd_ring_mask];
efx_dequeue_buffer(tx_queue, buffer);
- buffer->continuation = 1;
+ buffer->continuation = true;
buffer->len = 0;
++tx_queue->read_count;
@@ -494,7 +502,6 @@ void efx_remove_tx_queue(struct efx_tx_queue *tx_queue)
kfree(tx_queue->buffer);
tx_queue->buffer = NULL;
- tx_queue->used = 0;
}
@@ -509,7 +516,7 @@ void efx_remove_tx_queue(struct efx_tx_queue *tx_queue)
/* Number of bytes inserted at the start of a TSO header buffer,
* similar to NET_IP_ALIGN.
*/
-#if defined(__i386__) || defined(__x86_64__)
+#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
#define TSOH_OFFSET 0
#else
#define TSOH_OFFSET NET_IP_ALIGN
@@ -533,47 +540,37 @@ void efx_remove_tx_queue(struct efx_tx_queue *tx_queue)
/**
* struct tso_state - TSO state for an SKB
- * @remaining_len: Bytes of data we've yet to segment
+ * @out_len: Remaining length in current segment
* @seqnum: Current sequence number
+ * @ipv4_id: Current IPv4 ID, host endian
* @packet_space: Remaining space in current packet
- * @ifc: Input fragment cursor.
- * Where we are in the current fragment of the incoming SKB. These
- * values get updated in place when we split a fragment over
- * multiple packets.
- * @p: Parameters.
- * These values are set once at the start of the TSO send and do
- * not get changed as the routine progresses.
+ * @dma_addr: DMA address of current position
+ * @in_len: Remaining length in current SKB fragment
+ * @unmap_len: Length of SKB fragment
+ * @unmap_addr: DMA address of SKB fragment
+ * @unmap_single: DMA single vs page mapping flag
+ * @header_len: Number of bytes of header
+ * @full_packet_size: Number of bytes to put in each outgoing segment
*
* The state used during segmentation. It is put into this data structure
* just to make it easy to pass into inline functions.
*/
struct tso_state {
- unsigned remaining_len;
+ /* Output position */
+ unsigned out_len;
unsigned seqnum;
+ unsigned ipv4_id;
unsigned packet_space;
- struct {
- /* DMA address of current position */
- dma_addr_t dma_addr;
- /* Remaining length */
- unsigned int len;
- /* DMA address and length of the whole fragment */
- unsigned int unmap_len;
- dma_addr_t unmap_addr;
- struct page *page;
- unsigned page_off;
- } ifc;
-
- struct {
- /* The number of bytes of header */
- unsigned int header_length;
-
- /* The number of bytes to put in each outgoing segment. */
- int full_packet_size;
-
- /* Current IPv4 ID, host endian. */
- unsigned ipv4_id;
- } p;
+ /* Input position */
+ dma_addr_t dma_addr;
+ unsigned in_len;
+ unsigned unmap_len;
+ dma_addr_t unmap_addr;
+ bool unmap_single;
+
+ unsigned header_len;
+ int full_packet_size;
};
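
A worked example of how these fields interact, with illustrative numbers: for header_len 54 (14 Ethernet + 20 IP + 20 TCP) and gso_size 1460, full_packet_size is 1514; a 4380-byte payload drains out_len 4380 -> 2920 -> 1460 -> 0 across three packets, with seqnum advancing by gso_size and ipv4_id by one per packet. The packet count is simply:

	/* Illustrative only: output packets needed for one TSO burst */
	static inline unsigned tso_n_packets(unsigned payload_len,
					     unsigned gso_size)
	{
		return (payload_len + gso_size - 1) / gso_size;
	}
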
@@ -581,11 +578,24 @@ struct tso_state {
* Verify that our various assumptions about sk_buffs and the conditions
* under which TSO will be attempted hold true.
*/
-static inline void efx_tso_check_safe(const struct sk_buff *skb)
+static void efx_tso_check_safe(struct sk_buff *skb)
{
- EFX_BUG_ON_PARANOID(skb->protocol != htons(ETH_P_IP));
+ __be16 protocol = skb->protocol;
+
EFX_BUG_ON_PARANOID(((struct ethhdr *)skb->data)->h_proto !=
- skb->protocol);
+ protocol);
+ if (protocol == htons(ETH_P_8021Q)) {
+ /* Find the encapsulated protocol; reset network header
+ * and transport header based on that. */
+ struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
+ protocol = veh->h_vlan_encapsulated_proto;
+ skb_set_network_header(skb, sizeof(*veh));
+ if (protocol == htons(ETH_P_IP))
+ skb_set_transport_header(skb, sizeof(*veh) +
+ 4 * ip_hdr(skb)->ihl);
+ }
+
+ EFX_BUG_ON_PARANOID(protocol != htons(ETH_P_IP));
EFX_BUG_ON_PARANOID(ip_hdr(skb)->protocol != IPPROTO_TCP);
EFX_BUG_ON_PARANOID((PTR_DIFF(tcp_hdr(skb), skb->data)
+ (tcp_hdr(skb)->doff << 2u)) >
@@ -685,18 +695,14 @@ efx_tsoh_heap_free(struct efx_tx_queue *tx_queue, struct efx_tso_header *tsoh)
* @tx_queue: Efx TX queue
* @dma_addr: DMA address of fragment
* @len: Length of fragment
- * @skb: Only non-null for end of last segment
- * @end_of_packet: True if last fragment in a packet
- * @unmap_addr: DMA address of fragment for unmapping
- * @unmap_len: Only set this in last segment of a fragment
+ * @final_buffer: The final buffer inserted into the queue
*
* Push descriptors onto the TX queue. Return 0 on success or 1 if
* @tx_queue is full.
*/
static int efx_tx_queue_insert(struct efx_tx_queue *tx_queue,
dma_addr_t dma_addr, unsigned len,
- const struct sk_buff *skb, int end_of_packet,
- dma_addr_t unmap_addr, unsigned unmap_len)
+ struct efx_tx_buffer **final_buffer)
{
struct efx_tx_buffer *buffer;
struct efx_nic *efx = tx_queue->efx;
@@ -724,8 +730,10 @@ static int efx_tx_queue_insert(struct efx_tx_queue *tx_queue,
fill_level = (tx_queue->insert_count
- tx_queue->old_read_count);
q_space = efx->type->txd_ring_mask - 1 - fill_level;
- if (unlikely(q_space-- <= 0))
+ if (unlikely(q_space-- <= 0)) {
+ *final_buffer = NULL;
return 1;
+ }
smp_mb();
--tx_queue->stopped;
}
@@ -742,7 +750,7 @@ static int efx_tx_queue_insert(struct efx_tx_queue *tx_queue,
EFX_BUG_ON_PARANOID(buffer->len);
EFX_BUG_ON_PARANOID(buffer->unmap_len);
EFX_BUG_ON_PARANOID(buffer->skb);
- EFX_BUG_ON_PARANOID(buffer->continuation != 1);
+ EFX_BUG_ON_PARANOID(!buffer->continuation);
EFX_BUG_ON_PARANOID(buffer->tsoh);
buffer->dma_addr = dma_addr;
@@ -765,10 +773,7 @@ static int efx_tx_queue_insert(struct efx_tx_queue *tx_queue,
EFX_BUG_ON_PARANOID(!len);
buffer->len = len;
- buffer->skb = skb;
- buffer->continuation = !end_of_packet;
- buffer->unmap_addr = unmap_addr;
- buffer->unmap_len = unmap_len;
+ *final_buffer = buffer;
return 0;
}
@@ -780,8 +785,8 @@ static int efx_tx_queue_insert(struct efx_tx_queue *tx_queue,
* a single fragment, and we know it doesn't cross a page boundary. It
* also allows us to not worry about end-of-packet etc.
*/
-static inline void efx_tso_put_header(struct efx_tx_queue *tx_queue,
- struct efx_tso_header *tsoh, unsigned len)
+static void efx_tso_put_header(struct efx_tx_queue *tx_queue,
+ struct efx_tso_header *tsoh, unsigned len)
{
struct efx_tx_buffer *buffer;
@@ -791,7 +796,7 @@ static inline void efx_tso_put_header(struct efx_tx_queue *tx_queue,
EFX_BUG_ON_PARANOID(buffer->len);
EFX_BUG_ON_PARANOID(buffer->unmap_len);
EFX_BUG_ON_PARANOID(buffer->skb);
- EFX_BUG_ON_PARANOID(buffer->continuation != 1);
+ EFX_BUG_ON_PARANOID(!buffer->continuation);
EFX_BUG_ON_PARANOID(buffer->tsoh);
buffer->len = len;
buffer->dma_addr = tsoh->dma_addr;
@@ -805,6 +810,7 @@ static inline void efx_tso_put_header(struct efx_tx_queue *tx_queue,
static void efx_enqueue_unwind(struct efx_tx_queue *tx_queue)
{
struct efx_tx_buffer *buffer;
+ dma_addr_t unmap_addr;
/* Work backwards until we hit the original insert pointer value */
while (tx_queue->insert_count != tx_queue->write_count) {
@@ -814,11 +820,18 @@ static void efx_enqueue_unwind(struct efx_tx_queue *tx_queue)
efx_tsoh_free(tx_queue, buffer);
EFX_BUG_ON_PARANOID(buffer->skb);
buffer->len = 0;
- buffer->continuation = 1;
+ buffer->continuation = true;
if (buffer->unmap_len) {
- pci_unmap_page(tx_queue->efx->pci_dev,
- buffer->unmap_addr,
- buffer->unmap_len, PCI_DMA_TODEVICE);
+ unmap_addr = (buffer->dma_addr + buffer->len -
+ buffer->unmap_len);
+ if (buffer->unmap_single)
+ pci_unmap_single(tx_queue->efx->pci_dev,
+ unmap_addr, buffer->unmap_len,
+ PCI_DMA_TODEVICE);
+ else
+ pci_unmap_page(tx_queue->efx->pci_dev,
+ unmap_addr, buffer->unmap_len,
+ PCI_DMA_TODEVICE);
buffer->unmap_len = 0;
}
}
@@ -826,50 +839,57 @@ static void efx_enqueue_unwind(struct efx_tx_queue *tx_queue)
/* Parse the SKB header and initialise state. */
-static inline void tso_start(struct tso_state *st, const struct sk_buff *skb)
+static void tso_start(struct tso_state *st, const struct sk_buff *skb)
{
/* All ethernet/IP/TCP headers combined size is TCP header size
* plus offset of TCP header relative to start of packet.
*/
- st->p.header_length = ((tcp_hdr(skb)->doff << 2u)
- + PTR_DIFF(tcp_hdr(skb), skb->data));
- st->p.full_packet_size = (st->p.header_length
- + skb_shinfo(skb)->gso_size);
+ st->header_len = ((tcp_hdr(skb)->doff << 2u)
+ + PTR_DIFF(tcp_hdr(skb), skb->data));
+ st->full_packet_size = st->header_len + skb_shinfo(skb)->gso_size;
- st->p.ipv4_id = ntohs(ip_hdr(skb)->id);
+ st->ipv4_id = ntohs(ip_hdr(skb)->id);
st->seqnum = ntohl(tcp_hdr(skb)->seq);
EFX_BUG_ON_PARANOID(tcp_hdr(skb)->urg);
EFX_BUG_ON_PARANOID(tcp_hdr(skb)->syn);
EFX_BUG_ON_PARANOID(tcp_hdr(skb)->rst);
- st->packet_space = st->p.full_packet_size;
- st->remaining_len = skb->len - st->p.header_length;
+ st->packet_space = st->full_packet_size;
+ st->out_len = skb->len - st->header_len;
+ st->unmap_len = 0;
+ st->unmap_single = false;
}
-
-/**
- * tso_get_fragment - record fragment details and map for DMA
- * @st: TSO state
- * @efx: Efx NIC
- * @data: Pointer to fragment data
- * @len: Length of fragment
- *
- * Record fragment details and map for DMA. Return 0 on success, or
- * -%ENOMEM if DMA mapping fails.
- */
-static inline int tso_get_fragment(struct tso_state *st, struct efx_nic *efx,
- int len, struct page *page, int page_off)
+static int tso_get_fragment(struct tso_state *st, struct efx_nic *efx,
+ skb_frag_t *frag)
{
+ st->unmap_addr = pci_map_page(efx->pci_dev, frag->page,
+ frag->page_offset, frag->size,
+ PCI_DMA_TODEVICE);
+ if (likely(!pci_dma_mapping_error(efx->pci_dev, st->unmap_addr))) {
+ st->unmap_single = false;
+ st->unmap_len = frag->size;
+ st->in_len = frag->size;
+ st->dma_addr = st->unmap_addr;
+ return 0;
+ }
+ return -ENOMEM;
+}
- st->ifc.unmap_addr = pci_map_page(efx->pci_dev, page, page_off,
- len, PCI_DMA_TODEVICE);
- if (likely(!pci_dma_mapping_error(efx->pci_dev, st->ifc.unmap_addr))) {
- st->ifc.unmap_len = len;
- st->ifc.len = len;
- st->ifc.dma_addr = st->ifc.unmap_addr;
- st->ifc.page = page;
- st->ifc.page_off = page_off;
+static int tso_get_head_fragment(struct tso_state *st, struct efx_nic *efx,
+ const struct sk_buff *skb)
+{
+ int hl = st->header_len;
+ int len = skb_headlen(skb) - hl;
+
+ st->unmap_addr = pci_map_single(efx->pci_dev, skb->data + hl,
+ len, PCI_DMA_TODEVICE);
+ if (likely(!pci_dma_mapping_error(efx->pci_dev, st->unmap_addr))) {
+ st->unmap_single = true;
+ st->unmap_len = len;
+ st->in_len = len;
+ st->dma_addr = st->unmap_addr;
return 0;
}
return -ENOMEM;
@@ -886,36 +906,45 @@ static inline int tso_get_fragment(struct tso_state *st, struct efx_nic *efx,
* of fragment or end-of-packet. Return 0 on success, 1 if not enough
* space in @tx_queue.
*/
-static inline int tso_fill_packet_with_fragment(struct efx_tx_queue *tx_queue,
- const struct sk_buff *skb,
- struct tso_state *st)
+static int tso_fill_packet_with_fragment(struct efx_tx_queue *tx_queue,
+ const struct sk_buff *skb,
+ struct tso_state *st)
{
-
+ struct efx_tx_buffer *buffer;
int n, end_of_packet, rc;
- if (st->ifc.len == 0)
+ if (st->in_len == 0)
return 0;
if (st->packet_space == 0)
return 0;
- EFX_BUG_ON_PARANOID(st->ifc.len <= 0);
+ EFX_BUG_ON_PARANOID(st->in_len <= 0);
EFX_BUG_ON_PARANOID(st->packet_space <= 0);
- n = min(st->ifc.len, st->packet_space);
+ n = min(st->in_len, st->packet_space);
st->packet_space -= n;
- st->remaining_len -= n;
- st->ifc.len -= n;
- st->ifc.page_off += n;
- end_of_packet = st->remaining_len == 0 || st->packet_space == 0;
-
- rc = efx_tx_queue_insert(tx_queue, st->ifc.dma_addr, n,
- st->remaining_len ? NULL : skb,
- end_of_packet, st->ifc.unmap_addr,
- st->ifc.len ? 0 : st->ifc.unmap_len);
-
- st->ifc.dma_addr += n;
+ st->out_len -= n;
+ st->in_len -= n;
+
+ rc = efx_tx_queue_insert(tx_queue, st->dma_addr, n, &buffer);
+ if (likely(rc == 0)) {
+ if (st->out_len == 0)
+ /* Transfer ownership of the skb */
+ buffer->skb = skb;
+
+ end_of_packet = st->out_len == 0 || st->packet_space == 0;
+ buffer->continuation = !end_of_packet;
+
+ if (st->in_len == 0) {
+ /* Transfer ownership of the pci mapping */
+ buffer->unmap_len = st->unmap_len;
+ buffer->unmap_single = st->unmap_single;
+ st->unmap_len = 0;
+ }
+ }
+ st->dma_addr += n;
return rc;
}
@@ -929,9 +958,9 @@ static inline int tso_fill_packet_with_fragment(struct efx_tx_queue *tx_queue,
* Generate a new header and prepare for the new packet. Return 0 on
* success, or -1 if failed to alloc header.
*/
-static inline int tso_start_new_packet(struct efx_tx_queue *tx_queue,
- const struct sk_buff *skb,
- struct tso_state *st)
+static int tso_start_new_packet(struct efx_tx_queue *tx_queue,
+ const struct sk_buff *skb,
+ struct tso_state *st)
{
struct efx_tso_header *tsoh;
struct iphdr *tsoh_iph;
@@ -940,7 +969,7 @@ static inline int tso_start_new_packet(struct efx_tx_queue *tx_queue,
u8 *header;
/* Allocate a DMA-mapped header buffer. */
- if (likely(TSOH_SIZE(st->p.header_length) <= TSOH_STD_SIZE)) {
+ if (likely(TSOH_SIZE(st->header_len) <= TSOH_STD_SIZE)) {
if (tx_queue->tso_headers_free == NULL) {
if (efx_tsoh_block_alloc(tx_queue))
return -1;
@@ -951,7 +980,7 @@ static inline int tso_start_new_packet(struct efx_tx_queue *tx_queue,
tsoh->unmap_len = 0;
} else {
tx_queue->tso_long_headers++;
- tsoh = efx_tsoh_heap_alloc(tx_queue, st->p.header_length);
+ tsoh = efx_tsoh_heap_alloc(tx_queue, st->header_len);
if (unlikely(!tsoh))
return -1;
}
@@ -961,33 +990,32 @@ static inline int tso_start_new_packet(struct efx_tx_queue *tx_queue,
tsoh_iph = (struct iphdr *)(header + SKB_IPV4_OFF(skb));
/* Copy and update the headers. */
- memcpy(header, skb->data, st->p.header_length);
+ memcpy(header, skb->data, st->header_len);
tsoh_th->seq = htonl(st->seqnum);
st->seqnum += skb_shinfo(skb)->gso_size;
- if (st->remaining_len > skb_shinfo(skb)->gso_size) {
+ if (st->out_len > skb_shinfo(skb)->gso_size) {
/* This packet will not finish the TSO burst. */
- ip_length = st->p.full_packet_size - ETH_HDR_LEN(skb);
+ ip_length = st->full_packet_size - ETH_HDR_LEN(skb);
tsoh_th->fin = 0;
tsoh_th->psh = 0;
} else {
/* This packet will be the last in the TSO burst. */
- ip_length = (st->p.header_length - ETH_HDR_LEN(skb)
- + st->remaining_len);
+ ip_length = st->header_len - ETH_HDR_LEN(skb) + st->out_len;
tsoh_th->fin = tcp_hdr(skb)->fin;
tsoh_th->psh = tcp_hdr(skb)->psh;
}
tsoh_iph->tot_len = htons(ip_length);
/* Linux leaves suitable gaps in the IP ID space for us to fill. */
- tsoh_iph->id = htons(st->p.ipv4_id);
- st->p.ipv4_id++;
+ tsoh_iph->id = htons(st->ipv4_id);
+ st->ipv4_id++;
st->packet_space = skb_shinfo(skb)->gso_size;
++tx_queue->tso_packets;
/* Form a descriptor for this header. */
- efx_tso_put_header(tx_queue, tsoh, st->p.header_length);
+ efx_tso_put_header(tx_queue, tsoh, st->header_len);
return 0;
}
@@ -1005,11 +1033,11 @@ static inline int tso_start_new_packet(struct efx_tx_queue *tx_queue,
* %NETDEV_TX_OK or %NETDEV_TX_BUSY.
*/
static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
- const struct sk_buff *skb)
+ struct sk_buff *skb)
{
+ struct efx_nic *efx = tx_queue->efx;
int frag_i, rc, rc2 = NETDEV_TX_OK;
struct tso_state state;
- skb_frag_t *f;
/* Verify TSO is safe - these checks should never fail. */
efx_tso_check_safe(skb);
@@ -1021,29 +1049,16 @@ static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
/* Assume that skb header area contains exactly the headers, and
* all payload is in the frag list.
*/
- if (skb_headlen(skb) == state.p.header_length) {
+ if (skb_headlen(skb) == state.header_len) {
/* Grab the first payload fragment. */
EFX_BUG_ON_PARANOID(skb_shinfo(skb)->nr_frags < 1);
frag_i = 0;
- f = &skb_shinfo(skb)->frags[frag_i];
- rc = tso_get_fragment(&state, tx_queue->efx,
- f->size, f->page, f->page_offset);
+ rc = tso_get_fragment(&state, efx,
+ skb_shinfo(skb)->frags + frag_i);
if (rc)
goto mem_err;
} else {
- /* It may look like this code fragment assumes that the
- * skb->data portion does not cross a page boundary, but
- * that is not the case. It is guaranteed to be direct
- * mapped memory, and therefore is physically contiguous,
- * and so DMA will work fine. kmap_atomic() on this region
- * will just return the direct mapping, so that will work
- * too.
- */
- int page_off = (unsigned long)skb->data & (PAGE_SIZE - 1);
- int hl = state.p.header_length;
- rc = tso_get_fragment(&state, tx_queue->efx,
- skb_headlen(skb) - hl,
- virt_to_page(skb->data), page_off + hl);
+ rc = tso_get_head_fragment(&state, efx, skb);
if (rc)
goto mem_err;
frag_i = -1;
@@ -1058,13 +1073,12 @@ static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
goto stop;
/* Move onto the next fragment? */
- if (state.ifc.len == 0) {
+ if (state.in_len == 0) {
if (++frag_i >= skb_shinfo(skb)->nr_frags)
/* End of payload reached. */
break;
- f = &skb_shinfo(skb)->frags[frag_i];
- rc = tso_get_fragment(&state, tx_queue->efx,
- f->size, f->page, f->page_offset);
+ rc = tso_get_fragment(&state, efx,
+ skb_shinfo(skb)->frags + frag_i);
if (rc)
goto mem_err;
}
@@ -1082,8 +1096,7 @@ static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
return NETDEV_TX_OK;
mem_err:
- EFX_ERR(tx_queue->efx, "Out of memory for TSO headers, or PCI mapping"
- " error\n");
+ EFX_ERR(efx, "Out of memory for TSO headers, or PCI mapping error\n");
dev_kfree_skb_any((struct sk_buff *)skb);
goto unwind;
@@ -1092,9 +1105,19 @@ static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
/* Stop the queue if it wasn't stopped before. */
if (tx_queue->stopped == 1)
- efx_stop_queue(tx_queue->efx);
+ efx_stop_queue(efx);
unwind:
+ /* Free the DMA mapping we were in the process of writing out */
+ if (state.unmap_len) {
+ if (state.unmap_single)
+ pci_unmap_single(efx->pci_dev, state.unmap_addr,
+ state.unmap_len, PCI_DMA_TODEVICE);
+ else
+ pci_unmap_page(efx->pci_dev, state.unmap_addr,
+ state.unmap_len, PCI_DMA_TODEVICE);
+ }
+
efx_enqueue_unwind(tx_queue);
return rc2;
}
diff --git a/drivers/net/sfc/tx.h b/drivers/net/sfc/tx.h
index 1526a73b4b5..5e1cc234e42 100644
--- a/drivers/net/sfc/tx.h
+++ b/drivers/net/sfc/tx.h
@@ -15,7 +15,7 @@
int efx_probe_tx_queue(struct efx_tx_queue *tx_queue);
void efx_remove_tx_queue(struct efx_tx_queue *tx_queue);
-int efx_init_tx_queue(struct efx_tx_queue *tx_queue);
+void efx_init_tx_queue(struct efx_tx_queue *tx_queue);
void efx_fini_tx_queue(struct efx_tx_queue *tx_queue);
int efx_hard_start_xmit(struct sk_buff *skb, struct net_device *net_dev);
diff --git a/drivers/net/sfc/workarounds.h b/drivers/net/sfc/workarounds.h
index 35ab19c27f8..fa7b49d6928 100644
--- a/drivers/net/sfc/workarounds.h
+++ b/drivers/net/sfc/workarounds.h
@@ -20,14 +20,10 @@
/* XAUI resets if link not detected */
#define EFX_WORKAROUND_5147 EFX_WORKAROUND_ALWAYS
-/* SNAP frames have TOBE_DISC set */
-#define EFX_WORKAROUND_5475 EFX_WORKAROUND_ALWAYS
/* RX PCIe double split performance issue */
#define EFX_WORKAROUND_7575 EFX_WORKAROUND_ALWAYS
/* TX pkt parser problem with <= 16 byte TXes */
#define EFX_WORKAROUND_9141 EFX_WORKAROUND_ALWAYS
-/* XGXS and XAUI reset sequencing in SW */
-#define EFX_WORKAROUND_9388 EFX_WORKAROUND_ALWAYS
/* Low rate CRC errors require XAUI reset */
#define EFX_WORKAROUND_10750 EFX_WORKAROUND_ALWAYS
/* TX_EV_PKT_ERR can be caused by a dangling TX descriptor
diff --git a/drivers/net/sfc/xfp_phy.c b/drivers/net/sfc/xfp_phy.c
index f3684ad2888..276151df3a7 100644
--- a/drivers/net/sfc/xfp_phy.c
+++ b/drivers/net/sfc/xfp_phy.c
@@ -40,7 +40,7 @@ void xfp_set_led(struct efx_nic *p, int led, int mode)
}
struct xfp_phy_data {
- int tx_disabled;
+ enum efx_phy_mode phy_mode;
};
#define XFP_MAX_RESET_TIME 500
@@ -93,7 +93,7 @@ static int xfp_phy_init(struct efx_nic *efx)
" %x)\n", devid, MDIO_ID_OUI(devid), MDIO_ID_MODEL(devid),
MDIO_ID_REV(devid));
- phy_data->tx_disabled = efx->tx_disabled;
+ phy_data->phy_mode = efx->phy_mode;
rc = xfp_reset_phy(efx);
@@ -136,13 +136,14 @@ static void xfp_phy_reconfigure(struct efx_nic *efx)
struct xfp_phy_data *phy_data = efx->phy_data;
/* Reset the PHY when moving from tx off to tx on */
- if (phy_data->tx_disabled && !efx->tx_disabled)
+ if (!(efx->phy_mode & PHY_MODE_TX_DISABLED) &&
+ (phy_data->phy_mode & PHY_MODE_TX_DISABLED))
xfp_reset_phy(efx);
mdio_clause45_transmit_disable(efx);
mdio_clause45_phy_reconfigure(efx);
- phy_data->tx_disabled = efx->tx_disabled;
+ phy_data->phy_mode = efx->phy_mode;
efx->link_up = xfp_link_ok(efx);
efx->link_options = GM_LPA_10000FULL;
}
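
Since efx_phy_mode is a bitmask, the condition above fires only on the TX-disabled to TX-enabled edge. Written as a standalone predicate (illustrative, not part of the patch):

	/* Reset only when TX was disabled and is now being enabled */
	static inline bool xfp_tx_reenabled(enum efx_phy_mode old_mode,
					    enum efx_phy_mode new_mode)
	{
		return (old_mode & PHY_MODE_TX_DISABLED) &&
		       !(new_mode & PHY_MODE_TX_DISABLED);
	}
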
@@ -151,7 +152,7 @@ static void xfp_phy_reconfigure(struct efx_nic *efx)
static void xfp_phy_fini(struct efx_nic *efx)
{
/* Clobber the LED if it was blinking */
- efx->board_info.blink(efx, 0);
+ efx->board_info.blink(efx, false);
/* Free the context block */
kfree(efx->phy_data);
@@ -164,7 +165,6 @@ struct efx_phy_operations falcon_xfp_phy_ops = {
.check_hw = xfp_phy_check_hw,
.fini = xfp_phy_fini,
.clear_interrupt = xfp_phy_clear_interrupt,
- .reset_xaui = efx_port_dummy_op_void,
.mmds = XFP_REQUIRED_DEVS,
.loopbacks = XFP_LOOPBACKS,
};