Diffstat (limited to 'drivers/net/qlge')
-rw-r--r-- | drivers/net/qlge/qlge.h         | 229
-rw-r--r-- | drivers/net/qlge/qlge_dbg.c     | 180
-rw-r--r-- | drivers/net/qlge/qlge_ethtool.c | 290
-rw-r--r-- | drivers/net/qlge/qlge_main.c    | 490
-rw-r--r-- | drivers/net/qlge/qlge_mpi.c     | 210
5 files changed, 1238 insertions(+), 161 deletions(-)
diff --git a/drivers/net/qlge/qlge.h b/drivers/net/qlge/qlge.h index c2383adcd52..862c1aaf386 100644 --- a/drivers/net/qlge/qlge.h +++ b/drivers/net/qlge/qlge.h @@ -16,7 +16,7 @@ */ #define DRV_NAME "qlge" #define DRV_STRING "QLogic 10 Gigabit PCI-E Ethernet Driver " -#define DRV_VERSION "v1.00.00-b3" +#define DRV_VERSION "v1.00.00.23.00.00-01" #define PFX "qlge: " #define QPRINTK(qdev, nlevel, klevel, fmt, args...) \ @@ -54,8 +54,10 @@ #define RX_RING_SHADOW_SPACE (sizeof(u64) + \ MAX_DB_PAGES_PER_BQ(NUM_SMALL_BUFFERS) * sizeof(u64) + \ MAX_DB_PAGES_PER_BQ(NUM_LARGE_BUFFERS) * sizeof(u64)) -#define SMALL_BUFFER_SIZE 256 -#define LARGE_BUFFER_SIZE PAGE_SIZE +#define SMALL_BUFFER_SIZE 512 +#define SMALL_BUF_MAP_SIZE (SMALL_BUFFER_SIZE / 2) +#define LARGE_BUFFER_MAX_SIZE 8192 +#define LARGE_BUFFER_MIN_SIZE 2048 #define MAX_SPLIT_SIZE 1023 #define QLGE_SB_PAD 32 @@ -795,6 +797,7 @@ enum { MB_WOL_BCAST = (1 << 5), MB_WOL_LINK_UP = (1 << 6), MB_WOL_LINK_DOWN = (1 << 7), + MB_WOL_MODE_ON = (1 << 16), /* Wake on Lan Mode on */ MB_CMD_SET_WOL_FLTR = 0x00000111, /* Wake On Lan Filter */ MB_CMD_CLEAR_WOL_FLTR = 0x00000112, /* Wake On Lan Filter */ MB_CMD_SET_WOL_MAGIC = 0x00000113, /* Wake On Lan Magic Packet */ @@ -804,12 +807,27 @@ enum { MB_CMD_SET_PORT_CFG = 0x00000122, MB_CMD_GET_PORT_CFG = 0x00000123, MB_CMD_GET_LINK_STS = 0x00000124, + MB_CMD_SET_LED_CFG = 0x00000125, /* Set LED Configuration Register */ + QL_LED_BLINK = 0x03e803e8, + MB_CMD_GET_LED_CFG = 0x00000126, /* Get LED Configuration Register */ MB_CMD_SET_MGMNT_TFK_CTL = 0x00000160, /* Set Mgmnt Traffic Control */ MB_SET_MPI_TFK_STOP = (1 << 0), MB_SET_MPI_TFK_RESUME = (1 << 1), MB_CMD_GET_MGMNT_TFK_CTL = 0x00000161, /* Get Mgmnt Traffic Control */ MB_GET_MPI_TFK_STOPPED = (1 << 0), MB_GET_MPI_TFK_FIFO_EMPTY = (1 << 1), + /* Sub-commands for IDC request. + * This describes the reason for the + * IDC request. + */ + MB_CMD_IOP_NONE = 0x0000, + MB_CMD_IOP_PREP_UPDATE_MPI = 0x0001, + MB_CMD_IOP_COMP_UPDATE_MPI = 0x0002, + MB_CMD_IOP_PREP_LINK_DOWN = 0x0010, + MB_CMD_IOP_DVR_START = 0x0100, + MB_CMD_IOP_FLASH_ACC = 0x0101, + MB_CMD_IOP_RESTART_MPI = 0x0102, + MB_CMD_IOP_CORE_DUMP_MPI = 0x0103, /* Mailbox Command Status. */ MB_CMD_STS_GOOD = 0x00004000, /* Success. */ @@ -1201,9 +1219,17 @@ struct tx_ring_desc { struct tx_ring_desc *next; }; +struct page_chunk { + struct page *page; /* master page */ + char *va; /* virt addr for this chunk */ + u64 map; /* mapping for master */ + unsigned int offset; /* offset for this chunk */ + unsigned int last_flag; /* flag set for last chunk in page */ +}; + struct bq_desc { union { - struct page *lbq_page; + struct page_chunk pg_chunk; struct sk_buff *skb; } p; __le64 *addr; @@ -1237,6 +1263,9 @@ struct tx_ring { atomic_t queue_stopped; /* Turns queue off when full. 
*/ struct delayed_work tx_work; struct ql_adapter *qdev; + u64 tx_packets; + u64 tx_bytes; + u64 tx_errors; }; /* @@ -1272,6 +1301,7 @@ struct rx_ring { dma_addr_t lbq_base_dma; void *lbq_base_indirect; dma_addr_t lbq_base_indirect_dma; + struct page_chunk pg_chunk; /* current page for chunks */ struct bq_desc *lbq; /* array of control blocks */ void __iomem *lbq_prod_idx_db_reg; /* PCI doorbell mem area + 0x18 */ u32 lbq_prod_idx; /* current sw prod idx */ @@ -1302,6 +1332,11 @@ struct rx_ring { struct napi_struct napi; u8 reserved; struct ql_adapter *qdev; + u64 rx_packets; + u64 rx_multicast; + u64 rx_bytes; + u64 rx_dropped; + u64 rx_errors; }; /* @@ -1363,6 +1398,174 @@ struct nic_stats { u64 rx_1024_to_1518_pkts; u64 rx_1519_to_max_pkts; u64 rx_len_err_pkts; + /* + * These stats come from offset 500h to 5C8h + * in the XGMAC register. + */ + u64 tx_cbfc_pause_frames0; + u64 tx_cbfc_pause_frames1; + u64 tx_cbfc_pause_frames2; + u64 tx_cbfc_pause_frames3; + u64 tx_cbfc_pause_frames4; + u64 tx_cbfc_pause_frames5; + u64 tx_cbfc_pause_frames6; + u64 tx_cbfc_pause_frames7; + u64 rx_cbfc_pause_frames0; + u64 rx_cbfc_pause_frames1; + u64 rx_cbfc_pause_frames2; + u64 rx_cbfc_pause_frames3; + u64 rx_cbfc_pause_frames4; + u64 rx_cbfc_pause_frames5; + u64 rx_cbfc_pause_frames6; + u64 rx_cbfc_pause_frames7; + u64 rx_nic_fifo_drop; +}; + +/* Address/Length pairs for the coredump. */ +enum { + MPI_CORE_REGS_ADDR = 0x00030000, + MPI_CORE_REGS_CNT = 127, + MPI_CORE_SH_REGS_CNT = 16, + TEST_REGS_ADDR = 0x00001000, + TEST_REGS_CNT = 23, + RMII_REGS_ADDR = 0x00001040, + RMII_REGS_CNT = 64, + FCMAC1_REGS_ADDR = 0x00001080, + FCMAC2_REGS_ADDR = 0x000010c0, + FCMAC_REGS_CNT = 64, + FC1_MBX_REGS_ADDR = 0x00001100, + FC2_MBX_REGS_ADDR = 0x00001240, + FC_MBX_REGS_CNT = 64, + IDE_REGS_ADDR = 0x00001140, + IDE_REGS_CNT = 64, + NIC1_MBX_REGS_ADDR = 0x00001180, + NIC2_MBX_REGS_ADDR = 0x00001280, + NIC_MBX_REGS_CNT = 64, + SMBUS_REGS_ADDR = 0x00001200, + SMBUS_REGS_CNT = 64, + I2C_REGS_ADDR = 0x00001fc0, + I2C_REGS_CNT = 64, + MEMC_REGS_ADDR = 0x00003000, + MEMC_REGS_CNT = 256, + PBUS_REGS_ADDR = 0x00007c00, + PBUS_REGS_CNT = 256, + MDE_REGS_ADDR = 0x00010000, + MDE_REGS_CNT = 6, + CODE_RAM_ADDR = 0x00020000, + CODE_RAM_CNT = 0x2000, + MEMC_RAM_ADDR = 0x00100000, + MEMC_RAM_CNT = 0x2000, +}; + +#define MPI_COREDUMP_COOKIE 0x5555aaaa +struct mpi_coredump_global_header { + u32 cookie; + u8 idString[16]; + u32 timeLo; + u32 timeHi; + u32 imageSize; + u32 headerSize; + u8 info[220]; +}; + +struct mpi_coredump_segment_header { + u32 cookie; + u32 segNum; + u32 segSize; + u32 extra; + u8 description[16]; +}; + +/* Reg dump segment numbers. 
*/ +enum { + CORE_SEG_NUM = 1, + TEST_LOGIC_SEG_NUM = 2, + RMII_SEG_NUM = 3, + FCMAC1_SEG_NUM = 4, + FCMAC2_SEG_NUM = 5, + FC1_MBOX_SEG_NUM = 6, + IDE_SEG_NUM = 7, + NIC1_MBOX_SEG_NUM = 8, + SMBUS_SEG_NUM = 9, + FC2_MBOX_SEG_NUM = 10, + NIC2_MBOX_SEG_NUM = 11, + I2C_SEG_NUM = 12, + MEMC_SEG_NUM = 13, + PBUS_SEG_NUM = 14, + MDE_SEG_NUM = 15, + NIC1_CONTROL_SEG_NUM = 16, + NIC2_CONTROL_SEG_NUM = 17, + NIC1_XGMAC_SEG_NUM = 18, + NIC2_XGMAC_SEG_NUM = 19, + WCS_RAM_SEG_NUM = 20, + MEMC_RAM_SEG_NUM = 21, + XAUI_AN_SEG_NUM = 22, + XAUI_HSS_PCS_SEG_NUM = 23, + XFI_AN_SEG_NUM = 24, + XFI_TRAIN_SEG_NUM = 25, + XFI_HSS_PCS_SEG_NUM = 26, + XFI_HSS_TX_SEG_NUM = 27, + XFI_HSS_RX_SEG_NUM = 28, + XFI_HSS_PLL_SEG_NUM = 29, + MISC_NIC_INFO_SEG_NUM = 30, + INTR_STATES_SEG_NUM = 31, + CAM_ENTRIES_SEG_NUM = 32, + ROUTING_WORDS_SEG_NUM = 33, + ETS_SEG_NUM = 34, + PROBE_DUMP_SEG_NUM = 35, + ROUTING_INDEX_SEG_NUM = 36, + MAC_PROTOCOL_SEG_NUM = 37, + XAUI2_AN_SEG_NUM = 38, + XAUI2_HSS_PCS_SEG_NUM = 39, + XFI2_AN_SEG_NUM = 40, + XFI2_TRAIN_SEG_NUM = 41, + XFI2_HSS_PCS_SEG_NUM = 42, + XFI2_HSS_TX_SEG_NUM = 43, + XFI2_HSS_RX_SEG_NUM = 44, + XFI2_HSS_PLL_SEG_NUM = 45, + SEM_REGS_SEG_NUM = 50 + +}; + +struct ql_nic_misc { + u32 rx_ring_count; + u32 tx_ring_count; + u32 intr_count; + u32 function; +}; + +struct ql_reg_dump { + + /* segment 0 */ + struct mpi_coredump_global_header mpi_global_header; + + /* segment 16 */ + struct mpi_coredump_segment_header nic_regs_seg_hdr; + u32 nic_regs[64]; + + /* segment 30 */ + struct mpi_coredump_segment_header misc_nic_seg_hdr; + struct ql_nic_misc misc_nic_info; + + /* segment 31 */ + /* one interrupt state for each CQ */ + struct mpi_coredump_segment_header intr_states_seg_hdr; + u32 intr_states[MAX_CPUS]; + + /* segment 32 */ + /* 3 cam words each for 16 unicast, + * 2 cam words for each of 32 multicast. 
+ */ + struct mpi_coredump_segment_header cam_entries_seg_hdr; + u32 cam_entries[(16 * 3) + (32 * 3)]; + + /* segment 33 */ + struct mpi_coredump_segment_header nic_routing_words_seg_hdr; + u32 nic_routing_words[16]; + + /* segment 34 */ + struct mpi_coredump_segment_header ets_seg_hdr; + u32 ets[8+2]; }; /* @@ -1398,6 +1601,8 @@ enum { QL_ALLMULTI = 6, QL_PORT_CFG = 7, QL_CAM_RT_SET = 8, + QL_SELFTEST = 9, + QL_LB_LINK_UP = 10, }; /* link_status bit definitions */ @@ -1505,6 +1710,7 @@ struct ql_adapter { struct rx_ring rx_ring[MAX_RX_RINGS]; struct tx_ring tx_ring[MAX_TX_RINGS]; + unsigned int lbq_buf_order; int rx_csum; u32 default_rx_queue; @@ -1519,11 +1725,11 @@ struct ql_adapter { u32 port_init; u32 link_status; u32 link_config; + u32 led_config; u32 max_frame_size; union flash_params flash; - struct net_device_stats stats; struct workqueue_struct *workqueue; struct delayed_work asic_reset_work; struct delayed_work mpi_reset_work; @@ -1533,6 +1739,7 @@ struct ql_adapter { struct completion ide_completion; struct nic_operations *nic_ops; u16 device_id; + atomic_t lb_count; }; /* @@ -1611,10 +1818,22 @@ int ql_mb_get_fw_state(struct ql_adapter *qdev); int ql_cam_route_initialize(struct ql_adapter *qdev); int ql_read_mpi_reg(struct ql_adapter *qdev, u32 reg, u32 *data); int ql_mb_about_fw(struct ql_adapter *qdev); +int ql_wol(struct ql_adapter *qdev); +int ql_mb_wol_set_magic(struct ql_adapter *qdev, u32 enable_wol); +int ql_mb_wol_mode(struct ql_adapter *qdev, u32 wol); +int ql_mb_set_led_cfg(struct ql_adapter *qdev, u32 led_config); +int ql_mb_get_led_cfg(struct ql_adapter *qdev); void ql_link_on(struct ql_adapter *qdev); void ql_link_off(struct ql_adapter *qdev); int ql_mb_set_mgmnt_traffic_ctl(struct ql_adapter *qdev, u32 control); +int ql_mb_get_port_cfg(struct ql_adapter *qdev); +int ql_mb_set_port_cfg(struct ql_adapter *qdev); int ql_wait_fifo_empty(struct ql_adapter *qdev); +void ql_gen_reg_dump(struct ql_adapter *qdev, + struct ql_reg_dump *mpi_coredump); +netdev_tx_t ql_lb_send(struct sk_buff *skb, struct net_device *ndev); +void ql_check_lb_frame(struct ql_adapter *, struct sk_buff *); +int ql_clean_lb_rx_ring(struct rx_ring *rx_ring, int budget); #if 1 #define QL_ALL_DUMP diff --git a/drivers/net/qlge/qlge_dbg.c b/drivers/net/qlge/qlge_dbg.c index aa88cb3f41c..9f58c471076 100644 --- a/drivers/net/qlge/qlge_dbg.c +++ b/drivers/net/qlge/qlge_dbg.c @@ -1,5 +1,185 @@ #include "qlge.h" + +static int ql_get_ets_regs(struct ql_adapter *qdev, u32 * buf) +{ + int status = 0; + int i; + + for (i = 0; i < 8; i++, buf++) { + ql_write32(qdev, NIC_ETS, i << 29 | 0x08000000); + *buf = ql_read32(qdev, NIC_ETS); + } + + for (i = 0; i < 2; i++, buf++) { + ql_write32(qdev, CNA_ETS, i << 29 | 0x08000000); + *buf = ql_read32(qdev, CNA_ETS); + } + + return status; +} + +static void ql_get_intr_states(struct ql_adapter *qdev, u32 * buf) +{ + int i; + + for (i = 0; i < qdev->rx_ring_count; i++, buf++) { + ql_write32(qdev, INTR_EN, + qdev->intr_context[i].intr_read_mask); + *buf = ql_read32(qdev, INTR_EN); + } +} + +static int ql_get_cam_entries(struct ql_adapter *qdev, u32 * buf) +{ + int i, status; + u32 value[3]; + + status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK); + if (status) + return status; + + for (i = 0; i < 16; i++) { + status = ql_get_mac_addr_reg(qdev, + MAC_ADDR_TYPE_CAM_MAC, i, value); + if (status) { + QPRINTK(qdev, DRV, ERR, + "Failed read of mac index register.\n"); + goto err; + } + *buf++ = value[0]; /* lower MAC address */ + *buf++ = value[1]; /* upper MAC address */ + *buf++ 
= value[2]; /* output */ + } + for (i = 0; i < 32; i++) { + status = ql_get_mac_addr_reg(qdev, + MAC_ADDR_TYPE_MULTI_MAC, i, value); + if (status) { + QPRINTK(qdev, DRV, ERR, + "Failed read of mac index register.\n"); + goto err; + } + *buf++ = value[0]; /* lower Mcast address */ + *buf++ = value[1]; /* upper Mcast address */ + } +err: + ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK); + return status; +} + +static int ql_get_routing_entries(struct ql_adapter *qdev, u32 * buf) +{ + int status; + u32 value, i; + + status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK); + if (status) + return status; + + for (i = 0; i < 16; i++) { + status = ql_get_routing_reg(qdev, i, &value); + if (status) { + QPRINTK(qdev, DRV, ERR, + "Failed read of routing index register.\n"); + goto err; + } else { + *buf++ = value; + } + } +err: + ql_sem_unlock(qdev, SEM_RT_IDX_MASK); + return status; +} + +/* Create a coredump segment header */ +static void ql_build_coredump_seg_header( + struct mpi_coredump_segment_header *seg_hdr, + u32 seg_number, u32 seg_size, u8 *desc) +{ + memset(seg_hdr, 0, sizeof(struct mpi_coredump_segment_header)); + seg_hdr->cookie = MPI_COREDUMP_COOKIE; + seg_hdr->segNum = seg_number; + seg_hdr->segSize = seg_size; + memcpy(seg_hdr->description, desc, (sizeof(seg_hdr->description)) - 1); +} + +void ql_gen_reg_dump(struct ql_adapter *qdev, + struct ql_reg_dump *mpi_coredump) +{ + int i, status; + + + memset(&(mpi_coredump->mpi_global_header), 0, + sizeof(struct mpi_coredump_global_header)); + mpi_coredump->mpi_global_header.cookie = MPI_COREDUMP_COOKIE; + mpi_coredump->mpi_global_header.headerSize = + sizeof(struct mpi_coredump_global_header); + mpi_coredump->mpi_global_header.imageSize = + sizeof(struct ql_reg_dump); + memcpy(mpi_coredump->mpi_global_header.idString, "MPI Coredump", + sizeof(mpi_coredump->mpi_global_header.idString)); + + + /* segment 16 */ + ql_build_coredump_seg_header(&mpi_coredump->misc_nic_seg_hdr, + MISC_NIC_INFO_SEG_NUM, + sizeof(struct mpi_coredump_segment_header) + + sizeof(mpi_coredump->misc_nic_info), + "MISC NIC INFO"); + mpi_coredump->misc_nic_info.rx_ring_count = qdev->rx_ring_count; + mpi_coredump->misc_nic_info.tx_ring_count = qdev->tx_ring_count; + mpi_coredump->misc_nic_info.intr_count = qdev->intr_count; + mpi_coredump->misc_nic_info.function = qdev->func; + + /* Segment 16, Rev C. Step 18 */ + ql_build_coredump_seg_header(&mpi_coredump->nic_regs_seg_hdr, + NIC1_CONTROL_SEG_NUM, + sizeof(struct mpi_coredump_segment_header) + + sizeof(mpi_coredump->nic_regs), + "NIC Registers"); + /* Get generic reg dump */ + for (i = 0; i < 64; i++) + mpi_coredump->nic_regs[i] = ql_read32(qdev, i * sizeof(u32)); + + /* Segment 31 */ + /* Get indexed register values. 
*/ + ql_build_coredump_seg_header(&mpi_coredump->intr_states_seg_hdr, + INTR_STATES_SEG_NUM, + sizeof(struct mpi_coredump_segment_header) + + sizeof(mpi_coredump->intr_states), + "INTR States"); + ql_get_intr_states(qdev, &mpi_coredump->intr_states[0]); + + ql_build_coredump_seg_header(&mpi_coredump->cam_entries_seg_hdr, + CAM_ENTRIES_SEG_NUM, + sizeof(struct mpi_coredump_segment_header) + + sizeof(mpi_coredump->cam_entries), + "CAM Entries"); + status = ql_get_cam_entries(qdev, &mpi_coredump->cam_entries[0]); + if (status) + return; + + ql_build_coredump_seg_header(&mpi_coredump->nic_routing_words_seg_hdr, + ROUTING_WORDS_SEG_NUM, + sizeof(struct mpi_coredump_segment_header) + + sizeof(mpi_coredump->nic_routing_words), + "Routing Words"); + status = ql_get_routing_entries(qdev, + &mpi_coredump->nic_routing_words[0]); + if (status) + return; + + /* Segment 34 (Rev C. step 23) */ + ql_build_coredump_seg_header(&mpi_coredump->ets_seg_hdr, + ETS_SEG_NUM, + sizeof(struct mpi_coredump_segment_header) + + sizeof(mpi_coredump->ets), + "ETS Registers"); + status = ql_get_ets_regs(qdev, &mpi_coredump->ets[0]); + if (status) + return; +} + #ifdef QL_REG_DUMP static void ql_dump_intr_states(struct ql_adapter *qdev) { diff --git a/drivers/net/qlge/qlge_ethtool.c b/drivers/net/qlge/qlge_ethtool.c index 52073946bce..058fa0a48c6 100644 --- a/drivers/net/qlge/qlge_ethtool.c +++ b/drivers/net/qlge/qlge_ethtool.c @@ -36,6 +36,11 @@ #include "qlge.h" +static const char ql_gstrings_test[][ETH_GSTRING_LEN] = { + "Loopback test (offline)" +}; +#define QLGE_TEST_LEN (sizeof(ql_gstrings_test) / ETH_GSTRING_LEN) + static int ql_update_ring_coalescing(struct ql_adapter *qdev) { int i, status = 0; @@ -132,6 +137,41 @@ static void ql_update_stats(struct ql_adapter *qdev) iter++; } + /* + * Get Per-priority TX pause frame counter statistics. + */ + for (i = 0x500; i < 0x540; i += 8) { + if (ql_read_xgmac_reg64(qdev, i, &data)) { + QPRINTK(qdev, DRV, ERR, + "Error reading status register 0x%.04x.\n", i); + goto end; + } else + *iter = data; + iter++; + } + + /* + * Get Per-priority RX pause frame counter statistics. + */ + for (i = 0x568; i < 0x5a8; i += 8) { + if (ql_read_xgmac_reg64(qdev, i, &data)) { + QPRINTK(qdev, DRV, ERR, + "Error reading status register 0x%.04x.\n", i); + goto end; + } else + *iter = data; + iter++; + } + + /* + * Get RX NIC FIFO DROP statistics. 
+ */ + if (ql_read_xgmac_reg64(qdev, 0x5b8, &data)) { + QPRINTK(qdev, DRV, ERR, + "Error reading status register 0x%.04x.\n", i); + goto end; + } else + *iter = data; end: ql_sem_unlock(qdev, qdev->xg_sem_mask); quit: @@ -185,6 +225,23 @@ static char ql_stats_str_arr[][ETH_GSTRING_LEN] = { {"rx_1024_to_1518_pkts"}, {"rx_1519_to_max_pkts"}, {"rx_len_err_pkts"}, + {"tx_cbfc_pause_frames0"}, + {"tx_cbfc_pause_frames1"}, + {"tx_cbfc_pause_frames2"}, + {"tx_cbfc_pause_frames3"}, + {"tx_cbfc_pause_frames4"}, + {"tx_cbfc_pause_frames5"}, + {"tx_cbfc_pause_frames6"}, + {"tx_cbfc_pause_frames7"}, + {"rx_cbfc_pause_frames0"}, + {"rx_cbfc_pause_frames1"}, + {"rx_cbfc_pause_frames2"}, + {"rx_cbfc_pause_frames3"}, + {"rx_cbfc_pause_frames4"}, + {"rx_cbfc_pause_frames5"}, + {"rx_cbfc_pause_frames6"}, + {"rx_cbfc_pause_frames7"}, + {"rx_nic_fifo_drop"}, }; static void ql_get_strings(struct net_device *dev, u32 stringset, u8 *buf) @@ -199,6 +256,8 @@ static void ql_get_strings(struct net_device *dev, u32 stringset, u8 *buf) static int ql_get_sset_count(struct net_device *dev, int sset) { switch (sset) { + case ETH_SS_TEST: + return QLGE_TEST_LEN; case ETH_SS_STATS: return ARRAY_SIZE(ql_stats_str_arr); default: @@ -257,6 +316,23 @@ ql_get_ethtool_stats(struct net_device *ndev, *data++ = s->rx_1024_to_1518_pkts; *data++ = s->rx_1519_to_max_pkts; *data++ = s->rx_len_err_pkts; + *data++ = s->tx_cbfc_pause_frames0; + *data++ = s->tx_cbfc_pause_frames1; + *data++ = s->tx_cbfc_pause_frames2; + *data++ = s->tx_cbfc_pause_frames3; + *data++ = s->tx_cbfc_pause_frames4; + *data++ = s->tx_cbfc_pause_frames5; + *data++ = s->tx_cbfc_pause_frames6; + *data++ = s->tx_cbfc_pause_frames7; + *data++ = s->rx_cbfc_pause_frames0; + *data++ = s->rx_cbfc_pause_frames1; + *data++ = s->rx_cbfc_pause_frames2; + *data++ = s->rx_cbfc_pause_frames3; + *data++ = s->rx_cbfc_pause_frames4; + *data++ = s->rx_cbfc_pause_frames5; + *data++ = s->rx_cbfc_pause_frames6; + *data++ = s->rx_cbfc_pause_frames7; + *data++ = s->rx_nic_fifo_drop; } static int ql_get_settings(struct net_device *ndev, @@ -302,6 +378,181 @@ static void ql_get_drvinfo(struct net_device *ndev, drvinfo->eedump_len = 0; } +static void ql_get_wol(struct net_device *ndev, struct ethtool_wolinfo *wol) +{ + struct ql_adapter *qdev = netdev_priv(ndev); + /* What we support. */ + wol->supported = WAKE_MAGIC; + /* What we've currently got set. */ + wol->wolopts = qdev->wol; +} + +static int ql_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol) +{ + struct ql_adapter *qdev = netdev_priv(ndev); + int status; + + if (wol->wolopts & ~WAKE_MAGIC) + return -EINVAL; + qdev->wol = wol->wolopts; + + QPRINTK(qdev, DRV, INFO, "Set wol option 0x%x on %s\n", + qdev->wol, ndev->name); + if (!qdev->wol) { + u32 wol = 0; + status = ql_mb_wol_mode(qdev, wol); + QPRINTK(qdev, DRV, ERR, "WOL %s (wol code 0x%x) on %s\n", + (status == 0) ? 
"cleared sucessfully" : "clear failed", + wol, qdev->ndev->name); + } + + return 0; +} + +static int ql_phys_id(struct net_device *ndev, u32 data) +{ + struct ql_adapter *qdev = netdev_priv(ndev); + u32 led_reg, i; + int status; + + /* Save the current LED settings */ + status = ql_mb_get_led_cfg(qdev); + if (status) + return status; + led_reg = qdev->led_config; + + /* Start blinking the led */ + if (!data || data > 300) + data = 300; + + for (i = 0; i < (data * 10); i++) + ql_mb_set_led_cfg(qdev, QL_LED_BLINK); + + /* Restore LED settings */ + status = ql_mb_set_led_cfg(qdev, led_reg); + if (status) + return status; + + return 0; +} + +static int ql_start_loopback(struct ql_adapter *qdev) +{ + if (netif_carrier_ok(qdev->ndev)) { + set_bit(QL_LB_LINK_UP, &qdev->flags); + netif_carrier_off(qdev->ndev); + } else + clear_bit(QL_LB_LINK_UP, &qdev->flags); + qdev->link_config |= CFG_LOOPBACK_PCS; + return ql_mb_set_port_cfg(qdev); +} + +static void ql_stop_loopback(struct ql_adapter *qdev) +{ + qdev->link_config &= ~CFG_LOOPBACK_PCS; + ql_mb_set_port_cfg(qdev); + if (test_bit(QL_LB_LINK_UP, &qdev->flags)) { + netif_carrier_on(qdev->ndev); + clear_bit(QL_LB_LINK_UP, &qdev->flags); + } +} + +static void ql_create_lb_frame(struct sk_buff *skb, + unsigned int frame_size) +{ + memset(skb->data, 0xFF, frame_size); + frame_size &= ~1; + memset(&skb->data[frame_size / 2], 0xAA, frame_size / 2 - 1); + memset(&skb->data[frame_size / 2 + 10], 0xBE, 1); + memset(&skb->data[frame_size / 2 + 12], 0xAF, 1); +} + +void ql_check_lb_frame(struct ql_adapter *qdev, + struct sk_buff *skb) +{ + unsigned int frame_size = skb->len; + + if ((*(skb->data + 3) == 0xFF) && + (*(skb->data + frame_size / 2 + 10) == 0xBE) && + (*(skb->data + frame_size / 2 + 12) == 0xAF)) { + atomic_dec(&qdev->lb_count); + return; + } +} + +static int ql_run_loopback_test(struct ql_adapter *qdev) +{ + int i; + netdev_tx_t rc; + struct sk_buff *skb; + unsigned int size = SMALL_BUF_MAP_SIZE; + + for (i = 0; i < 64; i++) { + skb = netdev_alloc_skb(qdev->ndev, size); + if (!skb) + return -ENOMEM; + + skb->queue_mapping = 0; + skb_put(skb, size); + ql_create_lb_frame(skb, size); + rc = ql_lb_send(skb, qdev->ndev); + if (rc != NETDEV_TX_OK) + return -EPIPE; + atomic_inc(&qdev->lb_count); + } + + ql_clean_lb_rx_ring(&qdev->rx_ring[0], 128); + return atomic_read(&qdev->lb_count) ? 
-EIO : 0; +} + +static int ql_loopback_test(struct ql_adapter *qdev, u64 *data) +{ + *data = ql_start_loopback(qdev); + if (*data) + goto out; + *data = ql_run_loopback_test(qdev); +out: + ql_stop_loopback(qdev); + return *data; +} + +static void ql_self_test(struct net_device *ndev, + struct ethtool_test *eth_test, u64 *data) +{ + struct ql_adapter *qdev = netdev_priv(ndev); + + if (netif_running(ndev)) { + set_bit(QL_SELFTEST, &qdev->flags); + if (eth_test->flags == ETH_TEST_FL_OFFLINE) { + /* Offline tests */ + if (ql_loopback_test(qdev, &data[0])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + } else { + /* Online tests */ + data[0] = 0; + } + clear_bit(QL_SELFTEST, &qdev->flags); + } else { + QPRINTK(qdev, DRV, ERR, + "%s: is down, Loopback test will fail.\n", ndev->name); + eth_test->flags |= ETH_TEST_FL_FAILED; + } +} + +static int ql_get_regs_len(struct net_device *ndev) +{ + return sizeof(struct ql_reg_dump); +} + +static void ql_get_regs(struct net_device *ndev, + struct ethtool_regs *regs, void *p) +{ + struct ql_adapter *qdev = netdev_priv(ndev); + + ql_gen_reg_dump(qdev, p); +} + static int ql_get_coalesce(struct net_device *dev, struct ethtool_coalesce *c) { struct ql_adapter *qdev = netdev_priv(dev); @@ -355,6 +606,37 @@ static int ql_set_coalesce(struct net_device *ndev, struct ethtool_coalesce *c) return ql_update_ring_coalescing(qdev); } +static void ql_get_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pause) +{ + struct ql_adapter *qdev = netdev_priv(netdev); + + ql_mb_get_port_cfg(qdev); + if (qdev->link_config & CFG_PAUSE_STD) { + pause->rx_pause = 1; + pause->tx_pause = 1; + } +} + +static int ql_set_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pause) +{ + struct ql_adapter *qdev = netdev_priv(netdev); + int status = 0; + + if ((pause->rx_pause) && (pause->tx_pause)) + qdev->link_config |= CFG_PAUSE_STD; + else if (!pause->rx_pause && !pause->tx_pause) + qdev->link_config &= ~CFG_PAUSE_STD; + else + return -EINVAL; + + status = ql_mb_set_port_cfg(qdev); + if (status) + return status; + return status; +} + static u32 ql_get_rx_csum(struct net_device *netdev) { struct ql_adapter *qdev = netdev_priv(netdev); @@ -396,9 +678,17 @@ static void ql_set_msglevel(struct net_device *ndev, u32 value) const struct ethtool_ops qlge_ethtool_ops = { .get_settings = ql_get_settings, .get_drvinfo = ql_get_drvinfo, + .get_wol = ql_get_wol, + .set_wol = ql_set_wol, + .get_regs_len = ql_get_regs_len, + .get_regs = ql_get_regs, .get_msglevel = ql_get_msglevel, .set_msglevel = ql_set_msglevel, .get_link = ethtool_op_get_link, + .phys_id = ql_phys_id, + .self_test = ql_self_test, + .get_pauseparam = ql_get_pauseparam, + .set_pauseparam = ql_set_pauseparam, .get_rx_csum = ql_get_rx_csum, .set_rx_csum = ql_set_rx_csum, .get_tx_csum = ethtool_op_get_tx_csum, diff --git a/drivers/net/qlge/qlge_main.c b/drivers/net/qlge/qlge_main.c index a2fc70a0d0c..707b391afa0 100644 --- a/drivers/net/qlge/qlge_main.c +++ b/drivers/net/qlge/qlge_main.c @@ -69,9 +69,9 @@ MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); #define MSIX_IRQ 0 #define MSI_IRQ 1 #define LEG_IRQ 2 -static int irq_type = MSIX_IRQ; -module_param(irq_type, int, MSIX_IRQ); -MODULE_PARM_DESC(irq_type, "0 = MSI-X, 1 = MSI, 2 = Legacy."); +static int qlge_irq_type = MSIX_IRQ; +module_param(qlge_irq_type, int, MSIX_IRQ); +MODULE_PARM_DESC(qlge_irq_type, "0 = MSI-X, 1 = MSI, 2 = Legacy."); static struct pci_device_id qlge_pci_tbl[] __devinitdata = { {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, 
QLGE_DEVICE_ID_8012)}, @@ -1025,6 +1025,11 @@ end: return status; } +static inline unsigned int ql_lbq_block_size(struct ql_adapter *qdev) +{ + return PAGE_SIZE << qdev->lbq_buf_order; +} + /* Get the next large buffer. */ static struct bq_desc *ql_get_curr_lbuf(struct rx_ring *rx_ring) { @@ -1036,6 +1041,28 @@ static struct bq_desc *ql_get_curr_lbuf(struct rx_ring *rx_ring) return lbq_desc; } +static struct bq_desc *ql_get_curr_lchunk(struct ql_adapter *qdev, + struct rx_ring *rx_ring) +{ + struct bq_desc *lbq_desc = ql_get_curr_lbuf(rx_ring); + + pci_dma_sync_single_for_cpu(qdev->pdev, + pci_unmap_addr(lbq_desc, mapaddr), + rx_ring->lbq_buf_size, + PCI_DMA_FROMDEVICE); + + /* If it's the last chunk of our master page then + * we unmap it. + */ + if ((lbq_desc->p.pg_chunk.offset + rx_ring->lbq_buf_size) + == ql_lbq_block_size(qdev)) + pci_unmap_page(qdev->pdev, + lbq_desc->p.pg_chunk.map, + ql_lbq_block_size(qdev), + PCI_DMA_FROMDEVICE); + return lbq_desc; +} + /* Get the next small buffer. */ static struct bq_desc *ql_get_curr_sbuf(struct rx_ring *rx_ring) { @@ -1063,6 +1090,53 @@ static void ql_write_cq_idx(struct rx_ring *rx_ring) ql_write_db_reg(rx_ring->cnsmr_idx, rx_ring->cnsmr_idx_db_reg); } +static int ql_get_next_chunk(struct ql_adapter *qdev, struct rx_ring *rx_ring, + struct bq_desc *lbq_desc) +{ + if (!rx_ring->pg_chunk.page) { + u64 map; + rx_ring->pg_chunk.page = alloc_pages(__GFP_COLD | __GFP_COMP | + GFP_ATOMIC, + qdev->lbq_buf_order); + if (unlikely(!rx_ring->pg_chunk.page)) { + QPRINTK(qdev, DRV, ERR, + "page allocation failed.\n"); + return -ENOMEM; + } + rx_ring->pg_chunk.offset = 0; + map = pci_map_page(qdev->pdev, rx_ring->pg_chunk.page, + 0, ql_lbq_block_size(qdev), + PCI_DMA_FROMDEVICE); + if (pci_dma_mapping_error(qdev->pdev, map)) { + __free_pages(rx_ring->pg_chunk.page, + qdev->lbq_buf_order); + QPRINTK(qdev, DRV, ERR, + "PCI mapping failed.\n"); + return -ENOMEM; + } + rx_ring->pg_chunk.map = map; + rx_ring->pg_chunk.va = page_address(rx_ring->pg_chunk.page); + } + + /* Copy the current master pg_chunk info + * to the current descriptor. + */ + lbq_desc->p.pg_chunk = rx_ring->pg_chunk; + + /* Adjust the master page chunk for next + * buffer get. + */ + rx_ring->pg_chunk.offset += rx_ring->lbq_buf_size; + if (rx_ring->pg_chunk.offset == ql_lbq_block_size(qdev)) { + rx_ring->pg_chunk.page = NULL; + lbq_desc->p.pg_chunk.last_flag = 1; + } else { + rx_ring->pg_chunk.va += rx_ring->lbq_buf_size; + get_page(rx_ring->pg_chunk.page); + lbq_desc->p.pg_chunk.last_flag = 0; + } + return 0; +} /* Process (refill) a large buffer queue. 
*/ static void ql_update_lbq(struct ql_adapter *qdev, struct rx_ring *rx_ring) { @@ -1072,39 +1146,28 @@ static void ql_update_lbq(struct ql_adapter *qdev, struct rx_ring *rx_ring) u64 map; int i; - while (rx_ring->lbq_free_cnt > 16) { + while (rx_ring->lbq_free_cnt > 32) { for (i = 0; i < 16; i++) { QPRINTK(qdev, RX_STATUS, DEBUG, "lbq: try cleaning clean_idx = %d.\n", clean_idx); lbq_desc = &rx_ring->lbq[clean_idx]; - if (lbq_desc->p.lbq_page == NULL) { - QPRINTK(qdev, RX_STATUS, DEBUG, - "lbq: getting new page for index %d.\n", - lbq_desc->index); - lbq_desc->p.lbq_page = alloc_page(GFP_ATOMIC); - if (lbq_desc->p.lbq_page == NULL) { - rx_ring->lbq_clean_idx = clean_idx; - QPRINTK(qdev, RX_STATUS, ERR, - "Couldn't get a page.\n"); - return; - } - map = pci_map_page(qdev->pdev, - lbq_desc->p.lbq_page, - 0, PAGE_SIZE, - PCI_DMA_FROMDEVICE); - if (pci_dma_mapping_error(qdev->pdev, map)) { - rx_ring->lbq_clean_idx = clean_idx; - put_page(lbq_desc->p.lbq_page); - lbq_desc->p.lbq_page = NULL; - QPRINTK(qdev, RX_STATUS, ERR, - "PCI mapping failed.\n"); + if (ql_get_next_chunk(qdev, rx_ring, lbq_desc)) { + QPRINTK(qdev, IFUP, ERR, + "Could not get a page chunk.\n"); return; } + + map = lbq_desc->p.pg_chunk.map + + lbq_desc->p.pg_chunk.offset; pci_unmap_addr_set(lbq_desc, mapaddr, map); - pci_unmap_len_set(lbq_desc, maplen, PAGE_SIZE); + pci_unmap_len_set(lbq_desc, maplen, + rx_ring->lbq_buf_size); *lbq_desc->addr = cpu_to_le64(map); - } + + pci_dma_sync_single_for_device(qdev->pdev, map, + rx_ring->lbq_buf_size, + PCI_DMA_FROMDEVICE); clean_idx++; if (clean_idx == rx_ring->lbq_len) clean_idx = 0; @@ -1147,7 +1210,7 @@ static void ql_update_sbq(struct ql_adapter *qdev, struct rx_ring *rx_ring) sbq_desc->index); sbq_desc->p.skb = netdev_alloc_skb(qdev->ndev, - rx_ring->sbq_buf_size); + SMALL_BUFFER_SIZE); if (sbq_desc->p.skb == NULL) { QPRINTK(qdev, PROBE, ERR, "Couldn't get an skb.\n"); @@ -1157,8 +1220,8 @@ static void ql_update_sbq(struct ql_adapter *qdev, struct rx_ring *rx_ring) skb_reserve(sbq_desc->p.skb, QLGE_SB_PAD); map = pci_map_single(qdev->pdev, sbq_desc->p.skb->data, - rx_ring->sbq_buf_size / - 2, PCI_DMA_FROMDEVICE); + rx_ring->sbq_buf_size, + PCI_DMA_FROMDEVICE); if (pci_dma_mapping_error(qdev->pdev, map)) { QPRINTK(qdev, IFUP, ERR, "PCI mapping failed.\n"); rx_ring->sbq_clean_idx = clean_idx; @@ -1168,7 +1231,7 @@ static void ql_update_sbq(struct ql_adapter *qdev, struct rx_ring *rx_ring) } pci_unmap_addr_set(sbq_desc, mapaddr, map); pci_unmap_len_set(sbq_desc, maplen, - rx_ring->sbq_buf_size / 2); + rx_ring->sbq_buf_size); *sbq_desc->addr = cpu_to_le64(map); } @@ -1480,27 +1543,24 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev, * chain it to the header buffer's skb and let * it rip. */ - lbq_desc = ql_get_curr_lbuf(rx_ring); - pci_unmap_page(qdev->pdev, - pci_unmap_addr(lbq_desc, - mapaddr), - pci_unmap_len(lbq_desc, maplen), - PCI_DMA_FROMDEVICE); + lbq_desc = ql_get_curr_lchunk(qdev, rx_ring); QPRINTK(qdev, RX_STATUS, DEBUG, - "Chaining page to skb.\n"); - skb_fill_page_desc(skb, 0, lbq_desc->p.lbq_page, - 0, length); + "Chaining page at offset = %d," + "for %d bytes to skb.\n", + lbq_desc->p.pg_chunk.offset, length); + skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page, + lbq_desc->p.pg_chunk.offset, + length); skb->len += length; skb->data_len += length; skb->truesize += length; - lbq_desc->p.lbq_page = NULL; } else { /* * The headers and data are in a single large buffer. We * copy it to a new skb and let it go. 
This can happen with * jumbo mtu on a non-TCP/UDP frame. */ - lbq_desc = ql_get_curr_lbuf(rx_ring); + lbq_desc = ql_get_curr_lchunk(qdev, rx_ring); skb = netdev_alloc_skb(qdev->ndev, length); if (skb == NULL) { QPRINTK(qdev, PROBE, DEBUG, @@ -1515,13 +1575,14 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev, skb_reserve(skb, NET_IP_ALIGN); QPRINTK(qdev, RX_STATUS, DEBUG, "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n", length); - skb_fill_page_desc(skb, 0, lbq_desc->p.lbq_page, - 0, length); + skb_fill_page_desc(skb, 0, + lbq_desc->p.pg_chunk.page, + lbq_desc->p.pg_chunk.offset, + length); skb->len += length; skb->data_len += length; skb->truesize += length; length -= length; - lbq_desc->p.lbq_page = NULL; __pskb_pull_tail(skb, (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ? VLAN_ETH_HLEN : ETH_HLEN); @@ -1538,8 +1599,7 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev, * frames. If the MTU goes up we could * eventually be in trouble. */ - int size, offset, i = 0; - __le64 *bq, bq_array[8]; + int size, i = 0; sbq_desc = ql_get_curr_sbuf(rx_ring); pci_unmap_single(qdev->pdev, pci_unmap_addr(sbq_desc, mapaddr), @@ -1558,37 +1618,25 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev, QPRINTK(qdev, RX_STATUS, DEBUG, "%d bytes of headers & data in chain of large.\n", length); skb = sbq_desc->p.skb; - bq = &bq_array[0]; - memcpy(bq, skb->data, sizeof(bq_array)); sbq_desc->p.skb = NULL; skb_reserve(skb, NET_IP_ALIGN); - } else { - QPRINTK(qdev, RX_STATUS, DEBUG, - "Headers in small, %d bytes of data in chain of large.\n", length); - bq = (__le64 *)sbq_desc->p.skb->data; } while (length > 0) { - lbq_desc = ql_get_curr_lbuf(rx_ring); - pci_unmap_page(qdev->pdev, - pci_unmap_addr(lbq_desc, - mapaddr), - pci_unmap_len(lbq_desc, - maplen), - PCI_DMA_FROMDEVICE); - size = (length < PAGE_SIZE) ? length : PAGE_SIZE; - offset = 0; + lbq_desc = ql_get_curr_lchunk(qdev, rx_ring); + size = (length < rx_ring->lbq_buf_size) ? length : + rx_ring->lbq_buf_size; QPRINTK(qdev, RX_STATUS, DEBUG, "Adding page %d to skb for %d bytes.\n", i, size); - skb_fill_page_desc(skb, i, lbq_desc->p.lbq_page, - offset, size); + skb_fill_page_desc(skb, i, + lbq_desc->p.pg_chunk.page, + lbq_desc->p.pg_chunk.offset, + size); skb->len += size; skb->data_len += size; skb->truesize += size; length -= size; - lbq_desc->p.lbq_page = NULL; - bq++; i++; } __pskb_pull_tail(skb, (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ? @@ -1613,6 +1661,7 @@ static void ql_process_mac_rx_intr(struct ql_adapter *qdev, if (unlikely(!skb)) { QPRINTK(qdev, RX_STATUS, DEBUG, "No skb available, drop packet.\n"); + rx_ring->rx_dropped++; return; } @@ -1621,6 +1670,7 @@ static void ql_process_mac_rx_intr(struct ql_adapter *qdev, QPRINTK(qdev, DRV, ERR, "Receive error, flags2 = 0x%x\n", ib_mac_rsp->flags2); dev_kfree_skb_any(skb); + rx_ring->rx_errors++; return; } @@ -1629,6 +1679,14 @@ static void ql_process_mac_rx_intr(struct ql_adapter *qdev, */ if (skb->len > ndev->mtu + ETH_HLEN) { dev_kfree_skb_any(skb); + rx_ring->rx_dropped++; + return; + } + + /* loopback self test for ethtool */ + if (test_bit(QL_SELFTEST, &qdev->flags)) { + ql_check_lb_frame(qdev, skb); + dev_kfree_skb_any(skb); return; } @@ -1642,6 +1700,7 @@ static void ql_process_mac_rx_intr(struct ql_adapter *qdev, IB_MAC_IOCB_RSP_M_REG ? "Registered" : "", (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) == IB_MAC_IOCB_RSP_M_PROM ? 
"Promiscuous" : ""); + rx_ring->rx_multicast++; } if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P) { QPRINTK(qdev, RX_STATUS, DEBUG, "Promiscuous Packet.\n"); @@ -1673,8 +1732,8 @@ static void ql_process_mac_rx_intr(struct ql_adapter *qdev, } } - qdev->stats.rx_packets++; - qdev->stats.rx_bytes += skb->len; + rx_ring->rx_packets++; + rx_ring->rx_bytes += skb->len; skb_record_rx_queue(skb, rx_ring->cq_id); if (skb->ip_summed == CHECKSUM_UNNECESSARY) { if (qdev->vlgrp && @@ -1705,8 +1764,8 @@ static void ql_process_mac_tx_intr(struct ql_adapter *qdev, tx_ring = &qdev->tx_ring[mac_rsp->txq_idx]; tx_ring_desc = &tx_ring->q[mac_rsp->tid]; ql_unmap_send(qdev, tx_ring_desc, tx_ring_desc->map_cnt); - qdev->stats.tx_bytes += (tx_ring_desc->skb)->len; - qdev->stats.tx_packets++; + tx_ring->tx_bytes += (tx_ring_desc->skb)->len; + tx_ring->tx_packets++; dev_kfree_skb(tx_ring_desc->skb); tx_ring_desc->skb = NULL; @@ -1929,7 +1988,7 @@ static int ql_napi_poll_msix(struct napi_struct *napi, int budget) return work_done; } -static void ql_vlan_rx_register(struct net_device *ndev, struct vlan_group *grp) +static void qlge_vlan_rx_register(struct net_device *ndev, struct vlan_group *grp) { struct ql_adapter *qdev = netdev_priv(ndev); @@ -1945,7 +2004,7 @@ static void ql_vlan_rx_register(struct net_device *ndev, struct vlan_group *grp) } } -static void ql_vlan_rx_add_vid(struct net_device *ndev, u16 vid) +static void qlge_vlan_rx_add_vid(struct net_device *ndev, u16 vid) { struct ql_adapter *qdev = netdev_priv(ndev); u32 enable_bit = MAC_ADDR_E; @@ -1961,7 +2020,7 @@ static void ql_vlan_rx_add_vid(struct net_device *ndev, u16 vid) ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK); } -static void ql_vlan_rx_kill_vid(struct net_device *ndev, u16 vid) +static void qlge_vlan_rx_kill_vid(struct net_device *ndev, u16 vid) { struct ql_adapter *qdev = netdev_priv(ndev); u32 enable_bit = 0; @@ -2046,12 +2105,12 @@ static irqreturn_t qlge_isr(int irq, void *dev_id) */ var = ql_read32(qdev, ISR1); if (var & intr_context->irq_mask) { - QPRINTK(qdev, INTR, INFO, + QPRINTK(qdev, INTR, INFO, "Waking handler for rx_ring[0].\n"); ql_disable_completion_interrupt(qdev, intr_context->intr); - napi_schedule(&rx_ring->napi); - work_done++; - } + napi_schedule(&rx_ring->napi); + work_done++; + } ql_enable_completion_interrupt(qdev, intr_context->intr); return work_done ? 
IRQ_HANDLED : IRQ_NONE; } @@ -2149,6 +2208,7 @@ static netdev_tx_t qlge_send(struct sk_buff *skb, struct net_device *ndev) __func__, tx_ring_idx); netif_stop_subqueue(ndev, tx_ring->wq_id); atomic_inc(&tx_ring->queue_stopped); + tx_ring->tx_errors++; return NETDEV_TX_BUSY; } tx_ring_desc = &tx_ring->q[tx_ring->prod_idx]; @@ -2183,6 +2243,7 @@ static netdev_tx_t qlge_send(struct sk_buff *skb, struct net_device *ndev) NETDEV_TX_OK) { QPRINTK(qdev, TX_QUEUED, ERR, "Could not map the segments.\n"); + tx_ring->tx_errors++; return NETDEV_TX_BUSY; } QL_DUMP_OB_MAC_IOCB(mac_iocb_ptr); @@ -2199,6 +2260,7 @@ static netdev_tx_t qlge_send(struct sk_buff *skb, struct net_device *ndev) return NETDEV_TX_OK; } + static void ql_free_shadow_space(struct ql_adapter *qdev) { if (qdev->rx_ring_shadow_reg_area) { @@ -2285,8 +2347,8 @@ static int ql_alloc_tx_resources(struct ql_adapter *qdev, pci_alloc_consistent(qdev->pdev, tx_ring->wq_size, &tx_ring->wq_base_dma); - if ((tx_ring->wq_base == NULL) - || tx_ring->wq_base_dma & WQ_ADDR_ALIGN) { + if ((tx_ring->wq_base == NULL) || + tx_ring->wq_base_dma & WQ_ADDR_ALIGN) { QPRINTK(qdev, IFUP, ERR, "tx_ring alloc failed.\n"); return -ENOMEM; } @@ -2304,20 +2366,29 @@ err: static void ql_free_lbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring) { - int i; struct bq_desc *lbq_desc; - for (i = 0; i < rx_ring->lbq_len; i++) { - lbq_desc = &rx_ring->lbq[i]; - if (lbq_desc->p.lbq_page) { + uint32_t curr_idx, clean_idx; + + curr_idx = rx_ring->lbq_curr_idx; + clean_idx = rx_ring->lbq_clean_idx; + while (curr_idx != clean_idx) { + lbq_desc = &rx_ring->lbq[curr_idx]; + + if (lbq_desc->p.pg_chunk.last_flag) { pci_unmap_page(qdev->pdev, - pci_unmap_addr(lbq_desc, mapaddr), - pci_unmap_len(lbq_desc, maplen), + lbq_desc->p.pg_chunk.map, + ql_lbq_block_size(qdev), PCI_DMA_FROMDEVICE); - - put_page(lbq_desc->p.lbq_page); - lbq_desc->p.lbq_page = NULL; + lbq_desc->p.pg_chunk.last_flag = 0; } + + put_page(lbq_desc->p.pg_chunk.page); + lbq_desc->p.pg_chunk.page = NULL; + + if (++curr_idx == rx_ring->lbq_len) + curr_idx = 0; + } } @@ -2615,6 +2686,7 @@ static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring) /* Set up the shadow registers for this ring. */ rx_ring->prod_idx_sh_reg = shadow_reg; rx_ring->prod_idx_sh_reg_dma = shadow_reg_dma; + *rx_ring->prod_idx_sh_reg = 0; shadow_reg += sizeof(u64); shadow_reg_dma += sizeof(u64); rx_ring->lbq_base_indirect = shadow_reg; @@ -2692,7 +2764,7 @@ static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring) cqicb->sbq_addr = cpu_to_le64(rx_ring->sbq_base_indirect_dma); cqicb->sbq_buf_size = - cpu_to_le16((u16)(rx_ring->sbq_buf_size/2)); + cpu_to_le16((u16)(rx_ring->sbq_buf_size)); bq_len = (rx_ring->sbq_len == 65536) ? 0 : (u16) rx_ring->sbq_len; cqicb->sbq_len = cpu_to_le16(bq_len); @@ -2798,7 +2870,7 @@ static void ql_enable_msix(struct ql_adapter *qdev) int i, err; /* Get the MSIX vectors. */ - if (irq_type == MSIX_IRQ) { + if (qlge_irq_type == MSIX_IRQ) { /* Try to alloc space for the msix struct, * if it fails then go to MSI/legacy. 
*/ @@ -2806,7 +2878,7 @@ static void ql_enable_msix(struct ql_adapter *qdev) sizeof(struct msix_entry), GFP_KERNEL); if (!qdev->msi_x_entry) { - irq_type = MSI_IRQ; + qlge_irq_type = MSI_IRQ; goto msi; } @@ -2829,7 +2901,7 @@ static void ql_enable_msix(struct ql_adapter *qdev) QPRINTK(qdev, IFUP, WARNING, "MSI-X Enable failed, trying MSI.\n"); qdev->intr_count = 1; - irq_type = MSI_IRQ; + qlge_irq_type = MSI_IRQ; } else if (err == 0) { set_bit(QL_MSIX_ENABLED, &qdev->flags); QPRINTK(qdev, IFUP, INFO, @@ -2840,7 +2912,7 @@ static void ql_enable_msix(struct ql_adapter *qdev) } msi: qdev->intr_count = 1; - if (irq_type == MSI_IRQ) { + if (qlge_irq_type == MSI_IRQ) { if (!pci_enable_msi(qdev->pdev)) { set_bit(QL_MSI_ENABLED, &qdev->flags); QPRINTK(qdev, IFUP, INFO, @@ -2848,7 +2920,7 @@ msi: return; } } - irq_type = LEG_IRQ; + qlge_irq_type = LEG_IRQ; QPRINTK(qdev, IFUP, DEBUG, "Running with legacy interrupts.\n"); } @@ -3268,7 +3340,7 @@ static int ql_adapter_initialize(struct ql_adapter *qdev) ql_write32(qdev, FSC, mask | value); ql_write32(qdev, SPLT_HDR, SPLT_HDR_EP | - min(SMALL_BUFFER_SIZE, MAX_SPLIT_SIZE)); + min(SMALL_BUF_MAP_SIZE, MAX_SPLIT_SIZE)); /* Set RX packet routing to use port/pci function on which the * packet arrived on in addition to usual frame routing. @@ -3276,6 +3348,22 @@ static int ql_adapter_initialize(struct ql_adapter *qdev) * the same MAC address. */ ql_write32(qdev, RST_FO, RST_FO_RR_MASK | RST_FO_RR_RCV_FUNC_CQ); + /* Reroute all packets to our Interface. + * They may have been routed to MPI firmware + * due to WOL. + */ + value = ql_read32(qdev, MGMT_RCV_CFG); + value &= ~MGMT_RCV_CFG_RM; + mask = 0xffff0000; + + /* Sticky reg needs clearing due to WOL. */ + ql_write32(qdev, MGMT_RCV_CFG, mask); + ql_write32(qdev, MGMT_RCV_CFG, mask | value); + + /* Default WOL is enable on Mezz cards */ + if (qdev->pdev->subsystem_device == 0x0068 || + qdev->pdev->subsystem_device == 0x0180) + qdev->wol = WAKE_MAGIC; /* Start up the rx queues. */ for (i = 0; i < qdev->rx_ring_count; i++) { @@ -3310,10 +3398,8 @@ static int ql_adapter_initialize(struct ql_adapter *qdev) /* Initialize the port and set the max framesize. */ status = qdev->nic_ops->port_initialize(qdev); - if (status) { - QPRINTK(qdev, IFUP, ERR, "Failed to start port.\n"); - return status; - } + if (status) + QPRINTK(qdev, IFUP, ERR, "Failed to start port.\n"); /* Set up the MAC address and frame routing filter. */ status = ql_cam_route_initialize(qdev); @@ -3392,6 +3478,52 @@ static void ql_display_dev_info(struct net_device *ndev) QPRINTK(qdev, PROBE, INFO, "MAC address %pM\n", ndev->dev_addr); } +int ql_wol(struct ql_adapter *qdev) +{ + int status = 0; + u32 wol = MB_WOL_DISABLE; + + /* The CAM is still intact after a reset, but if we + * are doing WOL, then we may need to program the + * routing regs. We would also need to issue the mailbox + * commands to instruct the MPI what to do per the ethtool + * settings. + */ + + if (qdev->wol & (WAKE_ARP | WAKE_MAGICSECURE | WAKE_PHY | WAKE_UCAST | + WAKE_MCAST | WAKE_BCAST)) { + QPRINTK(qdev, IFDOWN, ERR, + "Unsupported WOL paramter. 
qdev->wol = 0x%x.\n", + qdev->wol); + return -EINVAL; + } + + if (qdev->wol & WAKE_MAGIC) { + status = ql_mb_wol_set_magic(qdev, 1); + if (status) { + QPRINTK(qdev, IFDOWN, ERR, + "Failed to set magic packet on %s.\n", + qdev->ndev->name); + return status; + } else + QPRINTK(qdev, DRV, INFO, + "Enabled magic packet successfully on %s.\n", + qdev->ndev->name); + + wol |= MB_WOL_MAGIC_PKT; + } + + if (qdev->wol) { + wol |= MB_WOL_MODE_ON; + status = ql_mb_wol_mode(qdev, wol); + QPRINTK(qdev, DRV, ERR, "WOL %s (wol code 0x%x) on %s\n", + (status == 0) ? "Sucessfully set" : "Failed", wol, + qdev->ndev->name); + } + + return status; +} + static int ql_adapter_down(struct ql_adapter *qdev) { int i, status = 0; @@ -3497,6 +3629,10 @@ static int ql_configure_rings(struct ql_adapter *qdev) struct rx_ring *rx_ring; struct tx_ring *tx_ring; int cpu_cnt = min(MAX_CPUS, (int)num_online_cpus()); + unsigned int lbq_buf_len = (qdev->ndev->mtu > 1500) ? + LARGE_BUFFER_MAX_SIZE : LARGE_BUFFER_MIN_SIZE; + + qdev->lbq_buf_order = get_order(lbq_buf_len); /* In a perfect world we have one RSS ring for each CPU * and each has it's own vector. To do that we ask for @@ -3544,11 +3680,14 @@ static int ql_configure_rings(struct ql_adapter *qdev) rx_ring->lbq_len = NUM_LARGE_BUFFERS; rx_ring->lbq_size = rx_ring->lbq_len * sizeof(__le64); - rx_ring->lbq_buf_size = LARGE_BUFFER_SIZE; + rx_ring->lbq_buf_size = (u16)lbq_buf_len; + QPRINTK(qdev, IFUP, DEBUG, + "lbq_buf_size %d, order = %d\n", + rx_ring->lbq_buf_size, qdev->lbq_buf_order); rx_ring->sbq_len = NUM_SMALL_BUFFERS; rx_ring->sbq_size = rx_ring->sbq_len * sizeof(__le64); - rx_ring->sbq_buf_size = SMALL_BUFFER_SIZE * 2; + rx_ring->sbq_buf_size = SMALL_BUF_MAP_SIZE; rx_ring->type = RX_Q; } else { /* @@ -3575,6 +3714,10 @@ static int qlge_open(struct net_device *ndev) int err = 0; struct ql_adapter *qdev = netdev_priv(ndev); + err = ql_adapter_reset(qdev); + if (err) + return err; + err = ql_configure_rings(qdev); if (err) return err; @@ -3594,14 +3737,63 @@ error_up: return err; } +static int ql_change_rx_buffers(struct ql_adapter *qdev) +{ + struct rx_ring *rx_ring; + int i, status; + u32 lbq_buf_len; + + /* Wait for an oustanding reset to complete. */ + if (!test_bit(QL_ADAPTER_UP, &qdev->flags)) { + int i = 3; + while (i-- && !test_bit(QL_ADAPTER_UP, &qdev->flags)) { + QPRINTK(qdev, IFUP, ERR, + "Waiting for adapter UP...\n"); + ssleep(1); + } + + if (!i) { + QPRINTK(qdev, IFUP, ERR, + "Timed out waiting for adapter UP\n"); + return -ETIMEDOUT; + } + } + + status = ql_adapter_down(qdev); + if (status) + goto error; + + /* Get the new rx buffer size. */ + lbq_buf_len = (qdev->ndev->mtu > 1500) ? + LARGE_BUFFER_MAX_SIZE : LARGE_BUFFER_MIN_SIZE; + qdev->lbq_buf_order = get_order(lbq_buf_len); + + for (i = 0; i < qdev->rss_ring_count; i++) { + rx_ring = &qdev->rx_ring[i]; + /* Set the new size. 
*/ + rx_ring->lbq_buf_size = lbq_buf_len; + } + + status = ql_adapter_up(qdev); + if (status) + goto error; + + return status; +error: + QPRINTK(qdev, IFUP, ALERT, + "Driver up/down cycle failed, closing device.\n"); + set_bit(QL_ADAPTER_UP, &qdev->flags); + dev_close(qdev->ndev); + return status; +} + static int qlge_change_mtu(struct net_device *ndev, int new_mtu) { struct ql_adapter *qdev = netdev_priv(ndev); + int status; if (ndev->mtu == 1500 && new_mtu == 9000) { QPRINTK(qdev, IFUP, ERR, "Changing to jumbo MTU.\n"); - queue_delayed_work(qdev->workqueue, - &qdev->mpi_port_cfg_work, 0); } else if (ndev->mtu == 9000 && new_mtu == 1500) { QPRINTK(qdev, IFUP, ERR, "Changing to normal MTU.\n"); } else if ((ndev->mtu == 1500 && new_mtu == 1500) || @@ -3609,15 +3801,60 @@ static int qlge_change_mtu(struct net_device *ndev, int new_mtu) return 0; } else return -EINVAL; + + queue_delayed_work(qdev->workqueue, + &qdev->mpi_port_cfg_work, 3*HZ); + + if (!netif_running(qdev->ndev)) { + ndev->mtu = new_mtu; + return 0; + } + ndev->mtu = new_mtu; - return 0; + status = ql_change_rx_buffers(qdev); + if (status) { + QPRINTK(qdev, IFUP, ERR, + "Changing MTU failed.\n"); + } + + return status; } static struct net_device_stats *qlge_get_stats(struct net_device *ndev) { struct ql_adapter *qdev = netdev_priv(ndev); - return &qdev->stats; + struct rx_ring *rx_ring = &qdev->rx_ring[0]; + struct tx_ring *tx_ring = &qdev->tx_ring[0]; + unsigned long pkts, mcast, dropped, errors, bytes; + int i; + + /* Get RX stats. */ + pkts = mcast = dropped = errors = bytes = 0; + for (i = 0; i < qdev->rss_ring_count; i++, rx_ring++) { + pkts += rx_ring->rx_packets; + bytes += rx_ring->rx_bytes; + dropped += rx_ring->rx_dropped; + errors += rx_ring->rx_errors; + mcast += rx_ring->rx_multicast; + } + ndev->stats.rx_packets = pkts; + ndev->stats.rx_bytes = bytes; + ndev->stats.rx_dropped = dropped; + ndev->stats.rx_errors = errors; + ndev->stats.multicast = mcast; + + /* Get TX stats. 
*/ + pkts = errors = bytes = 0; + for (i = 0; i < qdev->tx_ring_count; i++, tx_ring++) { + pkts += tx_ring->tx_packets; + bytes += tx_ring->tx_bytes; + errors += tx_ring->tx_errors; + } + ndev->stats.tx_packets = pkts; + ndev->stats.tx_bytes = bytes; + ndev->stats.tx_errors = errors; + return &ndev->stats; } static void qlge_set_multicast_list(struct net_device *ndev) @@ -3714,9 +3951,6 @@ static int qlge_set_mac_address(struct net_device *ndev, void *p) struct sockaddr *addr = p; int status; - if (netif_running(ndev)) - return -EBUSY; - if (!is_valid_ether_addr(addr->sa_data)) return -EADDRNOTAVAIL; memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len); @@ -3868,8 +4102,7 @@ static int __devinit ql_init_device(struct pci_dev *pdev, struct net_device *ndev, int cards_found) { struct ql_adapter *qdev = netdev_priv(ndev); - int pos, err = 0; - u16 val16; + int err = 0; memset((void *)qdev, 0, sizeof(*qdev)); err = pci_enable_device(pdev); @@ -3881,18 +4114,12 @@ static int __devinit ql_init_device(struct pci_dev *pdev, qdev->ndev = ndev; qdev->pdev = pdev; pci_set_drvdata(pdev, ndev); - pos = pci_find_capability(pdev, PCI_CAP_ID_EXP); - if (pos <= 0) { - dev_err(&pdev->dev, PFX "Cannot find PCI Express capability, " - "aborting.\n"); - return pos; - } else { - pci_read_config_word(pdev, pos + PCI_EXP_DEVCTL, &val16); - val16 &= ~PCI_EXP_DEVCTL_NOSNOOP_EN; - val16 |= (PCI_EXP_DEVCTL_CERE | - PCI_EXP_DEVCTL_NFERE | - PCI_EXP_DEVCTL_FERE | PCI_EXP_DEVCTL_URRE); - pci_write_config_word(pdev, pos + PCI_EXP_DEVCTL, val16); + + /* Set PCIe read request size */ + err = pcie_set_readrq(pdev, 4096); + if (err) { + dev_err(&pdev->dev, "Set readrq failed.\n"); + goto err_out; } err = pci_request_regions(pdev, DRV_NAME); @@ -3991,7 +4218,6 @@ err_out: return err; } - static const struct net_device_ops qlge_netdev_ops = { .ndo_open = qlge_open, .ndo_stop = qlge_close, @@ -4002,9 +4228,9 @@ static const struct net_device_ops qlge_netdev_ops = { .ndo_set_mac_address = qlge_set_mac_address, .ndo_validate_addr = eth_validate_addr, .ndo_tx_timeout = qlge_tx_timeout, - .ndo_vlan_rx_register = ql_vlan_rx_register, - .ndo_vlan_rx_add_vid = ql_vlan_rx_add_vid, - .ndo_vlan_rx_kill_vid = ql_vlan_rx_kill_vid, + .ndo_vlan_rx_register = qlge_vlan_rx_register, + .ndo_vlan_rx_add_vid = qlge_vlan_rx_add_vid, + .ndo_vlan_rx_kill_vid = qlge_vlan_rx_kill_vid, }; static int __devinit qlge_probe(struct pci_dev *pdev, @@ -4060,10 +4286,21 @@ static int __devinit qlge_probe(struct pci_dev *pdev, } ql_link_off(qdev); ql_display_dev_info(ndev); + atomic_set(&qdev->lb_count, 0); cards_found++; return 0; } +netdev_tx_t ql_lb_send(struct sk_buff *skb, struct net_device *ndev) +{ + return qlge_send(skb, ndev); +} + +int ql_clean_lb_rx_ring(struct rx_ring *rx_ring, int budget) +{ + return ql_clean_inbound_rx_ring(rx_ring, budget); +} + static void __devexit qlge_remove(struct pci_dev *pdev) { struct net_device *ndev = pci_get_drvdata(pdev); @@ -4193,6 +4430,7 @@ static int qlge_suspend(struct pci_dev *pdev, pm_message_t state) return err; } + ql_wol(qdev); err = pci_save_state(pdev); if (err) return err; diff --git a/drivers/net/qlge/qlge_mpi.c b/drivers/net/qlge/qlge_mpi.c index aec05f26610..e2b2286102d 100644 --- a/drivers/net/qlge/qlge_mpi.c +++ b/drivers/net/qlge/qlge_mpi.c @@ -1,25 +1,5 @@ #include "qlge.h" -static void ql_display_mb_sts(struct ql_adapter *qdev, - struct mbox_params *mbcp) -{ - int i; - static char *err_sts[] = { - "Command Complete", - "Command Not Supported", - "Host Interface Error", - "Checksum Error", - 
"Unused Completion Status", - "Test Failed", - "Command Parameter Error"}; - - QPRINTK(qdev, DRV, DEBUG, "%s.\n", - err_sts[mbcp->mbox_out[0] & 0x0000000f]); - for (i = 0; i < mbcp->out_count; i++) - QPRINTK(qdev, DRV, DEBUG, "mbox_out[%d] = 0x%.08x.\n", - i, mbcp->mbox_out[i]); -} - int ql_read_mpi_reg(struct ql_adapter *qdev, u32 reg, u32 *data) { int status; @@ -317,6 +297,7 @@ static void ql_init_fw_done(struct ql_adapter *qdev, struct mbox_params *mbcp) } else { QPRINTK(qdev, DRV, ERR, "Firmware Revision = 0x%.08x.\n", mbcp->mbox_out[1]); + qdev->fw_rev_id = mbcp->mbox_out[1]; status = ql_cam_route_initialize(qdev); if (status) QPRINTK(qdev, IFUP, ERR, @@ -446,6 +427,9 @@ static int ql_mpi_handler(struct ql_adapter *qdev, struct mbox_params *mbcp) ql_aen_lost(qdev, mbcp); break; + case AEN_DCBX_CHG: + /* Need to support AEN 8110 */ + break; default: QPRINTK(qdev, DRV, ERR, "Unsupported AE %.08x.\n", mbcp->mbox_out[0]); @@ -537,7 +521,6 @@ done: MB_CMD_STS_GOOD) && ((mbcp->mbox_out[0] & 0x0000f000) != MB_CMD_STS_INTRMDT)) { - ql_display_mb_sts(qdev, mbcp); status = -EIO; } end: @@ -655,7 +638,7 @@ int ql_mb_idc_ack(struct ql_adapter *qdev) * for the current port. * Most likely will block. */ -static int ql_mb_set_port_cfg(struct ql_adapter *qdev) +int ql_mb_set_port_cfg(struct ql_adapter *qdev) { struct mbox_params mbc; struct mbox_params *mbcp = &mbc; @@ -690,7 +673,7 @@ static int ql_mb_set_port_cfg(struct ql_adapter *qdev) * for the current port. * Most likely will block. */ -static int ql_mb_get_port_cfg(struct ql_adapter *qdev) +int ql_mb_get_port_cfg(struct ql_adapter *qdev) { struct mbox_params mbc; struct mbox_params *mbcp = &mbc; @@ -720,6 +703,76 @@ static int ql_mb_get_port_cfg(struct ql_adapter *qdev) return status; } +int ql_mb_wol_mode(struct ql_adapter *qdev, u32 wol) +{ + struct mbox_params mbc; + struct mbox_params *mbcp = &mbc; + int status; + + memset(mbcp, 0, sizeof(struct mbox_params)); + + mbcp->in_count = 2; + mbcp->out_count = 1; + + mbcp->mbox_in[0] = MB_CMD_SET_WOL_MODE; + mbcp->mbox_in[1] = wol; + + + status = ql_mailbox_command(qdev, mbcp); + if (status) + return status; + + if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) { + QPRINTK(qdev, DRV, ERR, + "Failed to set WOL mode.\n"); + status = -EIO; + } + return status; +} + +int ql_mb_wol_set_magic(struct ql_adapter *qdev, u32 enable_wol) +{ + struct mbox_params mbc; + struct mbox_params *mbcp = &mbc; + int status; + u8 *addr = qdev->ndev->dev_addr; + + memset(mbcp, 0, sizeof(struct mbox_params)); + + mbcp->in_count = 8; + mbcp->out_count = 1; + + mbcp->mbox_in[0] = MB_CMD_SET_WOL_MAGIC; + if (enable_wol) { + mbcp->mbox_in[1] = (u32)addr[0]; + mbcp->mbox_in[2] = (u32)addr[1]; + mbcp->mbox_in[3] = (u32)addr[2]; + mbcp->mbox_in[4] = (u32)addr[3]; + mbcp->mbox_in[5] = (u32)addr[4]; + mbcp->mbox_in[6] = (u32)addr[5]; + mbcp->mbox_in[7] = 0; + } else { + mbcp->mbox_in[1] = 0; + mbcp->mbox_in[2] = 1; + mbcp->mbox_in[3] = 1; + mbcp->mbox_in[4] = 1; + mbcp->mbox_in[5] = 1; + mbcp->mbox_in[6] = 1; + mbcp->mbox_in[7] = 0; + } + + status = ql_mailbox_command(qdev, mbcp); + if (status) + return status; + + if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) { + QPRINTK(qdev, DRV, ERR, + "Failed to set WOL mode.\n"); + status = -EIO; + } + return status; +} + /* IDC - Inter Device Communication... * Some firmware commands require consent of adjacent FCOE * function. 
This function waits for the OK, or a @@ -769,6 +822,61 @@ static int ql_idc_wait(struct ql_adapter *qdev) return status; } +int ql_mb_set_led_cfg(struct ql_adapter *qdev, u32 led_config) +{ + struct mbox_params mbc; + struct mbox_params *mbcp = &mbc; + int status; + + memset(mbcp, 0, sizeof(struct mbox_params)); + + mbcp->in_count = 2; + mbcp->out_count = 1; + + mbcp->mbox_in[0] = MB_CMD_SET_LED_CFG; + mbcp->mbox_in[1] = led_config; + + + status = ql_mailbox_command(qdev, mbcp); + if (status) + return status; + + if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) { + QPRINTK(qdev, DRV, ERR, + "Failed to set LED Configuration.\n"); + status = -EIO; + } + + return status; +} + +int ql_mb_get_led_cfg(struct ql_adapter *qdev) +{ + struct mbox_params mbc; + struct mbox_params *mbcp = &mbc; + int status; + + memset(mbcp, 0, sizeof(struct mbox_params)); + + mbcp->in_count = 1; + mbcp->out_count = 2; + + mbcp->mbox_in[0] = MB_CMD_GET_LED_CFG; + + status = ql_mailbox_command(qdev, mbcp); + if (status) + return status; + + if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) { + QPRINTK(qdev, DRV, ERR, + "Failed to get LED Configuration.\n"); + status = -EIO; + } else + qdev->led_config = mbcp->mbox_out[1]; + + return status; +} + int ql_mb_set_mgmnt_traffic_ctl(struct ql_adapter *qdev, u32 control) { struct mbox_params mbc; @@ -930,8 +1038,11 @@ void ql_mpi_idc_work(struct work_struct *work) int status; struct mbox_params *mbcp = &qdev->idc_mbc; u32 aen; + int timeout; + rtnl_lock(); aen = mbcp->mbox_out[1] >> 16; + timeout = (mbcp->mbox_out[1] >> 8) & 0xf; switch (aen) { default: @@ -939,22 +1050,61 @@ void ql_mpi_idc_work(struct work_struct *work) "Bug: Unhandled IDC action.\n"); break; case MB_CMD_PORT_RESET: - case MB_CMD_SET_PORT_CFG: case MB_CMD_STOP_FW: ql_link_off(qdev); + case MB_CMD_SET_PORT_CFG: /* Signal the resulting link up AEN * that the frame routing and mac addr * needs to be set. * */ set_bit(QL_CAM_RT_SET, &qdev->flags); - rtnl_lock(); - status = ql_mb_idc_ack(qdev); - rtnl_unlock(); - if (status) { - QPRINTK(qdev, DRV, ERR, - "Bug: No pending IDC!\n"); + /* Do ACK if required */ + if (timeout) { + status = ql_mb_idc_ack(qdev); + if (status) + QPRINTK(qdev, DRV, ERR, + "Bug: No pending IDC!\n"); + } else { + QPRINTK(qdev, DRV, DEBUG, + "IDC ACK not required\n"); + status = 0; /* success */ } + break; + + /* These sub-commands issued by another (FCoE) + * function are requesting to do an operation + * on the shared resource (MPI environment). + * We currently don't issue these so we just + * ACK the request. + */ + case MB_CMD_IOP_RESTART_MPI: + case MB_CMD_IOP_PREP_LINK_DOWN: + /* Drop the link, reload the routing + * table when link comes up. + */ + ql_link_off(qdev); + set_bit(QL_CAM_RT_SET, &qdev->flags); + /* Fall through. */ + case MB_CMD_IOP_DVR_START: + case MB_CMD_IOP_FLASH_ACC: + case MB_CMD_IOP_CORE_DUMP_MPI: + case MB_CMD_IOP_PREP_UPDATE_MPI: + case MB_CMD_IOP_COMP_UPDATE_MPI: + case MB_CMD_IOP_NONE: /* an IDC without params */ + /* Do ACK if required */ + if (timeout) { + status = ql_mb_idc_ack(qdev); + if (status) + QPRINTK(qdev, DRV, ERR, + "Bug: No pending IDC!\n"); + } else { + QPRINTK(qdev, DRV, DEBUG, + "IDC ACK not required\n"); + status = 0; /* success */ + } + break; } + rtnl_unlock(); } void ql_mpi_work(struct work_struct *work) |