Diffstat (limited to 'drivers/net/ehea')
-rw-r--r--  drivers/net/ehea/ehea.h          |  42
-rw-r--r--  drivers/net/ehea/ehea_ethtool.c  | 115
-rw-r--r--  drivers/net/ehea/ehea_main.c     | 979
-rw-r--r--  drivers/net/ehea/ehea_phyp.c     |   6
-rw-r--r--  drivers/net/ehea/ehea_phyp.h     |   6
-rw-r--r--  drivers/net/ehea/ehea_qmr.c      | 184
-rw-r--r--  drivers/net/ehea/ehea_qmr.h      |  16
7 files changed, 837 insertions, 511 deletions
diff --git a/drivers/net/ehea/ehea.h b/drivers/net/ehea/ehea.h index 42295d61ecd..602872dbe15 100644 --- a/drivers/net/ehea/ehea.h +++ b/drivers/net/ehea/ehea.h @@ -39,7 +39,7 @@ #include <asm/io.h> #define DRV_NAME "ehea" -#define DRV_VERSION "EHEA_0046" +#define DRV_VERSION "EHEA_0058" #define EHEA_MSG_DEFAULT (NETIF_MSG_LINK | NETIF_MSG_TIMER \ | NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR) @@ -78,10 +78,7 @@ #define EHEA_RQ2_PKT_SIZE 1522 #define EHEA_L_PKT_SIZE 256 /* low latency */ -#define EHEA_POLL_MAX_RWQE 1000 - /* Send completion signaling */ -#define EHEA_SIG_IV_LONG 1 /* Protection Domain Identifier */ #define EHEA_PD_ID 0xaabcdeff @@ -108,11 +105,7 @@ #define EHEA_CACHE_LINE 128 /* Memory Regions */ -#define EHEA_MR_MAX_TX_PAGES 20 -#define EHEA_MR_TX_DATA_PN 3 #define EHEA_MR_ACC_CTRL 0x00800000 -#define EHEA_RWQES_PER_MR_RQ2 10 -#define EHEA_RWQES_PER_MR_RQ3 10 #define EHEA_WATCH_DOG_TIMEOUT 10*HZ @@ -311,6 +304,7 @@ struct ehea_cq { * Memory Region */ struct ehea_mr { + struct ehea_adapter *adapter; u64 handle; u64 vaddr; u32 lkey; @@ -319,17 +313,12 @@ struct ehea_mr { /* * Port state information */ -struct port_state { - int poll_max_processed; +struct port_stats { int poll_receive_errors; - int ehea_poll; int queue_stopped; - int min_swqe_avail; - u64 sqc_stop_sum; - int pkt_send; - int pkt_xmit; - int send_tasklet; - int nwqe; + int err_tcp_cksum; + int err_ip_cksum; + int err_frame_crc; }; #define EHEA_IRQ_NAME_SIZE 20 @@ -348,6 +337,7 @@ struct ehea_q_skb_arr { * Port resources */ struct ehea_port_res { + struct port_stats p_stats; struct ehea_mr send_mr; /* send memory region */ struct ehea_mr recv_mr; /* receive memory region */ spinlock_t xmit_lock; @@ -357,9 +347,8 @@ struct ehea_port_res { struct ehea_qp *qp; struct ehea_cq *send_cq; struct ehea_cq *recv_cq; - struct ehea_eq *send_eq; - struct ehea_eq *recv_eq; - spinlock_t send_lock; + struct ehea_eq *eq; + struct net_device *d_netdev; struct ehea_q_skb_arr rq1_skba; struct ehea_q_skb_arr rq2_skba; struct ehea_q_skb_arr rq3_skba; @@ -369,21 +358,18 @@ struct ehea_port_res { int swqe_refill_th; atomic_t swqe_avail; int swqe_ll_count; - int swqe_count; u32 swqe_id_counter; u64 tx_packets; - struct tasklet_struct send_comp_task; - spinlock_t recv_lock; - struct port_state p_state; u64 rx_packets; u32 poll_counter; }; +#define EHEA_MAX_PORTS 16 struct ehea_adapter { u64 handle; - u8 num_ports; - struct ehea_port *port[16]; + struct ibmebus_dev *ebus_dev; + struct ehea_port *port[EHEA_MAX_PORTS]; struct ehea_eq *neq; /* notification event queue */ struct workqueue_struct *ehea_wq; struct tasklet_struct neq_tasklet; @@ -406,7 +392,7 @@ struct ehea_port { struct net_device *netdev; struct net_device_stats stats; struct ehea_port_res port_res[EHEA_MAX_PORT_RES]; - struct device_node *of_dev_node; /* Open Firmware Device Node */ + struct of_device ofdev; /* Open Firmware Device */ struct ehea_mc_list *mc_list; /* Multicast MAC addresses */ struct vlan_group *vgrp; struct ehea_eq *qp_eq; @@ -415,7 +401,9 @@ struct ehea_port { char int_aff_name[EHEA_IRQ_NAME_SIZE]; int allmulti; /* Indicates IFF_ALLMULTI state */ int promisc; /* Indicates IFF_PROMISC state */ + int num_tx_qps; int num_add_tx_qps; + int num_mcs; int resets; u64 mac_addr; u32 logical_port_id; diff --git a/drivers/net/ehea/ehea_ethtool.c b/drivers/net/ehea/ehea_ethtool.c index 9f57c2e78ce..decec8cfe96 100644 --- a/drivers/net/ehea/ehea_ethtool.c +++ b/drivers/net/ehea/ehea_ethtool.c @@ -144,8 +144,8 @@ static int ehea_nway_reset(struct net_device *dev) static void 
ehea_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) { - strlcpy(info->driver, DRV_NAME, sizeof(info->driver) - 1); - strlcpy(info->version, DRV_VERSION, sizeof(info->version) - 1); + strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); + strlcpy(info->version, DRV_VERSION, sizeof(info->version)); } static u32 ehea_get_msglevel(struct net_device *dev) @@ -166,33 +166,23 @@ static u32 ehea_get_rx_csum(struct net_device *dev) } static char ehea_ethtool_stats_keys[][ETH_GSTRING_LEN] = { - {"poll_max_processed"}, - {"queue_stopped"}, - {"min_swqe_avail"}, - {"poll_receive_err"}, - {"pkt_send"}, - {"pkt_xmit"}, - {"send_tasklet"}, - {"ehea_poll"}, - {"nwqe"}, - {"swqe_available_0"}, {"sig_comp_iv"}, {"swqe_refill_th"}, {"port resets"}, - {"rxo"}, - {"rx64"}, - {"rx65"}, - {"rx128"}, - {"rx256"}, - {"rx512"}, - {"rx1024"}, - {"txo"}, - {"tx64"}, - {"tx65"}, - {"tx128"}, - {"tx256"}, - {"tx512"}, - {"tx1024"}, + {"Receive errors"}, + {"TCP cksum errors"}, + {"IP cksum errors"}, + {"Frame cksum errors"}, + {"num SQ stopped"}, + {"SQ stopped"}, + {"PR0 free_swqes"}, + {"PR1 free_swqes"}, + {"PR2 free_swqes"}, + {"PR3 free_swqes"}, + {"PR4 free_swqes"}, + {"PR5 free_swqes"}, + {"PR6 free_swqes"}, + {"PR7 free_swqes"}, }; static void ehea_get_strings(struct net_device *dev, u32 stringset, u8 *data) @@ -211,63 +201,44 @@ static int ehea_get_stats_count(struct net_device *dev) static void ehea_get_ethtool_stats(struct net_device *dev, struct ethtool_stats *stats, u64 *data) { - u64 hret; - int i; + int i, k, tmp; struct ehea_port *port = netdev_priv(dev); - struct ehea_adapter *adapter = port->adapter; - struct ehea_port_res *pr = &port->port_res[0]; - struct port_state *p_state = &pr->p_state; - struct hcp_ehea_port_cb6 *cb6; for (i = 0; i < ehea_get_stats_count(dev); i++) data[i] = 0; - i = 0; - data[i++] = p_state->poll_max_processed; - data[i++] = p_state->queue_stopped; - data[i++] = p_state->min_swqe_avail; - data[i++] = p_state->poll_receive_errors; - data[i++] = p_state->pkt_send; - data[i++] = p_state->pkt_xmit; - data[i++] = p_state->send_tasklet; - data[i++] = p_state->ehea_poll; - data[i++] = p_state->nwqe; - data[i++] = atomic_read(&port->port_res[0].swqe_avail); data[i++] = port->sig_comp_iv; data[i++] = port->port_res[0].swqe_refill_th; data[i++] = port->resets; - cb6 = kzalloc(PAGE_SIZE, GFP_KERNEL); - if (!cb6) { - ehea_error("no mem for cb6"); - return; - } + for (k = 0, tmp = 0; k < EHEA_MAX_PORT_RES; k++) + tmp += port->port_res[k].p_stats.poll_receive_errors; + data[i++] = tmp; + + for (k = 0, tmp = 0; k < EHEA_MAX_PORT_RES; k++) + tmp += port->port_res[k].p_stats.err_tcp_cksum; + data[i++] = tmp; + + for (k = 0, tmp = 0; k < EHEA_MAX_PORT_RES; k++) + tmp += port->port_res[k].p_stats.err_ip_cksum; + data[i++] = tmp; + + for (k = 0, tmp = 0; k < EHEA_MAX_PORT_RES; k++) + tmp += port->port_res[k].p_stats.err_frame_crc; + data[i++] = tmp; + + for (k = 0, tmp = 0; k < EHEA_MAX_PORT_RES; k++) + tmp += port->port_res[k].p_stats.queue_stopped; + data[i++] = tmp; + + for (k = 0, tmp = 0; k < EHEA_MAX_PORT_RES; k++) + tmp |= port->port_res[k].queue_stopped; + data[i++] = tmp; + + for (k = 0; k < 8; k++) + data[i++] = atomic_read(&port->port_res[k].swqe_avail); - hret = ehea_h_query_ehea_port(adapter->handle, port->logical_port_id, - H_PORT_CB6, H_PORT_CB6_ALL, cb6); - if (netif_msg_hw(port)) - ehea_dump(cb6, sizeof(*cb6), "ehea_get_ethtool_stats"); - - if (hret == H_SUCCESS) { - data[i++] = cb6->rxo; - data[i++] = cb6->rx64; - data[i++] = cb6->rx65; - data[i++] = 
cb6->rx128; - data[i++] = cb6->rx256; - data[i++] = cb6->rx512; - data[i++] = cb6->rx1024; - data[i++] = cb6->txo; - data[i++] = cb6->tx64; - data[i++] = cb6->tx65; - data[i++] = cb6->tx128; - data[i++] = cb6->tx256; - data[i++] = cb6->tx512; - data[i++] = cb6->tx1024; - } else - ehea_error("query_ehea_port failed"); - - kfree(cb6); } const struct ethtool_ops ehea_ethtool_ops = { diff --git a/drivers/net/ehea/ehea_main.c b/drivers/net/ehea/ehea_main.c index 0e4042bc0a4..c7a5614e66c 100644 --- a/drivers/net/ehea/ehea_main.c +++ b/drivers/net/ehea/ehea_main.c @@ -51,13 +51,18 @@ static int rq1_entries = EHEA_DEF_ENTRIES_RQ1; static int rq2_entries = EHEA_DEF_ENTRIES_RQ2; static int rq3_entries = EHEA_DEF_ENTRIES_RQ3; static int sq_entries = EHEA_DEF_ENTRIES_SQ; +static int use_mcs = 0; +static int num_tx_qps = EHEA_NUM_TX_QP; module_param(msg_level, int, 0); module_param(rq1_entries, int, 0); module_param(rq2_entries, int, 0); module_param(rq3_entries, int, 0); module_param(sq_entries, int, 0); +module_param(use_mcs, int, 0); +module_param(num_tx_qps, int, 0); +MODULE_PARM_DESC(num_tx_qps, "Number of TX-QPS"); MODULE_PARM_DESC(msg_level, "msg_level"); MODULE_PARM_DESC(rq3_entries, "Number of entries for Receive Queue 3 " "[2^x - 1], x = [6..14]. Default = " @@ -71,6 +76,29 @@ MODULE_PARM_DESC(rq1_entries, "Number of entries for Receive Queue 1 " MODULE_PARM_DESC(sq_entries, " Number of entries for the Send Queue " "[2^x - 1], x = [6..14]. Default = " __MODULE_STRING(EHEA_DEF_ENTRIES_SQ) ")"); +MODULE_PARM_DESC(use_mcs, " 0:NAPI, 1:Multiple receive queues, Default = 1 "); + +static int port_name_cnt = 0; + +static int __devinit ehea_probe_adapter(struct ibmebus_dev *dev, + const struct of_device_id *id); + +static int __devexit ehea_remove(struct ibmebus_dev *dev); + +static struct of_device_id ehea_device_table[] = { + { + .name = "lhea", + .compatible = "IBM,lhea", + }, + {}, +}; + +static struct ibmebus_driver ehea_driver = { + .name = "ehea", + .id_table = ehea_device_table, + .probe = ehea_probe_adapter, + .remove = ehea_remove, +}; void ehea_dump(void *adr, int len, char *msg) { int x; @@ -197,7 +225,7 @@ static int ehea_refill_rq_def(struct ehea_port_res *pr, struct sk_buff *skb = netdev_alloc_skb(dev, packet_size); if (!skb) { ehea_error("%s: no mem for skb/%d wqes filled", - dev->name, i); + pr->port->netdev->name, i); q_skba->os_skbs = fill_wqes - i; ret = -ENOMEM; break; @@ -321,6 +349,13 @@ static int ehea_treat_poll_error(struct ehea_port_res *pr, int rq, { struct sk_buff *skb; + if (cqe->status & EHEA_CQE_STAT_ERR_TCP) + pr->p_stats.err_tcp_cksum++; + if (cqe->status & EHEA_CQE_STAT_ERR_IP) + pr->p_stats.err_ip_cksum++; + if (cqe->status & EHEA_CQE_STAT_ERR_CRC) + pr->p_stats.err_frame_crc++; + if (netif_msg_rx_err(pr->port)) { ehea_error("CQE Error for QP %d", pr->qp->init_attr.qp_nr); ehea_dump(cqe, sizeof(*cqe), "CQE"); @@ -345,10 +380,11 @@ static int ehea_treat_poll_error(struct ehea_port_res *pr, int rq, return 0; } -static int ehea_poll(struct net_device *dev, int *budget) +static struct ehea_cqe *ehea_proc_rwqes(struct net_device *dev, + struct ehea_port_res *pr, + int *budget) { - struct ehea_port *port = netdev_priv(dev); - struct ehea_port_res *pr = &port->port_res[0]; + struct ehea_port *port = pr->port; struct ehea_qp *qp = pr->qp; struct ehea_cqe *cqe; struct sk_buff *skb; @@ -359,14 +395,12 @@ static int ehea_poll(struct net_device *dev, int *budget) int skb_arr_rq2_len = pr->rq2_skba.len; int skb_arr_rq3_len = pr->rq3_skba.len; int processed, processed_rq1, 
processed_rq2, processed_rq3; - int wqe_index, last_wqe_index, rq, intreq, my_quota, port_reset; + int wqe_index, last_wqe_index, rq, my_quota, port_reset; processed = processed_rq1 = processed_rq2 = processed_rq3 = 0; last_wqe_index = 0; my_quota = min(*budget, dev->quota); - my_quota = min(my_quota, EHEA_POLL_MAX_RWQE); - /* rq0 is low latency RQ */ cqe = ehea_poll_rq1(qp, &wqe_index); while ((my_quota > 0) && cqe) { ehea_inc_rq1(qp); @@ -386,13 +420,14 @@ static int ehea_poll(struct net_device *dev, int *budget) if (unlikely(!skb)) { if (netif_msg_rx_err(port)) ehea_error("LL rq1: skb=NULL"); - skb = netdev_alloc_skb(dev, + + skb = netdev_alloc_skb(port->netdev, EHEA_L_PKT_SIZE); if (!skb) break; } - memcpy(skb->data, ((char*)cqe) + 64, - cqe->num_bytes_transfered - 4); + skb_copy_to_linear_data(skb, ((char*)cqe) + 64, + cqe->num_bytes_transfered - 4); ehea_fill_skb(dev, skb, cqe); } else if (rq == 2) { /* RQ2 */ skb = get_skb_by_index(skb_arr_rq2, @@ -402,7 +437,7 @@ static int ehea_poll(struct net_device *dev, int *budget) ehea_error("rq2: skb=NULL"); break; } - ehea_fill_skb(dev, skb, cqe); + ehea_fill_skb(port->netdev, skb, cqe); processed_rq2++; } else { /* RQ3 */ skb = get_skb_by_index(skb_arr_rq3, @@ -412,7 +447,7 @@ static int ehea_poll(struct net_device *dev, int *budget) ehea_error("rq3: skb=NULL"); break; } - ehea_fill_skb(dev, skb, cqe); + ehea_fill_skb(port->netdev, skb, cqe); processed_rq3++; } @@ -421,9 +456,8 @@ static int ehea_poll(struct net_device *dev, int *budget) cqe->vlan_tag); else netif_receive_skb(skb); - - } else { /* Error occured */ - pr->p_state.poll_receive_errors++; + } else { + pr->p_stats.poll_receive_errors++; port_reset = ehea_treat_poll_error(pr, rq, cqe, &processed_rq2, &processed_rq3); @@ -433,72 +467,32 @@ static int ehea_poll(struct net_device *dev, int *budget) cqe = ehea_poll_rq1(qp, &wqe_index); } - dev->quota -= processed; - *budget -= processed; - - pr->p_state.ehea_poll += 1; pr->rx_packets += processed; + *budget -= processed; ehea_refill_rq1(pr, last_wqe_index, processed_rq1); ehea_refill_rq2(pr, processed_rq2); ehea_refill_rq3(pr, processed_rq3); - intreq = ((pr->p_state.ehea_poll & 0xF) == 0xF); - - if (!cqe || intreq) { - netif_rx_complete(dev); - ehea_reset_cq_ep(pr->recv_cq); - ehea_reset_cq_n1(pr->recv_cq); - cqe = hw_qeit_get_valid(&qp->hw_rqueue1); - if (!cqe || intreq) - return 0; - if (!netif_rx_reschedule(dev, my_quota)) - return 0; - } - return 1; + cqe = ehea_poll_rq1(qp, &wqe_index); + return cqe; } -void free_sent_skbs(struct ehea_cqe *cqe, struct ehea_port_res *pr) +static struct ehea_cqe *ehea_proc_cqes(struct ehea_port_res *pr, int my_quota) { struct sk_buff *skb; - int index, max_index_mask, i; - - index = EHEA_BMASK_GET(EHEA_WR_ID_INDEX, cqe->wr_id); - max_index_mask = pr->sq_skba.len - 1; - for (i = 0; i < EHEA_BMASK_GET(EHEA_WR_ID_REFILL, cqe->wr_id); i++) { - skb = pr->sq_skba.arr[index]; - if (likely(skb)) { - dev_kfree_skb(skb); - pr->sq_skba.arr[index] = NULL; - } else { - ehea_error("skb=NULL, wr_id=%lX, loop=%d, index=%d", - cqe->wr_id, i, index); - } - index--; - index &= max_index_mask; - } -} - -#define MAX_SENDCOMP_QUOTA 400 -void ehea_send_irq_tasklet(unsigned long data) -{ - struct ehea_port_res *pr = (struct ehea_port_res*)data; struct ehea_cq *send_cq = pr->send_cq; struct ehea_cqe *cqe; - int quota = MAX_SENDCOMP_QUOTA; + int quota = my_quota; int cqe_counter = 0; int swqe_av = 0; + int index; unsigned long flags; - do { - cqe = ehea_poll_cq(send_cq); - if (!cqe) { - ehea_reset_cq_ep(send_cq); - 
ehea_reset_cq_n1(send_cq); - cqe = ehea_poll_cq(send_cq); - if (!cqe) - break; - } + cqe = ehea_poll_cq(send_cq); + while(cqe && (quota > 0)) { + ehea_inc_cq(send_cq); + cqe_counter++; rmb(); if (cqe->status & EHEA_CQE_STAT_ERR_MASK) { @@ -514,17 +508,25 @@ void ehea_send_irq_tasklet(unsigned long data) ehea_dump(cqe, sizeof(*cqe), "CQE"); if (likely(EHEA_BMASK_GET(EHEA_WR_ID_TYPE, cqe->wr_id) - == EHEA_SWQE2_TYPE)) - free_sent_skbs(cqe, pr); + == EHEA_SWQE2_TYPE)) { + + index = EHEA_BMASK_GET(EHEA_WR_ID_INDEX, cqe->wr_id); + skb = pr->sq_skba.arr[index]; + dev_kfree_skb(skb); + pr->sq_skba.arr[index] = NULL; + } swqe_av += EHEA_BMASK_GET(EHEA_WR_ID_REFILL, cqe->wr_id); quota--; - } while (quota > 0); + + cqe = ehea_poll_cq(send_cq); + }; ehea_update_feca(send_cq, cqe_counter); atomic_add(swqe_av, &pr->swqe_avail); spin_lock_irqsave(&pr->netif_queue, flags); + if (pr->queue_stopped && (atomic_read(&pr->swqe_avail) >= pr->swqe_refill_th)) { netif_wake_queue(pr->port->netdev); @@ -532,22 +534,55 @@ void ehea_send_irq_tasklet(unsigned long data) } spin_unlock_irqrestore(&pr->netif_queue, flags); - if (unlikely(cqe)) - tasklet_hi_schedule(&pr->send_comp_task); + return cqe; } -static irqreturn_t ehea_send_irq_handler(int irq, void *param) +#define EHEA_NAPI_POLL_NUM_BEFORE_IRQ 16 + +static int ehea_poll(struct net_device *dev, int *budget) { - struct ehea_port_res *pr = param; - tasklet_hi_schedule(&pr->send_comp_task); - return IRQ_HANDLED; + struct ehea_port_res *pr = dev->priv; + struct ehea_cqe *cqe; + struct ehea_cqe *cqe_skb = NULL; + int force_irq, wqe_index; + + cqe = ehea_poll_rq1(pr->qp, &wqe_index); + cqe_skb = ehea_poll_cq(pr->send_cq); + + force_irq = (pr->poll_counter > EHEA_NAPI_POLL_NUM_BEFORE_IRQ); + + if ((!cqe && !cqe_skb) || force_irq) { + pr->poll_counter = 0; + netif_rx_complete(dev); + ehea_reset_cq_ep(pr->recv_cq); + ehea_reset_cq_ep(pr->send_cq); + ehea_reset_cq_n1(pr->recv_cq); + ehea_reset_cq_n1(pr->send_cq); + cqe = ehea_poll_rq1(pr->qp, &wqe_index); + cqe_skb = ehea_poll_cq(pr->send_cq); + + if (!cqe && !cqe_skb) + return 0; + + if (!netif_rx_reschedule(dev, dev->quota)) + return 0; + } + + cqe = ehea_proc_rwqes(dev, pr, budget); + cqe_skb = ehea_proc_cqes(pr, 300); + + if (cqe || cqe_skb) + pr->poll_counter++; + + return 1; } static irqreturn_t ehea_recv_irq_handler(int irq, void *param) { struct ehea_port_res *pr = param; - struct ehea_port *port = pr->port; - netif_rx_schedule(port->netdev); + + netif_rx_schedule(pr->d_netdev); + return IRQ_HANDLED; } @@ -580,7 +615,7 @@ static struct ehea_port *ehea_get_port(struct ehea_adapter *adapter, { int i; - for (i = 0; i < adapter->num_ports; i++) + for (i = 0; i < EHEA_MAX_PORTS; i++) if (adapter->port[i]) if (adapter->port[i]->logical_port_id == logical_port) return adapter->port[i]; @@ -650,19 +685,25 @@ int ehea_sense_port_attr(struct ehea_port *port) } port->autoneg = 1; + port->num_mcs = cb0->num_default_qps; /* Number of default QPs */ - port->num_def_qps = cb0->num_default_qps; + if (use_mcs) + port->num_def_qps = cb0->num_default_qps; + else + port->num_def_qps = 1; if (!port->num_def_qps) { ret = -EINVAL; goto out_free; } - if (port->num_def_qps >= EHEA_NUM_TX_QP) + port->num_tx_qps = num_tx_qps; + + if (port->num_def_qps >= port->num_tx_qps) port->num_add_tx_qps = 0; else - port->num_add_tx_qps = EHEA_NUM_TX_QP - port->num_def_qps; + port->num_add_tx_qps = port->num_tx_qps - port->num_def_qps; ret = 0; out_free: @@ -882,23 +923,6 @@ static int ehea_reg_interrupts(struct net_device *dev) struct ehea_port_res 
*pr; int i, ret; - for (i = 0; i < port->num_def_qps; i++) { - pr = &port->port_res[i]; - snprintf(pr->int_recv_name, EHEA_IRQ_NAME_SIZE - 1 - , "%s-recv%d", dev->name, i); - ret = ibmebus_request_irq(NULL, pr->recv_eq->attr.ist1, - ehea_recv_irq_handler, - IRQF_DISABLED, pr->int_recv_name, pr); - if (ret) { - ehea_error("failed registering irq for ehea_recv_int:" - "port_res_nr:%d, ist=%X", i, - pr->recv_eq->attr.ist1); - goto out_free_seq; - } - if (netif_msg_ifup(port)) - ehea_info("irq_handle 0x%X for funct ehea_recv_int %d " - "registered", pr->recv_eq->attr.ist1, i); - } snprintf(port->int_aff_name, EHEA_IRQ_NAME_SIZE - 1, "%s-aff", dev->name); @@ -916,41 +940,41 @@ static int ehea_reg_interrupts(struct net_device *dev) ehea_info("irq_handle 0x%X for function qp_aff_irq_handler " "registered", port->qp_eq->attr.ist1); + for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) { pr = &port->port_res[i]; snprintf(pr->int_send_name, EHEA_IRQ_NAME_SIZE - 1, - "%s-send%d", dev->name, i); - ret = ibmebus_request_irq(NULL, pr->send_eq->attr.ist1, - ehea_send_irq_handler, + "%s-queue%d", dev->name, i); + ret = ibmebus_request_irq(NULL, pr->eq->attr.ist1, + ehea_recv_irq_handler, IRQF_DISABLED, pr->int_send_name, pr); if (ret) { - ehea_error("failed registering irq for ehea_send " + ehea_error("failed registering irq for ehea_queue " "port_res_nr:%d, ist=%X", i, - pr->send_eq->attr.ist1); + pr->eq->attr.ist1); goto out_free_req; } if (netif_msg_ifup(port)) - ehea_info("irq_handle 0x%X for function ehea_send_int " - "%d registered", pr->send_eq->attr.ist1, i); + ehea_info("irq_handle 0x%X for function ehea_queue_int " + "%d registered", pr->eq->attr.ist1, i); } out: return ret; + out_free_req: while (--i >= 0) { - u32 ist = port->port_res[i].send_eq->attr.ist1; + u32 ist = port->port_res[i].eq->attr.ist1; ibmebus_free_irq(NULL, ist, &port->port_res[i]); } + out_free_qpeq: ibmebus_free_irq(NULL, port->qp_eq->attr.ist1, port); i = port->num_def_qps; -out_free_seq: - while (--i >= 0) { - u32 ist = port->port_res[i].recv_eq->attr.ist1; - ibmebus_free_irq(NULL, ist, &port->port_res[i]); - } + goto out; + } static void ehea_free_interrupts(struct net_device *dev) @@ -960,21 +984,13 @@ static void ehea_free_interrupts(struct net_device *dev) int i; /* send */ + for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) { pr = &port->port_res[i]; - ibmebus_free_irq(NULL, pr->send_eq->attr.ist1, pr); + ibmebus_free_irq(NULL, pr->eq->attr.ist1, pr); if (netif_msg_intr(port)) ehea_info("free send irq for res %d with handle 0x%X", - i, pr->send_eq->attr.ist1); - } - - /* receive */ - for (i = 0; i < port->num_def_qps; i++) { - pr = &port->port_res[i]; - ibmebus_free_irq(NULL, pr->recv_eq->attr.ist1, pr); - if (netif_msg_intr(port)) - ehea_info("free recv irq for res %d with handle 0x%X", - i, pr->recv_eq->attr.ist1); + i, pr->eq->attr.ist1); } /* associated events */ @@ -1003,8 +1019,13 @@ static int ehea_configure_port(struct ehea_port *port) PXLY_RC_VLAN_FILTER) | EHEA_BMASK_SET(PXLY_RC_JUMBO_FRAME, 1); - for (i = 0; i < port->num_def_qps; i++) - cb0->default_qpn_arr[i] = port->port_res[0].qp->init_attr.qp_nr; + for (i = 0; i < port->num_mcs; i++) + if (use_mcs) + cb0->default_qpn_arr[i] = + port->port_res[i].qp->init_attr.qp_nr; + else + cb0->default_qpn_arr[i] = + port->port_res[0].qp->init_attr.qp_nr; if (netif_msg_ifup(port)) ehea_dump(cb0, sizeof(*cb0), "ehea_configure_port"); @@ -1027,52 +1048,35 @@ out: return ret; } -static int ehea_gen_smrs(struct ehea_port_res *pr) +int 
ehea_gen_smrs(struct ehea_port_res *pr) { - u64 hret; + int ret; struct ehea_adapter *adapter = pr->port->adapter; - hret = ehea_h_register_smr(adapter->handle, adapter->mr.handle, - adapter->mr.vaddr, EHEA_MR_ACC_CTRL, - adapter->pd, &pr->send_mr); - if (hret != H_SUCCESS) + ret = ehea_gen_smr(adapter, &adapter->mr, &pr->send_mr); + if (ret) goto out; - hret = ehea_h_register_smr(adapter->handle, adapter->mr.handle, - adapter->mr.vaddr, EHEA_MR_ACC_CTRL, - adapter->pd, &pr->recv_mr); - if (hret != H_SUCCESS) - goto out_freeres; + ret = ehea_gen_smr(adapter, &adapter->mr, &pr->recv_mr); + if (ret) + goto out_free; return 0; -out_freeres: - hret = ehea_h_free_resource(adapter->handle, pr->send_mr.handle); - if (hret != H_SUCCESS) - ehea_error("failed freeing SMR"); +out_free: + ehea_rem_mr(&pr->send_mr); out: + ehea_error("Generating SMRS failed\n"); return -EIO; } -static int ehea_rem_smrs(struct ehea_port_res *pr) +int ehea_rem_smrs(struct ehea_port_res *pr) { - struct ehea_adapter *adapter = pr->port->adapter; - int ret = 0; - u64 hret; - - hret = ehea_h_free_resource(adapter->handle, pr->send_mr.handle); - if (hret != H_SUCCESS) { - ret = -EIO; - ehea_error("failed freeing send SMR for pr=%p", pr); - } - - hret = ehea_h_free_resource(adapter->handle, pr->recv_mr.handle); - if (hret != H_SUCCESS) { - ret = -EIO; - ehea_error("failed freeing recv SMR for pr=%p", pr); - } - - return ret; + if ((ehea_rem_mr(&pr->send_mr)) + || (ehea_rem_mr(&pr->recv_mr))) + return -EIO; + else + return 0; } static int ehea_init_q_skba(struct ehea_q_skb_arr *q_skba, int max_q_entries) @@ -1103,25 +1107,17 @@ static int ehea_init_port_res(struct ehea_port *port, struct ehea_port_res *pr, memset(pr, 0, sizeof(struct ehea_port_res)); pr->port = port; - spin_lock_init(&pr->send_lock); - spin_lock_init(&pr->recv_lock); spin_lock_init(&pr->xmit_lock); spin_lock_init(&pr->netif_queue); - pr->recv_eq = ehea_create_eq(adapter, eq_type, EHEA_MAX_ENTRIES_EQ, 0); - if (!pr->recv_eq) { - ehea_error("create_eq failed (recv_eq)"); - goto out_free; - } - - pr->send_eq = ehea_create_eq(adapter, eq_type, EHEA_MAX_ENTRIES_EQ, 0); - if (!pr->send_eq) { - ehea_error("create_eq failed (send_eq)"); + pr->eq = ehea_create_eq(adapter, eq_type, EHEA_MAX_ENTRIES_EQ, 0); + if (!pr->eq) { + ehea_error("create_eq failed (eq)"); goto out_free; } pr->recv_cq = ehea_create_cq(adapter, pr_cfg->max_entries_rcq, - pr->recv_eq->fw_handle, + pr->eq->fw_handle, port->logical_port_id); if (!pr->recv_cq) { ehea_error("create_cq failed (cq_recv)"); @@ -1129,7 +1125,7 @@ static int ehea_init_port_res(struct ehea_port *port, struct ehea_port_res *pr, } pr->send_cq = ehea_create_cq(adapter, pr_cfg->max_entries_scq, - pr->send_eq->fw_handle, + pr->eq->fw_handle, port->logical_port_id); if (!pr->send_cq) { ehea_error("create_cq failed (cq_send)"); @@ -1194,11 +1190,20 @@ static int ehea_init_port_res(struct ehea_port *port, struct ehea_port_res *pr, ret = -EIO; goto out_free; } - tasklet_init(&pr->send_comp_task, ehea_send_irq_tasklet, - (unsigned long)pr); + atomic_set(&pr->swqe_avail, init_attr->act_nr_send_wqes - 1); kfree(init_attr); + + pr->d_netdev = alloc_netdev(0, "", ether_setup); + if (!pr->d_netdev) + goto out_free; + pr->d_netdev->priv = pr; + pr->d_netdev->weight = 64; + pr->d_netdev->poll = ehea_poll; + set_bit(__LINK_STATE_START, &pr->d_netdev->state); + strcpy(pr->d_netdev->name, port->netdev->name); + ret = 0; goto out; @@ -1211,8 +1216,7 @@ out_free: ehea_destroy_qp(pr->qp); ehea_destroy_cq(pr->send_cq); ehea_destroy_cq(pr->recv_cq); - 
ehea_destroy_eq(pr->send_eq); - ehea_destroy_eq(pr->recv_eq); + ehea_destroy_eq(pr->eq); out: return ret; } @@ -1221,13 +1225,14 @@ static int ehea_clean_portres(struct ehea_port *port, struct ehea_port_res *pr) { int ret, i; + free_netdev(pr->d_netdev); + ret = ehea_destroy_qp(pr->qp); if (!ret) { ehea_destroy_cq(pr->send_cq); ehea_destroy_cq(pr->recv_cq); - ehea_destroy_eq(pr->send_eq); - ehea_destroy_eq(pr->recv_eq); + ehea_destroy_eq(pr->eq); for (i = 0; i < pr->rq1_skba.len; i++) if (pr->rq1_skba.arr[i]) @@ -1262,8 +1267,8 @@ static int ehea_clean_portres(struct ehea_port *port, struct ehea_port_res *pr) static inline void write_ip_start_end(struct ehea_swqe *swqe, const struct sk_buff *skb) { - swqe->ip_start = (u8)(((u64)skb->nh.iph) - ((u64)skb->data)); - swqe->ip_end = (u8)(swqe->ip_start + skb->nh.iph->ihl * 4 - 1); + swqe->ip_start = skb_network_offset(skb); + swqe->ip_end = (u8)(swqe->ip_start + ip_hdrlen(skb) - 1); } static inline void write_tcp_offset_end(struct ehea_swqe *swqe, @@ -1300,13 +1305,13 @@ static void write_swqe2_TSO(struct sk_buff *skb, /* copy only eth/ip/tcp headers to immediate data and * the rest of skb->data to sg1entry */ - headersize = ETH_HLEN + (skb->nh.iph->ihl * 4) + (skb->h.th->doff * 4); + headersize = ETH_HLEN + ip_hdrlen(skb) + tcp_hdrlen(skb); skb_data_size = skb->len - skb->data_len; if (skb_data_size >= headersize) { /* copy immediate data */ - memcpy(imm_data, skb->data, headersize); + skb_copy_from_linear_data(skb, imm_data, headersize); swqe->immediate_data_length = headersize; if (skb_data_size > headersize) { @@ -1337,7 +1342,7 @@ static void write_swqe2_nonTSO(struct sk_buff *skb, */ if (skb_data_size >= SWQE2_MAX_IMM) { /* copy immediate data */ - memcpy(imm_data, skb->data, SWQE2_MAX_IMM); + skb_copy_from_linear_data(skb, imm_data, SWQE2_MAX_IMM); swqe->immediate_data_length = SWQE2_MAX_IMM; @@ -1350,7 +1355,7 @@ static void write_swqe2_nonTSO(struct sk_buff *skb, swqe->descriptors++; } } else { - memcpy(imm_data, skb->data, skb_data_size); + skb_copy_from_linear_data(skb, imm_data, skb_data_size); swqe->immediate_data_length = skb_data_size; } } @@ -1688,6 +1693,7 @@ static void ehea_xmit2(struct sk_buff *skb, struct net_device *dev, struct ehea_swqe *swqe, u32 lkey) { if (skb->protocol == htons(ETH_P_IP)) { + const struct iphdr *iph = ip_hdr(skb); /* IPv4 */ swqe->tx_control |= EHEA_SWQE_CRC | EHEA_SWQE_IP_CHECKSUM @@ -1697,15 +1703,15 @@ static void ehea_xmit2(struct sk_buff *skb, struct net_device *dev, write_ip_start_end(swqe, skb); - if (skb->nh.iph->protocol == IPPROTO_UDP) { - if ((skb->nh.iph->frag_off & IP_MF) || - (skb->nh.iph->frag_off & IP_OFFSET)) + if (iph->protocol == IPPROTO_UDP) { + if ((iph->frag_off & IP_MF) || + (iph->frag_off & IP_OFFSET)) /* IP fragment, so don't change cs */ swqe->tx_control &= ~EHEA_SWQE_TCP_CHECKSUM; else write_udp_offset_end(swqe, skb); - } else if (skb->nh.iph->protocol == IPPROTO_TCP) { + } else if (iph->protocol == IPPROTO_TCP) { write_tcp_offset_end(swqe, skb); } @@ -1731,10 +1737,11 @@ static void ehea_xmit3(struct sk_buff *skb, struct net_device *dev, int i; if (skb->protocol == htons(ETH_P_IP)) { + const struct iphdr *iph = ip_hdr(skb); /* IPv4 */ write_ip_start_end(swqe, skb); - if (skb->nh.iph->protocol == IPPROTO_TCP) { + if (iph->protocol == IPPROTO_TCP) { swqe->tx_control |= EHEA_SWQE_CRC | EHEA_SWQE_IP_CHECKSUM | EHEA_SWQE_TCP_CHECKSUM @@ -1742,9 +1749,9 @@ static void ehea_xmit3(struct sk_buff *skb, struct net_device *dev, write_tcp_offset_end(swqe, skb); - } else if 
(skb->nh.iph->protocol == IPPROTO_UDP) { - if ((skb->nh.iph->frag_off & IP_MF) || - (skb->nh.iph->frag_off & IP_OFFSET)) + } else if (iph->protocol == IPPROTO_UDP) { + if ((iph->frag_off & IP_MF) || + (iph->frag_off & IP_OFFSET)) /* IP fragment, so don't change cs */ swqe->tx_control |= EHEA_SWQE_CRC | EHEA_SWQE_IMM_DATA_PRESENT; @@ -1770,10 +1777,11 @@ static void ehea_xmit3(struct sk_buff *skb, struct net_device *dev, /* copy (immediate) data */ if (nfrags == 0) { /* data is in a single piece */ - memcpy(imm_data, skb->data, skb->len); + skb_copy_from_linear_data(skb, imm_data, skb->len); } else { /* first copy data from the skb->data buffer ... */ - memcpy(imm_data, skb->data, skb->len - skb->data_len); + skb_copy_from_linear_data(skb, imm_data, + skb->len - skb->data_len); imm_data += skb->len - skb->data_len; /* ... then copy data from the fragments */ @@ -1789,6 +1797,22 @@ static void ehea_xmit3(struct sk_buff *skb, struct net_device *dev, dev_kfree_skb(skb); } +static inline int ehea_hash_skb(struct sk_buff *skb, int num_qps) +{ + struct tcphdr *tcp; + u32 tmp; + + if ((skb->protocol == htons(ETH_P_IP)) && + (skb->nh.iph->protocol == IPPROTO_TCP)) { + tcp = (struct tcphdr*)(skb->nh.raw + (skb->nh.iph->ihl * 4)); + tmp = (tcp->source + (tcp->dest << 16)) % 31; + tmp += skb->nh.iph->daddr % 31; + return tmp % num_qps; + } + else + return 0; +} + static int ehea_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct ehea_port *port = netdev_priv(dev); @@ -1796,9 +1820,17 @@ static int ehea_start_xmit(struct sk_buff *skb, struct net_device *dev) unsigned long flags; u32 lkey; int swqe_index; - struct ehea_port_res *pr = &port->port_res[0]; + struct ehea_port_res *pr; + + pr = &port->port_res[ehea_hash_skb(skb, port->num_tx_qps)]; - spin_lock(&pr->xmit_lock); + if (!spin_trylock(&pr->xmit_lock)) + return NETDEV_TX_BUSY; + + if (pr->queue_stopped) { + spin_unlock(&pr->xmit_lock); + return NETDEV_TX_BUSY; + } swqe = ehea_get_swqe(pr->qp, &swqe_index); memset(swqe, 0, SWQE_HEADER_SIZE); @@ -1821,6 +1853,7 @@ static int ehea_start_xmit(struct sk_buff *skb, struct net_device *dev) swqe->wr_id = EHEA_BMASK_SET(EHEA_WR_ID_TYPE, EHEA_SWQE2_TYPE) | EHEA_BMASK_SET(EHEA_WR_ID_COUNT, pr->swqe_id_counter) + | EHEA_BMASK_SET(EHEA_WR_ID_REFILL, 1) | EHEA_BMASK_SET(EHEA_WR_ID_INDEX, pr->sq_skba.index); pr->sq_skba.arr[pr->sq_skba.index] = skb; @@ -1829,14 +1862,7 @@ static int ehea_start_xmit(struct sk_buff *skb, struct net_device *dev) lkey = pr->send_mr.lkey; ehea_xmit2(skb, dev, swqe, lkey); - - if (pr->swqe_count >= (EHEA_SIG_IV_LONG - 1)) { - swqe->wr_id |= EHEA_BMASK_SET(EHEA_WR_ID_REFILL, - EHEA_SIG_IV_LONG); - swqe->tx_control |= EHEA_SWQE_SIGNALLED_COMPLETION; - pr->swqe_count = 0; - } else - pr->swqe_count += 1; + swqe->tx_control |= EHEA_SWQE_SIGNALLED_COMPLETION; } pr->swqe_id_counter += 1; @@ -1856,6 +1882,7 @@ static int ehea_start_xmit(struct sk_buff *skb, struct net_device *dev) if (unlikely(atomic_read(&pr->swqe_avail) <= 1)) { spin_lock_irqsave(&pr->netif_queue, flags); if (unlikely(atomic_read(&pr->swqe_avail) <= 1)) { + pr->p_stats.queue_stopped++; netif_stop_queue(dev); pr->queue_stopped = 1; } @@ -2057,7 +2084,7 @@ static int ehea_port_res_setup(struct ehea_port *port, int def_qps, } pr_cfg.max_entries_rcq = rq1_entries + rq2_entries + rq3_entries; - pr_cfg.max_entries_scq = sq_entries; + pr_cfg.max_entries_scq = sq_entries * 2; pr_cfg.max_entries_sq = sq_entries; pr_cfg.max_entries_rq1 = rq1_entries; pr_cfg.max_entries_rq2 = rq2_entries; @@ -2106,6 +2133,28 @@ static 
int ehea_clean_all_portres(struct ehea_port *port) return ret; } +static void ehea_remove_adapter_mr (struct ehea_adapter *adapter) +{ + int i; + + for (i=0; i < EHEA_MAX_PORTS; i++) + if (adapter->port[i]) + return; + + ehea_rem_mr(&adapter->mr); +} + +static int ehea_add_adapter_mr (struct ehea_adapter *adapter) +{ + int i; + + for (i=0; i < EHEA_MAX_PORTS; i++) + if (adapter->port[i]) + return 0; + + return ehea_reg_kernel_mr(adapter, &adapter->mr); +} + static int ehea_up(struct net_device *dev) { int ret, i; @@ -2205,8 +2254,10 @@ static int ehea_down(struct net_device *dev) ehea_drop_multicast_list(dev); ehea_free_interrupts(dev); - for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) - tasklet_kill(&port->port_res[i].send_comp_task); + for (i = 0; i < port->num_def_qps; i++) + while (test_bit(__LINK_STATE_RX_SCHED, + &port->port_res[i].d_netdev->state)) + msleep(1); ehea_broadcast_reg_helper(port, H_DEREG_BCMC); ret = ehea_clean_all_portres(port); @@ -2273,8 +2324,6 @@ static void ehea_tx_watchdog(struct net_device *dev) int ehea_sense_adapter_attr(struct ehea_adapter *adapter) { struct hcp_query_ehea *cb; - struct device_node *lhea_dn = NULL; - struct device_node *eth_dn = NULL; u64 hret; int ret; @@ -2291,18 +2340,6 @@ int ehea_sense_adapter_attr(struct ehea_adapter *adapter) goto out_herr; } - /* Determine the number of available logical ports - * by counting the child nodes of the lhea OFDT entry - */ - adapter->num_ports = 0; - lhea_dn = of_find_node_by_name(lhea_dn, "lhea"); - do { - eth_dn = of_get_next_child(lhea_dn, eth_dn); - if (eth_dn) - adapter->num_ports++; - } while ( eth_dn ); - of_node_put(lhea_dn); - adapter->max_mc_mac = cb->max_mc_mac - 1; ret = 0; @@ -2312,79 +2349,188 @@ out: return ret; } -static int ehea_setup_single_port(struct ehea_port *port, - struct device_node *dn) +int ehea_get_jumboframe_status(struct ehea_port *port, int *jumbo) { - int ret; - u64 hret; - struct net_device *dev = port->netdev; - struct ehea_adapter *adapter = port->adapter; struct hcp_ehea_port_cb4 *cb4; - u32 *dn_log_port_id; - int jumbo = 0; - - sema_init(&port->port_lock, 1); - port->state = EHEA_PORT_DOWN; - port->sig_comp_iv = sq_entries / 10; - - if (!dn) { - ehea_error("bad device node: dn=%p", dn); - ret = -EINVAL; - goto out; - } - - port->of_dev_node = dn; - - /* Determine logical port id */ - dn_log_port_id = (u32*)get_property(dn, "ibm,hea-port-no", NULL); - - if (!dn_log_port_id) { - ehea_error("bad device node: dn_log_port_id=%p", - dn_log_port_id); - ret = -EINVAL; - goto out; - } - port->logical_port_id = *dn_log_port_id; - - port->mc_list = kzalloc(sizeof(struct ehea_mc_list), GFP_KERNEL); - if (!port->mc_list) { - ret = -ENOMEM; - goto out; - } - - INIT_LIST_HEAD(&port->mc_list->list); + u64 hret; + int ret = 0; - ret = ehea_sense_port_attr(port); - if (ret) - goto out; + *jumbo = 0; - /* Enable Jumbo frames */ + /* (Try to) enable *jumbo frames */ cb4 = kzalloc(PAGE_SIZE, GFP_KERNEL); if (!cb4) { ehea_error("no mem for cb4"); + ret = -ENOMEM; + goto out; } else { - hret = ehea_h_query_ehea_port(adapter->handle, + hret = ehea_h_query_ehea_port(port->adapter->handle, port->logical_port_id, H_PORT_CB4, H_PORT_CB4_JUMBO, cb4); - if (hret == H_SUCCESS) { if (cb4->jumbo_frame) - jumbo = 1; + *jumbo = 1; else { cb4->jumbo_frame = 1; - hret = ehea_h_modify_ehea_port(adapter->handle, + hret = ehea_h_modify_ehea_port(port->adapter-> + handle, port-> - logical_port_id, + logical_port_id, H_PORT_CB4, H_PORT_CB4_JUMBO, cb4); if (hret == H_SUCCESS) - jumbo = 1; + 
*jumbo = 1; } - } + } else + ret = -EINVAL; + kfree(cb4); } +out: + return ret; +} + +static ssize_t ehea_show_port_id(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct ehea_port *port = container_of(dev, struct ehea_port, ofdev.dev); + return sprintf(buf, "0x%X", port->logical_port_id); +} + +static DEVICE_ATTR(log_port_id, S_IRUSR | S_IRGRP | S_IROTH, ehea_show_port_id, + NULL); + +static void __devinit logical_port_release(struct device *dev) +{ + struct ehea_port *port = container_of(dev, struct ehea_port, ofdev.dev); + of_node_put(port->ofdev.node); +} + +static int ehea_driver_sysfs_add(struct device *dev, + struct device_driver *driver) +{ + int ret; + + ret = sysfs_create_link(&driver->kobj, &dev->kobj, + kobject_name(&dev->kobj)); + if (ret == 0) { + ret = sysfs_create_link(&dev->kobj, &driver->kobj, + "driver"); + if (ret) + sysfs_remove_link(&driver->kobj, + kobject_name(&dev->kobj)); + } + return ret; +} + +static void ehea_driver_sysfs_remove(struct device *dev, + struct device_driver *driver) +{ + struct device_driver *drv = driver; + + if (drv) { + sysfs_remove_link(&drv->kobj, kobject_name(&dev->kobj)); + sysfs_remove_link(&dev->kobj, "driver"); + } +} + +static struct device *ehea_register_port(struct ehea_port *port, + struct device_node *dn) +{ + int ret; + + port->ofdev.node = of_node_get(dn); + port->ofdev.dev.parent = &port->adapter->ebus_dev->ofdev.dev; + port->ofdev.dev.bus = &ibmebus_bus_type; + + sprintf(port->ofdev.dev.bus_id, "port%d", port_name_cnt++); + port->ofdev.dev.release = logical_port_release; + + ret = of_device_register(&port->ofdev); + if (ret) { + ehea_error("failed to register device. ret=%d", ret); + goto out; + } + + ret = device_create_file(&port->ofdev.dev, &dev_attr_log_port_id); + if (ret) { + ehea_error("failed to register attributes, ret=%d", ret); + goto out_unreg_of_dev; + } + + ret = ehea_driver_sysfs_add(&port->ofdev.dev, &ehea_driver.driver); + if (ret) { + ehea_error("failed to register sysfs driver link"); + goto out_rem_dev_file; + } + + return &port->ofdev.dev; + +out_rem_dev_file: + device_remove_file(&port->ofdev.dev, &dev_attr_log_port_id); +out_unreg_of_dev: + of_device_unregister(&port->ofdev); +out: + return NULL; +} + +static void ehea_unregister_port(struct ehea_port *port) +{ + ehea_driver_sysfs_remove(&port->ofdev.dev, &ehea_driver.driver); + device_remove_file(&port->ofdev.dev, &dev_attr_log_port_id); + of_device_unregister(&port->ofdev); +} + +struct ehea_port *ehea_setup_single_port(struct ehea_adapter *adapter, + u32 logical_port_id, + struct device_node *dn) +{ + int ret; + struct net_device *dev; + struct ehea_port *port; + struct device *port_dev; + int jumbo; + + /* allocate memory for the port structures */ + dev = alloc_etherdev(sizeof(struct ehea_port)); + + if (!dev) { + ehea_error("no mem for net_device"); + ret = -ENOMEM; + goto out_err; + } + + port = netdev_priv(dev); + + sema_init(&port->port_lock, 1); + port->state = EHEA_PORT_DOWN; + port->sig_comp_iv = sq_entries / 10; + + port->adapter = adapter; + port->netdev = dev; + port->logical_port_id = logical_port_id; + + port->msg_enable = netif_msg_init(msg_level, EHEA_MSG_DEFAULT); + + port->mc_list = kzalloc(sizeof(struct ehea_mc_list), GFP_KERNEL); + if (!port->mc_list) { + ret = -ENOMEM; + goto out_free_ethdev; + } + + INIT_LIST_HEAD(&port->mc_list->list); + + ret = ehea_sense_port_attr(port); + if (ret) + goto out_free_mc_list; + + port_dev = ehea_register_port(port, dn); + if (!port_dev) + goto out_free_mc_list; + + 
SET_NETDEV_DEV(dev, port_dev); /* initialize net_device structure */ SET_MODULE_OWNER(dev); @@ -2417,84 +2563,225 @@ static int ehea_setup_single_port(struct ehea_port *port, ret = register_netdev(dev); if (ret) { ehea_error("register_netdev failed. ret=%d", ret); - goto out_free; + goto out_unreg_port; } + ret = ehea_get_jumboframe_status(port, &jumbo); + if (ret) + ehea_error("failed determining jumbo frame status for %s", + port->netdev->name); + ehea_info("%s: Jumbo frames are %sabled", dev->name, jumbo == 1 ? "en" : "dis"); - port->netdev = dev; - ret = 0; - goto out; + return port; -out_free: +out_unreg_port: + ehea_unregister_port(port); + +out_free_mc_list: kfree(port->mc_list); -out: - return ret; + +out_free_ethdev: + free_netdev(dev); + +out_err: + ehea_error("setting up logical port with id=%d failed, ret=%d", + logical_port_id, ret); + return NULL; +} + +static void ehea_shutdown_single_port(struct ehea_port *port) +{ + unregister_netdev(port->netdev); + ehea_unregister_port(port); + kfree(port->mc_list); + free_netdev(port->netdev); } static int ehea_setup_ports(struct ehea_adapter *adapter) { - int ret; - int port_setup_ok = 0; + struct device_node *lhea_dn; + struct device_node *eth_dn = NULL; + + u32 *dn_log_port_id; + int i = 0; + + lhea_dn = adapter->ebus_dev->ofdev.node; + while ((eth_dn = of_get_next_child(lhea_dn, eth_dn))) { + + dn_log_port_id = (u32*)get_property(eth_dn, "ibm,hea-port-no", + NULL); + if (!dn_log_port_id) { + ehea_error("bad device node: eth_dn name=%s", + eth_dn->full_name); + continue; + } + + if (ehea_add_adapter_mr(adapter)) { + ehea_error("creating MR failed"); + of_node_put(eth_dn); + return -EIO; + } + + adapter->port[i] = ehea_setup_single_port(adapter, + *dn_log_port_id, + eth_dn); + if (adapter->port[i]) + ehea_info("%s -> logical port id #%d", + adapter->port[i]->netdev->name, + *dn_log_port_id); + else + ehea_remove_adapter_mr(adapter); + + i++; + }; + + return 0; +} + +static struct device_node *ehea_get_eth_dn(struct ehea_adapter *adapter, + u32 logical_port_id) +{ + struct device_node *lhea_dn; + struct device_node *eth_dn = NULL; + u32 *dn_log_port_id; + + lhea_dn = adapter->ebus_dev->ofdev.node; + while ((eth_dn = of_get_next_child(lhea_dn, eth_dn))) { + + dn_log_port_id = (u32*)get_property(eth_dn, "ibm,hea-port-no", + NULL); + if (dn_log_port_id) + if (*dn_log_port_id == logical_port_id) + return eth_dn; + }; + + return NULL; +} + +static ssize_t ehea_probe_port(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct ehea_adapter *adapter = dev->driver_data; struct ehea_port *port; - struct device_node *dn = NULL; - struct net_device *dev; + struct device_node *eth_dn = NULL; int i; - /* get port properties for all ports */ - for (i = 0; i < adapter->num_ports; i++) { + u32 logical_port_id; - if (adapter->port[i]) - continue; /* port already up and running */ + sscanf(buf, "%X", &logical_port_id); - /* allocate memory for the port structures */ - dev = alloc_etherdev(sizeof(struct ehea_port)); + port = ehea_get_port(adapter, logical_port_id); - if (!dev) { - ehea_error("no mem for net_device"); - break; - } + if (port) { + ehea_info("adding port with logical port id=%d failed. 
port " + "already configured as %s.", logical_port_id, + port->netdev->name); + return -EINVAL; + } - port = netdev_priv(dev); - port->adapter = adapter; - port->netdev = dev; - adapter->port[i] = port; - port->msg_enable = netif_msg_init(msg_level, EHEA_MSG_DEFAULT); + eth_dn = ehea_get_eth_dn(adapter, logical_port_id); - dn = of_find_node_by_name(dn, "ethernet"); - ret = ehea_setup_single_port(port, dn); - if (ret) { - /* Free mem for this port struct. The others will be - processed on rollback */ - free_netdev(dev); - adapter->port[i] = NULL; - ehea_error("eHEA port %d setup failed, ret=%d", i, ret); - } + if (!eth_dn) { + ehea_info("no logical port with id %d found", logical_port_id); + return -EINVAL; } - of_node_put(dn); + if (ehea_add_adapter_mr(adapter)) { + ehea_error("creating MR failed"); + return -EIO; + } - /* Check for succesfully set up ports */ - for (i = 0; i < adapter->num_ports; i++) - if (adapter->port[i]) - port_setup_ok++; + port = ehea_setup_single_port(adapter, logical_port_id, eth_dn); - if (port_setup_ok) - ret = 0; /* At least some ports are setup correctly */ - else - ret = -EINVAL; + of_node_put(eth_dn); + + if (port) { + for (i=0; i < EHEA_MAX_PORTS; i++) + if (!adapter->port[i]) { + adapter->port[i] = port; + break; + } + + ehea_info("added %s (logical port id=%d)", port->netdev->name, + logical_port_id); + } else { + ehea_remove_adapter_mr(adapter); + return -EIO; + } + + return (ssize_t) count; +} + +static ssize_t ehea_remove_port(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct ehea_adapter *adapter = dev->driver_data; + struct ehea_port *port; + int i; + u32 logical_port_id; + + sscanf(buf, "%X", &logical_port_id); + + port = ehea_get_port(adapter, logical_port_id); + + if (port) { + ehea_info("removed %s (logical port id=%d)", port->netdev->name, + logical_port_id); + + ehea_shutdown_single_port(port); + + for (i=0; i < EHEA_MAX_PORTS; i++) + if (adapter->port[i] == port) { + adapter->port[i] = NULL; + break; + } + } else { + ehea_error("removing port with logical port id=%d failed. 
port " + "not configured.", logical_port_id); + return -EINVAL; + } + + ehea_remove_adapter_mr(adapter); + + return (ssize_t) count; +} + +static DEVICE_ATTR(probe_port, S_IWUSR, NULL, ehea_probe_port); +static DEVICE_ATTR(remove_port, S_IWUSR, NULL, ehea_remove_port); +int ehea_create_device_sysfs(struct ibmebus_dev *dev) +{ + int ret = device_create_file(&dev->ofdev.dev, &dev_attr_probe_port); + if (ret) + goto out; + + ret = device_create_file(&dev->ofdev.dev, &dev_attr_remove_port); +out: return ret; } -static int __devinit ehea_probe(struct ibmebus_dev *dev, - const struct of_device_id *id) +void ehea_remove_device_sysfs(struct ibmebus_dev *dev) +{ + device_remove_file(&dev->ofdev.dev, &dev_attr_probe_port); + device_remove_file(&dev->ofdev.dev, &dev_attr_remove_port); +} + +static int __devinit ehea_probe_adapter(struct ibmebus_dev *dev, + const struct of_device_id *id) { struct ehea_adapter *adapter; u64 *adapter_handle; int ret; + if (!dev || !dev->ofdev.node) { + ehea_error("Invalid ibmebus device probed"); + return -EINVAL; + } + adapter = kzalloc(sizeof(*adapter), GFP_KERNEL); if (!adapter) { ret = -ENOMEM; @@ -2502,6 +2789,8 @@ static int __devinit ehea_probe(struct ibmebus_dev *dev, goto out; } + adapter->ebus_dev = dev; + adapter_handle = (u64*)get_property(dev->ofdev.node, "ibm,hea-handle", NULL); if (adapter_handle) @@ -2518,26 +2807,21 @@ static int __devinit ehea_probe(struct ibmebus_dev *dev, dev->ofdev.dev.driver_data = adapter; - ret = ehea_reg_mr_adapter(adapter); - if (ret) { - dev_err(&dev->ofdev.dev, "reg_mr_adapter failed\n"); - goto out_free_ad; - } /* initialize adapter and ports */ /* get adapter properties */ ret = ehea_sense_adapter_attr(adapter); if (ret) { dev_err(&dev->ofdev.dev, "sense_adapter_attr failed: %d", ret); - goto out_free_res; + goto out_free_ad; } - dev_info(&dev->ofdev.dev, "%d eHEA ports found\n", adapter->num_ports); adapter->neq = ehea_create_eq(adapter, EHEA_NEQ, EHEA_MAX_ENTRIES_EQ, 1); if (!adapter->neq) { + ret = -EIO; dev_err(&dev->ofdev.dev, "NEQ creation failed"); - goto out_free_res; + goto out_free_ad; } tasklet_init(&adapter->neq_tasklet, ehea_neq_tasklet, @@ -2552,18 +2836,27 @@ static int __devinit ehea_probe(struct ibmebus_dev *dev, } adapter->ehea_wq = create_workqueue("ehea_wq"); - if (!adapter->ehea_wq) + if (!adapter->ehea_wq) { + ret = -EIO; goto out_free_irq; + } + + ret = ehea_create_device_sysfs(dev); + if (ret) + goto out_kill_wq; ret = ehea_setup_ports(adapter); if (ret) { dev_err(&dev->ofdev.dev, "setup_ports failed"); - goto out_kill_wq; + goto out_rem_dev_sysfs; } ret = 0; goto out; +out_rem_dev_sysfs: + ehea_remove_device_sysfs(dev); + out_kill_wq: destroy_workqueue(adapter->ehea_wq); @@ -2573,45 +2866,32 @@ out_free_irq: out_kill_eq: ehea_destroy_eq(adapter->neq); -out_free_res: - ehea_h_free_resource(adapter->handle, adapter->mr.handle); - out_free_ad: kfree(adapter); out: return ret; } -static void ehea_shutdown_single_port(struct ehea_port *port) -{ - unregister_netdev(port->netdev); - kfree(port->mc_list); - free_netdev(port->netdev); -} - static int __devexit ehea_remove(struct ibmebus_dev *dev) { struct ehea_adapter *adapter = dev->ofdev.dev.driver_data; - u64 hret; int i; - for (i = 0; i < adapter->num_ports; i++) + for (i = 0; i < EHEA_MAX_PORTS; i++) if (adapter->port[i]) { ehea_shutdown_single_port(adapter->port[i]); adapter->port[i] = NULL; } + + ehea_remove_device_sysfs(dev); + destroy_workqueue(adapter->ehea_wq); ibmebus_free_irq(NULL, adapter->neq->attr.ist1, adapter); 
tasklet_kill(&adapter->neq_tasklet); ehea_destroy_eq(adapter->neq); - - hret = ehea_h_free_resource(adapter->handle, adapter->mr.handle); - if (hret) { - dev_err(&dev->ofdev.dev, "free_resource_mr failed"); - return -EIO; - } + ehea_remove_adapter_mr(adapter); kfree(adapter); return 0; } @@ -2644,21 +2924,6 @@ static int check_module_parm(void) return ret; } -static struct of_device_id ehea_device_table[] = { - { - .name = "lhea", - .compatible = "IBM,lhea", - }, - {}, -}; - -static struct ibmebus_driver ehea_driver = { - .name = "ehea", - .id_table = ehea_device_table, - .probe = ehea_probe, - .remove = ehea_remove, -}; - int __init ehea_module_init(void) { int ret; diff --git a/drivers/net/ehea/ehea_phyp.c b/drivers/net/ehea/ehea_phyp.c index bc3c0054726..95c4a7f9cc8 100644 --- a/drivers/net/ehea/ehea_phyp.c +++ b/drivers/net/ehea/ehea_phyp.c @@ -478,12 +478,14 @@ u64 ehea_h_disable_and_get_hea(const u64 adapter_handle, const u64 qp_handle) 0, 0, 0, 0, 0, 0); /* R7-R12 */ } -u64 ehea_h_free_resource(const u64 adapter_handle, const u64 res_handle) +u64 ehea_h_free_resource(const u64 adapter_handle, const u64 res_handle, + u64 force_bit) { return ehea_plpar_hcall_norets(H_FREE_RESOURCE, adapter_handle, /* R4 */ res_handle, /* R5 */ - 0, 0, 0, 0, 0); /* R6-R10 */ + force_bit, + 0, 0, 0, 0); /* R7-R10 */ } u64 ehea_h_alloc_resource_mr(const u64 adapter_handle, const u64 vaddr, diff --git a/drivers/net/ehea/ehea_phyp.h b/drivers/net/ehea/ehea_phyp.h index 90acddb068a..d17a45a7e71 100644 --- a/drivers/net/ehea/ehea_phyp.h +++ b/drivers/net/ehea/ehea_phyp.h @@ -414,7 +414,11 @@ u64 ehea_h_register_rpage(const u64 adapter_handle, u64 ehea_h_disable_and_get_hea(const u64 adapter_handle, const u64 qp_handle); -u64 ehea_h_free_resource(const u64 adapter_handle, const u64 res_handle); +#define FORCE_FREE 1 +#define NORMAL_FREE 0 + +u64 ehea_h_free_resource(const u64 adapter_handle, const u64 res_handle, + u64 force_bit); u64 ehea_h_alloc_resource_mr(const u64 adapter_handle, const u64 vaddr, const u64 length, const u32 access_ctrl, diff --git a/drivers/net/ehea/ehea_qmr.c b/drivers/net/ehea/ehea_qmr.c index 96ff3b67999..f24a8862977 100644 --- a/drivers/net/ehea/ehea_qmr.c +++ b/drivers/net/ehea/ehea_qmr.c @@ -197,7 +197,7 @@ out_kill_hwq: hw_queue_dtor(&cq->hw_queue); out_freeres: - ehea_h_free_resource(adapter->handle, cq->fw_handle); + ehea_h_free_resource(adapter->handle, cq->fw_handle, FORCE_FREE); out_freemem: kfree(cq); @@ -206,25 +206,38 @@ out_nomem: return NULL; } -int ehea_destroy_cq(struct ehea_cq *cq) +u64 ehea_destroy_cq_res(struct ehea_cq *cq, u64 force) { - u64 adapter_handle, hret; + u64 hret; + u64 adapter_handle = cq->adapter->handle; + + /* deregister all previous registered pages */ + hret = ehea_h_free_resource(adapter_handle, cq->fw_handle, force); + if (hret != H_SUCCESS) + return hret; + + hw_queue_dtor(&cq->hw_queue); + kfree(cq); + + return hret; +} +int ehea_destroy_cq(struct ehea_cq *cq) +{ + u64 hret; if (!cq) return 0; - adapter_handle = cq->adapter->handle; + if ((hret = ehea_destroy_cq_res(cq, NORMAL_FREE)) == H_R_STATE) { + ehea_error_data(cq->adapter, cq->fw_handle); + hret = ehea_destroy_cq_res(cq, FORCE_FREE); + } - /* deregister all previous registered pages */ - hret = ehea_h_free_resource(adapter_handle, cq->fw_handle); if (hret != H_SUCCESS) { ehea_error("destroy CQ failed"); return -EIO; } - hw_queue_dtor(&cq->hw_queue); - kfree(cq); - return 0; } @@ -297,7 +310,7 @@ out_kill_hwq: hw_queue_dtor(&eq->hw_queue); out_freeres: - 
ehea_h_free_resource(adapter->handle, eq->fw_handle); + ehea_h_free_resource(adapter->handle, eq->fw_handle, FORCE_FREE); out_freemem: kfree(eq); @@ -316,27 +329,41 @@ struct ehea_eqe *ehea_poll_eq(struct ehea_eq *eq) return eqe; } -int ehea_destroy_eq(struct ehea_eq *eq) +u64 ehea_destroy_eq_res(struct ehea_eq *eq, u64 force) { u64 hret; unsigned long flags; - if (!eq) - return 0; - spin_lock_irqsave(&eq->spinlock, flags); - hret = ehea_h_free_resource(eq->adapter->handle, eq->fw_handle); + hret = ehea_h_free_resource(eq->adapter->handle, eq->fw_handle, force); spin_unlock_irqrestore(&eq->spinlock, flags); - if (hret != H_SUCCESS) { - ehea_error("destroy_eq failed"); - return -EIO; - } + if (hret != H_SUCCESS) + return hret; hw_queue_dtor(&eq->hw_queue); kfree(eq); + return hret; +} + +int ehea_destroy_eq(struct ehea_eq *eq) +{ + u64 hret; + if (!eq) + return 0; + + if ((hret = ehea_destroy_eq_res(eq, NORMAL_FREE)) == H_R_STATE) { + ehea_error_data(eq->adapter, eq->fw_handle); + hret = ehea_destroy_eq_res(eq, FORCE_FREE); + } + + if (hret != H_SUCCESS) { + ehea_error("destroy EQ failed"); + return -EIO; + } + return 0; } @@ -471,41 +498,56 @@ out_kill_hwsq: out_freeres: ehea_h_disable_and_get_hea(adapter->handle, qp->fw_handle); - ehea_h_free_resource(adapter->handle, qp->fw_handle); + ehea_h_free_resource(adapter->handle, qp->fw_handle, FORCE_FREE); out_freemem: kfree(qp); return NULL; } -int ehea_destroy_qp(struct ehea_qp *qp) +u64 ehea_destroy_qp_res(struct ehea_qp *qp, u64 force) { - u64 hret; - struct ehea_qp_init_attr *qp_attr = &qp->init_attr; + u64 hret; + struct ehea_qp_init_attr *qp_attr = &qp->init_attr; - if (!qp) - return 0; - ehea_h_disable_and_get_hea(qp->adapter->handle, qp->fw_handle); - hret = ehea_h_free_resource(qp->adapter->handle, qp->fw_handle); - if (hret != H_SUCCESS) { - ehea_error("destroy_qp failed"); - return -EIO; - } + ehea_h_disable_and_get_hea(qp->adapter->handle, qp->fw_handle); + hret = ehea_h_free_resource(qp->adapter->handle, qp->fw_handle, force); + if (hret != H_SUCCESS) + return hret; - hw_queue_dtor(&qp->hw_squeue); - hw_queue_dtor(&qp->hw_rqueue1); + hw_queue_dtor(&qp->hw_squeue); + hw_queue_dtor(&qp->hw_rqueue1); - if (qp_attr->rq_count > 1) - hw_queue_dtor(&qp->hw_rqueue2); - if (qp_attr->rq_count > 2) - hw_queue_dtor(&qp->hw_rqueue3); - kfree(qp); + if (qp_attr->rq_count > 1) + hw_queue_dtor(&qp->hw_rqueue2); + if (qp_attr->rq_count > 2) + hw_queue_dtor(&qp->hw_rqueue3); + kfree(qp); - return 0; + return hret; } -int ehea_reg_mr_adapter(struct ehea_adapter *adapter) +int ehea_destroy_qp(struct ehea_qp *qp) +{ + u64 hret; + if (!qp) + return 0; + + if ((hret = ehea_destroy_qp_res(qp, NORMAL_FREE)) == H_R_STATE) { + ehea_error_data(qp->adapter, qp->fw_handle); + hret = ehea_destroy_qp_res(qp, FORCE_FREE); + } + + if (hret != H_SUCCESS) { + ehea_error("destroy QP failed"); + return -EIO; + } + + return 0; +} + +int ehea_reg_kernel_mr(struct ehea_adapter *adapter, struct ehea_mr *mr) { int i, k, ret; u64 hret, pt_abs, start, end, nr_pages; @@ -526,14 +568,14 @@ int ehea_reg_mr_adapter(struct ehea_adapter *adapter) hret = ehea_h_alloc_resource_mr(adapter->handle, start, end - start, acc_ctrl, adapter->pd, - &adapter->mr.handle, &adapter->mr.lkey); + &mr->handle, &mr->lkey); if (hret != H_SUCCESS) { ehea_error("alloc_resource_mr failed"); ret = -EIO; goto out; } - adapter->mr.vaddr = KERNELBASE; + mr->vaddr = KERNELBASE; k = 0; while (nr_pages > 0) { @@ -545,7 +587,7 @@ int ehea_reg_mr_adapter(struct ehea_adapter *adapter) EHEA_PAGESIZE))); hret = 
ehea_h_register_rpage_mr(adapter->handle, - adapter->mr.handle, 0, + mr->handle, 0, 0, (u64)pt_abs, num_pages); nr_pages -= num_pages; @@ -554,34 +596,68 @@ int ehea_reg_mr_adapter(struct ehea_adapter *adapter) (k * EHEA_PAGESIZE))); hret = ehea_h_register_rpage_mr(adapter->handle, - adapter->mr.handle, 0, + mr->handle, 0, 0, abs_adr,1); nr_pages--; } if ((hret != H_SUCCESS) && (hret != H_PAGE_REGISTERED)) { ehea_h_free_resource(adapter->handle, - adapter->mr.handle); - ehea_error("register_rpage_mr failed: hret = %lX", - hret); + mr->handle, FORCE_FREE); + ehea_error("register_rpage_mr failed"); ret = -EIO; goto out; } } if (hret != H_SUCCESS) { - ehea_h_free_resource(adapter->handle, adapter->mr.handle); - ehea_error("register_rpage failed for last page: hret = %lX", - hret); + ehea_h_free_resource(adapter->handle, mr->handle, + FORCE_FREE); + ehea_error("register_rpage failed for last page"); ret = -EIO; goto out; } + + mr->adapter = adapter; ret = 0; out: kfree(pt); return ret; } +int ehea_rem_mr(struct ehea_mr *mr) +{ + u64 hret; + + if (!mr || !mr->adapter) + return -EINVAL; + + hret = ehea_h_free_resource(mr->adapter->handle, mr->handle, + FORCE_FREE); + if (hret != H_SUCCESS) { + ehea_error("destroy MR failed"); + return -EIO; + } + + return 0; +} + +int ehea_gen_smr(struct ehea_adapter *adapter, struct ehea_mr *old_mr, + struct ehea_mr *shared_mr) +{ + u64 hret; + + hret = ehea_h_register_smr(adapter->handle, old_mr->handle, + old_mr->vaddr, EHEA_MR_ACC_CTRL, + adapter->pd, shared_mr); + if (hret != H_SUCCESS) + return -EIO; + + shared_mr->adapter = adapter; + + return 0; +} + void print_error_data(u64 *data) { int length; @@ -597,6 +673,14 @@ void print_error_data(u64 *data) ehea_error("QP (resource=%lX) state: AER=0x%lX, AERR=0x%lX, " "port=%lX", resource, data[6], data[12], data[22]); + if (type == 0x4) /* Completion Queue */ + ehea_error("CQ (resource=%lX) state: AER=0x%lX", resource, + data[6]); + + if (type == 0x3) /* Event Queue */ + ehea_error("EQ (resource=%lX) state: AER=0x%lX", resource, + data[6]); + ehea_dump(data, length, "error data"); } diff --git a/drivers/net/ehea/ehea_qmr.h b/drivers/net/ehea/ehea_qmr.h index 1ff60983504..c0eb3e03a10 100644 --- a/drivers/net/ehea/ehea_qmr.h +++ b/drivers/net/ehea/ehea_qmr.h @@ -142,6 +142,8 @@ struct ehea_rwqe { #define EHEA_CQE_STAT_ERR_MASK 0x721F #define EHEA_CQE_STAT_FAT_ERR_MASK 0x1F #define EHEA_CQE_STAT_ERR_TCP 0x4000 +#define EHEA_CQE_STAT_ERR_IP 0x2000 +#define EHEA_CQE_STAT_ERR_CRC 0x1000 struct ehea_cqe { u64 wr_id; /* work request ID from WQE */ @@ -320,6 +322,11 @@ static inline struct ehea_cqe *ehea_poll_rq1(struct ehea_qp *qp, int *wqe_index) return hw_qeit_get_valid(queue); } +static inline void ehea_inc_cq(struct ehea_cq *cq) +{ + hw_qeit_inc(&cq->hw_queue); +} + static inline void ehea_inc_rq1(struct ehea_qp *qp) { hw_qeit_inc(&qp->hw_rqueue1); @@ -327,7 +334,7 @@ static inline void ehea_inc_rq1(struct ehea_qp *qp) static inline struct ehea_cqe *ehea_poll_cq(struct ehea_cq *my_cq) { - return hw_qeit_get_inc_valid(&my_cq->hw_queue); + return hw_qeit_get_valid(&my_cq->hw_queue); } #define EHEA_CQ_REGISTER_ORIG 0 @@ -356,7 +363,12 @@ struct ehea_qp *ehea_create_qp(struct ehea_adapter * adapter, u32 pd, int ehea_destroy_qp(struct ehea_qp *qp); -int ehea_reg_mr_adapter(struct ehea_adapter *adapter); +int ehea_reg_kernel_mr(struct ehea_adapter *adapter, struct ehea_mr *mr); + +int ehea_gen_smr(struct ehea_adapter *adapter, struct ehea_mr *old_mr, + struct ehea_mr *shared_mr); + +int ehea_rem_mr(struct ehea_mr *mr); 
void ehea_error_data(struct ehea_adapter *adapter, u64 res_handle);
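
The most user-visible change in ehea_main.c is multi-queue transmit: ehea_start_xmit() now selects a port resource with ehea_hash_skb() instead of always using port_res[0]. The sketch below is a hedged, stand-alone rendering of that queue-selection hash using the ip_hdr()/ip_hdrlen() accessors that other hunks of this patch already migrate to; the helper name ehea_hash_skb_sketch and the way the TCP header is located are illustrative assumptions, while the port/address mixing itself follows the hunk above.

```c
#include <linux/if_ether.h>
#include <linux/ip.h>
#include <linux/skbuff.h>
#include <linux/tcp.h>
#include <net/ip.h>

/*
 * Illustrative re-statement of the TX queue hash added by this patch
 * (not the driver's own function).  TCP flows are spread over num_qps
 * send queues by mixing source port, destination port and destination
 * address; everything else falls back to queue 0.
 */
static inline int ehea_hash_skb_sketch(struct sk_buff *skb, int num_qps)
{
	u32 tmp;

	if (skb->protocol == htons(ETH_P_IP) &&
	    ip_hdr(skb)->protocol == IPPROTO_TCP) {
		/* TCP header sits right behind the (variable-length) IP header */
		const struct tcphdr *tcp = (const struct tcphdr *)
			((const u8 *)ip_hdr(skb) + ip_hdrlen(skb));

		/* cheap mix of the ports and the destination address */
		tmp = (tcp->source + (tcp->dest << 16)) % 31;
		tmp += ip_hdr(skb)->daddr % 31;
		return tmp % num_qps;
	}

	return 0;
}
```

Because the hash depends only on flow identifiers, all segments of a given TCP connection stay on the same send queue (avoiding reordering) while unrelated flows are distributed across the num_tx_qps queues configured via the new module parameter.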