author <jejb@titanic.il.steeleye.com> 2005-04-17 16:05:31 -0500
committer James Bottomley <jejb@titanic> 2005-04-18 13:50:53 -0500
commit dea3101e0a5c897d2c9351a7444e139db9f40247 (patch)
tree 61de19e98eed08bb760703b362eab2038c34f261 /drivers
parent 8e8790415e91964096f862a58cacb55d2bc9a817 (diff)
lpfc: add Emulex FC driver version 8.0.28
From: James.Smart@Emulex.Com
Modified for kernel import and
Signed-off-by: James Bottomley <James.Bottomley@SteelEye.com>
Diffstat (limited to 'drivers')
-rw-r--r-- drivers/scsi/Kconfig | 8
-rw-r--r-- drivers/scsi/Makefile | 1
-rw-r--r-- drivers/scsi/lpfc/Makefile | 32
-rw-r--r-- drivers/scsi/lpfc/lpfc.h | 384
-rw-r--r-- drivers/scsi/lpfc/lpfc_attr.c | 1291
-rw-r--r-- drivers/scsi/lpfc/lpfc_compat.h | 97
-rw-r--r-- drivers/scsi/lpfc/lpfc_crtn.h | 216
-rw-r--r-- drivers/scsi/lpfc/lpfc_ct.c | 1237
-rw-r--r-- drivers/scsi/lpfc/lpfc_disc.h | 206
-rw-r--r-- drivers/scsi/lpfc/lpfc_els.c | 3258
-rw-r--r-- drivers/scsi/lpfc/lpfc_hbadisc.c | 2537
-rw-r--r-- drivers/scsi/lpfc/lpfc_hw.h | 2687
-rw-r--r-- drivers/scsi/lpfc/lpfc_init.c | 1739
-rw-r--r-- drivers/scsi/lpfc/lpfc_logmsg.h | 41
-rw-r--r-- drivers/scsi/lpfc/lpfc_mbox.c | 646
-rw-r--r-- drivers/scsi/lpfc/lpfc_mem.c | 179
-rw-r--r-- drivers/scsi/lpfc/lpfc_nportdisc.c | 1842
-rw-r--r-- drivers/scsi/lpfc/lpfc_scsi.c | 1246
-rw-r--r-- drivers/scsi/lpfc/lpfc_scsi.h | 157
-rw-r--r-- drivers/scsi/lpfc/lpfc_sli.c | 2885
-rw-r--r-- drivers/scsi/lpfc/lpfc_sli.h | 216
-rw-r--r-- drivers/scsi/lpfc/lpfc_version.h | 32
22 files changed, 20937 insertions, 0 deletions
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 718df4c6c3b..750b11cefd9 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -1314,6 +1314,14 @@ config SCSI_QLOGICPTI
source "drivers/scsi/qla2xxx/Kconfig"
+config SCSI_LPFC
+ tristate "Emulex LightPulse Fibre Channel Support"
+ depends on PCI && SCSI
+ select SCSI_FC_ATTRS
+ help
+ This lpfc driver supports the Emulex LightPulse
+ Family of Fibre Channel PCI host adapters.
+
config SCSI_SEAGATE
tristate "Seagate ST-02 and Future Domain TMC-8xx SCSI support"
depends on X86 && ISA && SCSI && BROKEN
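With the Kconfig entry above in place, lpfc is enabled like any other SCSI low-level driver; the select line pulls in the FC transport attributes automatically. An illustrative .config fragment (the surrounding symbols depend on the rest of the configuration):

    CONFIG_PCI=y
    CONFIG_SCSI=y
    # SCSI_FC_ATTRS is selected automatically by SCSI_LPFC
    CONFIG_SCSI_LPFC=m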
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index 29fcee35ec0..9cb9fe7d623 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -80,6 +80,7 @@ obj-$(CONFIG_SCSI_QLOGIC_ISP) += qlogicisp.o
obj-$(CONFIG_SCSI_QLOGIC_FC) += qlogicfc.o
obj-$(CONFIG_SCSI_QLOGIC_1280) += qla1280.o
obj-$(CONFIG_SCSI_QLA2XXX) += qla2xxx/
+obj-$(CONFIG_SCSI_LPFC) += lpfc/
obj-$(CONFIG_SCSI_PAS16) += pas16.o
obj-$(CONFIG_SCSI_SEAGATE) += seagate.o
obj-$(CONFIG_SCSI_FD_8xx) += seagate.o
diff --git a/drivers/scsi/lpfc/Makefile b/drivers/scsi/lpfc/Makefile
new file mode 100644
index 00000000000..2b3098591c4
--- /dev/null
+++ b/drivers/scsi/lpfc/Makefile
@@ -0,0 +1,32 @@
+#/*******************************************************************
+# * This file is part of the Emulex Linux Device Driver for *
+# * Enterprise Fibre Channel Host Bus Adapters. *
+# * Refer to the README file included with this package for *
+# * driver version and adapter support. *
+# * Copyright (C) 2004 Emulex Corporation. *
+# * www.emulex.com *
+# * *
+# * This program is free software; you can redistribute it and/or *
+# * modify it under the terms of the GNU General Public License *
+# * as published by the Free Software Foundation; either version 2 *
+# * of the License, or (at your option) any later version. *
+# * *
+# * This program is distributed in the hope that it will be useful, *
+# * but WITHOUT ANY WARRANTY; without even the implied warranty of *
+# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
+# * GNU General Public License for more details, a copy of which *
+# * can be found in the file COPYING included with this package. *
+# *******************************************************************/
+######################################################################
+
+#$Id: Makefile 1.58 2005/01/23 19:00:32EST sf_support Exp $
+
+ifneq ($(GCOV),)
+ EXTRA_CFLAGS += -fprofile-arcs -ftest-coverage
+ EXTRA_CFLAGS += -O0
+endif
+
+obj-$(CONFIG_SCSI_LPFC) := lpfc.o
+
+lpfc-objs := lpfc_mem.o lpfc_sli.o lpfc_ct.o lpfc_els.o lpfc_hbadisc.o \
+ lpfc_init.o lpfc_mbox.o lpfc_nportdisc.o lpfc_scsi.o lpfc_attr.o
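The GCOV conditional in this Makefile makes coverage instrumentation opt-in from the make command line rather than a separate build target. A hedged example of requesting such a build (exact in-tree build syntax varies across 2.6 kernels):

    make GCOV=1 drivers/scsi/lpfc/

With GCOV set, the lpfc objects are compiled with -fprofile-arcs -ftest-coverage and -O0, so the resulting gcov output maps cleanly back to source lines.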
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
new file mode 100644
index 00000000000..d78247c63d0
--- /dev/null
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -0,0 +1,384 @@
+/*******************************************************************
+ * This file is part of the Emulex Linux Device Driver for *
+ * Enterprise Fibre Channel Host Bus Adapters. *
+ * Refer to the README file included with this package for *
+ * driver version and adapter support. *
+ * Copyright (C) 2004 Emulex Corporation. *
+ * www.emulex.com *
+ * *
+ * This program is free software; you can redistribute it and/or *
+ * modify it under the terms of the GNU General Public License *
+ * as published by the Free Software Foundation; either version 2 *
+ * of the License, or (at your option) any later version. *
+ * *
+ * This program is distributed in the hope that it will be useful, *
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of *
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
+ * GNU General Public License for more details, a copy of which *
+ * can be found in the file COPYING included with this package. *
+ *******************************************************************/
+
+/*
+ * $Id: lpfc.h 1.167 2005/04/07 08:47:05EDT sf_support Exp $
+ */
+
+struct lpfc_sli2_slim;
+
+#define LPFC_MAX_TARGET 256 /* max targets supported */
+#define LPFC_MAX_DISC_THREADS 64 /* max outstanding discovery els req */
+#define LPFC_MAX_NS_RETRY 3 /* max NameServer retries */
+
+#define LPFC_DFT_HBA_Q_DEPTH 2048 /* max cmds per hba */
+#define LPFC_LC_HBA_Q_DEPTH 1024 /* max cmds per low cost hba */
+#define LPFC_LP101_HBA_Q_DEPTH 128 /* max cmds per low cost hba */
+
+#define LPFC_CMD_PER_LUN 30 /* max outstanding cmds per lun */
+#define LPFC_SG_SEG_CNT 64 /* sg element count per scsi cmnd */
+#define LPFC_IOCB_LIST_CNT 2250 /* list of IOCBs for fast-path usage. */
+
+/* Define macros for 64 bit support */
+#define putPaddrLow(addr) ((uint32_t) (0xffffffff & (u64)(addr)))
+#define putPaddrHigh(addr) ((uint32_t) (0xffffffff & (((u64)(addr))>>32)))
+#define getPaddr(high, low) ((dma_addr_t)( \
+ (( (u64)(high)<<16 ) << 16)|( (u64)(low))))
+/* Provide maximum configuration definitions. */
+#define LPFC_DRVR_TIMEOUT 16 /* driver iocb timeout value in sec */
+#define MAX_FCP_TARGET 256 /* max num of FCP targets supported */
+#define FC_MAX_ADPTMSG 64
+
+#define MAX_HBAEVT 32
+
+/* Provide DMA memory definitions the driver uses per port instance. */
+struct lpfc_dmabuf {
+ struct list_head list;
+ void *virt; /* virtual address ptr */
+ dma_addr_t phys; /* mapped address */
+};
+
+struct lpfc_dma_pool {
+ struct lpfc_dmabuf *elements;
+ uint32_t max_count;
+ uint32_t current_count;
+};
+
+/* Priority bit. Set value to exceed low water mark in lpfc_mem. */
+#define MEM_PRI 0x100
+
+
+/****************************************************************************/
+/* Device VPD save area */
+/****************************************************************************/
+typedef struct lpfc_vpd {
+ uint32_t status; /* vpd status value */
+ uint32_t length; /* number of bytes actually returned */
+ struct {
+ uint32_t rsvd1; /* Revision numbers */
+ uint32_t biuRev;
+ uint32_t smRev;
+ uint32_t smFwRev;
+ uint32_t endecRev;
+ uint16_t rBit;
+ uint8_t fcphHigh;
+ uint8_t fcphLow;
+ uint8_t feaLevelHigh;
+ uint8_t feaLevelLow;
+ uint32_t postKernRev;
+ uint32_t opFwRev;
+ uint8_t opFwName[16];
+ uint32_t sli1FwRev;
+ uint8_t sli1FwName[16];
+ uint32_t sli2FwRev;
+ uint8_t sli2FwName[16];
+ } rev;
+} lpfc_vpd_t;
+
+struct lpfc_scsi_buf;
+
+
+/*
+ * lpfc stat counters
+ */
+struct lpfc_stats {
+ /* Statistics for ELS commands */
+ uint32_t elsLogiCol;
+ uint32_t elsRetryExceeded;
+ uint32_t elsXmitRetry;
+ uint32_t elsDelayRetry;
+ uint32_t elsRcvDrop;
+ uint32_t elsRcvFrame;
+ uint32_t elsRcvRSCN;
+ uint32_t elsRcvRNID;
+ uint32_t elsRcvFARP;
+ uint32_t elsRcvFARPR;
+ uint32_t elsRcvFLOGI;
+ uint32_t elsRcvPLOGI;
+ uint32_t elsRcvADISC;
+ uint32_t elsRcvPDISC;
+ uint32_t elsRcvFAN;
+ uint32_t elsRcvLOGO;
+ uint32_t elsRcvPRLO;
+ uint32_t elsRcvPRLI;
+ uint32_t elsRcvRRQ;
+ uint32_t elsXmitFLOGI;
+ uint32_t elsXmitPLOGI;
+ uint32_t elsXmitPRLI;
+ uint32_t elsXmitADISC;
+ uint32_t elsXmitLOGO;
+ uint32_t elsXmitSCR;
+ uint32_t elsXmitRNID;
+ uint32_t elsXmitFARP;
+ uint32_t elsXmitFARPR;
+ uint32_t elsXmitACC;
+ uint32_t elsXmitLSRJT;
+
+ uint32_t frameRcvBcast;
+ uint32_t frameRcvMulti;
+ uint32_t strayXmitCmpl;
+ uint32_t frameXmitDelay;
+ uint32_t xriCmdCmpl;
+ uint32_t xriStatErr;
+ uint32_t LinkUp;
+ uint32_t LinkDown;
+ uint32_t LinkMultiEvent;
+ uint32_t NoRcvBuf;
+ uint32_t fcpCmd;
+ uint32_t fcpCmpl;
+ uint32_t fcpRspErr;
+ uint32_t fcpRemoteStop;
+ uint32_t fcpPortRjt;
+ uint32_t fcpPortBusy;
+ uint32_t fcpError;
+ uint32_t fcpLocalErr;
+};
+
+enum sysfs_mbox_state {
+ SMBOX_IDLE,
+ SMBOX_WRITING,
+ SMBOX_READING
+};
+
+struct lpfc_sysfs_mbox {
+ enum sysfs_mbox_state state;
+ size_t offset;
+ struct lpfcMboxq * mbox;
+};
+
+struct lpfc_hba {
+ struct list_head hba_list; /* List of hbas/ports */
+ struct lpfc_sli sli;
+ struct lpfc_sli2_slim *slim2p;
+ dma_addr_t slim2p_mapping;
+ uint16_t pci_cfg_value;
+
+ uint32_t hba_state;
+
+#define LPFC_INIT_START 1 /* Initial state after board reset */
+#define LPFC_INIT_MBX_CMDS 2 /* Initialize HBA with mbox commands */
+#define LPFC_LINK_DOWN 3 /* HBA initialized, link is down */
+#define LPFC_LINK_UP 4 /* Link is up - issue READ_LA */
+#define LPFC_LOCAL_CFG_LINK 5 /* local NPORT Id configured */
+#define LPFC_FLOGI 6 /* FLOGI sent to Fabric */
+#define LPFC_FABRIC_CFG_LINK 7 /* Fabric assigned NPORT Id
+ configured */
+#define LPFC_NS_REG 8 /* Register with NameServer */
+#define LPFC_NS_QRY 9 /* Query NameServer for NPort ID list */
+#define LPFC_BUILD_DISC_LIST 10 /* Build ADISC and PLOGI lists for
+ * device authentication / discovery */
+#define LPFC_DISC_AUTH 11 /* Processing ADISC list */
+#define LPFC_CLEAR_LA 12 /* authentication cmplt - issue
+ CLEAR_LA */
+#define LPFC_HBA_READY 32
+#define LPFC_HBA_ERROR 0xff
+
+ uint8_t fc_linkspeed; /* Link speed after last READ_LA */
+
+ uint32_t fc_eventTag; /* event tag for link attention */
+ uint32_t fc_prli_sent; /* cntr for outstanding PRLIs */
+
+	uint32_t num_disc_nodes;	/* in addition to hba_state */
+
+ struct timer_list fc_estabtmo; /* link establishment timer */
+ struct timer_list fc_disctmo; /* Discovery rescue timer */
+ struct timer_list fc_fdmitmo; /* fdmi timer */
+ /* These fields used to be binfo */
+ struct lpfc_name fc_nodename; /* fc nodename */
+ struct lpfc_name fc_portname; /* fc portname */
+ uint32_t fc_pref_DID; /* preferred D_ID */
+ uint8_t fc_pref_ALPA; /* preferred AL_PA */
+ uint32_t fc_edtov; /* E_D_TOV timer value */
+ uint32_t fc_arbtov; /* ARB_TOV timer value */
+ uint32_t fc_ratov; /* R_A_TOV timer value */
+ uint32_t fc_rttov; /* R_T_TOV timer value */
+ uint32_t fc_altov; /* AL_TOV timer value */
+ uint32_t fc_crtov; /* C_R_TOV timer value */
+ uint32_t fc_citov; /* C_I_TOV timer value */
+ uint32_t fc_myDID; /* fibre channel S_ID */
+ uint32_t fc_prevDID; /* previous fibre channel S_ID */
+
+ struct serv_parm fc_sparam; /* buffer for our service parameters */
+ struct serv_parm fc_fabparam; /* fabric service parameters buffer */
+ uint8_t alpa_map[128]; /* AL_PA map from READ_LA */
+
+ uint8_t fc_ns_retry; /* retries for fabric nameserver */
+ uint32_t fc_nlp_cnt; /* outstanding NODELIST requests */
+ uint32_t fc_rscn_id_cnt; /* count of RSCNs payloads in list */
+ struct lpfc_dmabuf *fc_rscn_id_list[FC_MAX_HOLD_RSCN];
+ uint32_t lmt;
+ uint32_t fc_flag; /* FC flags */
+#define FC_PT2PT 0x1 /* pt2pt with no fabric */
+#define FC_PT2PT_PLOGI 0x2 /* pt2pt initiate PLOGI */
+#define FC_DISC_TMO 0x4 /* Discovery timer running */
+#define FC_PUBLIC_LOOP 0x8 /* Public loop */
+#define FC_LBIT 0x10 /* LOGIN bit in loopinit set */
+#define FC_RSCN_MODE 0x20 /* RSCN cmd rcv'ed */
+#define FC_NLP_MORE 0x40 /* More nodes to process in node tbl */
+#define FC_OFFLINE_MODE 0x80 /* Interface is offline for diag */
+#define FC_FABRIC 0x100 /* We are fabric attached */
+#define FC_ESTABLISH_LINK 0x200 /* Reestablish Link */
+#define FC_RSCN_DISCOVERY 0x400 /* Authenticate all devices after RSCN*/
+#define FC_LOADING 0x1000 /* HBA in process of loading drvr */
+#define FC_UNLOADING 0x2000 /* HBA in process of unloading drvr */
+#define FC_SCSI_SCAN_TMO 0x4000 /* scsi scan timer running */
+#define FC_ABORT_DISCOVERY 0x8000 /* we want to abort discovery */
+#define FC_NDISC_ACTIVE 0x10000 /* NPort discovery active */
+
+ uint32_t fc_topology; /* link topology, from LINK INIT */
+
+ struct lpfc_stats fc_stat;
+
+ /* These are the head/tail pointers for the bind, plogi, adisc, unmap,
+ * and map lists. Their counters are immediately following.
+ */
+ struct list_head fc_plogi_list;
+ struct list_head fc_adisc_list;
+ struct list_head fc_reglogin_list;
+ struct list_head fc_prli_list;
+ struct list_head fc_nlpunmap_list;
+ struct list_head fc_nlpmap_list;
+ struct list_head fc_npr_list;
+ struct list_head fc_unused_list;
+
+ /* Keep counters for the number of entries in each list. */
+ uint16_t fc_plogi_cnt;
+ uint16_t fc_adisc_cnt;
+ uint16_t fc_reglogin_cnt;
+ uint16_t fc_prli_cnt;
+ uint16_t fc_unmap_cnt;
+ uint16_t fc_map_cnt;
+ uint16_t fc_npr_cnt;
+ uint16_t fc_unused_cnt;
+ struct lpfc_nodelist fc_fcpnodev; /* nodelist entry for no device */
+ uint32_t nport_event_cnt; /* timestamp for nlplist entry */
+
+#define LPFC_RPI_HASH_SIZE 64
+#define LPFC_RPI_HASH_FUNC(x) ((x) & (0x3f))
+ /* ptr to active D_ID / RPIs */
+ struct lpfc_nodelist *fc_nlplookup[LPFC_RPI_HASH_SIZE];
+ uint32_t wwnn[2];
+ uint32_t RandomData[7];
+
+ uint32_t cfg_log_verbose;
+ uint32_t cfg_lun_queue_depth;
+ uint32_t cfg_nodev_tmo;
+ uint32_t cfg_hba_queue_depth;
+ uint32_t cfg_fcp_class;
+ uint32_t cfg_use_adisc;
+ uint32_t cfg_ack0;
+ uint32_t cfg_topology;
+ uint32_t cfg_scan_down;
+ uint32_t cfg_link_speed;
+ uint32_t cfg_cr_delay;
+ uint32_t cfg_cr_count;
+ uint32_t cfg_fdmi_on;
+ uint32_t cfg_fcp_bind_method;
+ uint32_t cfg_discovery_threads;
+ uint32_t cfg_max_luns;
+ uint32_t cfg_sg_seg_cnt;
+ uint32_t cfg_sg_dma_buf_size;
+
+ lpfc_vpd_t vpd; /* vital product data */
+
+ struct Scsi_Host *host;
+ struct pci_dev *pcidev;
+ struct list_head work_list;
+ uint32_t work_ha; /* Host Attention Bits for WT */
+ uint32_t work_ha_mask; /* HA Bits owned by WT */
+ uint32_t work_hs; /* HS stored in case of ERRAT */
+ uint32_t work_status[2]; /* Extra status from SLIM */
+ uint32_t work_hba_events; /* Timeout to be handled */
+#define WORKER_DISC_TMO 0x1 /* Discovery timeout */
+#define WORKER_ELS_TMO 0x2 /* ELS timeout */
+#define WORKER_MBOX_TMO 0x4 /* MBOX timeout */
+#define WORKER_FDMI_TMO 0x8 /* FDMI timeout */
+
+ wait_queue_head_t *work_wait;
+ struct task_struct *worker_thread;
+
+ unsigned long pci_bar0_map; /* Physical address for PCI BAR0 */
+ unsigned long pci_bar2_map; /* Physical address for PCI BAR2 */
+ void __iomem *slim_memmap_p; /* Kernel memory mapped address for
+ PCI BAR0 */
+ void __iomem *ctrl_regs_memmap_p;/* Kernel memory mapped address for
+ PCI BAR2 */
+
+ void __iomem *MBslimaddr; /* virtual address for mbox cmds */
+ void __iomem *HAregaddr; /* virtual address for host attn reg */
+ void __iomem *CAregaddr; /* virtual address for chip attn reg */
+ void __iomem *HSregaddr; /* virtual address for host status
+ reg */
+ void __iomem *HCregaddr; /* virtual address for host ctl reg */
+
+ int brd_no; /* FC board number */
+
+ char SerialNumber[32]; /* adapter Serial Number */
+ char OptionROMVersion[32]; /* adapter BIOS / Fcode version */
+ char ModelDesc[256]; /* Model Description */
+ char ModelName[80]; /* Model Name */
+ char ProgramType[256]; /* Program Type */
+ char Port[20]; /* Port No */
+ uint8_t vpd_flag; /* VPD data flag */
+
+#define VPD_MODEL_DESC 0x1 /* valid vpd model description */
+#define VPD_MODEL_NAME 0x2 /* valid vpd model name */
+#define VPD_PROGRAM_TYPE 0x4 /* valid vpd program type */
+#define VPD_PORT 0x8 /* valid vpd port data */
+#define VPD_MASK 0xf /* mask for any vpd data */
+
+ struct timer_list els_tmofunc;
+
+ void *link_stats;
+
+ /*
+ * stat counters
+ */
+ uint64_t fc4InputRequests;
+ uint64_t fc4OutputRequests;
+ uint64_t fc4ControlRequests;
+
+ struct lpfc_sysfs_mbox sysfs_mbox;
+
+ /* fastpath list. */
+ struct list_head lpfc_scsi_buf_list;
+ uint32_t total_scsi_bufs;
+ struct list_head lpfc_iocb_list;
+ uint32_t total_iocbq_bufs;
+
+ /* pci_mem_pools */
+ struct pci_pool *lpfc_scsi_dma_buf_pool;
+ struct pci_pool *lpfc_mbuf_pool;
+ struct lpfc_dma_pool lpfc_mbuf_safety_pool;
+
+ mempool_t *mbox_mem_pool;
+ mempool_t *nlp_mem_pool;
+ struct list_head freebufList;
+ struct list_head ctrspbuflist;
+ struct list_head rnidrspbuflist;
+};
+
+
+struct rnidrsp {
+ void *buf;
+ uint32_t uniqueid;
+ struct list_head list;
+ uint32_t data;
+};
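The putPaddrLow()/putPaddrHigh()/getPaddr() macros near the top of lpfc.h split a 64-bit DMA address into the two 32-bit words a buffer descriptor carries, and reassemble them later; the double 16-bit shift in getPaddr() avoids the shift-width warnings some compilers emit for a literal 32-bit shift. A minimal user-space sketch of the round-trip (the u64 and dma_addr_t typedefs here are stand-ins for the kernel types):

    #include <stdio.h>
    #include <stdint.h>

    typedef uint64_t u64;
    typedef uint64_t dma_addr_t;    /* assumes a 64-bit DMA address space */

    /* Same definitions as lpfc.h */
    #define putPaddrLow(addr)   ((uint32_t)(0xffffffff & (u64)(addr)))
    #define putPaddrHigh(addr)  ((uint32_t)(0xffffffff & (((u64)(addr)) >> 32)))
    #define getPaddr(high, low) ((dma_addr_t)( \
                    (((u64)(high) << 16) << 16) | ((u64)(low))))

    int main(void)
    {
            dma_addr_t phys = 0x0000004280001000ULL;
            uint32_t lo = putPaddrLow(phys);    /* 0x80001000 */
            uint32_t hi = putPaddrHigh(phys);   /* 0x00000042 */

            /* prints "4280001000 4280001000": the split round-trips */
            printf("%llx %llx\n", (unsigned long long)phys,
                   (unsigned long long)getPaddr(hi, lo));
            return 0;
    }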
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
new file mode 100644
index 00000000000..1276bd77b99
--- /dev/null
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -0,0 +1,1291 @@
+/*******************************************************************
+ * This file is part of the Emulex Linux Device Driver for *
+ * Enterprise Fibre Channel Host Bus Adapters. *
+ * Refer to the README file included with this package for *
+ * driver version and adapter support. *
+ * Copyright (C) 2004 Emulex Corporation. *
+ * www.emulex.com *
+ * *
+ * This program is free software; you can redistribute it and/or *
+ * modify it under the terms of the GNU General Public License *
+ * as published by the Free Software Foundation; either version 2 *
+ * of the License, or (at your option) any later version. *
+ * *
+ * This program is distributed in the hope that it will be useful, *
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of *
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
+ * GNU General Public License for more details, a copy of which *
+ * can be found in the file COPYING included with this package. *
+ *******************************************************************/
+
+/*
+ * $Id: lpfc_attr.c 1.24 2005/04/13 11:58:55EDT sf_support Exp $
+ */
+
+#include <linux/ctype.h>
+#include <linux/pci.h>
+#include <linux/interrupt.h>
+
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_tcq.h>
+#include <scsi/scsi_transport_fc.h>
+
+#include "lpfc_hw.h"
+#include "lpfc_sli.h"
+#include "lpfc_disc.h"
+#include "lpfc_scsi.h"
+#include "lpfc.h"
+#include "lpfc_logmsg.h"
+#include "lpfc_version.h"
+#include "lpfc_compat.h"
+#include "lpfc_crtn.h"
+
+
+static void
+lpfc_jedec_to_ascii(int incr, char hdw[])
+{
+ int i, j;
+ for (i = 0; i < 8; i++) {
+ j = (incr & 0xf);
+ if (j <= 9)
+ hdw[7 - i] = 0x30 + j;
+ else
+ hdw[7 - i] = 0x61 + j - 10;
+ incr = (incr >> 4);
+ }
+ hdw[8] = 0;
+ return;
+}
+
+static ssize_t
+lpfc_drvr_version_show(struct class_device *cdev, char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, LPFC_MODULE_DESC "\n");
+}
+
+static ssize_t
+management_version_show(struct class_device *cdev, char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, DFC_API_VERSION "\n");
+}
+
+static ssize_t
+lpfc_info_show(struct class_device *cdev, char *buf)
+{
+ struct Scsi_Host *host = class_to_shost(cdev);
+ return snprintf(buf, PAGE_SIZE, "%s\n",lpfc_info(host));
+}
+
+static ssize_t
+lpfc_serialnum_show(struct class_device *cdev, char *buf)
+{
+ struct Scsi_Host *host = class_to_shost(cdev);
+ struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata[0];
+ return snprintf(buf, PAGE_SIZE, "%s\n",phba->SerialNumber);
+}
+
+static ssize_t
+lpfc_modeldesc_show(struct class_device *cdev, char *buf)
+{
+ struct Scsi_Host *host = class_to_shost(cdev);
+ struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata[0];
+ return snprintf(buf, PAGE_SIZE, "%s\n",phba->ModelDesc);
+}
+
+static ssize_t
+lpfc_modelname_show(struct class_device *cdev, char *buf)
+{
+ struct Scsi_Host *host = class_to_shost(cdev);
+ struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata[0];
+ return snprintf(buf, PAGE_SIZE, "%s\n",phba->ModelName);
+}
+
+static ssize_t
+lpfc_programtype_show(struct class_device *cdev, char *buf)
+{
+ struct Scsi_Host *host = class_to_shost(cdev);
+ struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata[0];
+ return snprintf(buf, PAGE_SIZE, "%s\n",phba->ProgramType);
+}
+
+static ssize_t
+lpfc_portnum_show(struct class_device *cdev, char *buf)
+{
+ struct Scsi_Host *host = class_to_shost(cdev);
+ struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata[0];
+ return snprintf(buf, PAGE_SIZE, "%s\n",phba->Port);
+}
+
+static ssize_t
+lpfc_fwrev_show(struct class_device *cdev, char *buf)
+{
+ struct Scsi_Host *host = class_to_shost(cdev);
+ struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata[0];
+ char fwrev[32];
+ lpfc_decode_firmware_rev(phba, fwrev, 1);
+ return snprintf(buf, PAGE_SIZE, "%s\n",fwrev);
+}
+
+static ssize_t
+lpfc_hdw_show(struct class_device *cdev, char *buf)
+{
+ char hdw[9];
+ struct Scsi_Host *host = class_to_shost(cdev);
+ struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata[0];
+ lpfc_vpd_t *vp = &phba->vpd;
+ lpfc_jedec_to_ascii(vp->rev.biuRev, hdw);
+ return snprintf(buf, PAGE_SIZE, "%s\n", hdw);
+}
+static ssize_t
+lpfc_option_rom_version_show(struct class_device *cdev, char *buf)
+{
+ struct Scsi_Host *host = class_to_shost(cdev);
+ struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata[0];
+ return snprintf(buf, PAGE_SIZE, "%s\n", phba->OptionROMVersion);
+}
+static ssize_t
+lpfc_state_show(struct class_device *cdev, char *buf)
+{
+ struct Scsi_Host *host = class_to_shost(cdev);
+ struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata[0];
+ int len = 0;
+ switch (phba->hba_state) {
+ case LPFC_INIT_START:
+ case LPFC_INIT_MBX_CMDS:
+ case LPFC_LINK_DOWN:
+ len += snprintf(buf + len, PAGE_SIZE-len, "Link Down\n");
+ break;
+ case LPFC_LINK_UP:
+ case LPFC_LOCAL_CFG_LINK:
+ len += snprintf(buf + len, PAGE_SIZE-len, "Link Up\n");
+ break;
+ case LPFC_FLOGI:
+ case LPFC_FABRIC_CFG_LINK:
+ case LPFC_NS_REG:
+ case LPFC_NS_QRY:
+ case LPFC_BUILD_DISC_LIST:
+ case LPFC_DISC_AUTH:
+ case LPFC_CLEAR_LA:
+ len += snprintf(buf + len, PAGE_SIZE-len,
+ "Link Up - Discovery\n");
+ break;
+ case LPFC_HBA_READY:
+ len += snprintf(buf + len, PAGE_SIZE-len,
+ "Link Up - Ready:\n");
+ if (phba->fc_topology == TOPOLOGY_LOOP) {
+ if (phba->fc_flag & FC_PUBLIC_LOOP)
+ len += snprintf(buf + len, PAGE_SIZE-len,
+ " Public Loop\n");
+ else
+ len += snprintf(buf + len, PAGE_SIZE-len,
+ " Private Loop\n");
+ } else {
+ if (phba->fc_flag & FC_FABRIC)
+ len += snprintf(buf + len, PAGE_SIZE-len,
+ " Fabric\n");
+ else
+ len += snprintf(buf + len, PAGE_SIZE-len,
+ " Point-2-Point\n");
+ }
+ }
+ return len;
+}
+
+static ssize_t
+lpfc_num_discovered_ports_show(struct class_device *cdev, char *buf)
+{
+ struct Scsi_Host *host = class_to_shost(cdev);
+ struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata[0];
+ return snprintf(buf, PAGE_SIZE, "%d\n", phba->fc_map_cnt +
+ phba->fc_unmap_cnt);
+}
+
+
+static ssize_t
+lpfc_issue_lip (struct class_device *cdev, const char *buf, size_t count)
+{
+ struct Scsi_Host *host = class_to_shost(cdev);
+ struct lpfc_hba *phba = (struct lpfc_hba *) host->hostdata[0];
+ int val = 0;
+ LPFC_MBOXQ_t *pmboxq;
+ int mbxstatus = MBXERR_ERROR;
+
+ if ((sscanf(buf, "%d", &val) != 1) ||
+ (val != 1))
+ return -EINVAL;
+
+ if ((phba->fc_flag & FC_OFFLINE_MODE) ||
+ (phba->hba_state != LPFC_HBA_READY))
+ return -EPERM;
+
+ pmboxq = mempool_alloc(phba->mbox_mem_pool,GFP_KERNEL);
+
+ if (!pmboxq)
+ return -ENOMEM;
+
+ memset((void *)pmboxq, 0, sizeof (LPFC_MBOXQ_t));
+ lpfc_init_link(phba, pmboxq, phba->cfg_topology, phba->cfg_link_speed);
+ mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
+
+ if (mbxstatus == MBX_TIMEOUT)
+ pmboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+ else
+ mempool_free( pmboxq, phba->mbox_mem_pool);
+
+ if (mbxstatus == MBXERR_ERROR)
+ return -EIO;
+
+ return strlen(buf);
+}
+
+static ssize_t
+lpfc_nport_evt_cnt_show(struct class_device *cdev, char *buf)
+{
+ struct Scsi_Host *host = class_to_shost(cdev);
+ struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata[0];
+ return snprintf(buf, PAGE_SIZE, "%d\n", phba->nport_event_cnt);
+}
+
+static ssize_t
+lpfc_board_online_show(struct class_device *cdev, char *buf)
+{
+ struct Scsi_Host *host = class_to_shost(cdev);
+ struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata[0];
+
+ if (!phba) return 0;
+
+ if (phba->fc_flag & FC_OFFLINE_MODE)
+ return snprintf(buf, PAGE_SIZE, "0\n");
+ else
+ return snprintf(buf, PAGE_SIZE, "1\n");
+}
+
+static ssize_t
+lpfc_board_online_store(struct class_device *cdev, const char *buf,
+ size_t count)
+{
+ struct Scsi_Host *host = class_to_shost(cdev);
+ struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata[0];
+ struct completion online_compl;
+ int val=0, status=0;
+
+ if (sscanf(buf, "%d", &val) != 1)
+ return 0;
+
+ init_completion(&online_compl);
+
+ if (val)
+ lpfc_workq_post_event(phba, &status, &online_compl,
+ LPFC_EVT_ONLINE);
+ else
+ lpfc_workq_post_event(phba, &status, &online_compl,
+ LPFC_EVT_OFFLINE);
+ wait_for_completion(&online_compl);
+ if (!status)
+ return strlen(buf);
+ else
+ return 0;
+}
+
+
+#define lpfc_param_show(attr) \
+static ssize_t \
+lpfc_##attr##_show(struct class_device *cdev, char *buf) \
+{ \
+ struct Scsi_Host *host = class_to_shost(cdev);\
+ struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata[0];\
+ int val = 0;\
+ if (phba){\
+ val = phba->cfg_##attr;\
+ return snprintf(buf, PAGE_SIZE, "%d\n",\
+ phba->cfg_##attr);\
+ }\
+ return 0;\
+}
+
+#define lpfc_param_store(attr, minval, maxval) \
+static ssize_t \
+lpfc_##attr##_store(struct class_device *cdev, const char *buf, size_t count) \
+{ \
+ struct Scsi_Host *host = class_to_shost(cdev);\
+ struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata[0];\
+ int val = 0;\
+ if (!isdigit(buf[0]))\
+ return -EINVAL;\
+ if (sscanf(buf, "0x%x", &val) != 1)\
+ if (sscanf(buf, "%d", &val) != 1)\
+ return -EINVAL;\
+ if (phba){\
+ if (val >= minval && val <= maxval) {\
+ phba->cfg_##attr = val;\
+ return strlen(buf);\
+ }\
+ }\
+ return 0;\
+}
+
+#define LPFC_ATTR_R_NOINIT(name, desc) \
+extern int lpfc_##name;\
+module_param(lpfc_##name, int, 0);\
+MODULE_PARM_DESC(lpfc_##name, desc);\
+lpfc_param_show(name)\
+static CLASS_DEVICE_ATTR(lpfc_##name, S_IRUGO , lpfc_##name##_show, NULL)
+
+#define LPFC_ATTR_R(name, defval, minval, maxval, desc) \
+static int lpfc_##name = defval;\
+module_param(lpfc_##name, int, 0);\
+MODULE_PARM_DESC(lpfc_##name, desc);\
+lpfc_param_show(name)\
+static CLASS_DEVICE_ATTR(lpfc_##name, S_IRUGO , lpfc_##name##_show, NULL)
+
+#define LPFC_ATTR_RW(name, defval, minval, maxval, desc) \
+static int lpfc_##name = defval;\
+module_param(lpfc_##name, int, 0);\
+MODULE_PARM_DESC(lpfc_##name, desc);\
+lpfc_param_show(name)\
+lpfc_param_store(name, minval, maxval)\
+static CLASS_DEVICE_ATTR(lpfc_##name, S_IRUGO | S_IWUSR,\
+ lpfc_##name##_show, lpfc_##name##_store)
+
+static CLASS_DEVICE_ATTR(info, S_IRUGO, lpfc_info_show, NULL);
+static CLASS_DEVICE_ATTR(serialnum, S_IRUGO, lpfc_serialnum_show, NULL);
+static CLASS_DEVICE_ATTR(modeldesc, S_IRUGO, lpfc_modeldesc_show, NULL);
+static CLASS_DEVICE_ATTR(modelname, S_IRUGO, lpfc_modelname_show, NULL);
+static CLASS_DEVICE_ATTR(programtype, S_IRUGO, lpfc_programtype_show, NULL);
+static CLASS_DEVICE_ATTR(portnum, S_IRUGO, lpfc_portnum_show, NULL);
+static CLASS_DEVICE_ATTR(fwrev, S_IRUGO, lpfc_fwrev_show, NULL);
+static CLASS_DEVICE_ATTR(hdw, S_IRUGO, lpfc_hdw_show, NULL);
+static CLASS_DEVICE_ATTR(state, S_IRUGO, lpfc_state_show, NULL);
+static CLASS_DEVICE_ATTR(option_rom_version, S_IRUGO,
+ lpfc_option_rom_version_show, NULL);
+static CLASS_DEVICE_ATTR(num_discovered_ports, S_IRUGO,
+ lpfc_num_discovered_ports_show, NULL);
+static CLASS_DEVICE_ATTR(nport_evt_cnt, S_IRUGO, lpfc_nport_evt_cnt_show, NULL);
+static CLASS_DEVICE_ATTR(lpfc_drvr_version, S_IRUGO, lpfc_drvr_version_show,
+ NULL);
+static CLASS_DEVICE_ATTR(management_version, S_IRUGO, management_version_show,
+ NULL);
+static CLASS_DEVICE_ATTR(issue_lip, S_IWUSR, NULL, lpfc_issue_lip);
+static CLASS_DEVICE_ATTR(board_online, S_IRUGO | S_IWUSR,
+ lpfc_board_online_show, lpfc_board_online_store);
+
+
+/*
+# lpfc_log_verbose: Only turn this flag on if you are willing to risk being
+# deluged with LOTS of information.
+# You can set a bit mask to record specific types of verbose messages:
+#
+# LOG_ELS 0x1 ELS events
+# LOG_DISCOVERY 0x2 Link discovery events
+# LOG_MBOX 0x4 Mailbox events
+# LOG_INIT 0x8 Initialization events
+# LOG_LINK_EVENT 0x10 Link events
+# LOG_IP 0x20 IP traffic history
+# LOG_FCP 0x40 FCP traffic history
+# LOG_NODE 0x80 Node table events
+# LOG_MISC 0x400 Miscellaneous events
+# LOG_SLI 0x800 SLI events
+# LOG_CHK_COND 0x1000 FCP Check condition flag
+# LOG_LIBDFC 0x2000 LIBDFC events
+# LOG_ALL_MSG 0xffff LOG all messages
+*/
+LPFC_ATTR_RW(log_verbose, 0x0, 0x0, 0xffff, "Verbose logging bit-mask");
+
+/*
+# lun_queue_depth: This parameter is used to limit the number of outstanding
+# commands per FCP LUN. Value range is [1,128]. Default value is 30.
+*/
+LPFC_ATTR_R(lun_queue_depth, 30, 1, 128,
+ "Max number of FCP commands we can queue to a specific LUN");
+
+/*
+# Some disk devices have a "select ID" or "select Target" capability.
+# From a protocol standpoint "select ID" usually means select the
+# Fibre channel "ALPA". In the FC-AL Profile there is an "informative
+# annex" which contains a table that maps a "select ID" (a number
+# between 0 and 7F) to an ALPA. By default, for compatibility with
+# older drivers, the lpfc driver scans this table from low ALPA to high
+# ALPA.
+#
+# Turning on the scan-down variable (on = 1, off = 0) will
+# cause the lpfc driver to use an inverted table, effectively
+# scanning ALPAs from high to low. Value range is [0,1]. Default value is 1.
+#
+# (Note: This "select ID" functionality is a LOOP ONLY characteristic
+# and will not work across a fabric. Also this parameter will take
+# effect only in the case when ALPA map is not available.)
+*/
+LPFC_ATTR_R(scan_down, 1, 0, 1,
+ "Start scanning for devices from highest ALPA to lowest");
+
+/*
+# lpfc_nodev_tmo: If set, it will hold all I/O errors on devices that disappear
+# until the timer expires. Value range is [0,255]. Default value is 30.
+# NOTE: this MUST be less than the SCSI layer command timeout - 1.
+*/
+LPFC_ATTR_RW(nodev_tmo, 30, 0, 255,
+ "Seconds driver will hold I/O waiting for a device to come back");
+
+/*
+# lpfc_topology: link topology for init link
+# 0x0 = attempt loop mode then point-to-point
+# 0x02 = attempt point-to-point mode only
+# 0x04 = attempt loop mode only
+# 0x06 = attempt point-to-point mode then loop
+# Set point-to-point mode if you want to run as an N_Port.
+# Set loop mode if you want to run as an NL_Port. Value range is [0,0x6].
+# Default value is 0.
+*/
+LPFC_ATTR_R(topology, 0, 0, 6, "Select Fibre Channel topology");
+
+/*
+# lpfc_link_speed: Link speed selection for initializing the Fibre Channel
+# connection.
+# 0 = auto select (default)
+# 1 = 1 Gigabaud
+# 2 = 2 Gigabaud
+# 4 = 4 Gigabaud
+# Value range is [0,4]. Default value is 0.
+*/
+LPFC_ATTR_R(link_speed, 0, 0, 4, "Select link speed");
+
+/*
+# lpfc_fcp_class: Determines FC class to use for the FCP protocol.
+# Value range is [2,3]. Default value is 3.
+*/
+LPFC_ATTR_R(fcp_class, 3, 2, 3,
+ "Select Fibre Channel class of service for FCP sequences");
+
+/*
+# lpfc_use_adisc: Use ADISC for FCP rediscovery instead of PLOGI. Value range
+# is [0,1]. Default value is 0.
+*/
+LPFC_ATTR_RW(use_adisc, 0, 0, 1,
+ "Use ADISC on rediscovery to authenticate FCP devices");
+
+/*
+# lpfc_ack0: Use ACK0, instead of ACK1 for class 2 acknowledgement. Value
+# range is [0,1]. Default value is 0.
+*/
+LPFC_ATTR_R(ack0, 0, 0, 1, "Enable ACK0 support");
+
+/*
+# lpfc_cr_delay & lpfc_cr_count: Default values for I/O coalescing
+# cr_delay (msec) or cr_count outstanding commands. cr_delay can take
+# value [0,63]. cr_count can take value [0,255]. Default value of cr_delay
+# is 0. Default value of cr_count is 1. The cr_count feature is disabled if
+# cr_delay is set to 0.
+*/
+static int lpfc_cr_delay = 0;
+module_param(lpfc_cr_delay, int , 0);
+MODULE_PARM_DESC(lpfc_cr_delay, "A count of milliseconds after which an "
+ "interrupt response is generated");
+
+static int lpfc_cr_count = 1;
+module_param(lpfc_cr_count, int, 0);
+MODULE_PARM_DESC(lpfc_cr_count, "A count of I/O completions after which an "
+ "interrupt response is generated");
+
+/*
+# lpfc_fdmi_on: controls FDMI support.
+# 0 = no FDMI support
+# 1 = support FDMI without attribute of hostname
+# 2 = support FDMI with attribute of hostname
+# Value range [0,2]. Default value is 0.
+*/
+LPFC_ATTR_RW(fdmi_on, 0, 0, 2, "Enable FDMI support");
+
+/*
+# Specifies the maximum number of ELS cmds we can have outstanding (for
+# discovery). Value range is [1,64]. Default value = 32.
+*/
+static int lpfc_discovery_threads = 32;
+module_param(lpfc_discovery_threads, int, 0);
+MODULE_PARM_DESC(lpfc_discovery_threads, "Maximum number of ELS commands "
+ "during discovery");
+
+/*
+# lpfc_max_luns: maximum number of LUNs per target the driver will support
+# Value range is [1,32768]. Default value is 256.
+# NOTE: The SCSI layer will scan each target for this many luns
+*/
+LPFC_ATTR_R(max_luns, 256, 1, 32768,
+ "Maximum number of LUNs per target driver will support");
+
+struct class_device_attribute *lpfc_host_attrs[] = {
+ &class_device_attr_info,
+ &class_device_attr_serialnum,
+ &class_device_attr_modeldesc,
+ &class_device_attr_modelname,
+ &class_device_attr_programtype,
+ &class_device_attr_portnum,
+ &class_device_attr_fwrev,
+ &class_device_attr_hdw,
+ &class_device_attr_option_rom_version,
+ &class_device_attr_state,
+ &class_device_attr_num_discovered_ports,
+ &class_device_attr_lpfc_drvr_version,
+ &class_device_attr_lpfc_log_verbose,
+ &class_device_attr_lpfc_lun_queue_depth,
+ &class_device_attr_lpfc_nodev_tmo,
+ &class_device_attr_lpfc_fcp_class,
+ &class_device_attr_lpfc_use_adisc,
+ &class_device_attr_lpfc_ack0,
+ &class_device_attr_lpfc_topology,
+ &class_device_attr_lpfc_scan_down,
+ &class_device_attr_lpfc_link_speed,
+ &class_device_attr_lpfc_fdmi_on,
+ &class_device_attr_lpfc_max_luns,
+ &class_device_attr_nport_evt_cnt,
+ &class_device_attr_management_version,
+ &class_device_attr_issue_lip,
+ &class_device_attr_board_online,
+ NULL,
+};
+
+static ssize_t
+sysfs_ctlreg_write(struct kobject *kobj, char *buf, loff_t off, size_t count)
+{
+ size_t buf_off;
+ struct Scsi_Host *host = class_to_shost(container_of(kobj,
+ struct class_device, kobj));
+ struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata[0];
+
+ if ((off + count) > FF_REG_AREA_SIZE)
+ return -ERANGE;
+
+ if (count == 0) return 0;
+
+ if (off % 4 || count % 4 || (unsigned long)buf % 4)
+ return -EINVAL;
+
+ spin_lock_irq(phba->host->host_lock);
+
+ if (!(phba->fc_flag & FC_OFFLINE_MODE)) {
+ spin_unlock_irq(phba->host->host_lock);
+ return -EPERM;
+ }
+
+ for (buf_off = 0; buf_off < count; buf_off += sizeof(uint32_t))
+ writel(*((uint32_t *)(buf + buf_off)),
+ phba->ctrl_regs_memmap_p + off + buf_off);
+
+ spin_unlock_irq(phba->host->host_lock);
+
+ return count;
+}
+
+static ssize_t
+sysfs_ctlreg_read(struct kobject *kobj, char *buf, loff_t off, size_t count)
+{
+ size_t buf_off;
+ uint32_t * tmp_ptr;
+ struct Scsi_Host *host = class_to_shost(container_of(kobj,
+ struct class_device, kobj));
+ struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata[0];
+
+ if (off > FF_REG_AREA_SIZE)
+ return -ERANGE;
+
+ if ((off + count) > FF_REG_AREA_SIZE)
+ count = FF_REG_AREA_SIZE - off;
+
+ if (count == 0) return 0;
+
+ if (off % 4 || count % 4 || (unsigned long)buf % 4)
+ return -EINVAL;
+
+ spin_lock_irq(phba->host->host_lock);
+
+ for (buf_off = 0; buf_off < count; buf_off += sizeof(uint32_t)) {
+ tmp_ptr = (uint32_t *)(buf + buf_off);
+ *tmp_ptr = readl(phba->ctrl_regs_memmap_p + off + buf_off);
+ }
+
+ spin_unlock_irq(phba->host->host_lock);
+
+ return count;
+}
+
+static struct bin_attribute sysfs_ctlreg_attr = {
+ .attr = {
+ .name = "ctlreg",
+ .mode = S_IRUSR | S_IWUSR,
+ .owner = THIS_MODULE,
+ },
+ .size = 256,
+ .read = sysfs_ctlreg_read,
+ .write = sysfs_ctlreg_write,
+};
+
+
+static void
+sysfs_mbox_idle (struct lpfc_hba * phba)
+{
+ phba->sysfs_mbox.state = SMBOX_IDLE;
+ phba->sysfs_mbox.offset = 0;
+
+ if (phba->sysfs_mbox.mbox) {
+ mempool_free(phba->sysfs_mbox.mbox,
+ phba->mbox_mem_pool);
+ phba->sysfs_mbox.mbox = NULL;
+ }
+}
+
+static ssize_t
+sysfs_mbox_write(struct kobject *kobj, char *buf, loff_t off, size_t count)
+{
+ struct Scsi_Host * host =
+ class_to_shost(container_of(kobj, struct class_device, kobj));
+ struct lpfc_hba * phba = (struct lpfc_hba*)host->hostdata[0];
+ struct lpfcMboxq * mbox = NULL;
+
+ if ((count + off) > MAILBOX_CMD_SIZE)
+ return -ERANGE;
+
+ if (off % 4 || count % 4 || (unsigned long)buf % 4)
+ return -EINVAL;
+
+ if (count == 0)
+ return 0;
+
+ if (off == 0) {
+ mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!mbox)
+ return -ENOMEM;
+
+ }
+
+ spin_lock_irq(host->host_lock);
+
+ if (off == 0) {
+ if (phba->sysfs_mbox.mbox)
+ mempool_free(mbox, phba->mbox_mem_pool);
+ else
+ phba->sysfs_mbox.mbox = mbox;
+ phba->sysfs_mbox.state = SMBOX_WRITING;
+ } else {
+ if (phba->sysfs_mbox.state != SMBOX_WRITING ||
+ phba->sysfs_mbox.offset != off ||
+ phba->sysfs_mbox.mbox == NULL ) {
+ sysfs_mbox_idle(phba);
+ spin_unlock_irq(host->host_lock);
+ return -EINVAL;
+ }
+ }
+
+ memcpy((uint8_t *) & phba->sysfs_mbox.mbox->mb + off,
+ buf, count);
+
+ phba->sysfs_mbox.offset = off + count;
+
+ spin_unlock_irq(host->host_lock);
+
+ return count;
+}
+
+static ssize_t
+sysfs_mbox_read(struct kobject *kobj, char *buf, loff_t off, size_t count)
+{
+ struct Scsi_Host *host =
+ class_to_shost(container_of(kobj, struct class_device,
+ kobj));
+ struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata[0];
+ int rc;
+
+ if (off > sizeof(MAILBOX_t))
+ return -ERANGE;
+
+ if ((count + off) > sizeof(MAILBOX_t))
+ count = sizeof(MAILBOX_t) - off;
+
+ if (off % 4 || count % 4 || (unsigned long)buf % 4)
+ return -EINVAL;
+
+ if (off && count == 0)
+ return 0;
+
+ spin_lock_irq(phba->host->host_lock);
+
+ if (off == 0 &&
+ phba->sysfs_mbox.state == SMBOX_WRITING &&
+ phba->sysfs_mbox.offset >= 2 * sizeof(uint32_t)) {
+
+ switch (phba->sysfs_mbox.mbox->mb.mbxCommand) {
+ /* Offline only */
+ case MBX_WRITE_NV:
+ case MBX_INIT_LINK:
+ case MBX_DOWN_LINK:
+ case MBX_CONFIG_LINK:
+ case MBX_CONFIG_RING:
+ case MBX_RESET_RING:
+ case MBX_UNREG_LOGIN:
+ case MBX_CLEAR_LA:
+ case MBX_DUMP_CONTEXT:
+ case MBX_RUN_DIAGS:
+ case MBX_RESTART:
+ case MBX_FLASH_WR_ULA:
+ case MBX_SET_MASK:
+ case MBX_SET_SLIM:
+ case MBX_SET_DEBUG:
+ if (!(phba->fc_flag & FC_OFFLINE_MODE)) {
+ printk(KERN_WARNING "mbox_read:Command 0x%x "
+ "is illegal in on-line state\n",
+ phba->sysfs_mbox.mbox->mb.mbxCommand);
+ sysfs_mbox_idle(phba);
+ spin_unlock_irq(phba->host->host_lock);
+ return -EPERM;
+ }
+ case MBX_LOAD_SM:
+ case MBX_READ_NV:
+ case MBX_READ_CONFIG:
+ case MBX_READ_RCONFIG:
+ case MBX_READ_STATUS:
+ case MBX_READ_XRI:
+ case MBX_READ_REV:
+ case MBX_READ_LNK_STAT:
+ case MBX_DUMP_MEMORY:
+ case MBX_DOWN_LOAD:
+ case MBX_UPDATE_CFG:
+ case MBX_LOAD_AREA:
+ case MBX_LOAD_EXP_ROM:
+ break;
+ case MBX_READ_SPARM64:
+ case MBX_READ_LA:
+ case MBX_READ_LA64:
+ case MBX_REG_LOGIN:
+ case MBX_REG_LOGIN64:
+ case MBX_CONFIG_PORT:
+ case MBX_RUN_BIU_DIAG:
+ printk(KERN_WARNING "mbox_read: Illegal Command 0x%x\n",
+ phba->sysfs_mbox.mbox->mb.mbxCommand);
+ sysfs_mbox_idle(phba);
+ spin_unlock_irq(phba->host->host_lock);
+ return -EPERM;
+ default:
+ printk(KERN_WARNING "mbox_read: Unknown Command 0x%x\n",
+ phba->sysfs_mbox.mbox->mb.mbxCommand);
+ sysfs_mbox_idle(phba);
+ spin_unlock_irq(phba->host->host_lock);
+ return -EPERM;
+ }
+
+ if ((phba->fc_flag & FC_OFFLINE_MODE) ||
+ (!(phba->sli.sli_flag & LPFC_SLI2_ACTIVE))){
+
+ spin_unlock_irq(phba->host->host_lock);
+ rc = lpfc_sli_issue_mbox (phba,
+ phba->sysfs_mbox.mbox,
+ MBX_POLL);
+ spin_lock_irq(phba->host->host_lock);
+
+ } else {
+ spin_unlock_irq(phba->host->host_lock);
+ rc = lpfc_sli_issue_mbox_wait (phba,
+ phba->sysfs_mbox.mbox,
+ phba->fc_ratov * 2);
+ spin_lock_irq(phba->host->host_lock);
+ }
+
+ if (rc != MBX_SUCCESS) {
+ sysfs_mbox_idle(phba);
+ spin_unlock_irq(host->host_lock);
+ return -ENODEV;
+ }
+ phba->sysfs_mbox.state = SMBOX_READING;
+ }
+ else if (phba->sysfs_mbox.offset != off ||
+ phba->sysfs_mbox.state != SMBOX_READING) {
+ printk(KERN_WARNING "mbox_read: Bad State\n");
+ sysfs_mbox_idle(phba);
+ spin_unlock_irq(host->host_lock);
+ return -EINVAL;
+ }
+
+ memcpy(buf, (uint8_t *) & phba->sysfs_mbox.mbox->mb + off, count);
+
+ phba->sysfs_mbox.offset = off + count;
+
+ if (phba->sysfs_mbox.offset == sizeof(MAILBOX_t))
+ sysfs_mbox_idle(phba);
+
+ spin_unlock_irq(phba->host->host_lock);
+
+ return count;
+}
+
+static struct bin_attribute sysfs_mbox_attr = {
+ .attr = {
+ .name = "mbox",
+ .mode = S_IRUSR | S_IWUSR,
+ .owner = THIS_MODULE,
+ },
+ .size = sizeof(MAILBOX_t),
+ .read = sysfs_mbox_read,
+ .write = sysfs_mbox_write,
+};
+
+int
+lpfc_alloc_sysfs_attr(struct lpfc_hba *phba)
+{
+ struct Scsi_Host *host = phba->host;
+ int error;
+
+ error = sysfs_create_bin_file(&host->shost_classdev.kobj,
+ &sysfs_ctlreg_attr);
+ if (error)
+ goto out;
+
+ error = sysfs_create_bin_file(&host->shost_classdev.kobj,
+ &sysfs_mbox_attr);
+ if (error)
+ goto out_remove_ctlreg_attr;
+
+ return 0;
+out_remove_ctlreg_attr:
+ sysfs_remove_bin_file(&host->shost_classdev.kobj, &sysfs_ctlreg_attr);
+out:
+ return error;
+}
+
+void
+lpfc_free_sysfs_attr(struct lpfc_hba *phba)
+{
+ struct Scsi_Host *host = phba->host;
+
+ sysfs_remove_bin_file(&host->shost_classdev.kobj, &sysfs_mbox_attr);
+ sysfs_remove_bin_file(&host->shost_classdev.kobj, &sysfs_ctlreg_attr);
+}
+
+
+/*
+ * Dynamic FC Host Attributes Support
+ */
+
+static void
+lpfc_get_host_port_id(struct Scsi_Host *shost)
+{
+ struct lpfc_hba *phba = (struct lpfc_hba*)shost->hostdata[0];
+ /* note: fc_myDID already in cpu endianness */
+ fc_host_port_id(shost) = phba->fc_myDID;
+}
+
+static void
+lpfc_get_host_port_type(struct Scsi_Host *shost)
+{
+ struct lpfc_hba *phba = (struct lpfc_hba*)shost->hostdata[0];
+
+ spin_lock_irq(shost->host_lock);
+
+ if (phba->hba_state == LPFC_HBA_READY) {
+ if (phba->fc_topology == TOPOLOGY_LOOP) {
+ if (phba->fc_flag & FC_PUBLIC_LOOP)
+ fc_host_port_type(shost) = FC_PORTTYPE_NLPORT;
+ else
+ fc_host_port_type(shost) = FC_PORTTYPE_LPORT;
+ } else {
+ if (phba->fc_flag & FC_FABRIC)
+ fc_host_port_type(shost) = FC_PORTTYPE_NPORT;
+ else
+ fc_host_port_type(shost) = FC_PORTTYPE_PTP;
+ }
+ } else
+ fc_host_port_type(shost) = FC_PORTTYPE_UNKNOWN;
+
+ spin_unlock_irq(shost->host_lock);
+}
+
+static void
+lpfc_get_host_port_state(struct Scsi_Host *shost)
+{
+ struct lpfc_hba *phba = (struct lpfc_hba*)shost->hostdata[0];
+
+ spin_lock_irq(shost->host_lock);
+
+ if (phba->fc_flag & FC_OFFLINE_MODE)
+ fc_host_port_state(shost) = FC_PORTSTATE_OFFLINE;
+ else {
+ switch (phba->hba_state) {
+ case LPFC_INIT_START:
+ case LPFC_INIT_MBX_CMDS:
+ case LPFC_LINK_DOWN:
+ fc_host_port_state(shost) = FC_PORTSTATE_LINKDOWN;
+ break;
+ case LPFC_LINK_UP:
+ case LPFC_LOCAL_CFG_LINK:
+ case LPFC_FLOGI:
+ case LPFC_FABRIC_CFG_LINK:
+ case LPFC_NS_REG:
+ case LPFC_NS_QRY:
+ case LPFC_BUILD_DISC_LIST:
+ case LPFC_DISC_AUTH:
+ case LPFC_CLEAR_LA:
+ case LPFC_HBA_READY:
+			/* Link is up; beyond this, port_type reports the state */
+ fc_host_port_state(shost) = FC_PORTSTATE_ONLINE;
+ break;
+ case LPFC_HBA_ERROR:
+ fc_host_port_state(shost) = FC_PORTSTATE_ERROR;
+ break;
+ default:
+ fc_host_port_state(shost) = FC_PORTSTATE_UNKNOWN;
+ break;
+ }
+ }
+
+ spin_unlock_irq(shost->host_lock);
+}
+
+static void
+lpfc_get_host_speed(struct Scsi_Host *shost)
+{
+ struct lpfc_hba *phba = (struct lpfc_hba*)shost->hostdata[0];
+
+ spin_lock_irq(shost->host_lock);
+
+ if (phba->hba_state == LPFC_HBA_READY) {
+ switch(phba->fc_linkspeed) {
+ case LA_1GHZ_LINK:
+ fc_host_speed(shost) = FC_PORTSPEED_1GBIT;
+ break;
+ case LA_2GHZ_LINK:
+ fc_host_speed(shost) = FC_PORTSPEED_2GBIT;
+ break;
+ case LA_4GHZ_LINK:
+ fc_host_speed(shost) = FC_PORTSPEED_4GBIT;
+ break;
+ default:
+ fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN;
+ break;
+ }
+ }
+
+ spin_unlock_irq(shost->host_lock);
+}
+
+static void
+lpfc_get_host_fabric_name (struct Scsi_Host *shost)
+{
+ struct lpfc_hba *phba = (struct lpfc_hba*)shost->hostdata[0];
+ u64 nodename;
+
+ spin_lock_irq(shost->host_lock);
+
+ if ((phba->fc_flag & FC_FABRIC) ||
+ ((phba->fc_topology == TOPOLOGY_LOOP) &&
+ (phba->fc_flag & FC_PUBLIC_LOOP)))
+ memcpy(&nodename, &phba->fc_fabparam.nodeName, sizeof(u64));
+ else
+ /* fabric is local port if there is no F/FL_Port */
+ memcpy(&nodename, &phba->fc_nodename, sizeof(u64));
+
+ spin_unlock_irq(shost->host_lock);
+
+ fc_host_fabric_name(shost) = be64_to_cpu(nodename);
+}
+
+
+static struct fc_host_statistics *
+lpfc_get_stats(struct Scsi_Host *shost)
+{
+ struct lpfc_hba *phba = (struct lpfc_hba *)shost->hostdata[0];
+ struct lpfc_sli *psli = &phba->sli;
+ struct fc_host_statistics *hs =
+ (struct fc_host_statistics *)phba->link_stats;
+ LPFC_MBOXQ_t *pmboxq;
+ MAILBOX_t *pmb;
+ int rc=0;
+
+ pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!pmboxq)
+ return NULL;
+ memset(pmboxq, 0, sizeof (LPFC_MBOXQ_t));
+
+ pmb = &pmboxq->mb;
+ pmb->mbxCommand = MBX_READ_STATUS;
+ pmb->mbxOwner = OWN_HOST;
+ pmboxq->context1 = NULL;
+
+ if ((phba->fc_flag & FC_OFFLINE_MODE) ||
+ (!(psli->sli_flag & LPFC_SLI2_ACTIVE))){
+ rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
+ } else
+ rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
+
+ if (rc != MBX_SUCCESS) {
+ if (pmboxq) {
+ if (rc == MBX_TIMEOUT)
+ pmboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+ else
+ mempool_free( pmboxq, phba->mbox_mem_pool);
+ }
+ return NULL;
+ }
+
+ hs->tx_frames = pmb->un.varRdStatus.xmitFrameCnt;
+ hs->tx_words = (pmb->un.varRdStatus.xmitByteCnt * 256);
+ hs->rx_frames = pmb->un.varRdStatus.rcvFrameCnt;
+ hs->rx_words = (pmb->un.varRdStatus.rcvByteCnt * 256);
+
+ memset((void *)pmboxq, 0, sizeof (LPFC_MBOXQ_t));
+ pmb->mbxCommand = MBX_READ_LNK_STAT;
+ pmb->mbxOwner = OWN_HOST;
+ pmboxq->context1 = NULL;
+
+ if ((phba->fc_flag & FC_OFFLINE_MODE) ||
+ (!(psli->sli_flag & LPFC_SLI2_ACTIVE))) {
+ rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
+ } else
+ rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
+
+ if (rc != MBX_SUCCESS) {
+ if (pmboxq) {
+ if (rc == MBX_TIMEOUT)
+ pmboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+ else
+ mempool_free( pmboxq, phba->mbox_mem_pool);
+ }
+ return NULL;
+ }
+
+ hs->link_failure_count = pmb->un.varRdLnk.linkFailureCnt;
+ hs->loss_of_sync_count = pmb->un.varRdLnk.lossSyncCnt;
+ hs->loss_of_signal_count = pmb->un.varRdLnk.lossSignalCnt;
+ hs->prim_seq_protocol_err_count = pmb->un.varRdLnk.primSeqErrCnt;
+ hs->invalid_tx_word_count = pmb->un.varRdLnk.invalidXmitWord;
+ hs->invalid_crc_count = pmb->un.varRdLnk.crcCnt;
+ hs->error_frames = pmb->un.varRdLnk.crcCnt;
+
+ if (phba->fc_topology == TOPOLOGY_LOOP) {
+ hs->lip_count = (phba->fc_eventTag >> 1);
+ hs->nos_count = -1;
+ } else {
+ hs->lip_count = -1;
+ hs->nos_count = (phba->fc_eventTag >> 1);
+ }
+
+ hs->dumped_frames = -1;
+
+/* FIX ME */
+ /*hs->SecondsSinceLastReset = (jiffies - lpfc_loadtime) / HZ;*/
+
+ return hs;
+}
+
+
+/*
+ * The LPFC driver treats linkdown handling as target loss events so there
+ * are no sysfs handlers for link_down_tmo.
+ */
+static void
+lpfc_get_starget_port_id(struct scsi_target *starget)
+{
+ struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
+ struct lpfc_hba *phba = (struct lpfc_hba *) shost->hostdata[0];
+ uint32_t did = -1;
+ struct lpfc_nodelist *ndlp = NULL;
+
+ spin_lock_irq(shost->host_lock);
+ /* Search the mapped list for this target ID */
+ list_for_each_entry(ndlp, &phba->fc_nlpmap_list, nlp_listp) {
+ if (starget->id == ndlp->nlp_sid) {
+ did = ndlp->nlp_DID;
+ break;
+ }
+ }
+ spin_unlock_irq(shost->host_lock);
+
+ fc_starget_port_id(starget) = did;
+}
+
+static void
+lpfc_get_starget_node_name(struct scsi_target *starget)
+{
+ struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
+ struct lpfc_hba *phba = (struct lpfc_hba *) shost->hostdata[0];
+ uint64_t node_name = 0;
+ struct lpfc_nodelist *ndlp = NULL;
+
+ spin_lock_irq(shost->host_lock);
+ /* Search the mapped list for this target ID */
+ list_for_each_entry(ndlp, &phba->fc_nlpmap_list, nlp_listp) {
+ if (starget->id == ndlp->nlp_sid) {
+ memcpy(&node_name, &ndlp->nlp_nodename,
+ sizeof(struct lpfc_name));
+ break;
+ }
+ }
+ spin_unlock_irq(shost->host_lock);
+
+ fc_starget_node_name(starget) = be64_to_cpu(node_name);
+}
+
+static void
+lpfc_get_starget_port_name(struct scsi_target *starget)
+{
+ struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
+ struct lpfc_hba *phba = (struct lpfc_hba *) shost->hostdata[0];
+ uint64_t port_name = 0;
+ struct lpfc_nodelist *ndlp = NULL;
+
+ spin_lock_irq(shost->host_lock);
+ /* Search the mapped list for this target ID */
+ list_for_each_entry(ndlp, &phba->fc_nlpmap_list, nlp_listp) {
+ if (starget->id == ndlp->nlp_sid) {
+ memcpy(&port_name, &ndlp->nlp_portname,
+ sizeof(struct lpfc_name));
+ break;
+ }
+ }
+ spin_unlock_irq(shost->host_lock);
+
+ fc_starget_port_name(starget) = be64_to_cpu(port_name);
+}
+
+static void
+lpfc_get_rport_loss_tmo(struct fc_rport *rport)
+{
+ /*
+ * Return the driver's global value for device loss timeout plus
+ * five seconds to allow the driver's nodev timer to run.
+ */
+ rport->dev_loss_tmo = lpfc_nodev_tmo + 5;
+}
+
+static void
+lpfc_set_rport_loss_tmo(struct fc_rport *rport, uint32_t timeout)
+{
+ /*
+ * The driver doesn't have a per-target timeout setting. Set
+	 * this value globally. lpfc_nodev_tmo should be greater than 0.
+ */
+ if (timeout)
+ lpfc_nodev_tmo = timeout;
+ else
+ lpfc_nodev_tmo = 1;
+ rport->dev_loss_tmo = lpfc_nodev_tmo + 5;
+}
+
+
+#define lpfc_rport_show_function(field, format_string, sz, cast) \
+static ssize_t \
+lpfc_show_rport_##field (struct class_device *cdev, char *buf) \
+{ \
+ struct fc_rport *rport = transport_class_to_rport(cdev); \
+ struct lpfc_rport_data *rdata = rport->hostdata; \
+ return snprintf(buf, sz, format_string, \
+ (rdata->target) ? cast rdata->target->field : 0); \
+}
+
+#define lpfc_rport_rd_attr(field, format_string, sz) \
+ lpfc_rport_show_function(field, format_string, sz, ) \
+static FC_RPORT_ATTR(field, S_IRUGO, lpfc_show_rport_##field, NULL)
+
+
+struct fc_function_template lpfc_transport_functions = {
+ /* fixed attributes the driver supports */
+ .show_host_node_name = 1,
+ .show_host_port_name = 1,
+ .show_host_supported_classes = 1,
+ .show_host_supported_fc4s = 1,
+ .show_host_symbolic_name = 1,
+ .show_host_supported_speeds = 1,
+ .show_host_maxframe_size = 1,
+
+ /* dynamic attributes the driver supports */
+ .get_host_port_id = lpfc_get_host_port_id,
+ .show_host_port_id = 1,
+
+ .get_host_port_type = lpfc_get_host_port_type,
+ .show_host_port_type = 1,
+
+ .get_host_port_state = lpfc_get_host_port_state,
+ .show_host_port_state = 1,
+
+ /* active_fc4s is shown but doesn't change (thus no get function) */
+ .show_host_active_fc4s = 1,
+
+ .get_host_speed = lpfc_get_host_speed,
+ .show_host_speed = 1,
+
+ .get_host_fabric_name = lpfc_get_host_fabric_name,
+ .show_host_fabric_name = 1,
+
+ /*
+ * The LPFC driver treats linkdown handling as target loss events
+ * so there are no sysfs handlers for link_down_tmo.
+ */
+
+ .get_fc_host_stats = lpfc_get_stats,
+
+ /* the LPFC driver doesn't support resetting stats yet */
+
+ .dd_fcrport_size = sizeof(struct lpfc_rport_data),
+ .show_rport_maxframe_size = 1,
+ .show_rport_supported_classes = 1,
+
+ .get_rport_dev_loss_tmo = lpfc_get_rport_loss_tmo,
+ .set_rport_dev_loss_tmo = lpfc_set_rport_loss_tmo,
+ .show_rport_dev_loss_tmo = 1,
+
+ .get_starget_port_id = lpfc_get_starget_port_id,
+ .show_starget_port_id = 1,
+
+ .get_starget_node_name = lpfc_get_starget_node_name,
+ .show_starget_node_name = 1,
+
+ .get_starget_port_name = lpfc_get_starget_port_name,
+ .show_starget_port_name = 1,
+};
+
+void
+lpfc_get_cfgparam(struct lpfc_hba *phba)
+{
+ phba->cfg_log_verbose = lpfc_log_verbose;
+ phba->cfg_cr_delay = lpfc_cr_delay;
+ phba->cfg_cr_count = lpfc_cr_count;
+ phba->cfg_lun_queue_depth = lpfc_lun_queue_depth;
+ phba->cfg_fcp_class = lpfc_fcp_class;
+ phba->cfg_use_adisc = lpfc_use_adisc;
+ phba->cfg_ack0 = lpfc_ack0;
+ phba->cfg_topology = lpfc_topology;
+ phba->cfg_scan_down = lpfc_scan_down;
+ phba->cfg_nodev_tmo = lpfc_nodev_tmo;
+ phba->cfg_link_speed = lpfc_link_speed;
+ phba->cfg_fdmi_on = lpfc_fdmi_on;
+ phba->cfg_discovery_threads = lpfc_discovery_threads;
+ phba->cfg_max_luns = lpfc_max_luns;
+
+	/*
+	 * The total number of segments is the configuration value plus 2,
+	 * since the IOCB needs a command and a response BDE.
+	 */
+ phba->cfg_sg_seg_cnt = LPFC_SG_SEG_CNT + 2;
+
+	/*
+	 * Since sg_tablesize is a module parameter, the sg_dma_buf_size
+	 * used to create the sg_dma_buf_pool must be calculated dynamically.
+	 */
+ phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
+ sizeof(struct fcp_rsp) +
+ (phba->cfg_sg_seg_cnt * sizeof(struct ulp_bde64));
+
+ switch (phba->pcidev->device) {
+ case PCI_DEVICE_ID_LP101:
+ case PCI_DEVICE_ID_BSMB:
+ case PCI_DEVICE_ID_ZSMB:
+ phba->cfg_hba_queue_depth = LPFC_LP101_HBA_Q_DEPTH;
+ break;
+ case PCI_DEVICE_ID_RFLY:
+ case PCI_DEVICE_ID_PFLY:
+ case PCI_DEVICE_ID_BMID:
+ case PCI_DEVICE_ID_ZMID:
+ case PCI_DEVICE_ID_TFLY:
+ phba->cfg_hba_queue_depth = LPFC_LC_HBA_Q_DEPTH;
+ break;
+ default:
+ phba->cfg_hba_queue_depth = LPFC_DFT_HBA_Q_DEPTH;
+ }
+ return;
+}
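lpfc_jedec_to_ascii() at the top of lpfc_attr.c renders a 32-bit JEDEC revision as eight lower-case hex digits by peeling off one nibble per pass, least significant first, and filling the output string from the right. A stand-alone sketch of the same conversion (plain C, outside the driver; unsigned arithmetic is a deliberate substitution for the driver's int):

    #include <stdio.h>

    /* Mirror of lpfc_jedec_to_ascii(): the low nibble maps 0-9 to
     * '0'-'9' and 10-15 to 'a'-'f', stored from hdw[7] down to
     * hdw[0], then NUL-terminated. */
    static void jedec_to_ascii(unsigned int incr, char hdw[9])
    {
            int i, j;

            for (i = 0; i < 8; i++) {
                    j = incr & 0xf;
                    hdw[7 - i] = (j <= 9) ? '0' + j : 'a' + j - 10;
                    incr >>= 4;
            }
            hdw[8] = 0;
    }

    int main(void)
    {
            char buf[9];

            jedec_to_ascii(0x12345abc, buf);
            printf("%s\n", buf);    /* prints "12345abc" */
            return 0;
    }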
diff --git a/drivers/scsi/lpfc/lpfc_compat.h b/drivers/scsi/lpfc/lpfc_compat.h
new file mode 100644
index 00000000000..646649fe962
--- /dev/null
+++ b/drivers/scsi/lpfc/lpfc_compat.h
@@ -0,0 +1,97 @@
+/*******************************************************************
+ * This file is part of the Emulex Linux Device Driver for *
+ * Enterprise Fibre Channel Host Bus Adapters. *
+ * Refer to the README file included with this package for *
+ * driver version and adapter support. *
+ * Copyright (C) 2004 Emulex Corporation. *
+ * www.emulex.com *
+ * *
+ * This program is free software; you can redistribute it and/or *
+ * modify it under the terms of the GNU General Public License *
+ * as published by the Free Software Foundation; either version 2 *
+ * of the License, or (at your option) any later version. *
+ * *
+ * This program is distributed in the hope that it will be useful, *
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of *
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
+ * GNU General Public License for more details, a copy of which *
+ * can be found in the file COPYING included with this package. *
+ *******************************************************************/
+
+/*
+ * $Id: lpfc_compat.h 1.32 2005/01/25 17:51:45EST sf_support Exp $
+ *
+ * This file provides macros to aid compilation in the Linux 2.4 kernel
+ * over various platform architectures.
+ */
+
+/*******************************************************************
+Note: the HBA's SLI memory (SLIM) contains little-endian longwords (LW).
+Thus it can be accessed from a little-endian host with
+memcpy_toio() and memcpy_fromio(). On a big-endian host,
+however, copy 4 bytes at a time using writel() and readl().
+ *******************************************************************/
+
+#if __BIG_ENDIAN
+
+static inline void
+lpfc_memcpy_to_slim(void __iomem *dest, void *src, unsigned int bytes)
+{
+ uint32_t __iomem *dest32;
+ uint32_t *src32;
+ unsigned int four_bytes;
+
+
+ dest32 = (uint32_t __iomem *) dest;
+ src32 = (uint32_t *) src;
+
+ /* write input bytes, 4 bytes at a time */
+ for (four_bytes = bytes /4; four_bytes > 0; four_bytes--) {
+ writel( *src32, dest32);
+ readl(dest32); /* flush */
+ dest32++;
+ src32++;
+ }
+
+ return;
+}
+
+static inline void
+lpfc_memcpy_from_slim( void *dest, void __iomem *src, unsigned int bytes)
+{
+ uint32_t *dest32;
+ uint32_t __iomem *src32;
+ unsigned int four_bytes;
+
+
+ dest32 = (uint32_t *) dest;
+ src32 = (uint32_t __iomem *) src;
+
+ /* read input bytes, 4 bytes at a time */
+ for (four_bytes = bytes /4; four_bytes > 0; four_bytes--) {
+ *dest32 = readl( src32);
+ dest32++;
+ src32++;
+ }
+
+ return;
+}
+
+#else
+
+static inline void
+lpfc_memcpy_to_slim( void __iomem *dest, void *src, unsigned int bytes)
+{
+	/* little-endian host: SLIM data can be copied through directly */
+ memcpy_toio( dest, src, bytes);
+}
+
+static inline void
+lpfc_memcpy_from_slim( void *dest, void __iomem *src, unsigned int bytes)
+{
+	/* little-endian host: SLIM data can be copied through directly */
+ memcpy_fromio( dest, src, bytes);
+}
+
+#endif /* __BIG_ENDIAN */
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
new file mode 100644
index 00000000000..c504477a6a5
--- /dev/null
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -0,0 +1,216 @@
+/*******************************************************************
+ * This file is part of the Emulex Linux Device Driver for *
+ * Enterprise Fibre Channel Host Bus Adapters. *
+ * Refer to the README file included with this package for *
+ * driver version and adapter support. *
+ * Copyright (C) 2004 Emulex Corporation. *
+ * www.emulex.com *
+ * *
+ * This program is free software; you can redistribute it and/or *
+ * modify it under the terms of the GNU General Public License *
+ * as published by the Free Software Foundation; either version 2 *
+ * of the License, or (at your option) any later version. *
+ * *
+ * This program is distributed in the hope that it will be useful, *
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of *
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
+ * GNU General Public License for more details, a copy of which *
+ * can be found in the file COPYING included with this package. *
+ *******************************************************************/
+
+/*
+ * $Id: lpfc_crtn.h 1.166 2005/04/07 08:46:47EDT sf_support Exp $
+ */
+
+void lpfc_dump_mem(struct lpfc_hba *, LPFC_MBOXQ_t *, uint16_t);
+void lpfc_read_nv(struct lpfc_hba *, LPFC_MBOXQ_t *);
+int lpfc_read_la(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb,
+ struct lpfc_dmabuf *mp);
+void lpfc_clear_la(struct lpfc_hba *, LPFC_MBOXQ_t *);
+void lpfc_config_link(struct lpfc_hba *, LPFC_MBOXQ_t *);
+int lpfc_read_sparam(struct lpfc_hba *, LPFC_MBOXQ_t *);
+void lpfc_read_config(struct lpfc_hba *, LPFC_MBOXQ_t *);
+void lpfc_set_slim(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t, uint32_t);
+int lpfc_reg_login(struct lpfc_hba *, uint32_t, uint8_t *, LPFC_MBOXQ_t *,
+ uint32_t);
+void lpfc_unreg_login(struct lpfc_hba *, uint32_t, LPFC_MBOXQ_t *);
+void lpfc_unreg_did(struct lpfc_hba *, uint32_t, LPFC_MBOXQ_t *);
+void lpfc_init_link(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t, uint32_t);
+
+
+int lpfc_linkdown(struct lpfc_hba *);
+void lpfc_mbx_cmpl_read_la(struct lpfc_hba *, LPFC_MBOXQ_t *);
+
+void lpfc_mbx_cmpl_clear_la(struct lpfc_hba *, LPFC_MBOXQ_t *);
+void lpfc_mbx_cmpl_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *);
+void lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *);
+void lpfc_mbx_cmpl_ns_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *);
+void lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *);
+int lpfc_nlp_plogi(struct lpfc_hba *, struct lpfc_nodelist *);
+int lpfc_nlp_adisc(struct lpfc_hba *, struct lpfc_nodelist *);
+int lpfc_nlp_unmapped(struct lpfc_hba *, struct lpfc_nodelist *);
+int lpfc_nlp_list(struct lpfc_hba *, struct lpfc_nodelist *, int);
+void lpfc_set_disctmo(struct lpfc_hba *);
+int lpfc_can_disctmo(struct lpfc_hba *);
+int lpfc_unreg_rpi(struct lpfc_hba *, struct lpfc_nodelist *);
+int lpfc_check_sli_ndlp(struct lpfc_hba *, struct lpfc_sli_ring *,
+ struct lpfc_iocbq *, struct lpfc_nodelist *);
+int lpfc_nlp_remove(struct lpfc_hba *, struct lpfc_nodelist *);
+void lpfc_nlp_init(struct lpfc_hba *, struct lpfc_nodelist *, uint32_t);
+struct lpfc_nodelist *lpfc_setup_disc_node(struct lpfc_hba *, uint32_t);
+struct lpfc_nodelist *lpfc_setup_rscn_node(struct lpfc_hba *, uint32_t);
+void lpfc_disc_list_loopmap(struct lpfc_hba *);
+void lpfc_disc_start(struct lpfc_hba *);
+void lpfc_disc_flush_list(struct lpfc_hba *);
+void lpfc_disc_timeout(unsigned long);
+void lpfc_scan_timeout(unsigned long);
+
+struct lpfc_nodelist *lpfc_findnode_rpi(struct lpfc_hba * phba, uint16_t rpi);
+struct lpfc_nodelist *lpfc_findnode_remove_rpi(struct lpfc_hba * phba,
+ uint16_t rpi);
+void lpfc_addnode_rpi(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp,
+ uint16_t rpi);
+
+int lpfc_workq_post_event(struct lpfc_hba *, void *, void *, uint32_t);
+int lpfc_do_work(void *);
+int lpfc_disc_state_machine(struct lpfc_hba *, struct lpfc_nodelist *, void *,
+ uint32_t);
+
+uint32_t lpfc_cmpl_prli_reglogin_issue(struct lpfc_hba *,
+ struct lpfc_nodelist *, void *,
+ uint32_t);
+uint32_t lpfc_cmpl_plogi_prli_issue(struct lpfc_hba *, struct lpfc_nodelist *,
+ void *, uint32_t);
+
+int lpfc_check_sparm(struct lpfc_hba *, struct lpfc_nodelist *,
+ struct serv_parm *, uint32_t);
+int lpfc_els_abort(struct lpfc_hba *, struct lpfc_nodelist * ndlp,
+ int);
+int lpfc_els_abort_flogi(struct lpfc_hba *);
+int lpfc_initial_flogi(struct lpfc_hba *);
+int lpfc_issue_els_plogi(struct lpfc_hba *, struct lpfc_nodelist *, uint8_t);
+int lpfc_issue_els_prli(struct lpfc_hba *, struct lpfc_nodelist *, uint8_t);
+int lpfc_issue_els_adisc(struct lpfc_hba *, struct lpfc_nodelist *, uint8_t);
+int lpfc_issue_els_logo(struct lpfc_hba *, struct lpfc_nodelist *, uint8_t);
+int lpfc_issue_els_scr(struct lpfc_hba *, uint32_t, uint8_t);
+int lpfc_els_free_iocb(struct lpfc_hba *, struct lpfc_iocbq *);
+int lpfc_els_rsp_acc(struct lpfc_hba *, uint32_t, struct lpfc_iocbq *,
+ struct lpfc_nodelist *, LPFC_MBOXQ_t *, uint8_t);
+int lpfc_els_rsp_reject(struct lpfc_hba *, uint32_t, struct lpfc_iocbq *,
+ struct lpfc_nodelist *);
+int lpfc_els_rsp_adisc_acc(struct lpfc_hba *, struct lpfc_iocbq *,
+ struct lpfc_nodelist *);
+int lpfc_els_rsp_prli_acc(struct lpfc_hba *, struct lpfc_iocbq *,
+ struct lpfc_nodelist *);
+void lpfc_els_retry_delay(unsigned long);
+void lpfc_els_retry_delay_handler(struct lpfc_nodelist *);
+void lpfc_els_unsol_event(struct lpfc_hba *, struct lpfc_sli_ring *,
+ struct lpfc_iocbq *);
+int lpfc_els_handle_rscn(struct lpfc_hba *);
+int lpfc_els_flush_rscn(struct lpfc_hba *);
+int lpfc_rscn_payload_check(struct lpfc_hba *, uint32_t);
+void lpfc_els_flush_cmd(struct lpfc_hba *);
+int lpfc_els_disc_adisc(struct lpfc_hba *);
+int lpfc_els_disc_plogi(struct lpfc_hba *);
+void lpfc_els_timeout(unsigned long);
+void lpfc_els_timeout_handler(struct lpfc_hba *);
+
+void lpfc_ct_unsol_event(struct lpfc_hba *, struct lpfc_sli_ring *,
+ struct lpfc_iocbq *);
+int lpfc_ns_cmd(struct lpfc_hba *, struct lpfc_nodelist *, int);
+int lpfc_fdmi_cmd(struct lpfc_hba *, struct lpfc_nodelist *, int);
+void lpfc_fdmi_tmo(unsigned long);
+void lpfc_fdmi_tmo_handler(struct lpfc_hba *);
+
+int lpfc_config_port_prep(struct lpfc_hba *);
+int lpfc_config_port_post(struct lpfc_hba *);
+int lpfc_hba_down_prep(struct lpfc_hba *);
+void lpfc_hba_init(struct lpfc_hba *, uint32_t *);
+int lpfc_post_buffer(struct lpfc_hba *, struct lpfc_sli_ring *, int, int);
+void lpfc_decode_firmware_rev(struct lpfc_hba *, char *, int);
+uint8_t *lpfc_get_lpfchba_info(struct lpfc_hba *, uint8_t *);
+int lpfc_fcp_abort(struct lpfc_hba *, int, int, int);
+int lpfc_online(struct lpfc_hba *);
+int lpfc_offline(struct lpfc_hba *);
+
+
+int lpfc_sli_setup(struct lpfc_hba *);
+int lpfc_sli_queue_setup(struct lpfc_hba *);
+void lpfc_slim_access(struct lpfc_hba *);
+
+void lpfc_handle_eratt(struct lpfc_hba *);
+void lpfc_handle_latt(struct lpfc_hba *);
+irqreturn_t lpfc_intr_handler(int, void *, struct pt_regs *);
+
+void lpfc_read_rev(struct lpfc_hba *, LPFC_MBOXQ_t *);
+void lpfc_config_ring(struct lpfc_hba *, int, LPFC_MBOXQ_t *);
+void lpfc_config_port(struct lpfc_hba *, LPFC_MBOXQ_t *);
+void lpfc_mbox_put(struct lpfc_hba *, LPFC_MBOXQ_t *);
+LPFC_MBOXQ_t *lpfc_mbox_get(struct lpfc_hba *);
+
+int lpfc_mem_alloc(struct lpfc_hba *);
+void lpfc_mem_free(struct lpfc_hba *);
+
+int lpfc_sli_hba_setup(struct lpfc_hba *);
+int lpfc_sli_hba_down(struct lpfc_hba *);
+int lpfc_sli_issue_mbox(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t);
+int lpfc_sli_handle_mb_event(struct lpfc_hba *);
+int lpfc_sli_handle_slow_ring_event(struct lpfc_hba *,
+ struct lpfc_sli_ring *, uint32_t);
+void lpfc_sli_def_mbox_cmpl(struct lpfc_hba *, LPFC_MBOXQ_t *);
+int lpfc_sli_issue_iocb(struct lpfc_hba *, struct lpfc_sli_ring *,
+ struct lpfc_iocbq *, uint32_t);
+void lpfc_sli_pcimem_bcopy(void *, void *, uint32_t);
+int lpfc_sli_abort_iocb_ring(struct lpfc_hba *, struct lpfc_sli_ring *);
+int lpfc_sli_ringpostbuf_put(struct lpfc_hba *, struct lpfc_sli_ring *,
+ struct lpfc_dmabuf *);
+struct lpfc_dmabuf *lpfc_sli_ringpostbuf_get(struct lpfc_hba *,
+ struct lpfc_sli_ring *,
+ dma_addr_t);
+int lpfc_sli_issue_abort_iotag32(struct lpfc_hba *, struct lpfc_sli_ring *,
+ struct lpfc_iocbq *);
+int lpfc_sli_sum_iocb(struct lpfc_hba *, struct lpfc_sli_ring *, uint16_t,
+ uint64_t, lpfc_ctx_cmd);
+int lpfc_sli_abort_iocb(struct lpfc_hba *, struct lpfc_sli_ring *, uint16_t,
+ uint64_t, uint32_t, lpfc_ctx_cmd);
+
+void lpfc_mbox_timeout(unsigned long);
+void lpfc_mbox_timeout_handler(struct lpfc_hba *);
+void lpfc_map_fcp_cmnd_to_bpl(struct lpfc_hba *, struct lpfc_scsi_buf *);
+void lpfc_free_scsi_cmd(struct lpfc_scsi_buf *);
+uint32_t lpfc_os_timeout_transform(struct lpfc_hba *, uint32_t);
+
+struct lpfc_nodelist *lpfc_findnode_did(struct lpfc_hba * phba, uint32_t order,
+ uint32_t did);
+
+int lpfc_sli_issue_mbox_wait(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq,
+ uint32_t timeout);
+
+int lpfc_sli_issue_iocb_wait_high_priority(struct lpfc_hba * phba,
+ struct lpfc_sli_ring * pring,
+ struct lpfc_iocbq * piocb,
+ uint32_t flag,
+ struct lpfc_iocbq * prspiocbq,
+ uint32_t timeout);
+void lpfc_sli_wake_iocb_high_priority(struct lpfc_hba * phba,
+ struct lpfc_iocbq * queue1,
+ struct lpfc_iocbq * queue2);
+
+void *lpfc_mbuf_alloc(struct lpfc_hba *, int, dma_addr_t *);
+void lpfc_mbuf_free(struct lpfc_hba *, void *, dma_addr_t);
+
+/* Function prototypes. */
+const char* lpfc_info(struct Scsi_Host *);
+void lpfc_get_cfgparam(struct lpfc_hba *);
+int lpfc_alloc_sysfs_attr(struct lpfc_hba *);
+void lpfc_free_sysfs_attr(struct lpfc_hba *);
+extern struct class_device_attribute *lpfc_host_attrs[];
+extern struct scsi_host_template lpfc_template;
+extern struct fc_function_template lpfc_transport_functions;
+
+void lpfc_get_hba_sym_node_name(struct lpfc_hba * phba, uint8_t * symbp);
+
+#define ScsiResult(host_code, scsi_code) (((host_code) << 16) | scsi_code)
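+/* ScsiResult() packs the midlayer host byte and the SCSI status byte
+ * into the scsi_cmnd->result format, e.g. ScsiResult(DID_ERROR, 0)
+ * evaluates to 0x00070000.
+ */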
+#define HBA_EVENT_RSCN 5
+#define HBA_EVENT_LINK_UP 2
+#define HBA_EVENT_LINK_DOWN 3
diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c
new file mode 100644
index 00000000000..c40cb239c16
--- /dev/null
+++ b/drivers/scsi/lpfc/lpfc_ct.c
@@ -0,0 +1,1237 @@
+/*******************************************************************
+ * This file is part of the Emulex Linux Device Driver for *
+ * Enterprise Fibre Channel Host Bus Adapters. *
+ * Refer to the README file included with this package for *
+ * driver version and adapter support. *
+ * Copyright (C) 2004 Emulex Corporation. *
+ * www.emulex.com *
+ * *
+ * This program is free software; you can redistribute it and/or *
+ * modify it under the terms of the GNU General Public License *
+ * as published by the Free Software Foundation; either version 2 *
+ * of the License, or (at your option) any later version. *
+ * *
+ * This program is distributed in the hope that it will be useful, *
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of *
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
+ * GNU General Public License for more details, a copy of which *
+ * can be found in the file COPYING included with this package. *
+ *******************************************************************/
+
+/*
+ * $Id: lpfc_ct.c 1.161 2005/04/13 11:59:01EDT sf_support Exp $
+ *
+ * Fibre Channel SCSI LAN Device Driver CT support
+ */
+
+#include <linux/blkdev.h>
+#include <linux/pci.h>
+#include <linux/interrupt.h>
+#include <linux/utsname.h>
+
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_host.h>
+
+#include "lpfc_hw.h"
+#include "lpfc_sli.h"
+#include "lpfc_disc.h"
+#include "lpfc_scsi.h"
+#include "lpfc.h"
+#include "lpfc_logmsg.h"
+#include "lpfc_crtn.h"
+#include "lpfc_version.h"
+
+#define HBA_PORTSPEED_UNKNOWN 0 /* Unknown - transceiver
+ * incapable of reporting */
+#define HBA_PORTSPEED_1GBIT 1 /* 1 GBit/sec */
+#define HBA_PORTSPEED_2GBIT 2 /* 2 GBit/sec */
+#define HBA_PORTSPEED_4GBIT 8 /* 4 GBit/sec */
+#define HBA_PORTSPEED_8GBIT 16 /* 8 GBit/sec */
+#define HBA_PORTSPEED_10GBIT 4 /* 10 GBit/sec */
+#define HBA_PORTSPEED_NOT_NEGOTIATED 5 /* Speed not established */
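+/* Note: these values follow the FC-HBA / FDMI port-speed encoding
+ * (1 = 1Gb, 2 = 2Gb, 4 = 10Gb, 8 = 4Gb, 16 = 8Gb), which is why they
+ * are not in numeric order.
+ */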
+
+#define FOURBYTES 4
+
+
+static char *lpfc_release_version = LPFC_DRIVER_VERSION;
+
+/*
+ * lpfc_ct_unsol_event
+ * Handle an unsolicited CT IOCB received on the ELS ring: reclaim the
+ * posted receive buffers it consumed and repost replacements.
+ */
+void
+lpfc_ct_unsol_event(struct lpfc_hba * phba,
+ struct lpfc_sli_ring * pring, struct lpfc_iocbq * piocbq)
+{
+
+ struct lpfc_iocbq *next_piocbq;
+ struct lpfc_dmabuf *pmbuf = NULL;
+ struct lpfc_dmabuf *matp, *next_matp;
+ uint32_t ctx = 0, size = 0, cnt = 0;
+ IOCB_t *icmd = &piocbq->iocb;
+ IOCB_t *save_icmd = icmd;
+ int i, go_exit = 0;
+ struct list_head head;
+
+ if ((icmd->ulpStatus == IOSTAT_LOCAL_REJECT) &&
+ ((icmd->un.ulpWord[4] & 0xff) == IOERR_RCV_BUFFER_WAITING)) {
+ /* Not enough posted buffers; Try posting more buffers */
+ phba->fc_stat.NoRcvBuf++;
+ lpfc_post_buffer(phba, pring, 0, 1);
+ return;
+ }
+
+ /* If there are no BDEs associated with this IOCB,
+ * there is nothing to do.
+ */
+ if (icmd->ulpBdeCount == 0)
+ return;
+
+ INIT_LIST_HEAD(&head);
+ list_add_tail(&head, &piocbq->list);
+
+ list_for_each_entry_safe(piocbq, next_piocbq, &head, list) {
+ icmd = &piocbq->iocb;
+ if (ctx == 0)
+ ctx = (uint32_t) (icmd->ulpContext);
+ if (icmd->ulpBdeCount == 0)
+ continue;
+
+ for (i = 0; i < icmd->ulpBdeCount; i++) {
+ matp = lpfc_sli_ringpostbuf_get(phba, pring,
+ getPaddr(icmd->un.
+ cont64[i].
+ addrHigh,
+ icmd->un.
+ cont64[i].
+ addrLow));
+ if (!matp) {
+ /* Insert lpfc log message here */
+ lpfc_post_buffer(phba, pring, cnt, 1);
+ go_exit = 1;
+ goto ct_unsol_event_exit_piocbq;
+ }
+
+ /* Typically for Unsolicited CT requests */
+ if (!pmbuf) {
+ pmbuf = matp;
+ INIT_LIST_HEAD(&pmbuf->list);
+ } else
+ list_add_tail(&matp->list, &pmbuf->list);
+
+ size += icmd->un.cont64[i].tus.f.bdeSize;
+ cnt++;
+ }
+
+ icmd->ulpBdeCount = 0;
+ }
+
+ lpfc_post_buffer(phba, pring, cnt, 1);
+ if (save_icmd->ulpStatus) {
+ go_exit = 1;
+ }
+
+ct_unsol_event_exit_piocbq:
+ if (pmbuf) {
+ list_for_each_entry_safe(matp, next_matp, &pmbuf->list, list) {
+ lpfc_mbuf_free(phba, matp->virt, matp->phys);
+ list_del(&matp->list);
+ kfree(matp);
+ }
+ lpfc_mbuf_free(phba, pmbuf->virt, pmbuf->phys);
+ kfree(pmbuf);
+ }
+ return;
+}
+
+static void
+lpfc_free_ct_rsp(struct lpfc_hba * phba, struct lpfc_dmabuf * mlist)
+{
+ struct lpfc_dmabuf *mlast, *next_mlast;
+
+ list_for_each_entry_safe(mlast, next_mlast, &mlist->list, list) {
+ lpfc_mbuf_free(phba, mlast->virt, mlast->phys);
+ list_del(&mlast->list);
+ kfree(mlast);
+ }
+ lpfc_mbuf_free(phba, mlist->virt, mlist->phys);
+ kfree(mlist);
+ return;
+}
+
+static struct lpfc_dmabuf *
+lpfc_alloc_ct_rsp(struct lpfc_hba * phba, int cmdcode, struct ulp_bde64 * bpl,
+ uint32_t size, int *entries)
+{
+ struct lpfc_dmabuf *mlist = NULL;
+ struct lpfc_dmabuf *mp;
+ int cnt, i = 0;
+
+	/* We get chunks of FCELSSIZE */
+ cnt = size > FCELSSIZE ? FCELSSIZE: size;
+
+ while (size) {
+ /* Allocate buffer for rsp payload */
+ mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
+ if (!mp) {
+ if (mlist)
+ lpfc_free_ct_rsp(phba, mlist);
+ return NULL;
+ }
+
+ INIT_LIST_HEAD(&mp->list);
+
+ if (cmdcode == be16_to_cpu(SLI_CTNS_GID_FT))
+ mp->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &(mp->phys));
+ else
+ mp->virt = lpfc_mbuf_alloc(phba, 0, &(mp->phys));
+
+ if (!mp->virt) {
+ kfree(mp);
+ lpfc_free_ct_rsp(phba, mlist);
+ return NULL;
+ }
+
+ /* Queue it to a linked list */
+ if (!mlist)
+ mlist = mp;
+ else
+ list_add_tail(&mp->list, &mlist->list);
+
+ bpl->tus.f.bdeFlags = BUFF_USE_RCV;
+ /* build buffer ptr list for IOCB */
+ bpl->addrLow = le32_to_cpu( putPaddrLow(mp->phys) );
+ bpl->addrHigh = le32_to_cpu( putPaddrHigh(mp->phys) );
+ bpl->tus.f.bdeSize = (uint16_t) cnt;
+ bpl->tus.w = le32_to_cpu(bpl->tus.w);
+ bpl++;
+
+ i++;
+ size -= cnt;
+ }
+
+ *entries = i;
+ return mlist;
+}
+
+static int
+lpfc_gen_req(struct lpfc_hba *phba, struct lpfc_dmabuf *bmp,
+ struct lpfc_dmabuf *inp, struct lpfc_dmabuf *outp,
+ void (*cmpl) (struct lpfc_hba *, struct lpfc_iocbq *,
+ struct lpfc_iocbq *),
+ struct lpfc_nodelist *ndlp, uint32_t usr_flg, uint32_t num_entry,
+ uint32_t tmo)
+{
+
+ struct lpfc_sli *psli = &phba->sli;
+ struct lpfc_sli_ring *pring = &psli->ring[LPFC_ELS_RING];
+ struct list_head *lpfc_iocb_list = &phba->lpfc_iocb_list;
+ IOCB_t *icmd;
+ struct lpfc_iocbq *geniocb = NULL;
+
+ /* Allocate buffer for command iocb */
+ spin_lock_irq(phba->host->host_lock);
+ list_remove_head(lpfc_iocb_list, geniocb, struct lpfc_iocbq, list);
+ spin_unlock_irq(phba->host->host_lock);
+
+ if (geniocb == NULL)
+ return 1;
+ memset(geniocb, 0, sizeof (struct lpfc_iocbq));
+
+ icmd = &geniocb->iocb;
+ icmd->un.genreq64.bdl.ulpIoTag32 = 0;
+ icmd->un.genreq64.bdl.addrHigh = putPaddrHigh(bmp->phys);
+ icmd->un.genreq64.bdl.addrLow = putPaddrLow(bmp->phys);
+ icmd->un.genreq64.bdl.bdeFlags = BUFF_TYPE_BDL;
+ icmd->un.genreq64.bdl.bdeSize = (num_entry * sizeof (struct ulp_bde64));
+
+ if (usr_flg)
+ geniocb->context3 = NULL;
+ else
+ geniocb->context3 = (uint8_t *) bmp;
+
+ /* Save for completion so we can release these resources */
+ geniocb->context1 = (uint8_t *) inp;
+ geniocb->context2 = (uint8_t *) outp;
+
+	/* Fill in the command word of the IOCB */
+ icmd->ulpCommand = CMD_GEN_REQUEST64_CR;
+
+ /* Fill in rest of iocb */
+ icmd->un.genreq64.w5.hcsw.Fctl = (SI | LA);
+ icmd->un.genreq64.w5.hcsw.Dfctl = 0;
+ icmd->un.genreq64.w5.hcsw.Rctl = FC_UNSOL_CTL;
+ icmd->un.genreq64.w5.hcsw.Type = FC_COMMON_TRANSPORT_ULP;
+
+ if (!tmo)
+ tmo = (2 * phba->fc_ratov) + 1;
+ icmd->ulpTimeout = tmo;
+ icmd->ulpBdeCount = 1;
+ icmd->ulpLe = 1;
+ icmd->ulpClass = CLASS3;
+ icmd->ulpContext = ndlp->nlp_rpi;
+
+ /* Issue GEN REQ IOCB for NPORT <did> */
+ lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
+ "%d:0119 Issue GEN REQ IOCB for NPORT x%x "
+ "Data: x%x x%x\n", phba->brd_no, icmd->un.ulpWord[5],
+ icmd->ulpIoTag, phba->hba_state);
+ geniocb->iocb_cmpl = cmpl;
+ geniocb->drvrTimeout = icmd->ulpTimeout + LPFC_DRVR_TIMEOUT;
+ spin_lock_irq(phba->host->host_lock);
+ if (lpfc_sli_issue_iocb(phba, pring, geniocb, 0) == IOCB_ERROR) {
+ list_add_tail(&geniocb->list, lpfc_iocb_list);
+ spin_unlock_irq(phba->host->host_lock);
+ return 1;
+ }
+ spin_unlock_irq(phba->host->host_lock);
+
+ return 0;
+}
+
+static int
+lpfc_ct_cmd(struct lpfc_hba *phba, struct lpfc_dmabuf *inmp,
+ struct lpfc_dmabuf *bmp, struct lpfc_nodelist *ndlp,
+ void (*cmpl) (struct lpfc_hba *, struct lpfc_iocbq *,
+ struct lpfc_iocbq *),
+ uint32_t rsp_size)
+{
+ struct ulp_bde64 *bpl = (struct ulp_bde64 *) bmp->virt;
+ struct lpfc_dmabuf *outmp;
+ int cnt = 0, status;
+ int cmdcode = ((struct lpfc_sli_ct_request *) inmp->virt)->
+ CommandResponse.bits.CmdRsp;
+
+ bpl++; /* Skip past ct request */
+
+ /* Put buffer(s) for ct rsp in bpl */
+ outmp = lpfc_alloc_ct_rsp(phba, cmdcode, bpl, rsp_size, &cnt);
+ if (!outmp)
+ return -ENOMEM;
+
+ status = lpfc_gen_req(phba, bmp, inmp, outmp, cmpl, ndlp, 0,
+ cnt+1, 0);
+ if (status) {
+ lpfc_free_ct_rsp(phba, outmp);
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+static int
+lpfc_ns_rsp(struct lpfc_hba * phba, struct lpfc_dmabuf * mp, uint32_t Size)
+{
+ struct lpfc_sli_ct_request *Response =
+ (struct lpfc_sli_ct_request *) mp->virt;
+ struct lpfc_nodelist *ndlp = NULL;
+ struct lpfc_dmabuf *mlast, *next_mp;
+ uint32_t *ctptr = (uint32_t *) & Response->un.gid.PortType;
+ uint32_t Did;
+ uint32_t CTentry;
+ int Cnt;
+ struct list_head head;
+
+ lpfc_set_disctmo(phba);
+
+ Cnt = Size > FCELSSIZE ? FCELSSIZE : Size;
+
+ list_add_tail(&head, &mp->list);
+ list_for_each_entry_safe(mp, next_mp, &head, list) {
+ mlast = mp;
+
+ Size -= Cnt;
+
+ if (!ctptr)
+ ctptr = (uint32_t *) mlast->virt;
+ else
+ Cnt -= 16; /* subtract length of CT header */
+
+		/* Loop through the entire NameServer list of DIDs.
+		 * Each GID_FT entry is one 4-byte word: a control byte
+		 * (0x80 flags the last entry) followed by the 3-byte D_ID.
+		 */
+ while (Cnt) {
+
+ /* Get next DID from NameServer List */
+ CTentry = *ctptr++;
+ Did = ((be32_to_cpu(CTentry)) & Mask_DID);
+
+ ndlp = NULL;
+ if (Did != phba->fc_myDID) {
+ /* Check for rscn processing or not */
+ ndlp = lpfc_setup_disc_node(phba, Did);
+ }
+ /* Mark all node table entries that are in the
+ Nameserver */
+ if (ndlp) {
+ /* NameServer Rsp */
+ lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
+ "%d:0238 Process x%x NameServer"
+ " Rsp Data: x%x x%x x%x\n",
+ phba->brd_no,
+ Did, ndlp->nlp_flag,
+ phba->fc_flag,
+ phba->fc_rscn_id_cnt);
+ } else {
+ /* NameServer Rsp */
+ lpfc_printf_log(phba,
+ KERN_INFO,
+ LOG_DISCOVERY,
+ "%d:0239 Skip x%x NameServer "
+ "Rsp Data: x%x x%x x%x\n",
+ phba->brd_no,
+ Did, Size, phba->fc_flag,
+ phba->fc_rscn_id_cnt);
+ }
+
+ if (CTentry & (be32_to_cpu(SLI_CT_LAST_ENTRY)))
+ goto nsout1;
+ Cnt -= sizeof (uint32_t);
+ }
+ ctptr = NULL;
+
+ }
+
+nsout1:
+ list_del(&head);
+
+	/* If the HBA is already ready, this query was RSCN-driven; finish it */
+ if (phba->hba_state == LPFC_HBA_READY) {
+ lpfc_els_flush_rscn(phba);
+ spin_lock_irq(phba->host->host_lock);
+ phba->fc_flag |= FC_RSCN_MODE; /* we are still in RSCN mode */
+ spin_unlock_irq(phba->host->host_lock);
+ }
+ return 0;
+}
+
+
+
+
+static void
+lpfc_cmpl_ct_cmd_gid_ft(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
+ struct lpfc_iocbq * rspiocb)
+{
+ IOCB_t *irsp;
+ struct lpfc_sli *psli;
+ struct lpfc_dmabuf *bmp;
+ struct lpfc_dmabuf *inp;
+ struct lpfc_dmabuf *outp;
+ struct lpfc_nodelist *ndlp;
+ struct lpfc_sli_ct_request *CTrsp;
+
+ psli = &phba->sli;
+ /* we pass cmdiocb to state machine which needs rspiocb as well */
+ cmdiocb->context_un.rsp_iocb = rspiocb;
+
+ inp = (struct lpfc_dmabuf *) cmdiocb->context1;
+ outp = (struct lpfc_dmabuf *) cmdiocb->context2;
+ bmp = (struct lpfc_dmabuf *) cmdiocb->context3;
+
+ irsp = &rspiocb->iocb;
+ if (irsp->ulpStatus) {
+ if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
+ ((irsp->un.ulpWord[4] == IOERR_SLI_DOWN) ||
+ (irsp->un.ulpWord[4] == IOERR_SLI_ABORTED))) {
+ goto out;
+ }
+
+ /* Check for retry */
+ if (phba->fc_ns_retry < LPFC_MAX_NS_RETRY) {
+ phba->fc_ns_retry++;
+ /* CT command is being retried */
+ ndlp =
+ lpfc_findnode_did(phba, NLP_SEARCH_UNMAPPED,
+ NameServer_DID);
+ if (ndlp) {
+ if (lpfc_ns_cmd(phba, ndlp, SLI_CTNS_GID_FT) ==
+ 0) {
+ goto out;
+ }
+ }
+ }
+ } else {
+ /* Good status, continue checking */
+ CTrsp = (struct lpfc_sli_ct_request *) outp->virt;
+ if (CTrsp->CommandResponse.bits.CmdRsp ==
+ be16_to_cpu(SLI_CT_RESPONSE_FS_ACC)) {
+ lpfc_ns_rsp(phba, outp,
+ (uint32_t) (irsp->un.genreq64.bdl.bdeSize));
+ } else if (CTrsp->CommandResponse.bits.CmdRsp ==
+ be16_to_cpu(SLI_CT_RESPONSE_FS_RJT)) {
+ /* NameServer Rsp Error */
+ lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
+ "%d:0240 NameServer Rsp Error "
+ "Data: x%x x%x x%x x%x\n",
+ phba->brd_no,
+ CTrsp->CommandResponse.bits.CmdRsp,
+ (uint32_t) CTrsp->ReasonCode,
+ (uint32_t) CTrsp->Explanation,
+ phba->fc_flag);
+ } else {
+ /* NameServer Rsp Error */
+ lpfc_printf_log(phba,
+ KERN_INFO,
+ LOG_DISCOVERY,
+ "%d:0241 NameServer Rsp Error "
+ "Data: x%x x%x x%x x%x\n",
+ phba->brd_no,
+ CTrsp->CommandResponse.bits.CmdRsp,
+ (uint32_t) CTrsp->ReasonCode,
+ (uint32_t) CTrsp->Explanation,
+ phba->fc_flag);
+ }
+ }
+ /* Link up / RSCN discovery */
+ lpfc_disc_start(phba);
+out:
+ lpfc_free_ct_rsp(phba, outp);
+ lpfc_mbuf_free(phba, inp->virt, inp->phys);
+ lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
+ kfree(inp);
+ kfree(bmp);
+ spin_lock_irq(phba->host->host_lock);
+ list_add_tail(&cmdiocb->list, &phba->lpfc_iocb_list);
+ spin_unlock_irq(phba->host->host_lock);
+ return;
+}
+
+static void
+lpfc_cmpl_ct_cmd_rft_id(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
+ struct lpfc_iocbq * rspiocb)
+{
+ struct lpfc_sli *psli;
+ struct lpfc_dmabuf *bmp;
+ struct lpfc_dmabuf *inp;
+ struct lpfc_dmabuf *outp;
+ IOCB_t *irsp;
+ struct lpfc_sli_ct_request *CTrsp;
+
+ psli = &phba->sli;
+ /* we pass cmdiocb to state machine which needs rspiocb as well */
+ cmdiocb->context_un.rsp_iocb = rspiocb;
+
+ inp = (struct lpfc_dmabuf *) cmdiocb->context1;
+ outp = (struct lpfc_dmabuf *) cmdiocb->context2;
+ bmp = (struct lpfc_dmabuf *) cmdiocb->context3;
+ irsp = &rspiocb->iocb;
+
+ CTrsp = (struct lpfc_sli_ct_request *) outp->virt;
+
+ /* RFT request completes status <ulpStatus> CmdRsp <CmdRsp> */
+ lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
+ "%d:0209 RFT request completes ulpStatus x%x "
+ "CmdRsp x%x\n", phba->brd_no, irsp->ulpStatus,
+ CTrsp->CommandResponse.bits.CmdRsp);
+
+ lpfc_free_ct_rsp(phba, outp);
+ lpfc_mbuf_free(phba, inp->virt, inp->phys);
+ lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
+ kfree(inp);
+ kfree(bmp);
+ spin_lock_irq(phba->host->host_lock);
+ list_add_tail(&cmdiocb->list, &phba->lpfc_iocb_list);
+ spin_unlock_irq(phba->host->host_lock);
+ return;
+}
+
+static void
+lpfc_cmpl_ct_cmd_rnn_id(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
+ struct lpfc_iocbq * rspiocb)
+{
+ lpfc_cmpl_ct_cmd_rft_id(phba, cmdiocb, rspiocb);
+ return;
+}
+
+static void
+lpfc_cmpl_ct_cmd_rsnn_nn(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
+ struct lpfc_iocbq * rspiocb)
+{
+ lpfc_cmpl_ct_cmd_rft_id(phba, cmdiocb, rspiocb);
+ return;
+}
+
+void
+lpfc_get_hba_sym_node_name(struct lpfc_hba * phba, uint8_t * symbp)
+{
+ char fwrev[16];
+
+ lpfc_decode_firmware_rev(phba, fwrev, 0);
+
+ if (phba->Port[0]) {
+ sprintf(symbp, "Emulex %s Port %s FV%s DV%s", phba->ModelName,
+ phba->Port, fwrev, lpfc_release_version);
+ } else {
+ sprintf(symbp, "Emulex %s FV%s DV%s", phba->ModelName,
+ fwrev, lpfc_release_version);
+ }
+}
+
+/*
+ * lpfc_ns_cmd
+ * Description:
+ *    Issue Cmd to NameServer
+ *       SLI_CTNS_GID_FT
+ *       SLI_CTNS_RFT_ID
+ *       SLI_CTNS_RNN_ID
+ *       SLI_CTNS_RSNN_NN
+ */
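+/*
+ * Typical usage (illustrative): once the fabric name server node is
+ * logged in, the driver registers its FC4 type and then queries for
+ * FCP ports, e.g.
+ *
+ *	lpfc_ns_cmd(phba, ndlp, SLI_CTNS_RFT_ID);
+ *	lpfc_ns_cmd(phba, ndlp, SLI_CTNS_GID_FT);
+ *
+ * The completion handlers above then drive the rest of discovery.
+ */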
+int
+lpfc_ns_cmd(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp, int cmdcode)
+{
+ struct lpfc_dmabuf *mp, *bmp;
+ struct lpfc_sli_ct_request *CtReq;
+ struct ulp_bde64 *bpl;
+ void (*cmpl) (struct lpfc_hba *, struct lpfc_iocbq *,
+ struct lpfc_iocbq *) = NULL;
+ uint32_t rsp_size = 1024;
+
+ /* fill in BDEs for command */
+ /* Allocate buffer for command payload */
+ mp = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
+ if (!mp)
+ goto ns_cmd_exit;
+
+ INIT_LIST_HEAD(&mp->list);
+ mp->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &(mp->phys));
+ if (!mp->virt)
+ goto ns_cmd_free_mp;
+
+ /* Allocate buffer for Buffer ptr list */
+ bmp = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
+ if (!bmp)
+ goto ns_cmd_free_mpvirt;
+
+ INIT_LIST_HEAD(&bmp->list);
+ bmp->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &(bmp->phys));
+ if (!bmp->virt)
+ goto ns_cmd_free_bmp;
+
+ /* NameServer Req */
+ lpfc_printf_log(phba,
+ KERN_INFO,
+ LOG_DISCOVERY,
+ "%d:0236 NameServer Req Data: x%x x%x x%x\n",
+ phba->brd_no, cmdcode, phba->fc_flag,
+ phba->fc_rscn_id_cnt);
+
+ bpl = (struct ulp_bde64 *) bmp->virt;
+ memset(bpl, 0, sizeof(struct ulp_bde64));
+ bpl->addrHigh = le32_to_cpu( putPaddrHigh(mp->phys) );
+ bpl->addrLow = le32_to_cpu( putPaddrLow(mp->phys) );
+ bpl->tus.f.bdeFlags = 0;
+ if (cmdcode == SLI_CTNS_GID_FT)
+ bpl->tus.f.bdeSize = GID_REQUEST_SZ;
+ else if (cmdcode == SLI_CTNS_RFT_ID)
+ bpl->tus.f.bdeSize = RFT_REQUEST_SZ;
+ else if (cmdcode == SLI_CTNS_RNN_ID)
+ bpl->tus.f.bdeSize = RNN_REQUEST_SZ;
+ else if (cmdcode == SLI_CTNS_RSNN_NN)
+ bpl->tus.f.bdeSize = RSNN_REQUEST_SZ;
+ else
+ bpl->tus.f.bdeSize = 0;
+ bpl->tus.w = le32_to_cpu(bpl->tus.w);
+
+ CtReq = (struct lpfc_sli_ct_request *) mp->virt;
+ memset(CtReq, 0, sizeof (struct lpfc_sli_ct_request));
+ CtReq->RevisionId.bits.Revision = SLI_CT_REVISION;
+ CtReq->RevisionId.bits.InId = 0;
+ CtReq->FsType = SLI_CT_DIRECTORY_SERVICE;
+ CtReq->FsSubType = SLI_CT_DIRECTORY_NAME_SERVER;
+ CtReq->CommandResponse.bits.Size = 0;
+ switch (cmdcode) {
+ case SLI_CTNS_GID_FT:
+ CtReq->CommandResponse.bits.CmdRsp =
+ be16_to_cpu(SLI_CTNS_GID_FT);
+ CtReq->un.gid.Fc4Type = SLI_CTPT_FCP;
+ if (phba->hba_state < LPFC_HBA_READY)
+ phba->hba_state = LPFC_NS_QRY;
+ lpfc_set_disctmo(phba);
+ cmpl = lpfc_cmpl_ct_cmd_gid_ft;
+ rsp_size = FC_MAX_NS_RSP;
+ break;
+
+ case SLI_CTNS_RFT_ID:
+ CtReq->CommandResponse.bits.CmdRsp =
+ be16_to_cpu(SLI_CTNS_RFT_ID);
+ CtReq->un.rft.PortId = be32_to_cpu(phba->fc_myDID);
+ CtReq->un.rft.fcpReg = 1;
+ cmpl = lpfc_cmpl_ct_cmd_rft_id;
+ break;
+
+ case SLI_CTNS_RNN_ID:
+ CtReq->CommandResponse.bits.CmdRsp =
+ be16_to_cpu(SLI_CTNS_RNN_ID);
+ CtReq->un.rnn.PortId = be32_to_cpu(phba->fc_myDID);
+ memcpy(CtReq->un.rnn.wwnn, &phba->fc_nodename,
+ sizeof (struct lpfc_name));
+ cmpl = lpfc_cmpl_ct_cmd_rnn_id;
+ break;
+
+ case SLI_CTNS_RSNN_NN:
+ CtReq->CommandResponse.bits.CmdRsp =
+ be16_to_cpu(SLI_CTNS_RSNN_NN);
+ memcpy(CtReq->un.rsnn.wwnn, &phba->fc_nodename,
+ sizeof (struct lpfc_name));
+ lpfc_get_hba_sym_node_name(phba, CtReq->un.rsnn.symbname);
+ CtReq->un.rsnn.len = strlen(CtReq->un.rsnn.symbname);
+ cmpl = lpfc_cmpl_ct_cmd_rsnn_nn;
+ break;
+ }
+
+ if (!lpfc_ct_cmd(phba, mp, bmp, ndlp, cmpl, rsp_size))
+ /* On success, The cmpl function will free the buffers */
+ return 0;
+
+ lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
+ns_cmd_free_bmp:
+ kfree(bmp);
+ns_cmd_free_mpvirt:
+ lpfc_mbuf_free(phba, mp->virt, mp->phys);
+ns_cmd_free_mp:
+ kfree(mp);
+ns_cmd_exit:
+ return 1;
+}
+
+static void
+lpfc_cmpl_ct_cmd_fdmi(struct lpfc_hba * phba,
+ struct lpfc_iocbq * cmdiocb, struct lpfc_iocbq * rspiocb)
+{
+ struct lpfc_dmabuf *bmp = cmdiocb->context3;
+ struct lpfc_dmabuf *inp = cmdiocb->context1;
+ struct lpfc_dmabuf *outp = cmdiocb->context2;
+ struct lpfc_sli_ct_request *CTrsp = outp->virt;
+ struct lpfc_sli_ct_request *CTcmd = inp->virt;
+ struct lpfc_nodelist *ndlp;
+ uint16_t fdmi_cmd = CTcmd->CommandResponse.bits.CmdRsp;
+ uint16_t fdmi_rsp = CTrsp->CommandResponse.bits.CmdRsp;
+
+ ndlp = lpfc_findnode_did(phba, NLP_SEARCH_ALL, FDMI_DID);
+ if (fdmi_rsp == be16_to_cpu(SLI_CT_RESPONSE_FS_RJT)) {
+ /* FDMI rsp failed */
+ lpfc_printf_log(phba,
+ KERN_INFO,
+ LOG_DISCOVERY,
+ "%d:0220 FDMI rsp failed Data: x%x\n",
+ phba->brd_no,
+ be16_to_cpu(fdmi_cmd));
+ }
+
+ switch (be16_to_cpu(fdmi_cmd)) {
+ case SLI_MGMT_RHBA:
+ lpfc_fdmi_cmd(phba, ndlp, SLI_MGMT_RPA);
+ break;
+
+ case SLI_MGMT_RPA:
+ break;
+
+ case SLI_MGMT_DHBA:
+ lpfc_fdmi_cmd(phba, ndlp, SLI_MGMT_DPRT);
+ break;
+
+ case SLI_MGMT_DPRT:
+ lpfc_fdmi_cmd(phba, ndlp, SLI_MGMT_RHBA);
+ break;
+ }
+
+ lpfc_free_ct_rsp(phba, outp);
+ lpfc_mbuf_free(phba, inp->virt, inp->phys);
+ lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
+ kfree(inp);
+ kfree(bmp);
+ spin_lock_irq(phba->host->host_lock);
+ list_add_tail(&cmdiocb->list, &phba->lpfc_iocb_list);
+ spin_unlock_irq(phba->host->host_lock);
+ return;
+}
+
+int
+lpfc_fdmi_cmd(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp, int cmdcode)
+{
+ struct lpfc_dmabuf *mp, *bmp;
+ struct lpfc_sli_ct_request *CtReq;
+ struct ulp_bde64 *bpl;
+ uint32_t size;
+ REG_HBA *rh;
+ PORT_ENTRY *pe;
+ REG_PORT_ATTRIBUTE *pab;
+ ATTRIBUTE_BLOCK *ab;
+ ATTRIBUTE_ENTRY *ae;
+ void (*cmpl) (struct lpfc_hba *, struct lpfc_iocbq *,
+ struct lpfc_iocbq *);
+
+
+ /* fill in BDEs for command */
+ /* Allocate buffer for command payload */
+ mp = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
+ if (!mp)
+ goto fdmi_cmd_exit;
+
+ mp->virt = lpfc_mbuf_alloc(phba, 0, &(mp->phys));
+ if (!mp->virt)
+ goto fdmi_cmd_free_mp;
+
+ /* Allocate buffer for Buffer ptr list */
+ bmp = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
+ if (!bmp)
+ goto fdmi_cmd_free_mpvirt;
+
+ bmp->virt = lpfc_mbuf_alloc(phba, 0, &(bmp->phys));
+ if (!bmp->virt)
+ goto fdmi_cmd_free_bmp;
+
+ INIT_LIST_HEAD(&mp->list);
+ INIT_LIST_HEAD(&bmp->list);
+
+ /* FDMI request */
+ lpfc_printf_log(phba,
+ KERN_INFO,
+ LOG_DISCOVERY,
+ "%d:0218 FDMI Request Data: x%x x%x x%x\n",
+ phba->brd_no,
+ phba->fc_flag, phba->hba_state, cmdcode);
+
+ CtReq = (struct lpfc_sli_ct_request *) mp->virt;
+
+ memset(CtReq, 0, sizeof(struct lpfc_sli_ct_request));
+ CtReq->RevisionId.bits.Revision = SLI_CT_REVISION;
+ CtReq->RevisionId.bits.InId = 0;
+
+ CtReq->FsType = SLI_CT_MANAGEMENT_SERVICE;
+ CtReq->FsSubType = SLI_CT_FDMI_Subtypes;
+ size = 0;
+
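+	/* Each FDMI attribute built below is a simple TLV: a 16-bit
+	 * type, a 16-bit length (FOURBYTES plus the payload, padded to
+	 * a 4-byte boundary), then the payload.  For example, the
+	 * 18-char MANUFACTURER string "Emulex Corporation" pads to 20
+	 * bytes, giving AttrLen = 24.
+	 */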
+ switch (cmdcode) {
+ case SLI_MGMT_RHBA:
+ {
+ lpfc_vpd_t *vp = &phba->vpd;
+ uint32_t i, j, incr;
+ int len;
+
+ CtReq->CommandResponse.bits.CmdRsp =
+ be16_to_cpu(SLI_MGMT_RHBA);
+ CtReq->CommandResponse.bits.Size = 0;
+ rh = (REG_HBA *) & CtReq->un.PortID;
+ memcpy(&rh->hi.PortName, &phba->fc_sparam.portName,
+ sizeof (struct lpfc_name));
+ /* One entry (port) per adapter */
+ rh->rpl.EntryCnt = be32_to_cpu(1);
+ memcpy(&rh->rpl.pe, &phba->fc_sparam.portName,
+ sizeof (struct lpfc_name));
+
+ /* point to the HBA attribute block */
+ size = 2 * sizeof (struct lpfc_name) + FOURBYTES;
+ ab = (ATTRIBUTE_BLOCK *) ((uint8_t *) rh + size);
+ ab->EntryCnt = 0;
+
+ /* Point to the beginning of the first HBA attribute
+ entry */
+ /* #1 HBA attribute entry */
+ size += FOURBYTES;
+ ae = (ATTRIBUTE_ENTRY *) ((uint8_t *) rh + size);
+ ae->ad.bits.AttrType = be16_to_cpu(NODE_NAME);
+ ae->ad.bits.AttrLen = be16_to_cpu(FOURBYTES
+ + sizeof (struct lpfc_name));
+ memcpy(&ae->un.NodeName, &phba->fc_sparam.nodeName,
+ sizeof (struct lpfc_name));
+ ab->EntryCnt++;
+ size += FOURBYTES + sizeof (struct lpfc_name);
+
+ /* #2 HBA attribute entry */
+ ae = (ATTRIBUTE_ENTRY *) ((uint8_t *) rh + size);
+ ae->ad.bits.AttrType = be16_to_cpu(MANUFACTURER);
+ strcpy(ae->un.Manufacturer, "Emulex Corporation");
+ len = strlen(ae->un.Manufacturer);
+ len += (len & 3) ? (4 - (len & 3)) : 4;
+ ae->ad.bits.AttrLen = be16_to_cpu(FOURBYTES + len);
+ ab->EntryCnt++;
+ size += FOURBYTES + len;
+
+ /* #3 HBA attribute entry */
+ ae = (ATTRIBUTE_ENTRY *) ((uint8_t *) rh + size);
+ ae->ad.bits.AttrType = be16_to_cpu(SERIAL_NUMBER);
+ strcpy(ae->un.SerialNumber, phba->SerialNumber);
+ len = strlen(ae->un.SerialNumber);
+ len += (len & 3) ? (4 - (len & 3)) : 4;
+ ae->ad.bits.AttrLen = be16_to_cpu(FOURBYTES + len);
+ ab->EntryCnt++;
+ size += FOURBYTES + len;
+
+ /* #4 HBA attribute entry */
+ ae = (ATTRIBUTE_ENTRY *) ((uint8_t *) rh + size);
+ ae->ad.bits.AttrType = be16_to_cpu(MODEL);
+ strcpy(ae->un.Model, phba->ModelName);
+ len = strlen(ae->un.Model);
+ len += (len & 3) ? (4 - (len & 3)) : 4;
+ ae->ad.bits.AttrLen = be16_to_cpu(FOURBYTES + len);
+ ab->EntryCnt++;
+ size += FOURBYTES + len;
+
+ /* #5 HBA attribute entry */
+ ae = (ATTRIBUTE_ENTRY *) ((uint8_t *) rh + size);
+ ae->ad.bits.AttrType = be16_to_cpu(MODEL_DESCRIPTION);
+ strcpy(ae->un.ModelDescription, phba->ModelDesc);
+ len = strlen(ae->un.ModelDescription);
+ len += (len & 3) ? (4 - (len & 3)) : 4;
+ ae->ad.bits.AttrLen = be16_to_cpu(FOURBYTES + len);
+ ab->EntryCnt++;
+ size += FOURBYTES + len;
+
+ /* #6 HBA attribute entry */
+ ae = (ATTRIBUTE_ENTRY *) ((uint8_t *) rh + size);
+ ae->ad.bits.AttrType = be16_to_cpu(HARDWARE_VERSION);
+ ae->ad.bits.AttrLen = be16_to_cpu(FOURBYTES + 8);
+			/* Convert the JEDEC ID to ASCII hex for the
+			   hardware version */
+ incr = vp->rev.biuRev;
+ for (i = 0; i < 8; i++) {
+ j = (incr & 0xf);
+ if (j <= 9)
+ ae->un.HardwareVersion[7 - i] =
+ (char)((uint8_t) 0x30 +
+ (uint8_t) j);
+ else
+ ae->un.HardwareVersion[7 - i] =
+ (char)((uint8_t) 0x61 +
+ (uint8_t) (j - 10));
+ incr = (incr >> 4);
+ }
+ ab->EntryCnt++;
+ size += FOURBYTES + 8;
+
+ /* #7 HBA attribute entry */
+ ae = (ATTRIBUTE_ENTRY *) ((uint8_t *) rh + size);
+ ae->ad.bits.AttrType = be16_to_cpu(DRIVER_VERSION);
+ strcpy(ae->un.DriverVersion, lpfc_release_version);
+ len = strlen(ae->un.DriverVersion);
+ len += (len & 3) ? (4 - (len & 3)) : 4;
+ ae->ad.bits.AttrLen = be16_to_cpu(FOURBYTES + len);
+ ab->EntryCnt++;
+ size += FOURBYTES + len;
+
+ /* #8 HBA attribute entry */
+ ae = (ATTRIBUTE_ENTRY *) ((uint8_t *) rh + size);
+ ae->ad.bits.AttrType = be16_to_cpu(OPTION_ROM_VERSION);
+ strcpy(ae->un.OptionROMVersion, phba->OptionROMVersion);
+ len = strlen(ae->un.OptionROMVersion);
+ len += (len & 3) ? (4 - (len & 3)) : 4;
+ ae->ad.bits.AttrLen = be16_to_cpu(FOURBYTES + len);
+ ab->EntryCnt++;
+ size += FOURBYTES + len;
+
+ /* #9 HBA attribute entry */
+ ae = (ATTRIBUTE_ENTRY *) ((uint8_t *) rh + size);
+ ae->ad.bits.AttrType = be16_to_cpu(FIRMWARE_VERSION);
+ lpfc_decode_firmware_rev(phba, ae->un.FirmwareVersion,
+ 1);
+ len = strlen(ae->un.FirmwareVersion);
+ len += (len & 3) ? (4 - (len & 3)) : 4;
+ ae->ad.bits.AttrLen = be16_to_cpu(FOURBYTES + len);
+ ab->EntryCnt++;
+ size += FOURBYTES + len;
+
+ /* #10 HBA attribute entry */
+ ae = (ATTRIBUTE_ENTRY *) ((uint8_t *) rh + size);
+ ae->ad.bits.AttrType = be16_to_cpu(OS_NAME_VERSION);
+ sprintf(ae->un.OsNameVersion, "%s %s %s",
+ system_utsname.sysname, system_utsname.release,
+ system_utsname.version);
+ len = strlen(ae->un.OsNameVersion);
+ len += (len & 3) ? (4 - (len & 3)) : 4;
+ ae->ad.bits.AttrLen = be16_to_cpu(FOURBYTES + len);
+ ab->EntryCnt++;
+ size += FOURBYTES + len;
+
+ /* #11 HBA attribute entry */
+ ae = (ATTRIBUTE_ENTRY *) ((uint8_t *) rh + size);
+ ae->ad.bits.AttrType = be16_to_cpu(MAX_CT_PAYLOAD_LEN);
+ ae->ad.bits.AttrLen = be16_to_cpu(FOURBYTES + 4);
+ ae->un.MaxCTPayloadLen = (65 * 4096);
+ ab->EntryCnt++;
+ size += FOURBYTES + 4;
+
+ ab->EntryCnt = be32_to_cpu(ab->EntryCnt);
+ /* Total size */
+ size = GID_REQUEST_SZ - 4 + size;
+ }
+ break;
+
+ case SLI_MGMT_RPA:
+ {
+ lpfc_vpd_t *vp;
+ struct serv_parm *hsp;
+ int len;
+
+ vp = &phba->vpd;
+
+ CtReq->CommandResponse.bits.CmdRsp =
+ be16_to_cpu(SLI_MGMT_RPA);
+ CtReq->CommandResponse.bits.Size = 0;
+ pab = (REG_PORT_ATTRIBUTE *) & CtReq->un.PortID;
+ size = sizeof (struct lpfc_name) + FOURBYTES;
+ memcpy((uint8_t *) & pab->PortName,
+ (uint8_t *) & phba->fc_sparam.portName,
+ sizeof (struct lpfc_name));
+ pab->ab.EntryCnt = 0;
+
+ /* #1 Port attribute entry */
+ ae = (ATTRIBUTE_ENTRY *) ((uint8_t *) pab + size);
+ ae->ad.bits.AttrType = be16_to_cpu(SUPPORTED_FC4_TYPES);
+ ae->ad.bits.AttrLen = be16_to_cpu(FOURBYTES + 32);
+ ae->un.SupportFC4Types[2] = 1;
+ ae->un.SupportFC4Types[7] = 1;
+ pab->ab.EntryCnt++;
+ size += FOURBYTES + 32;
+
+ /* #2 Port attribute entry */
+ ae = (ATTRIBUTE_ENTRY *) ((uint8_t *) pab + size);
+ ae->ad.bits.AttrType = be16_to_cpu(SUPPORTED_SPEED);
+ ae->ad.bits.AttrLen = be16_to_cpu(FOURBYTES + 4);
+ if (FC_JEDEC_ID(vp->rev.biuRev) == VIPER_JEDEC_ID)
+ ae->un.SupportSpeed = HBA_PORTSPEED_10GBIT;
+ else if (FC_JEDEC_ID(vp->rev.biuRev) == HELIOS_JEDEC_ID)
+ ae->un.SupportSpeed = HBA_PORTSPEED_4GBIT;
+ else if ((FC_JEDEC_ID(vp->rev.biuRev) ==
+ CENTAUR_2G_JEDEC_ID)
+ || (FC_JEDEC_ID(vp->rev.biuRev) ==
+ PEGASUS_JEDEC_ID)
+ || (FC_JEDEC_ID(vp->rev.biuRev) ==
+ THOR_JEDEC_ID))
+ ae->un.SupportSpeed = HBA_PORTSPEED_2GBIT;
+ else
+ ae->un.SupportSpeed = HBA_PORTSPEED_1GBIT;
+ pab->ab.EntryCnt++;
+ size += FOURBYTES + 4;
+
+ /* #3 Port attribute entry */
+ ae = (ATTRIBUTE_ENTRY *) ((uint8_t *) pab + size);
+ ae->ad.bits.AttrType = be16_to_cpu(PORT_SPEED);
+ ae->ad.bits.AttrLen = be16_to_cpu(FOURBYTES + 4);
+ switch(phba->fc_linkspeed) {
+ case LA_1GHZ_LINK:
+ ae->un.PortSpeed = HBA_PORTSPEED_1GBIT;
+ break;
+ case LA_2GHZ_LINK:
+ ae->un.PortSpeed = HBA_PORTSPEED_2GBIT;
+ break;
+ case LA_4GHZ_LINK:
+ ae->un.PortSpeed = HBA_PORTSPEED_4GBIT;
+ break;
+ default:
+ ae->un.PortSpeed =
+ HBA_PORTSPEED_UNKNOWN;
+ break;
+ }
+ pab->ab.EntryCnt++;
+ size += FOURBYTES + 4;
+
+ /* #4 Port attribute entry */
+ ae = (ATTRIBUTE_ENTRY *) ((uint8_t *) pab + size);
+ ae->ad.bits.AttrType = be16_to_cpu(MAX_FRAME_SIZE);
+ ae->ad.bits.AttrLen = be16_to_cpu(FOURBYTES + 4);
+ hsp = (struct serv_parm *) & phba->fc_sparam;
+ ae->un.MaxFrameSize =
+ (((uint32_t) hsp->cmn.
+ bbRcvSizeMsb) << 8) | (uint32_t) hsp->cmn.
+ bbRcvSizeLsb;
+ pab->ab.EntryCnt++;
+ size += FOURBYTES + 4;
+
+ /* #5 Port attribute entry */
+ ae = (ATTRIBUTE_ENTRY *) ((uint8_t *) pab + size);
+ ae->ad.bits.AttrType = be16_to_cpu(OS_DEVICE_NAME);
+ strcpy((char *)ae->un.OsDeviceName, LPFC_DRIVER_NAME);
+ len = strlen((char *)ae->un.OsDeviceName);
+ len += (len & 3) ? (4 - (len & 3)) : 4;
+ ae->ad.bits.AttrLen = be16_to_cpu(FOURBYTES + len);
+ pab->ab.EntryCnt++;
+ size += FOURBYTES + len;
+
+ if (phba->cfg_fdmi_on == 2) {
+ /* #6 Port attribute entry */
+ ae = (ATTRIBUTE_ENTRY *) ((uint8_t *) pab +
+ size);
+ ae->ad.bits.AttrType = be16_to_cpu(HOST_NAME);
+ sprintf(ae->un.HostName, "%s",
+ system_utsname.nodename);
+ len = strlen(ae->un.HostName);
+ len += (len & 3) ? (4 - (len & 3)) : 4;
+ ae->ad.bits.AttrLen =
+ be16_to_cpu(FOURBYTES + len);
+ pab->ab.EntryCnt++;
+ size += FOURBYTES + len;
+ }
+
+ pab->ab.EntryCnt = be32_to_cpu(pab->ab.EntryCnt);
+ /* Total size */
+ size = GID_REQUEST_SZ - 4 + size;
+ }
+ break;
+
+ case SLI_MGMT_DHBA:
+ CtReq->CommandResponse.bits.CmdRsp = be16_to_cpu(SLI_MGMT_DHBA);
+ CtReq->CommandResponse.bits.Size = 0;
+ pe = (PORT_ENTRY *) & CtReq->un.PortID;
+ memcpy((uint8_t *) & pe->PortName,
+ (uint8_t *) & phba->fc_sparam.portName,
+ sizeof (struct lpfc_name));
+ size = GID_REQUEST_SZ - 4 + sizeof (struct lpfc_name);
+ break;
+
+ case SLI_MGMT_DPRT:
+ CtReq->CommandResponse.bits.CmdRsp = be16_to_cpu(SLI_MGMT_DPRT);
+ CtReq->CommandResponse.bits.Size = 0;
+ pe = (PORT_ENTRY *) & CtReq->un.PortID;
+ memcpy((uint8_t *) & pe->PortName,
+ (uint8_t *) & phba->fc_sparam.portName,
+ sizeof (struct lpfc_name));
+ size = GID_REQUEST_SZ - 4 + sizeof (struct lpfc_name);
+ break;
+ }
+
+ bpl = (struct ulp_bde64 *) bmp->virt;
+ bpl->addrHigh = le32_to_cpu( putPaddrHigh(mp->phys) );
+ bpl->addrLow = le32_to_cpu( putPaddrLow(mp->phys) );
+ bpl->tus.f.bdeFlags = 0;
+ bpl->tus.f.bdeSize = size;
+ bpl->tus.w = le32_to_cpu(bpl->tus.w);
+
+ cmpl = lpfc_cmpl_ct_cmd_fdmi;
+
+ if (!lpfc_ct_cmd(phba, mp, bmp, ndlp, cmpl, FC_MAX_NS_RSP))
+ return 0;
+
+ lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
+fdmi_cmd_free_bmp:
+ kfree(bmp);
+fdmi_cmd_free_mpvirt:
+ lpfc_mbuf_free(phba, mp->virt, mp->phys);
+fdmi_cmd_free_mp:
+ kfree(mp);
+fdmi_cmd_exit:
+ /* Issue FDMI request failed */
+ lpfc_printf_log(phba,
+ KERN_INFO,
+ LOG_DISCOVERY,
+ "%d:0244 Issue FDMI request failed Data: x%x\n",
+ phba->brd_no,
+ cmdcode);
+ return 1;
+}
+
+void
+lpfc_fdmi_tmo(unsigned long ptr)
+{
+ struct lpfc_hba *phba = (struct lpfc_hba *)ptr;
+ unsigned long iflag;
+
+ spin_lock_irqsave(phba->host->host_lock, iflag);
+ if (!(phba->work_hba_events & WORKER_FDMI_TMO)) {
+ phba->work_hba_events |= WORKER_FDMI_TMO;
+ if (phba->work_wait)
+ wake_up(phba->work_wait);
+ }
+ spin_unlock_irqrestore(phba->host->host_lock,iflag);
+}
+
+void
+lpfc_fdmi_tmo_handler(struct lpfc_hba *phba)
+{
+ struct lpfc_nodelist *ndlp;
+
+ spin_lock_irq(phba->host->host_lock);
+ if (!(phba->work_hba_events & WORKER_FDMI_TMO)) {
+ spin_unlock_irq(phba->host->host_lock);
+ return;
+ }
+ ndlp = lpfc_findnode_did(phba, NLP_SEARCH_ALL, FDMI_DID);
+ if (ndlp) {
+ if (system_utsname.nodename[0] != '\0') {
+ lpfc_fdmi_cmd(phba, ndlp, SLI_MGMT_DHBA);
+ } else {
+ mod_timer(&phba->fc_fdmitmo, jiffies + HZ * 60);
+ }
+ }
+ spin_unlock_irq(phba->host->host_lock);
+ return;
+}
+
+
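+/*
+ * Illustrative decode (hypothetical value): with rBit set and
+ * sli2FwRev == 0x00002391, the code below extracts b1 = 2, b2 = 3,
+ * b3 = 2, type 'A' and b4 = 1, producing the revision string
+ * "2.32A1".
+ */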
+void
+lpfc_decode_firmware_rev(struct lpfc_hba * phba, char *fwrevision, int flag)
+{
+ struct lpfc_sli *psli = &phba->sli;
+ lpfc_vpd_t *vp = &phba->vpd;
+ uint32_t b1, b2, b3, b4, i, rev;
+ char c;
+ uint32_t *ptr, str[4];
+ uint8_t *fwname;
+
+ if (vp->rev.rBit) {
+ if (psli->sli_flag & LPFC_SLI2_ACTIVE)
+ rev = vp->rev.sli2FwRev;
+ else
+ rev = vp->rev.sli1FwRev;
+
+ b1 = (rev & 0x0000f000) >> 12;
+ b2 = (rev & 0x00000f00) >> 8;
+ b3 = (rev & 0x000000c0) >> 6;
+ b4 = (rev & 0x00000030) >> 4;
+
+ switch (b4) {
+ case 0:
+ c = 'N';
+ break;
+ case 1:
+ c = 'A';
+ break;
+ case 2:
+ c = 'B';
+ break;
+ default:
+ c = 0;
+ break;
+ }
+ b4 = (rev & 0x0000000f);
+
+ if (psli->sli_flag & LPFC_SLI2_ACTIVE)
+ fwname = vp->rev.sli2FwName;
+ else
+ fwname = vp->rev.sli1FwName;
+
+ for (i = 0; i < 16; i++)
+ if (fwname[i] == 0x20)
+ fwname[i] = 0;
+
+ ptr = (uint32_t*)fwname;
+
+ for (i = 0; i < 3; i++)
+ str[i] = be32_to_cpu(*ptr++);
+
+ if (c == 0) {
+ if (flag)
+ sprintf(fwrevision, "%d.%d%d (%s)",
+ b1, b2, b3, (char *)str);
+ else
+ sprintf(fwrevision, "%d.%d%d", b1,
+ b2, b3);
+ } else {
+ if (flag)
+ sprintf(fwrevision, "%d.%d%d%c%d (%s)",
+ b1, b2, b3, c,
+ b4, (char *)str);
+ else
+ sprintf(fwrevision, "%d.%d%d%c%d",
+ b1, b2, b3, c, b4);
+ }
+ } else {
+ rev = vp->rev.smFwRev;
+
+ b1 = (rev & 0xff000000) >> 24;
+ b2 = (rev & 0x00f00000) >> 20;
+ b3 = (rev & 0x000f0000) >> 16;
+ c = (rev & 0x0000ff00) >> 8;
+ b4 = (rev & 0x000000ff);
+
+ if (flag)
+ sprintf(fwrevision, "%d.%d%d%c%d ", b1,
+ b2, b3, c, b4);
+ else
+ sprintf(fwrevision, "%d.%d%d%c%d ", b1,
+ b2, b3, c, b4);
+ }
+ return;
+}
diff --git a/drivers/scsi/lpfc/lpfc_disc.h b/drivers/scsi/lpfc/lpfc_disc.h
new file mode 100644
index 00000000000..adccc99510d
--- /dev/null
+++ b/drivers/scsi/lpfc/lpfc_disc.h
@@ -0,0 +1,206 @@
+/*******************************************************************
+ * This file is part of the Emulex Linux Device Driver for *
+ * Enterprise Fibre Channel Host Bus Adapters. *
+ * Refer to the README file included with this package for *
+ * driver version and adapter support. *
+ * Copyright (C) 2004 Emulex Corporation. *
+ * www.emulex.com *
+ * *
+ * This program is free software; you can redistribute it and/or *
+ * modify it under the terms of the GNU General Public License *
+ * as published by the Free Software Foundation; either version 2 *
+ * of the License, or (at your option) any later version. *
+ * *
+ * This program is distributed in the hope that it will be useful, *
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of *
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
+ * GNU General Public License for more details, a copy of which *
+ * can be found in the file COPYING included with this package. *
+ *******************************************************************/
+
+/*
+ * $Id: lpfc_disc.h 1.61 2005/04/07 08:46:52EDT sf_support Exp $
+ */
+
+#define FC_MAX_HOLD_RSCN 32 /* max number of deferred RSCNs */
+#define FC_MAX_NS_RSP 65536 /* max size NameServer rsp */
+#define FC_MAXLOOP 126 /* max devices supported on a fc loop */
+#define LPFC_DISC_FLOGI_TMO 10 /* Discovery FLOGI ratov */
+
+
+/* This is the protocol-dependent definition for a Node List Entry.
+ * It is used by the Fibre Channel protocol to support FCP.
+ */
+
+/* structure used to queue an event to the discovery worker thread */
+struct lpfc_work_evt {
+ struct list_head evt_listp;
+ void * evt_arg1;
+ void * evt_arg2;
+ uint32_t evt;
+};
+
+#define LPFC_EVT_NODEV_TMO 0x1
+#define LPFC_EVT_ONLINE 0x2
+#define LPFC_EVT_OFFLINE 0x3
+#define LPFC_EVT_ELS_RETRY 0x4
+
+struct lpfc_nodelist {
+ struct list_head nlp_listp;
+ struct lpfc_name nlp_portname; /* port name */
+ struct lpfc_name nlp_nodename; /* node name */
+ uint32_t nlp_flag; /* entry flags */
+ uint32_t nlp_DID; /* FC D_ID of entry */
+ uint32_t nlp_last_elscmd; /* Last ELS cmd sent */
+ uint16_t nlp_type;
+#define NLP_FC_NODE 0x1 /* entry is an FC node */
+#define NLP_FABRIC 0x4 /* entry rep a Fabric entity */
+#define NLP_FCP_TARGET 0x8 /* entry is an FCP target */
+#define NLP_FCP_INITIATOR 0x10 /* entry is an FCP Initiator */
+
+ uint16_t nlp_rpi;
+ uint16_t nlp_state; /* state transition indicator */
+ uint16_t nlp_xri; /* output exchange id for RPI */
+ uint16_t nlp_sid; /* scsi id */
+#define NLP_NO_SID 0xffff
+ uint16_t nlp_maxframe; /* Max RCV frame size */
+ uint8_t nlp_class_sup; /* Supported Classes */
+ uint8_t nlp_retry; /* used for ELS retries */
+ uint8_t nlp_disc_refcnt; /* used for DSM */
+ uint8_t nlp_fcp_info; /* class info, bits 0-3 */
+#define NLP_FCP_2_DEVICE 0x10 /* FCP-2 device */
+
+ struct timer_list nlp_delayfunc; /* Used for delayed ELS cmds */
+ struct timer_list nlp_tmofunc; /* Used for nodev tmo */
+ struct fc_rport *rport; /* Corresponding FC transport
+ port structure */
+ struct lpfc_nodelist *nlp_rpi_hash_next;
+ struct lpfc_hba *nlp_phba;
+ struct lpfc_work_evt nodev_timeout_evt;
+ struct lpfc_work_evt els_retry_evt;
+};
+
+/* Defines for nlp_flag (uint32) */
+#define NLP_NO_LIST 0x0 /* Indicates immediately free node */
+#define NLP_UNUSED_LIST 0x1 /* Flg to indicate node will be freed */
+#define NLP_PLOGI_LIST 0x2 /* Flg to indicate sent PLOGI */
+#define NLP_ADISC_LIST 0x3 /* Flg to indicate sent ADISC */
+#define NLP_REGLOGIN_LIST 0x4 /* Flg to indicate sent REG_LOGIN */
+#define NLP_PRLI_LIST 0x5 /* Flg to indicate sent PRLI */
+#define NLP_UNMAPPED_LIST 0x6 /* Node is now unmapped */
+#define NLP_MAPPED_LIST 0x7 /* Node is now mapped */
+#define NLP_NPR_LIST 0x8 /* Node is in NPort Recovery state */
+#define NLP_JUST_DQ 0x9 /* just deque ndlp in lpfc_nlp_list */
+#define NLP_LIST_MASK 0xf /* mask to see what list node is on */
+#define NLP_PLOGI_SND 0x20 /* sent PLOGI request for this entry */
+#define NLP_PRLI_SND 0x40 /* sent PRLI request for this entry */
+#define NLP_ADISC_SND 0x80 /* sent ADISC request for this entry */
+#define NLP_LOGO_SND 0x100 /* sent LOGO request for this entry */
+#define NLP_RNID_SND 0x400 /* sent RNID request for this entry */
+#define NLP_ELS_SND_MASK 0x7e0 /* sent ELS request for this entry */
+#define NLP_NODEV_TMO 0x10000 /* nodev timeout is running for node */
+#define NLP_DELAY_TMO 0x20000 /* delay timeout is running for node */
+#define NLP_NPR_2B_DISC 0x40000 /* node is included in num_disc_nodes */
+#define NLP_RCV_PLOGI 0x80000 /* Rcv'ed PLOGI from remote system */
+#define NLP_LOGO_ACC 0x100000 /* Process LOGO after ACC completes */
+#define NLP_TGT_NO_SCSIID 0x200000 /* good PRLI but no binding for scsid */
+#define NLP_ACC_REGLOGIN 0x1000000 /* Issue Reg Login after successful
+ ACC */
+#define NLP_NPR_ADISC 0x2000000 /* Issue ADISC when dq'ed from
+ NPR list */
+#define NLP_DELAY_REMOVE 0x4000000 /* Defer removal till end of DSM */
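+
+/* The low nibble of nlp_flag records which list the node is on, e.g.
+ * (ndlp->nlp_flag & NLP_LIST_MASK) == NLP_MAPPED_LIST tests for a
+ * mapped node; the remaining bits are independent state flags.
+ */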
+
+/* Defines for list searches */
+#define NLP_SEARCH_MAPPED 0x1 /* search mapped */
+#define NLP_SEARCH_UNMAPPED 0x2 /* search unmapped */
+#define NLP_SEARCH_PLOGI 0x4 /* search plogi */
+#define NLP_SEARCH_ADISC 0x8 /* search adisc */
+#define NLP_SEARCH_REGLOGIN 0x10 /* search reglogin */
+#define NLP_SEARCH_PRLI 0x20 /* search prli */
+#define NLP_SEARCH_NPR 0x40 /* search npr */
+#define NLP_SEARCH_UNUSED 0x80 /* search unused */
+#define NLP_SEARCH_ALL 0xff /* search all lists */
+
+/* There are 4 different double linked lists nodelist entries can reside on.
+ * The Port Login (PLOGI) list and Address Discovery (ADISC) list are used
+ * when Link Up discovery or Registered State Change Notification (RSCN)
+ * processing is needed. Each list holds the nodes that require a PLOGI or
+ * ADISC Extended Link Service (ELS) request. These lists keep track of the
+ * nodes affected by an RSCN or a Link Up event (typically, all nodes are
+ * affected by a Link Up). The unmapped_list contains all nodes that have
+ * successfully logged into at the Fibre Channel level. The
+ * mapped_list will contain all nodes that are mapped FCP targets.
+ *
+ * The bind list is a list of undiscovered (potentially non-existent) nodes
+ * that we have saved binding information on. This information is used when
+ * nodes transition from the unmapped to the mapped list.
+ */
+
+/* Defines for nlp_state */
+#define NLP_STE_UNUSED_NODE 0x0 /* node is just allocated */
+#define NLP_STE_PLOGI_ISSUE 0x1 /* PLOGI was sent to NL_PORT */
+#define NLP_STE_ADISC_ISSUE 0x2 /* ADISC was sent to NL_PORT */
+#define NLP_STE_REG_LOGIN_ISSUE 0x3 /* REG_LOGIN was issued for NL_PORT */
+#define NLP_STE_PRLI_ISSUE 0x4 /* PRLI was sent to NL_PORT */
+#define NLP_STE_UNMAPPED_NODE 0x5 /* PRLI completed from NL_PORT */
+#define NLP_STE_MAPPED_NODE 0x6 /* Identified as a FCP Target */
+#define NLP_STE_NPR_NODE 0x7 /* NPort disappeared */
+#define NLP_STE_MAX_STATE 0x8
+#define NLP_STE_FREED_NODE 0xff /* node entry was freed to MEM_NLP */
+
+/* For UNUSED_NODE state, the node has just been allocated.
+ * For PLOGI_ISSUE and REG_LOGIN_ISSUE, the node is on
+ * the PLOGI list. For REG_LOGIN_COMPL, the node is taken off the PLOGI list
+ * and put on the unmapped list. For ADISC processing, the node is taken off
+ * the ADISC list and placed on either the mapped or unmapped list (depending
+ * on its previous state). Once on the unmapped list, a PRLI is issued and the
+ * state changed to PRLI_ISSUE. When the PRLI completion occurs, the state is
+ * changed to PRLI_COMPL. If the completion indicates a mapped
+ * node, the node is taken off the unmapped list. The binding list is checked
+ * for a valid binding, or a binding is automatically assigned. If binding
+ * assignment is unsuccessful, the node is left on the unmapped list. If
+ * binding assignment is successful, the associated binding list entry (if
+ * any) is removed, and the node is placed on the mapped list.
+ */
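+/*
+ * Sketch of the common target bring-up path through the states above
+ * (illustrative):
+ *
+ *   PLOGI_ISSUE -> REG_LOGIN_ISSUE -> PRLI_ISSUE -> UNMAPPED_NODE
+ *                                                -> MAPPED_NODE
+ *
+ * where the final transition depends on the PRLI identifying the
+ * remote port as an FCP target.
+ */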
+/*
+ * For a Link Down, all nodes on the ADISC, PLOGI, unmapped or mapped
+ * lists will receive a DEVICE_RECOVERY event. If the linkdown or nodev timers
+ * expire, all affected nodes will receive a DEVICE_RM event.
+ */
+/*
+ * For a Link Up or RSCN, all nodes will move from the mapped / unmapped lists
+ * to either the ADISC or PLOGI list. After a Nameserver query or ALPA loopmap
+ * check, additional nodes may be added (DEVICE_ADD) or removed (DEVICE_RM) to /
+ * from the PLOGI or ADISC lists. Once the PLOGI and ADISC lists are populated,
+ * we first process the ADISC list: 32 entries are processed initially and
+ * ADISC is initiated for each one. Completions / events for each node are
+ * funnelled through the state machine. As each node finishes ADISC
+ * processing, it starts ADISC for any nodes still waiting. If no nodes are
+ * waiting and the ADISC list count reaches zero, we are done. For Link Up
+ * discovery, since all nodes on the PLOGI list are UNREG_LOGIN'ed, we can
+ * issue a CLEAR_LA and re-enable Link Events. Next we process the PLOGI
+ * list: 32 entries are processed initially and PLOGI is initiated for each
+ * one. Completions / events for each node are funnelled through the state
+ * machine. As each node finishes PLOGI processing, it starts PLOGI for any
+ * nodes still waiting. If no nodes are waiting and the PLOGI list count
+ * reaches zero, discovery / RSCN handling is complete. Upon completion,
+ * ALL nodes should be on either the mapped or unmapped lists.
+ */
+
+/* Defines for Node List Entry Events that could happen */
+#define NLP_EVT_RCV_PLOGI 0x0 /* Rcv'd an ELS PLOGI command */
+#define NLP_EVT_RCV_PRLI 0x1 /* Rcv'd an ELS PRLI command */
+#define NLP_EVT_RCV_LOGO 0x2 /* Rcv'd an ELS LOGO command */
+#define NLP_EVT_RCV_ADISC 0x3 /* Rcv'd an ELS ADISC command */
+#define NLP_EVT_RCV_PDISC 0x4 /* Rcv'd an ELS PDISC command */
+#define NLP_EVT_RCV_PRLO 0x5 /* Rcv'd an ELS PRLO command */
+#define NLP_EVT_CMPL_PLOGI 0x6 /* Sent an ELS PLOGI command */
+#define NLP_EVT_CMPL_PRLI 0x7 /* Sent an ELS PRLI command */
+#define NLP_EVT_CMPL_LOGO 0x8 /* Sent an ELS LOGO command */
+#define NLP_EVT_CMPL_ADISC 0x9 /* Sent an ELS ADISC command */
+#define NLP_EVT_CMPL_REG_LOGIN 0xa /* REG_LOGIN mbox cmd completed */
+#define NLP_EVT_DEVICE_RM 0xb /* Device not found in NS / ALPAmap */
+#define NLP_EVT_DEVICE_RECOVERY 0xc /* Device existence unknown */
+#define NLP_EVT_MAX_EVENT 0xd
+
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
new file mode 100644
index 00000000000..68d1b77e025
--- /dev/null
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -0,0 +1,3258 @@
+/*******************************************************************
+ * This file is part of the Emulex Linux Device Driver for *
+ * Enterprise Fibre Channel Host Bus Adapters. *
+ * Refer to the README file included with this package for *
+ * driver version and adapter support. *
+ * Copyright (C) 2004 Emulex Corporation. *
+ * www.emulex.com *
+ * *
+ * This program is free software; you can redistribute it and/or *
+ * modify it under the terms of the GNU General Public License *
+ * as published by the Free Software Foundation; either version 2 *
+ * of the License, or (at your option) any later version. *
+ * *
+ * This program is distributed in the hope that it will be useful, *
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of *
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
+ * GNU General Public License for more details, a copy of which *
+ * can be found in the file COPYING included with this package. *
+ *******************************************************************/
+
+/*
+ * $Id: lpfc_els.c 1.186 2005/04/13 14:26:55EDT sf_support Exp $
+ */
+
+#include <linux/blkdev.h>
+#include <linux/pci.h>
+#include <linux/interrupt.h>
+
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_transport_fc.h>
+
+#include "lpfc_hw.h"
+#include "lpfc_sli.h"
+#include "lpfc_disc.h"
+#include "lpfc_scsi.h"
+#include "lpfc.h"
+#include "lpfc_logmsg.h"
+#include "lpfc_crtn.h"
+
+static int lpfc_els_retry(struct lpfc_hba *, struct lpfc_iocbq *,
+ struct lpfc_iocbq *);
+static int lpfc_max_els_tries = 3;
+
+static int
+lpfc_els_chk_latt(struct lpfc_hba * phba)
+{
+ struct lpfc_sli *psli;
+ LPFC_MBOXQ_t *mbox;
+ uint32_t ha_copy;
+ int rc;
+
+ psli = &phba->sli;
+
+ if ((phba->hba_state >= LPFC_HBA_READY) ||
+ (phba->hba_state == LPFC_LINK_DOWN))
+ return 0;
+
+ /* Read the HBA Host Attention Register */
+ spin_lock_irq(phba->host->host_lock);
+ ha_copy = readl(phba->HAregaddr);
+ spin_unlock_irq(phba->host->host_lock);
+
+ if (!(ha_copy & HA_LATT))
+ return 0;
+
+ /* Pending Link Event during Discovery */
+ lpfc_printf_log(phba, KERN_WARNING, LOG_DISCOVERY,
+ "%d:0237 Pending Link Event during "
+ "Discovery: State x%x\n",
+ phba->brd_no, phba->hba_state);
+
+	/* CLEAR_LA should re-enable link attention events and
+	 * we should then immediately take a LATT event. The
+	 * LATT processing should call lpfc_linkdown() which
+	 * will clean up any leftover in-progress discovery
+	 * events.
+	 */
+ spin_lock_irq(phba->host->host_lock);
+ phba->fc_flag |= FC_ABORT_DISCOVERY;
+ spin_unlock_irq(phba->host->host_lock);
+
+ if (phba->hba_state != LPFC_CLEAR_LA) {
+ if ((mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL))) {
+ phba->hba_state = LPFC_CLEAR_LA;
+ lpfc_clear_la(phba, mbox);
+ mbox->mbox_cmpl = lpfc_mbx_cmpl_clear_la;
+ rc = lpfc_sli_issue_mbox (phba, mbox,
+ (MBX_NOWAIT | MBX_STOP_IOCB));
+ if (rc == MBX_NOT_FINISHED) {
+ mempool_free(mbox, phba->mbox_mem_pool);
+ phba->hba_state = LPFC_HBA_ERROR;
+ }
+ }
+ }
+
+ return (1);
+
+}
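+
+/* Callers use the nonzero return to abandon in-progress discovery work
+ * when a link event is pending, typically right at the top of an ELS
+ * completion handler:
+ *
+ *	if (lpfc_els_chk_latt(phba))
+ *		goto out;
+ */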
+
+static struct lpfc_iocbq *
+lpfc_prep_els_iocb(struct lpfc_hba * phba,
+ uint8_t expectRsp,
+ uint16_t cmdSize,
+ uint8_t retry, struct lpfc_nodelist * ndlp, uint32_t elscmd)
+{
+ struct list_head *lpfc_iocb_list = &phba->lpfc_iocb_list;
+ struct lpfc_sli_ring *pring;
+ struct lpfc_iocbq *elsiocb = NULL;
+ struct lpfc_dmabuf *pcmd, *prsp, *pbuflist;
+ struct ulp_bde64 *bpl;
+ IOCB_t *icmd;
+
+ pring = &phba->sli.ring[LPFC_ELS_RING];
+
+ if (phba->hba_state < LPFC_LINK_UP)
+ return NULL;
+
+ /* Allocate buffer for command iocb */
+ spin_lock_irq(phba->host->host_lock);
+ list_remove_head(lpfc_iocb_list, elsiocb, struct lpfc_iocbq, list);
+ spin_unlock_irq(phba->host->host_lock);
+
+ if (elsiocb == NULL)
+ return NULL;
+ memset(elsiocb, 0, sizeof (struct lpfc_iocbq));
+ icmd = &elsiocb->iocb;
+
+ /* fill in BDEs for command */
+ /* Allocate buffer for command payload */
+ if (((pcmd = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL)) == 0) ||
+ ((pcmd->virt = lpfc_mbuf_alloc(phba,
+ MEM_PRI, &(pcmd->phys))) == 0)) {
+ if (pcmd)
+ kfree(pcmd);
+
+ list_add_tail(&elsiocb->list, lpfc_iocb_list);
+ return NULL;
+ }
+
+ INIT_LIST_HEAD(&pcmd->list);
+
+ /* Allocate buffer for response payload */
+ if (expectRsp) {
+ prsp = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
+ if (prsp)
+ prsp->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
+ &prsp->phys);
+ if (prsp == 0 || prsp->virt == 0) {
+ if (prsp)
+ kfree(prsp);
+ lpfc_mbuf_free(phba, pcmd->virt, pcmd->phys);
+ kfree(pcmd);
+ list_add_tail(&elsiocb->list, lpfc_iocb_list);
+ return NULL;
+ }
+ INIT_LIST_HEAD(&prsp->list);
+ } else {
+ prsp = NULL;
+ }
+
+ /* Allocate buffer for Buffer ptr list */
+ pbuflist = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
+ if (pbuflist)
+ pbuflist->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
+ &pbuflist->phys);
+	if (pbuflist == 0 || pbuflist->virt == 0) {
+		list_add_tail(&elsiocb->list, lpfc_iocb_list);
+		lpfc_mbuf_free(phba, pcmd->virt, pcmd->phys);
+		kfree(pcmd);
+		/* prsp is NULL when no response was expected */
+		if (prsp) {
+			lpfc_mbuf_free(phba, prsp->virt, prsp->phys);
+			kfree(prsp);
+		}
+		if (pbuflist)
+			kfree(pbuflist);
+ return NULL;
+ }
+
+ INIT_LIST_HEAD(&pbuflist->list);
+
+ icmd->un.elsreq64.bdl.addrHigh = putPaddrHigh(pbuflist->phys);
+ icmd->un.elsreq64.bdl.addrLow = putPaddrLow(pbuflist->phys);
+ icmd->un.elsreq64.bdl.bdeFlags = BUFF_TYPE_BDL;
+ if (expectRsp) {
+ icmd->un.elsreq64.bdl.bdeSize = (2 * sizeof (struct ulp_bde64));
+ icmd->un.elsreq64.remoteID = ndlp->nlp_DID; /* DID */
+ icmd->ulpCommand = CMD_ELS_REQUEST64_CR;
+ } else {
+ icmd->un.elsreq64.bdl.bdeSize = sizeof (struct ulp_bde64);
+ icmd->ulpCommand = CMD_XMIT_ELS_RSP64_CX;
+ }
+
+ icmd->ulpBdeCount = 1;
+ icmd->ulpLe = 1;
+ icmd->ulpClass = CLASS3;
+
+ bpl = (struct ulp_bde64 *) pbuflist->virt;
+ bpl->addrLow = le32_to_cpu(putPaddrLow(pcmd->phys));
+ bpl->addrHigh = le32_to_cpu(putPaddrHigh(pcmd->phys));
+ bpl->tus.f.bdeSize = cmdSize;
+ bpl->tus.f.bdeFlags = 0;
+ bpl->tus.w = le32_to_cpu(bpl->tus.w);
+
+ if (expectRsp) {
+ bpl++;
+ bpl->addrLow = le32_to_cpu(putPaddrLow(prsp->phys));
+ bpl->addrHigh = le32_to_cpu(putPaddrHigh(prsp->phys));
+ bpl->tus.f.bdeSize = FCELSSIZE;
+ bpl->tus.f.bdeFlags = BUFF_USE_RCV;
+ bpl->tus.w = le32_to_cpu(bpl->tus.w);
+ }
+
+ /* Save for completion so we can release these resources */
+ elsiocb->context1 = (uint8_t *) ndlp;
+ elsiocb->context2 = (uint8_t *) pcmd;
+ elsiocb->context3 = (uint8_t *) pbuflist;
+ elsiocb->retry = retry;
+ elsiocb->drvrTimeout = (phba->fc_ratov << 1) + LPFC_DRVR_TIMEOUT;
+
+ if (prsp) {
+ list_add(&prsp->list, &pcmd->list);
+ }
+
+ if (expectRsp) {
+ /* Xmit ELS command <elsCmd> to remote NPORT <did> */
+ lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
+ "%d:0116 Xmit ELS command x%x to remote "
+ "NPORT x%x Data: x%x x%x\n",
+ phba->brd_no, elscmd,
+ ndlp->nlp_DID, icmd->ulpIoTag, phba->hba_state);
+ } else {
+ /* Xmit ELS response <elsCmd> to remote NPORT <did> */
+ lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
+ "%d:0117 Xmit ELS response x%x to remote "
+ "NPORT x%x Data: x%x x%x\n",
+ phba->brd_no, elscmd,
+ ndlp->nlp_DID, icmd->ulpIoTag, cmdSize);
+ }
+
+ return (elsiocb);
+}
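+
+/* Typical caller pattern (a sketch distilled from the issue routines
+ * below; cmpl_handler stands in for the per-command completion): allocate
+ * the iocb, write the ELS command word, fill the rest of the payload,
+ * then hand it to the SLI layer:
+ *
+ *	elsiocb = lpfc_prep_els_iocb(phba, 1, cmdsize, retry, ndlp, elscmd);
+ *	pcmd = ((struct lpfc_dmabuf *) elsiocb->context2)->virt;
+ *	*((uint32_t *) pcmd) = elscmd;
+ *	pcmd += sizeof (uint32_t);	/* payload follows the command word */
+ *	elsiocb->iocb_cmpl = cmpl_handler;
+ *	lpfc_sli_issue_iocb(phba, pring, elsiocb, 0);
+ */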
+
+
+static int
+lpfc_cmpl_els_flogi_fabric(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
+ struct serv_parm *sp, IOCB_t *irsp)
+{
+ LPFC_MBOXQ_t *mbox;
+ int rc;
+
+ spin_lock_irq(phba->host->host_lock);
+ phba->fc_flag |= FC_FABRIC;
+ spin_unlock_irq(phba->host->host_lock);
+
+ phba->fc_edtov = be32_to_cpu(sp->cmn.e_d_tov);
+ if (sp->cmn.edtovResolution) /* E_D_TOV ticks are in nanoseconds */
+ phba->fc_edtov = (phba->fc_edtov + 999999) / 1000000;
+
+ phba->fc_ratov = (be32_to_cpu(sp->cmn.w2.r_a_tov) + 999) / 1000;
+
+ if (phba->fc_topology == TOPOLOGY_LOOP) {
+ spin_lock_irq(phba->host->host_lock);
+ phba->fc_flag |= FC_PUBLIC_LOOP;
+ spin_unlock_irq(phba->host->host_lock);
+ } else {
+		/*
+		 * If we are an N-Port connected to a Fabric, fix up the
+		 * sparams so logins to devices on remote loops work.
+		 */
+ phba->fc_sparam.cmn.altBbCredit = 1;
+ }
+
+ phba->fc_myDID = irsp->un.ulpWord[4] & Mask_DID;
+ memcpy(&ndlp->nlp_portname, &sp->portName, sizeof(struct lpfc_name));
+ memcpy(&ndlp->nlp_nodename, &sp->nodeName, sizeof (struct lpfc_name));
+ ndlp->nlp_class_sup = 0;
+ if (sp->cls1.classValid)
+ ndlp->nlp_class_sup |= FC_COS_CLASS1;
+ if (sp->cls2.classValid)
+ ndlp->nlp_class_sup |= FC_COS_CLASS2;
+ if (sp->cls3.classValid)
+ ndlp->nlp_class_sup |= FC_COS_CLASS3;
+ if (sp->cls4.classValid)
+ ndlp->nlp_class_sup |= FC_COS_CLASS4;
+ ndlp->nlp_maxframe = ((sp->cmn.bbRcvSizeMsb & 0x0F) << 8) |
+ sp->cmn.bbRcvSizeLsb;
+ memcpy(&phba->fc_fabparam, sp, sizeof(struct serv_parm));
+
+ mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!mbox)
+ goto fail;
+
+ phba->hba_state = LPFC_FABRIC_CFG_LINK;
+ lpfc_config_link(phba, mbox);
+ mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+
+ rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT | MBX_STOP_IOCB);
+ if (rc == MBX_NOT_FINISHED)
+ goto fail_free_mbox;
+
+ mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!mbox)
+ goto fail;
+
+ if (lpfc_reg_login(phba, Fabric_DID, (uint8_t *) sp, mbox, 0))
+ goto fail_free_mbox;
+
+ /*
+ * set_slim mailbox command needs to execute first,
+ * queue this command to be processed later.
+ */
+ mbox->mbox_cmpl = lpfc_mbx_cmpl_fabric_reg_login;
+ mbox->context2 = ndlp;
+
+ rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT | MBX_STOP_IOCB);
+ if (rc == MBX_NOT_FINISHED)
+ goto fail_free_mbox;
+
+ return 0;
+
+ fail_free_mbox:
+ mempool_free(mbox, phba->mbox_mem_pool);
+ fail:
+ return -ENXIO;
+}
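+
+/* Worked example of the timeout conversions above (illustrative values):
+ * with edtovResolution set, an E_D_TOV of 2000000 ns rounds up to
+ * (2000000 + 999999) / 1000000 = 2 ms; an R_A_TOV of 10000 ms rounds up
+ * to (10000 + 999) / 1000 = 10 seconds.
+ */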
+
+/*
+ * We FLOGIed into an NPort, initiate pt2pt protocol
+ */
+static int
+lpfc_cmpl_els_flogi_nport(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
+ struct serv_parm *sp)
+{
+ LPFC_MBOXQ_t *mbox;
+ int rc;
+
+ spin_lock_irq(phba->host->host_lock);
+ phba->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
+ spin_unlock_irq(phba->host->host_lock);
+
+ phba->fc_edtov = FF_DEF_EDTOV;
+ phba->fc_ratov = FF_DEF_RATOV;
+ rc = memcmp(&phba->fc_portname, &sp->portName,
+ sizeof(struct lpfc_name));
+ if (rc >= 0) {
+ /* This side will initiate the PLOGI */
+ spin_lock_irq(phba->host->host_lock);
+ phba->fc_flag |= FC_PT2PT_PLOGI;
+ spin_unlock_irq(phba->host->host_lock);
+
+		/*
+		 * N_Port ID cannot be 0; set ours to LocalID and the
+		 * other side will be RemoteID.
+		 */
+
+ /* not equal */
+ if (rc)
+ phba->fc_myDID = PT2PT_LocalID;
+
+ mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!mbox)
+ goto fail;
+
+ lpfc_config_link(phba, mbox);
+
+ mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+ rc = lpfc_sli_issue_mbox(phba, mbox,
+ MBX_NOWAIT | MBX_STOP_IOCB);
+ if (rc == MBX_NOT_FINISHED) {
+ mempool_free(mbox, phba->mbox_mem_pool);
+ goto fail;
+ }
+ mempool_free(ndlp, phba->nlp_mem_pool);
+
+ ndlp = lpfc_findnode_did(phba, NLP_SEARCH_ALL, PT2PT_RemoteID);
+ if (!ndlp) {
+ /*
+ * Cannot find existing Fabric ndlp, so allocate a
+ * new one
+ */
+ ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
+ if (!ndlp)
+ goto fail;
+
+ lpfc_nlp_init(phba, ndlp, PT2PT_RemoteID);
+ }
+
+ memcpy(&ndlp->nlp_portname, &sp->portName,
+ sizeof(struct lpfc_name));
+ memcpy(&ndlp->nlp_nodename, &sp->nodeName,
+ sizeof(struct lpfc_name));
+ ndlp->nlp_state = NLP_STE_NPR_NODE;
+ lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST);
+ ndlp->nlp_flag |= NLP_NPR_2B_DISC;
+ } else {
+ /* This side will wait for the PLOGI */
+ mempool_free( ndlp, phba->nlp_mem_pool);
+ }
+
+ spin_lock_irq(phba->host->host_lock);
+ phba->fc_flag |= FC_PT2PT;
+ spin_unlock_irq(phba->host->host_lock);
+
+ /* Start discovery - this should just do CLEAR_LA */
+ lpfc_disc_start(phba);
+ return 0;
+ fail:
+ return -ENXIO;
+}
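+
+/* The memcmp() of port names above decides the pt2pt roles: the side
+ * with the higher WWPN (rc > 0) sets FC_PT2PT_PLOGI, takes
+ * PT2PT_LocalID and will PLOGI the remote at PT2PT_RemoteID; the lower
+ * side frees its ndlp and simply waits for that PLOGI to arrive.
+ */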
+
+static void
+lpfc_cmpl_els_flogi(struct lpfc_hba * phba,
+ struct lpfc_iocbq * cmdiocb, struct lpfc_iocbq * rspiocb)
+{
+ IOCB_t *irsp = &rspiocb->iocb;
+ struct lpfc_nodelist *ndlp = cmdiocb->context1;
+ struct lpfc_dmabuf *pcmd = cmdiocb->context2, *prsp;
+ struct serv_parm *sp;
+ int rc;
+
+ /* Check to see if link went down during discovery */
+ if (lpfc_els_chk_latt(phba)) {
+ lpfc_nlp_remove(phba, ndlp);
+ goto out;
+ }
+
+ if (irsp->ulpStatus) {
+ /* Check for retry */
+ if (lpfc_els_retry(phba, cmdiocb, rspiocb)) {
+ /* ELS command is being retried */
+ goto out;
+ }
+ /* FLOGI failed, so there is no fabric */
+ spin_lock_irq(phba->host->host_lock);
+ phba->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
+ spin_unlock_irq(phba->host->host_lock);
+
+		/* If private loop, then allow max outstanding ELS to be
+		 * LPFC_MAX_DISC_THREADS (32). Scanning in the case of no
+		 * alpa map would take too long otherwise.
+		 */
+ if (phba->alpa_map[0] == 0) {
+ phba->cfg_discovery_threads =
+ LPFC_MAX_DISC_THREADS;
+ }
+
+ /* FLOGI failure */
+ lpfc_printf_log(phba,
+ KERN_INFO,
+ LOG_ELS,
+ "%d:0100 FLOGI failure Data: x%x x%x\n",
+ phba->brd_no,
+ irsp->ulpStatus, irsp->un.ulpWord[4]);
+ goto flogifail;
+ }
+
+	/*
+	 * The FLOGI succeeded. Sync the data for the CPU before
+	 * accessing it.
+	 */
+ prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list);
+
+ sp = prsp->virt + sizeof(uint32_t);
+
+ /* FLOGI completes successfully */
+	lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
+			"%d:0101 FLOGI completes successfully "
+			"Data: x%x x%x x%x x%x\n",
+			phba->brd_no,
+			irsp->un.ulpWord[4], sp->cmn.e_d_tov,
+			sp->cmn.w2.r_a_tov, sp->cmn.edtovResolution);
+
+ if (phba->hba_state == LPFC_FLOGI) {
+		/*
+		 * If the Common Service Parameters indicate an Nport,
+		 * we are point to point; if an Fport, we are Fabric.
+		 */
+ if (sp->cmn.fPort)
+ rc = lpfc_cmpl_els_flogi_fabric(phba, ndlp, sp, irsp);
+ else
+ rc = lpfc_cmpl_els_flogi_nport(phba, ndlp, sp);
+
+ if (!rc)
+ goto out;
+ }
+
+flogifail:
+ lpfc_nlp_remove(phba, ndlp);
+
+ if (irsp->ulpStatus != IOSTAT_LOCAL_REJECT ||
+ (irsp->un.ulpWord[4] != IOERR_SLI_ABORTED &&
+ irsp->un.ulpWord[4] != IOERR_SLI_DOWN)) {
+ /* FLOGI failed, so just use loop map to make discovery list */
+ lpfc_disc_list_loopmap(phba);
+
+ /* Start discovery */
+ lpfc_disc_start(phba);
+ }
+
+out:
+ lpfc_els_free_iocb(phba, cmdiocb);
+}
+
+static int
+lpfc_issue_els_flogi(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp,
+ uint8_t retry)
+{
+ struct serv_parm *sp;
+ IOCB_t *icmd;
+ struct lpfc_iocbq *elsiocb;
+ struct lpfc_sli_ring *pring;
+ uint8_t *pcmd;
+ uint16_t cmdsize;
+ uint32_t tmo;
+ int rc;
+
+ pring = &phba->sli.ring[LPFC_ELS_RING];
+
+ cmdsize = (sizeof (uint32_t) + sizeof (struct serv_parm));
+ if ((elsiocb = lpfc_prep_els_iocb(phba, 1, cmdsize, retry,
+ ndlp, ELS_CMD_FLOGI)) == 0) {
+ return (1);
+ }
+
+ icmd = &elsiocb->iocb;
+ pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
+
+ /* For FLOGI request, remainder of payload is service parameters */
+ *((uint32_t *) (pcmd)) = ELS_CMD_FLOGI;
+ pcmd += sizeof (uint32_t);
+ memcpy(pcmd, &phba->fc_sparam, sizeof (struct serv_parm));
+ sp = (struct serv_parm *) pcmd;
+
+ /* Setup CSPs accordingly for Fabric */
+ sp->cmn.e_d_tov = 0;
+ sp->cmn.w2.r_a_tov = 0;
+ sp->cls1.classValid = 0;
+ sp->cls2.seqDelivery = 1;
+ sp->cls3.seqDelivery = 1;
+ if (sp->cmn.fcphLow < FC_PH3)
+ sp->cmn.fcphLow = FC_PH3;
+ if (sp->cmn.fcphHigh < FC_PH3)
+ sp->cmn.fcphHigh = FC_PH3;
+
+ tmo = phba->fc_ratov;
+ phba->fc_ratov = LPFC_DISC_FLOGI_TMO;
+ lpfc_set_disctmo(phba);
+ phba->fc_ratov = tmo;
+
+ phba->fc_stat.elsXmitFLOGI++;
+ elsiocb->iocb_cmpl = lpfc_cmpl_els_flogi;
+ spin_lock_irq(phba->host->host_lock);
+ rc = lpfc_sli_issue_iocb(phba, pring, elsiocb, 0);
+ spin_unlock_irq(phba->host->host_lock);
+ if (rc == IOCB_ERROR) {
+ lpfc_els_free_iocb(phba, elsiocb);
+ return (1);
+ }
+ return (0);
+}
+
+int
+lpfc_els_abort_flogi(struct lpfc_hba * phba)
+{
+ struct lpfc_sli_ring *pring;
+ struct lpfc_iocbq *iocb, *next_iocb;
+ struct lpfc_nodelist *ndlp;
+ IOCB_t *icmd;
+
+ /* Abort outstanding I/O on NPort <nlp_DID> */
+ lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
+ "%d:0201 Abort outstanding I/O on NPort x%x\n",
+ phba->brd_no, Fabric_DID);
+
+ pring = &phba->sli.ring[LPFC_ELS_RING];
+
+ /*
+ * Check the txcmplq for an iocb that matches the nport the driver is
+ * searching for.
+ */
+ spin_lock_irq(phba->host->host_lock);
+ list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) {
+ icmd = &iocb->iocb;
+ if (icmd->ulpCommand == CMD_ELS_REQUEST64_CR) {
+ ndlp = (struct lpfc_nodelist *)(iocb->context1);
+ if (ndlp && (ndlp->nlp_DID == Fabric_DID)) {
+ list_del(&iocb->list);
+ pring->txcmplq_cnt--;
+
+ if ((icmd->un.elsreq64.bdl.ulpIoTag32)) {
+ lpfc_sli_issue_abort_iotag32
+ (phba, pring, iocb);
+ }
+ if (iocb->iocb_cmpl) {
+ icmd->ulpStatus = IOSTAT_LOCAL_REJECT;
+ icmd->un.ulpWord[4] =
+ IOERR_SLI_ABORTED;
+ spin_unlock_irq(phba->host->host_lock);
+ (iocb->iocb_cmpl) (phba, iocb, iocb);
+ spin_lock_irq(phba->host->host_lock);
+ } else {
+ list_add_tail(&iocb->list,
+ &phba->lpfc_iocb_list);
+ }
+ }
+ }
+ }
+ spin_unlock_irq(phba->host->host_lock);
+
+ return 0;
+}
+
+int
+lpfc_initial_flogi(struct lpfc_hba * phba)
+{
+ struct lpfc_nodelist *ndlp;
+
+ /* First look for Fabric ndlp on the unmapped list */
+
+ if ((ndlp =
+ lpfc_findnode_did(phba, NLP_SEARCH_UNMAPPED,
+ Fabric_DID)) == 0) {
+ /* Cannot find existing Fabric ndlp, so allocate a new one */
+ if ((ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL))
+ == 0) {
+ return (0);
+ }
+ lpfc_nlp_init(phba, ndlp, Fabric_DID);
+	} else {
+ phba->fc_unmap_cnt--;
+ list_del(&ndlp->nlp_listp);
+ spin_lock_irq(phba->host->host_lock);
+ ndlp->nlp_flag &= ~NLP_LIST_MASK;
+ spin_unlock_irq(phba->host->host_lock);
+ }
+ if (lpfc_issue_els_flogi(phba, ndlp, 0)) {
+ mempool_free( ndlp, phba->nlp_mem_pool);
+ }
+ return (1);
+}
+
+static void
+lpfc_more_plogi(struct lpfc_hba * phba)
+{
+ int sentplogi;
+
+ if (phba->num_disc_nodes)
+ phba->num_disc_nodes--;
+
+ /* Continue discovery with <num_disc_nodes> PLOGIs to go */
+ lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
+ "%d:0232 Continue discovery with %d PLOGIs to go "
+ "Data: x%x x%x x%x\n",
+ phba->brd_no, phba->num_disc_nodes, phba->fc_plogi_cnt,
+ phba->fc_flag, phba->hba_state);
+
+ /* Check to see if there are more PLOGIs to be sent */
+ if (phba->fc_flag & FC_NLP_MORE) {
+ /* go thru NPR list and issue any remaining ELS PLOGIs */
+ sentplogi = lpfc_els_disc_plogi(phba);
+ }
+ return;
+}
+
+static void
+lpfc_cmpl_els_plogi(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
+ struct lpfc_iocbq * rspiocb)
+{
+ IOCB_t *irsp;
+ struct lpfc_sli *psli;
+ struct lpfc_nodelist *ndlp;
+ int disc, rc, did, type;
+
+ psli = &phba->sli;
+
+	/* we pass cmdiocb to the state machine, which needs rspiocb as well */
+ cmdiocb->context_un.rsp_iocb = rspiocb;
+
+ irsp = &rspiocb->iocb;
+ ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
+ spin_lock_irq(phba->host->host_lock);
+ ndlp->nlp_flag &= ~NLP_PLOGI_SND;
+ spin_unlock_irq(phba->host->host_lock);
+
+ /* Since ndlp can be freed in the disc state machine, note if this node
+ * is being used during discovery.
+ */
+ disc = (ndlp->nlp_flag & NLP_NPR_2B_DISC);
+ rc = 0;
+
+ /* PLOGI completes to NPort <nlp_DID> */
+ lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
+ "%d:0102 PLOGI completes to NPort x%x "
+ "Data: x%x x%x x%x x%x\n",
+ phba->brd_no, ndlp->nlp_DID, irsp->ulpStatus,
+ irsp->un.ulpWord[4], disc, phba->num_disc_nodes);
+
+ /* Check to see if link went down during discovery */
+ if (lpfc_els_chk_latt(phba)) {
+ spin_lock_irq(phba->host->host_lock);
+ ndlp->nlp_flag |= NLP_NPR_2B_DISC;
+ spin_unlock_irq(phba->host->host_lock);
+ goto out;
+ }
+
+ /* ndlp could be freed in DSM, save these values now */
+ type = ndlp->nlp_type;
+ did = ndlp->nlp_DID;
+
+ if (irsp->ulpStatus) {
+ /* Check for retry */
+ if (lpfc_els_retry(phba, cmdiocb, rspiocb)) {
+ /* ELS command is being retried */
+ if (disc) {
+ spin_lock_irq(phba->host->host_lock);
+ ndlp->nlp_flag |= NLP_NPR_2B_DISC;
+ spin_unlock_irq(phba->host->host_lock);
+ }
+ goto out;
+ }
+
+ /* PLOGI failed */
+ /* Do not call DSM for lpfc_els_abort'ed ELS cmds */
+ if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
+ ((irsp->un.ulpWord[4] == IOERR_SLI_ABORTED) ||
+ (irsp->un.ulpWord[4] == IOERR_SLI_DOWN))) {
+ disc = (ndlp->nlp_flag & NLP_NPR_2B_DISC);
+		} else {
+ rc = lpfc_disc_state_machine(phba, ndlp, cmdiocb,
+ NLP_EVT_CMPL_PLOGI);
+ }
+ } else {
+ /* Good status, call state machine */
+ rc = lpfc_disc_state_machine(phba, ndlp, cmdiocb,
+ NLP_EVT_CMPL_PLOGI);
+ }
+
+ if (type & NLP_FABRIC) {
+		/* If we cannot log in to the Nameserver, kick off discovery now */
+ if ((did == NameServer_DID) && (rc == NLP_STE_FREED_NODE)) {
+ lpfc_disc_start(phba);
+ }
+ goto out;
+ }
+
+ if (disc && phba->num_disc_nodes) {
+ /* Check to see if there are more PLOGIs to be sent */
+ lpfc_more_plogi(phba);
+ }
+
+ if (rc != NLP_STE_FREED_NODE) {
+ spin_lock_irq(phba->host->host_lock);
+ ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
+ spin_unlock_irq(phba->host->host_lock);
+ }
+
+ if (phba->num_disc_nodes == 0) {
+ if(disc) {
+ spin_lock_irq(phba->host->host_lock);
+ phba->fc_flag &= ~FC_NDISC_ACTIVE;
+ spin_unlock_irq(phba->host->host_lock);
+ }
+ lpfc_can_disctmo(phba);
+ if (phba->fc_flag & FC_RSCN_MODE) {
+ /* Check to see if more RSCNs came in while we were
+ * processing this one.
+ */
+ if ((phba->fc_rscn_id_cnt == 0) &&
+ (!(phba->fc_flag & FC_RSCN_DISCOVERY))) {
+ spin_lock_irq(phba->host->host_lock);
+ phba->fc_flag &= ~FC_RSCN_MODE;
+ spin_unlock_irq(phba->host->host_lock);
+ } else {
+ lpfc_els_handle_rscn(phba);
+ }
+ }
+ }
+
+out:
+ lpfc_els_free_iocb(phba, cmdiocb);
+ return;
+}
+
+int
+lpfc_issue_els_plogi(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp,
+ uint8_t retry)
+{
+ struct serv_parm *sp;
+ IOCB_t *icmd;
+ struct lpfc_iocbq *elsiocb;
+ struct lpfc_sli_ring *pring;
+ struct lpfc_sli *psli;
+ uint8_t *pcmd;
+ uint16_t cmdsize;
+
+ psli = &phba->sli;
+ pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */
+
+ cmdsize = (sizeof (uint32_t) + sizeof (struct serv_parm));
+ if ((elsiocb = lpfc_prep_els_iocb(phba, 1, cmdsize, retry,
+ ndlp, ELS_CMD_PLOGI)) == 0) {
+ return (1);
+ }
+
+ icmd = &elsiocb->iocb;
+ pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
+
+ /* For PLOGI request, remainder of payload is service parameters */
+ *((uint32_t *) (pcmd)) = ELS_CMD_PLOGI;
+ pcmd += sizeof (uint32_t);
+ memcpy(pcmd, &phba->fc_sparam, sizeof (struct serv_parm));
+ sp = (struct serv_parm *) pcmd;
+
+ if (sp->cmn.fcphLow < FC_PH_4_3)
+ sp->cmn.fcphLow = FC_PH_4_3;
+
+ if (sp->cmn.fcphHigh < FC_PH3)
+ sp->cmn.fcphHigh = FC_PH3;
+
+ phba->fc_stat.elsXmitPLOGI++;
+ elsiocb->iocb_cmpl = lpfc_cmpl_els_plogi;
+ spin_lock_irq(phba->host->host_lock);
+ ndlp->nlp_flag |= NLP_PLOGI_SND;
+ if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) {
+ ndlp->nlp_flag &= ~NLP_PLOGI_SND;
+ spin_unlock_irq(phba->host->host_lock);
+ lpfc_els_free_iocb(phba, elsiocb);
+ return (1);
+ }
+ spin_unlock_irq(phba->host->host_lock);
+ return (0);
+}
+
+static void
+lpfc_cmpl_els_prli(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
+ struct lpfc_iocbq * rspiocb)
+{
+ IOCB_t *irsp;
+ struct lpfc_sli *psli;
+ struct lpfc_nodelist *ndlp;
+
+ psli = &phba->sli;
+	/* we pass cmdiocb to the state machine, which needs rspiocb as well */
+ cmdiocb->context_un.rsp_iocb = rspiocb;
+
+ irsp = &(rspiocb->iocb);
+ ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
+ spin_lock_irq(phba->host->host_lock);
+ ndlp->nlp_flag &= ~NLP_PRLI_SND;
+ spin_unlock_irq(phba->host->host_lock);
+
+ /* PRLI completes to NPort <nlp_DID> */
+ lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
+ "%d:0103 PRLI completes to NPort x%x "
+ "Data: x%x x%x x%x\n",
+ phba->brd_no, ndlp->nlp_DID, irsp->ulpStatus,
+ irsp->un.ulpWord[4], phba->num_disc_nodes);
+
+ phba->fc_prli_sent--;
+ /* Check to see if link went down during discovery */
+ if (lpfc_els_chk_latt(phba))
+ goto out;
+
+ if (irsp->ulpStatus) {
+ /* Check for retry */
+ if (lpfc_els_retry(phba, cmdiocb, rspiocb)) {
+ /* ELS command is being retried */
+ goto out;
+ }
+ /* PRLI failed */
+ /* Do not call DSM for lpfc_els_abort'ed ELS cmds */
+ if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
+ ((irsp->un.ulpWord[4] == IOERR_SLI_ABORTED) ||
+ (irsp->un.ulpWord[4] == IOERR_SLI_DOWN))) {
+ goto out;
+		} else {
+ lpfc_disc_state_machine(phba, ndlp, cmdiocb,
+ NLP_EVT_CMPL_PRLI);
+ }
+ } else {
+ /* Good status, call state machine */
+ lpfc_disc_state_machine(phba, ndlp, cmdiocb, NLP_EVT_CMPL_PRLI);
+ }
+
+out:
+ lpfc_els_free_iocb(phba, cmdiocb);
+ return;
+}
+
+int
+lpfc_issue_els_prli(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp,
+ uint8_t retry)
+{
+ PRLI *npr;
+ IOCB_t *icmd;
+ struct lpfc_iocbq *elsiocb;
+ struct lpfc_sli_ring *pring;
+ struct lpfc_sli *psli;
+ uint8_t *pcmd;
+ uint16_t cmdsize;
+
+ psli = &phba->sli;
+ pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */
+
+ cmdsize = (sizeof (uint32_t) + sizeof (PRLI));
+ if ((elsiocb = lpfc_prep_els_iocb(phba, 1, cmdsize, retry,
+ ndlp, ELS_CMD_PRLI)) == 0) {
+ return (1);
+ }
+
+ icmd = &elsiocb->iocb;
+ pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
+
+ /* For PRLI request, remainder of payload is service parameters */
+ memset(pcmd, 0, (sizeof (PRLI) + sizeof (uint32_t)));
+ *((uint32_t *) (pcmd)) = ELS_CMD_PRLI;
+ pcmd += sizeof (uint32_t);
+
+ /* For PRLI, remainder of payload is PRLI parameter page */
+ npr = (PRLI *) pcmd;
+ /*
+ * If our firmware version is 3.20 or later,
+ * set the following bits for FC-TAPE support.
+ */
+ if (phba->vpd.rev.feaLevelHigh >= 0x02) {
+ npr->ConfmComplAllowed = 1;
+ npr->Retry = 1;
+ npr->TaskRetryIdReq = 1;
+ }
+ npr->estabImagePair = 1;
+ npr->readXferRdyDis = 1;
+
+ /* For FCP support */
+ npr->prliType = PRLI_FCP_TYPE;
+ npr->initiatorFunc = 1;
+
+ phba->fc_stat.elsXmitPRLI++;
+ elsiocb->iocb_cmpl = lpfc_cmpl_els_prli;
+ spin_lock_irq(phba->host->host_lock);
+ ndlp->nlp_flag |= NLP_PRLI_SND;
+ if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) {
+ ndlp->nlp_flag &= ~NLP_PRLI_SND;
+ spin_unlock_irq(phba->host->host_lock);
+ lpfc_els_free_iocb(phba, elsiocb);
+ return (1);
+ }
+ spin_unlock_irq(phba->host->host_lock);
+ phba->fc_prli_sent++;
+ return (0);
+}
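+
+/* The FC-TAPE bits above (ConfmComplAllowed / Retry / TaskRetryIdReq)
+ * are only advertised when the firmware feature level supports them;
+ * the same feaLevelHigh test and bits appear again on the target side
+ * in lpfc_els_rsp_prli_acc() below.
+ */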
+
+static void
+lpfc_more_adisc(struct lpfc_hba * phba)
+{
+ int sentadisc;
+
+ if (phba->num_disc_nodes)
+ phba->num_disc_nodes--;
+
+ /* Continue discovery with <num_disc_nodes> ADISCs to go */
+ lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
+ "%d:0210 Continue discovery with %d ADISCs to go "
+ "Data: x%x x%x x%x\n",
+ phba->brd_no, phba->num_disc_nodes, phba->fc_adisc_cnt,
+ phba->fc_flag, phba->hba_state);
+
+ /* Check to see if there are more ADISCs to be sent */
+ if (phba->fc_flag & FC_NLP_MORE) {
+ lpfc_set_disctmo(phba);
+
+ /* go thru NPR list and issue any remaining ELS ADISCs */
+ sentadisc = lpfc_els_disc_adisc(phba);
+ }
+ return;
+}
+
+static void
+lpfc_rscn_disc(struct lpfc_hba * phba)
+{
+ /* RSCN discovery */
+ /* go thru NPR list and issue ELS PLOGIs */
+ if (phba->fc_npr_cnt) {
+ if (lpfc_els_disc_plogi(phba))
+ return;
+ }
+ if (phba->fc_flag & FC_RSCN_MODE) {
+ /* Check to see if more RSCNs came in while we were
+ * processing this one.
+ */
+ if ((phba->fc_rscn_id_cnt == 0) &&
+ (!(phba->fc_flag & FC_RSCN_DISCOVERY))) {
+ spin_lock_irq(phba->host->host_lock);
+ phba->fc_flag &= ~FC_RSCN_MODE;
+ spin_unlock_irq(phba->host->host_lock);
+ } else {
+ lpfc_els_handle_rscn(phba);
+ }
+ }
+}
+
+static void
+lpfc_cmpl_els_adisc(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
+ struct lpfc_iocbq * rspiocb)
+{
+ IOCB_t *irsp;
+ struct lpfc_sli *psli;
+ struct lpfc_nodelist *ndlp;
+ LPFC_MBOXQ_t *mbox;
+ int disc, rc;
+
+ psli = &phba->sli;
+
+	/* we pass cmdiocb to the state machine, which needs rspiocb as well */
+ cmdiocb->context_un.rsp_iocb = rspiocb;
+
+ irsp = &(rspiocb->iocb);
+ ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
+ spin_lock_irq(phba->host->host_lock);
+ ndlp->nlp_flag &= ~NLP_ADISC_SND;
+ spin_unlock_irq(phba->host->host_lock);
+
+ /* Since ndlp can be freed in the disc state machine, note if this node
+ * is being used during discovery.
+ */
+ disc = (ndlp->nlp_flag & NLP_NPR_2B_DISC);
+
+ /* ADISC completes to NPort <nlp_DID> */
+ lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
+ "%d:0104 ADISC completes to NPort x%x "
+ "Data: x%x x%x x%x x%x\n",
+ phba->brd_no, ndlp->nlp_DID, irsp->ulpStatus,
+ irsp->un.ulpWord[4], disc, phba->num_disc_nodes);
+
+ /* Check to see if link went down during discovery */
+ if (lpfc_els_chk_latt(phba)) {
+ spin_lock_irq(phba->host->host_lock);
+ ndlp->nlp_flag |= NLP_NPR_2B_DISC;
+ spin_unlock_irq(phba->host->host_lock);
+ goto out;
+ }
+
+ if (irsp->ulpStatus) {
+ /* Check for retry */
+ if (lpfc_els_retry(phba, cmdiocb, rspiocb)) {
+ /* ELS command is being retried */
+ if (disc) {
+ spin_lock_irq(phba->host->host_lock);
+ ndlp->nlp_flag |= NLP_NPR_2B_DISC;
+ spin_unlock_irq(phba->host->host_lock);
+ lpfc_set_disctmo(phba);
+ }
+ goto out;
+ }
+ /* ADISC failed */
+ /* Do not call DSM for lpfc_els_abort'ed ELS cmds */
+ if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
+ ((irsp->un.ulpWord[4] == IOERR_SLI_ABORTED) ||
+ (irsp->un.ulpWord[4] == IOERR_SLI_DOWN))) {
+ disc = (ndlp->nlp_flag & NLP_NPR_2B_DISC);
+		} else {
+ lpfc_disc_state_machine(phba, ndlp, cmdiocb,
+ NLP_EVT_CMPL_ADISC);
+ }
+ } else {
+ /* Good status, call state machine */
+ lpfc_disc_state_machine(phba, ndlp, cmdiocb,
+ NLP_EVT_CMPL_ADISC);
+ }
+
+ if (disc && phba->num_disc_nodes) {
+ /* Check to see if there are more ADISCs to be sent */
+ lpfc_more_adisc(phba);
+
+ /* Check to see if we are done with ADISC authentication */
+ if (phba->num_disc_nodes == 0) {
+ lpfc_can_disctmo(phba);
+ /* If we get here, there is nothing left to wait for */
+ if ((phba->hba_state < LPFC_HBA_READY) &&
+ (phba->hba_state != LPFC_CLEAR_LA)) {
+ /* Link up discovery */
+ if ((mbox = mempool_alloc(phba->mbox_mem_pool,
+ GFP_KERNEL))) {
+ phba->hba_state = LPFC_CLEAR_LA;
+ lpfc_clear_la(phba, mbox);
+ mbox->mbox_cmpl =
+ lpfc_mbx_cmpl_clear_la;
+ rc = lpfc_sli_issue_mbox
+ (phba, mbox,
+ (MBX_NOWAIT | MBX_STOP_IOCB));
+ if (rc == MBX_NOT_FINISHED) {
+ mempool_free(mbox,
+ phba->mbox_mem_pool);
+ lpfc_disc_flush_list(phba);
+ psli->ring[(psli->ip_ring)].
+ flag &=
+ ~LPFC_STOP_IOCB_EVENT;
+ psli->ring[(psli->fcp_ring)].
+ flag &=
+ ~LPFC_STOP_IOCB_EVENT;
+ psli->ring[(psli->next_ring)].
+ flag &=
+ ~LPFC_STOP_IOCB_EVENT;
+ phba->hba_state =
+ LPFC_HBA_READY;
+ }
+ }
+ } else {
+ lpfc_rscn_disc(phba);
+ }
+ }
+ }
+ spin_lock_irq(phba->host->host_lock);
+ ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
+ spin_unlock_irq(phba->host->host_lock);
+out:
+ lpfc_els_free_iocb(phba, cmdiocb);
+ return;
+}
+
+int
+lpfc_issue_els_adisc(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp,
+ uint8_t retry)
+{
+ ADISC *ap;
+ IOCB_t *icmd;
+ struct lpfc_iocbq *elsiocb;
+ struct lpfc_sli_ring *pring;
+ struct lpfc_sli *psli;
+ uint8_t *pcmd;
+ uint16_t cmdsize;
+
+ psli = &phba->sli;
+ pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */
+
+ cmdsize = (sizeof (uint32_t) + sizeof (ADISC));
+ if ((elsiocb = lpfc_prep_els_iocb(phba, 1, cmdsize, retry,
+ ndlp, ELS_CMD_ADISC)) == 0) {
+ return (1);
+ }
+
+ icmd = &elsiocb->iocb;
+ pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
+
+ /* For ADISC request, remainder of payload is service parameters */
+ *((uint32_t *) (pcmd)) = ELS_CMD_ADISC;
+ pcmd += sizeof (uint32_t);
+
+ /* Fill in ADISC payload */
+ ap = (ADISC *) pcmd;
+ ap->hardAL_PA = phba->fc_pref_ALPA;
+ memcpy(&ap->portName, &phba->fc_portname, sizeof (struct lpfc_name));
+ memcpy(&ap->nodeName, &phba->fc_nodename, sizeof (struct lpfc_name));
+ ap->DID = be32_to_cpu(phba->fc_myDID);
+
+ phba->fc_stat.elsXmitADISC++;
+ elsiocb->iocb_cmpl = lpfc_cmpl_els_adisc;
+ spin_lock_irq(phba->host->host_lock);
+ ndlp->nlp_flag |= NLP_ADISC_SND;
+ if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) {
+ ndlp->nlp_flag &= ~NLP_ADISC_SND;
+ spin_unlock_irq(phba->host->host_lock);
+ lpfc_els_free_iocb(phba, elsiocb);
+ return (1);
+ }
+ spin_unlock_irq(phba->host->host_lock);
+ return (0);
+}
+
+static void
+lpfc_cmpl_els_logo(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
+ struct lpfc_iocbq * rspiocb)
+{
+ IOCB_t *irsp;
+ struct lpfc_sli *psli;
+ struct lpfc_nodelist *ndlp;
+
+ psli = &phba->sli;
+	/* we pass cmdiocb to the state machine, which needs rspiocb as well */
+ cmdiocb->context_un.rsp_iocb = rspiocb;
+
+ irsp = &(rspiocb->iocb);
+ ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
+ spin_lock_irq(phba->host->host_lock);
+ ndlp->nlp_flag &= ~NLP_LOGO_SND;
+ spin_unlock_irq(phba->host->host_lock);
+
+ /* LOGO completes to NPort <nlp_DID> */
+ lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
+ "%d:0105 LOGO completes to NPort x%x "
+ "Data: x%x x%x x%x\n",
+ phba->brd_no, ndlp->nlp_DID, irsp->ulpStatus,
+ irsp->un.ulpWord[4], phba->num_disc_nodes);
+
+ /* Check to see if link went down during discovery */
+ if (lpfc_els_chk_latt(phba))
+ goto out;
+
+ if (irsp->ulpStatus) {
+ /* Check for retry */
+ if (lpfc_els_retry(phba, cmdiocb, rspiocb)) {
+ /* ELS command is being retried */
+ goto out;
+ }
+ /* LOGO failed */
+ /* Do not call DSM for lpfc_els_abort'ed ELS cmds */
+ if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
+ ((irsp->un.ulpWord[4] == IOERR_SLI_ABORTED) ||
+ (irsp->un.ulpWord[4] == IOERR_SLI_DOWN))) {
+ goto out;
+		} else {
+ lpfc_disc_state_machine(phba, ndlp, cmdiocb,
+ NLP_EVT_CMPL_LOGO);
+ }
+ } else {
+ /* Good status, call state machine */
+ lpfc_disc_state_machine(phba, ndlp, cmdiocb, NLP_EVT_CMPL_LOGO);
+
+ if (ndlp->nlp_flag & NLP_DELAY_TMO) {
+ lpfc_unreg_rpi(phba, ndlp);
+ }
+ }
+
+out:
+ lpfc_els_free_iocb(phba, cmdiocb);
+ return;
+}
+
+int
+lpfc_issue_els_logo(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp,
+ uint8_t retry)
+{
+ IOCB_t *icmd;
+ struct lpfc_iocbq *elsiocb;
+ struct lpfc_sli_ring *pring;
+ struct lpfc_sli *psli;
+ uint8_t *pcmd;
+ uint16_t cmdsize;
+
+ psli = &phba->sli;
+ pring = &psli->ring[LPFC_ELS_RING];
+
+ cmdsize = 2 * (sizeof (uint32_t) + sizeof (struct lpfc_name));
+ if ((elsiocb = lpfc_prep_els_iocb(phba, 1, cmdsize, retry,
+ ndlp, ELS_CMD_LOGO)) == 0) {
+ return (1);
+ }
+
+ icmd = &elsiocb->iocb;
+ pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
+ *((uint32_t *) (pcmd)) = ELS_CMD_LOGO;
+ pcmd += sizeof (uint32_t);
+
+ /* Fill in LOGO payload */
+ *((uint32_t *) (pcmd)) = be32_to_cpu(phba->fc_myDID);
+ pcmd += sizeof (uint32_t);
+ memcpy(pcmd, &phba->fc_portname, sizeof (struct lpfc_name));
+
+ phba->fc_stat.elsXmitLOGO++;
+ elsiocb->iocb_cmpl = lpfc_cmpl_els_logo;
+ spin_lock_irq(phba->host->host_lock);
+ ndlp->nlp_flag |= NLP_LOGO_SND;
+ if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) {
+ ndlp->nlp_flag &= ~NLP_LOGO_SND;
+ spin_unlock_irq(phba->host->host_lock);
+ lpfc_els_free_iocb(phba, elsiocb);
+ return (1);
+ }
+ spin_unlock_irq(phba->host->host_lock);
+ return (0);
+}
+
+static void
+lpfc_cmpl_els_cmd(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
+ struct lpfc_iocbq * rspiocb)
+{
+ IOCB_t *irsp;
+
+ irsp = &rspiocb->iocb;
+
+ /* ELS cmd tag <ulpIoTag> completes */
+ lpfc_printf_log(phba,
+ KERN_INFO,
+ LOG_ELS,
+ "%d:0106 ELS cmd tag x%x completes Data: x%x x%x\n",
+ phba->brd_no,
+ irsp->ulpIoTag, irsp->ulpStatus, irsp->un.ulpWord[4]);
+
+ /* Check to see if link went down during discovery */
+ lpfc_els_chk_latt(phba);
+ lpfc_els_free_iocb(phba, cmdiocb);
+ return;
+}
+
+int
+lpfc_issue_els_scr(struct lpfc_hba * phba, uint32_t nportid, uint8_t retry)
+{
+ IOCB_t *icmd;
+ struct lpfc_iocbq *elsiocb;
+ struct lpfc_sli_ring *pring;
+ struct lpfc_sli *psli;
+ uint8_t *pcmd;
+ uint16_t cmdsize;
+ struct lpfc_nodelist *ndlp;
+
+ psli = &phba->sli;
+ pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */
+ cmdsize = (sizeof (uint32_t) + sizeof (SCR));
+ if ((ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL)) == 0) {
+ return (1);
+ }
+
+ lpfc_nlp_init(phba, ndlp, nportid);
+
+ if ((elsiocb = lpfc_prep_els_iocb(phba, 1, cmdsize, retry,
+ ndlp, ELS_CMD_SCR)) == 0) {
+ mempool_free( ndlp, phba->nlp_mem_pool);
+ return (1);
+ }
+
+ icmd = &elsiocb->iocb;
+ pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
+
+ *((uint32_t *) (pcmd)) = ELS_CMD_SCR;
+ pcmd += sizeof (uint32_t);
+
+ /* For SCR, remainder of payload is SCR parameter page */
+ memset(pcmd, 0, sizeof (SCR));
+ ((SCR *) pcmd)->Function = SCR_FUNC_FULL;
+
+ phba->fc_stat.elsXmitSCR++;
+ elsiocb->iocb_cmpl = lpfc_cmpl_els_cmd;
+ spin_lock_irq(phba->host->host_lock);
+ if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) {
+ spin_unlock_irq(phba->host->host_lock);
+ mempool_free( ndlp, phba->nlp_mem_pool);
+ lpfc_els_free_iocb(phba, elsiocb);
+ return (1);
+ }
+ spin_unlock_irq(phba->host->host_lock);
+ mempool_free( ndlp, phba->nlp_mem_pool);
+ return (0);
+}
+
+static int
+lpfc_issue_els_farpr(struct lpfc_hba * phba, uint32_t nportid, uint8_t retry)
+{
+ IOCB_t *icmd;
+ struct lpfc_iocbq *elsiocb;
+ struct lpfc_sli_ring *pring;
+ struct lpfc_sli *psli;
+ FARP *fp;
+ uint8_t *pcmd;
+ uint32_t *lp;
+ uint16_t cmdsize;
+ struct lpfc_nodelist *ondlp;
+ struct lpfc_nodelist *ndlp;
+
+ psli = &phba->sli;
+ pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */
+ cmdsize = (sizeof (uint32_t) + sizeof (FARP));
+ if ((ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL)) == 0) {
+ return (1);
+ }
+ lpfc_nlp_init(phba, ndlp, nportid);
+
+ if ((elsiocb = lpfc_prep_els_iocb(phba, 1, cmdsize, retry,
+ ndlp, ELS_CMD_RNID)) == 0) {
+ mempool_free( ndlp, phba->nlp_mem_pool);
+ return (1);
+ }
+
+ icmd = &elsiocb->iocb;
+ pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
+
+ *((uint32_t *) (pcmd)) = ELS_CMD_FARPR;
+ pcmd += sizeof (uint32_t);
+
+ /* Fill in FARPR payload */
+ fp = (FARP *) (pcmd);
+ memset(fp, 0, sizeof (FARP));
+ lp = (uint32_t *) pcmd;
+ *lp++ = be32_to_cpu(nportid);
+ *lp++ = be32_to_cpu(phba->fc_myDID);
+ fp->Rflags = 0;
+ fp->Mflags = (FARP_MATCH_PORT | FARP_MATCH_NODE);
+
+ memcpy(&fp->RportName, &phba->fc_portname, sizeof (struct lpfc_name));
+ memcpy(&fp->RnodeName, &phba->fc_nodename, sizeof (struct lpfc_name));
+ if ((ondlp = lpfc_findnode_did(phba, NLP_SEARCH_ALL, nportid))) {
+ memcpy(&fp->OportName, &ondlp->nlp_portname,
+ sizeof (struct lpfc_name));
+ memcpy(&fp->OnodeName, &ondlp->nlp_nodename,
+ sizeof (struct lpfc_name));
+ }
+
+ phba->fc_stat.elsXmitFARPR++;
+ elsiocb->iocb_cmpl = lpfc_cmpl_els_cmd;
+ spin_lock_irq(phba->host->host_lock);
+ if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) {
+ spin_unlock_irq(phba->host->host_lock);
+ mempool_free( ndlp, phba->nlp_mem_pool);
+ lpfc_els_free_iocb(phba, elsiocb);
+ return (1);
+ }
+ spin_unlock_irq(phba->host->host_lock);
+ mempool_free( ndlp, phba->nlp_mem_pool);
+ return (0);
+}
+
+void
+lpfc_els_retry_delay(unsigned long ptr)
+{
+ struct lpfc_nodelist *ndlp;
+ struct lpfc_hba *phba;
+ unsigned long iflag;
+ struct lpfc_work_evt *evtp;
+
+ ndlp = (struct lpfc_nodelist *)ptr;
+ phba = ndlp->nlp_phba;
+ evtp = &ndlp->els_retry_evt;
+
+ spin_lock_irqsave(phba->host->host_lock, iflag);
+ if (!list_empty(&evtp->evt_listp)) {
+ spin_unlock_irqrestore(phba->host->host_lock, iflag);
+ return;
+ }
+
+ evtp->evt_arg1 = ndlp;
+ evtp->evt = LPFC_EVT_ELS_RETRY;
+ list_add_tail(&evtp->evt_listp, &phba->work_list);
+ if (phba->work_wait)
+ wake_up(phba->work_wait);
+
+ spin_unlock_irqrestore(phba->host->host_lock, iflag);
+ return;
+}
+
+void
+lpfc_els_retry_delay_handler(struct lpfc_nodelist *ndlp)
+{
+ struct lpfc_hba *phba;
+ uint32_t cmd;
+ uint32_t did;
+ uint8_t retry;
+
+ phba = ndlp->nlp_phba;
+ spin_lock_irq(phba->host->host_lock);
+ did = (uint32_t) (ndlp->nlp_DID);
+ cmd = (uint32_t) (ndlp->nlp_last_elscmd);
+
+ if (!(ndlp->nlp_flag & NLP_DELAY_TMO)) {
+ spin_unlock_irq(phba->host->host_lock);
+ return;
+ }
+
+ ndlp->nlp_flag &= ~NLP_DELAY_TMO;
+ spin_unlock_irq(phba->host->host_lock);
+ retry = ndlp->nlp_retry;
+
+ switch (cmd) {
+ case ELS_CMD_FLOGI:
+ lpfc_issue_els_flogi(phba, ndlp, retry);
+ break;
+ case ELS_CMD_PLOGI:
+ ndlp->nlp_state = NLP_STE_PLOGI_ISSUE;
+ lpfc_nlp_list(phba, ndlp, NLP_PLOGI_LIST);
+ lpfc_issue_els_plogi(phba, ndlp, retry);
+ break;
+ case ELS_CMD_ADISC:
+ ndlp->nlp_state = NLP_STE_ADISC_ISSUE;
+ lpfc_nlp_list(phba, ndlp, NLP_ADISC_LIST);
+ lpfc_issue_els_adisc(phba, ndlp, retry);
+ break;
+ case ELS_CMD_PRLI:
+ ndlp->nlp_state = NLP_STE_PRLI_ISSUE;
+ lpfc_nlp_list(phba, ndlp, NLP_PRLI_LIST);
+ lpfc_issue_els_prli(phba, ndlp, retry);
+ break;
+ case ELS_CMD_LOGO:
+ ndlp->nlp_state = NLP_STE_NPR_NODE;
+ lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST);
+ lpfc_issue_els_logo(phba, ndlp, retry);
+ break;
+ }
+ return;
+}
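+
+/* End-to-end sketch of the delayed-retry path (the timer itself is
+ * armed in lpfc_els_retry() below):
+ *
+ *	ndlp->nlp_last_elscmd = cmd;
+ *	ndlp->nlp_flag |= NLP_DELAY_TMO;
+ *	mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ);
+ *	 ...timer fires -> lpfc_els_retry_delay() queues LPFC_EVT_ELS_RETRY
+ *	 ...worker thread -> lpfc_els_retry_delay_handler() reissues cmd
+ */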
+
+static int
+lpfc_els_retry(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
+ struct lpfc_iocbq * rspiocb)
+{
+ IOCB_t *irsp;
+ struct lpfc_dmabuf *pcmd;
+ struct lpfc_nodelist *ndlp;
+ uint32_t *elscmd;
+ struct ls_rjt stat;
+ int retry, maxretry;
+ int delay;
+ uint32_t cmd;
+
+ retry = 0;
+ delay = 0;
+ maxretry = lpfc_max_els_tries;
+ irsp = &rspiocb->iocb;
+ ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
+ pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
+ cmd = 0;
+	/* Note: context2 may be 0 for an internal driver abort
+	 * of a delayed ELS command.
+	 */
+
+ if (pcmd && pcmd->virt) {
+ elscmd = (uint32_t *) (pcmd->virt);
+ cmd = *elscmd++;
+ }
+
+ switch (irsp->ulpStatus) {
+ case IOSTAT_FCP_RSP_ERROR:
+ case IOSTAT_REMOTE_STOP:
+ break;
+
+ case IOSTAT_LOCAL_REJECT:
+ switch ((irsp->un.ulpWord[4] & 0xff)) {
+ case IOERR_LOOP_OPEN_FAILURE:
+ if (cmd == ELS_CMD_PLOGI) {
+ if (cmdiocb->retry == 0) {
+ delay = 1;
+ }
+ }
+ retry = 1;
+ break;
+
+ case IOERR_SEQUENCE_TIMEOUT:
+ retry = 1;
+ if ((cmd == ELS_CMD_FLOGI)
+ && (phba->fc_topology != TOPOLOGY_LOOP)) {
+ delay = 1;
+ maxretry = 48;
+ }
+ break;
+
+ case IOERR_NO_RESOURCES:
+ if (cmd == ELS_CMD_PLOGI) {
+ delay = 1;
+ }
+ retry = 1;
+ break;
+
+ case IOERR_INVALID_RPI:
+ retry = 1;
+ break;
+ }
+ break;
+
+ case IOSTAT_NPORT_RJT:
+ case IOSTAT_FABRIC_RJT:
+ if (irsp->un.ulpWord[4] & RJT_UNAVAIL_TEMP) {
+ retry = 1;
+ break;
+ }
+ break;
+
+ case IOSTAT_NPORT_BSY:
+ case IOSTAT_FABRIC_BSY:
+ retry = 1;
+ break;
+
+ case IOSTAT_LS_RJT:
+ stat.un.lsRjtError = be32_to_cpu(irsp->un.ulpWord[4]);
+		/* Added for Vendor specific support
+		 * Just keep retrying for these Rsn / Exp codes
+		 */
+ switch (stat.un.b.lsRjtRsnCode) {
+ case LSRJT_UNABLE_TPC:
+ if (stat.un.b.lsRjtRsnCodeExp ==
+ LSEXP_CMD_IN_PROGRESS) {
+ if (cmd == ELS_CMD_PLOGI) {
+ delay = 1;
+ maxretry = 48;
+ }
+ retry = 1;
+ break;
+ }
+ if (cmd == ELS_CMD_PLOGI) {
+ delay = 1;
+ maxretry = lpfc_max_els_tries + 1;
+ retry = 1;
+ break;
+ }
+ break;
+
+ case LSRJT_LOGICAL_BSY:
+ if (cmd == ELS_CMD_PLOGI) {
+ delay = 1;
+ maxretry = 48;
+ }
+ retry = 1;
+ break;
+ }
+ break;
+
+ case IOSTAT_INTERMED_RSP:
+ case IOSTAT_BA_RJT:
+ break;
+
+ default:
+ break;
+ }
+
+ if (ndlp->nlp_DID == FDMI_DID) {
+ retry = 1;
+ }
+
+ if ((++cmdiocb->retry) >= maxretry) {
+ phba->fc_stat.elsRetryExceeded++;
+ retry = 0;
+ }
+
+ if (retry) {
+
+ /* Retry ELS command <elsCmd> to remote NPORT <did> */
+ lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
+ "%d:0107 Retry ELS command x%x to remote "
+ "NPORT x%x Data: x%x x%x\n",
+ phba->brd_no,
+ cmd, ndlp->nlp_DID, cmdiocb->retry, delay);
+
+ if ((cmd == ELS_CMD_PLOGI) || (cmd == ELS_CMD_ADISC)) {
+ /* If discovery / RSCN timer is running, reset it */
+ if (timer_pending(&phba->fc_disctmo) ||
+ (phba->fc_flag & FC_RSCN_MODE)) {
+ lpfc_set_disctmo(phba);
+ }
+ }
+
+ phba->fc_stat.elsXmitRetry++;
+ if (delay) {
+ phba->fc_stat.elsDelayRetry++;
+ ndlp->nlp_retry = cmdiocb->retry;
+
+ mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ);
+ ndlp->nlp_flag |= NLP_DELAY_TMO;
+
+ ndlp->nlp_state = NLP_STE_NPR_NODE;
+ lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST);
+ ndlp->nlp_last_elscmd = cmd;
+
+ return (1);
+ }
+ switch (cmd) {
+ case ELS_CMD_FLOGI:
+ lpfc_issue_els_flogi(phba, ndlp, cmdiocb->retry);
+ return (1);
+ case ELS_CMD_PLOGI:
+ ndlp->nlp_state = NLP_STE_PLOGI_ISSUE;
+ lpfc_nlp_list(phba, ndlp, NLP_PLOGI_LIST);
+ lpfc_issue_els_plogi(phba, ndlp, cmdiocb->retry);
+ return (1);
+ case ELS_CMD_ADISC:
+ ndlp->nlp_state = NLP_STE_ADISC_ISSUE;
+ lpfc_nlp_list(phba, ndlp, NLP_ADISC_LIST);
+ lpfc_issue_els_adisc(phba, ndlp, cmdiocb->retry);
+ return (1);
+ case ELS_CMD_PRLI:
+ ndlp->nlp_state = NLP_STE_PRLI_ISSUE;
+ lpfc_nlp_list(phba, ndlp, NLP_PRLI_LIST);
+ lpfc_issue_els_prli(phba, ndlp, cmdiocb->retry);
+ return (1);
+ case ELS_CMD_LOGO:
+ ndlp->nlp_state = NLP_STE_NPR_NODE;
+ lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST);
+ lpfc_issue_els_logo(phba, ndlp, cmdiocb->retry);
+ return (1);
+ }
+ }
+
+ /* No retry ELS command <elsCmd> to remote NPORT <did> */
+ lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
+ "%d:0108 No retry ELS command x%x to remote NPORT x%x "
+ "Data: x%x x%x\n",
+ phba->brd_no,
+ cmd, ndlp->nlp_DID, cmdiocb->retry, ndlp->nlp_flag);
+
+ return (0);
+}
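+
+/* Retry policy above, summarized (a reading aid, not exhaustive):
+ * local loop-open / no-resource / invalid-RPI errors, N_Port or Fabric
+ * busy, and "temporarily unavailable" rejects all retry (PLOGI adds a
+ * one second delay for several of them); fabric FLOGI sequence timeouts
+ * and PLOGI "command in progress" rejects delay and raise maxretry to
+ * 48; FDMI_DID always retries; everything stops once cmdiocb->retry
+ * reaches maxretry (lpfc_max_els_tries == 3 by default).
+ */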
+
+int
+lpfc_els_free_iocb(struct lpfc_hba * phba, struct lpfc_iocbq * elsiocb)
+{
+ struct lpfc_dmabuf *buf_ptr, *buf_ptr1;
+
+ /* context2 = cmd, context2->next = rsp, context3 = bpl */
+ if (elsiocb->context2) {
+ buf_ptr1 = (struct lpfc_dmabuf *) elsiocb->context2;
+ /* Free the response before processing the command. */
+ if (!list_empty(&buf_ptr1->list)) {
+ list_remove_head(&buf_ptr1->list, buf_ptr,
+ struct lpfc_dmabuf,
+ list);
+ lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
+ kfree(buf_ptr);
+ }
+ lpfc_mbuf_free(phba, buf_ptr1->virt, buf_ptr1->phys);
+ kfree(buf_ptr1);
+ }
+
+ if (elsiocb->context3) {
+ buf_ptr = (struct lpfc_dmabuf *) elsiocb->context3;
+ lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
+ kfree(buf_ptr);
+ }
+ spin_lock_irq(phba->host->host_lock);
+ list_add_tail(&elsiocb->list, &phba->lpfc_iocb_list);
+ spin_unlock_irq(phba->host->host_lock);
+ return 0;
+}
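+
+/* Freeing mirrors the layout set up by lpfc_prep_els_iocb(): pop and
+ * free the response buffer chained on context2->list, free the command
+ * buffer itself, free the BPL in context3, then return the iocbq to
+ * phba->lpfc_iocb_list under the host lock.
+ */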
+
+static void
+lpfc_cmpl_els_logo_acc(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
+ struct lpfc_iocbq * rspiocb)
+{
+ struct lpfc_nodelist *ndlp;
+
+ ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
+
+ /* ACC to LOGO completes to NPort <nlp_DID> */
+ lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
+ "%d:0109 ACC to LOGO completes to NPort x%x "
+ "Data: x%x x%x x%x\n",
+ phba->brd_no, ndlp->nlp_DID, ndlp->nlp_flag,
+ ndlp->nlp_state, ndlp->nlp_rpi);
+
+ spin_lock_irq(phba->host->host_lock);
+ ndlp->nlp_flag &= ~NLP_LOGO_ACC;
+ spin_unlock_irq(phba->host->host_lock);
+
+ switch (ndlp->nlp_state) {
+ case NLP_STE_UNUSED_NODE: /* node is just allocated */
+ lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
+ break;
+ case NLP_STE_NPR_NODE: /* NPort Recovery mode */
+ lpfc_unreg_rpi(phba, ndlp);
+ break;
+ default:
+ break;
+ }
+ lpfc_els_free_iocb(phba, cmdiocb);
+ return;
+}
+
+static void
+lpfc_cmpl_els_acc(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
+ struct lpfc_iocbq * rspiocb)
+{
+ struct lpfc_nodelist *ndlp;
+ LPFC_MBOXQ_t *mbox = NULL;
+
+ ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
+ if (cmdiocb->context_un.mbox)
+ mbox = cmdiocb->context_un.mbox;
+
+ /* Check to see if link went down during discovery */
+ if ((lpfc_els_chk_latt(phba)) || !ndlp) {
+ if (mbox) {
+ mempool_free( mbox, phba->mbox_mem_pool);
+ }
+ goto out;
+ }
+
+ /* ELS response tag <ulpIoTag> completes */
+ lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
+ "%d:0110 ELS response tag x%x completes "
+ "Data: x%x x%x x%x x%x x%x x%x\n",
+ phba->brd_no,
+ cmdiocb->iocb.ulpIoTag, rspiocb->iocb.ulpStatus,
+ rspiocb->iocb.un.ulpWord[4], ndlp->nlp_DID,
+ ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi);
+
+ if (mbox) {
+ if ((rspiocb->iocb.ulpStatus == 0)
+ && (ndlp->nlp_flag & NLP_ACC_REGLOGIN)) {
+ /* set_slim mailbox command needs to execute first,
+ * queue this command to be processed later.
+ */
+ lpfc_unreg_rpi(phba, ndlp);
+ mbox->mbox_cmpl = lpfc_mbx_cmpl_reg_login;
+ mbox->context2 = ndlp;
+ ndlp->nlp_state = NLP_STE_REG_LOGIN_ISSUE;
+ lpfc_nlp_list(phba, ndlp, NLP_REGLOGIN_LIST);
+ if (lpfc_sli_issue_mbox(phba, mbox,
+ (MBX_NOWAIT | MBX_STOP_IOCB))
+ != MBX_NOT_FINISHED) {
+ goto out;
+ }
+ /* NOTE: we should have messages for unsuccessful
+ reglogin */
+ mempool_free( mbox, phba->mbox_mem_pool);
+ } else {
+ mempool_free( mbox, phba->mbox_mem_pool);
+ if (ndlp->nlp_flag & NLP_ACC_REGLOGIN) {
+ lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
+ }
+ }
+ }
+out:
+ if (ndlp) {
+ spin_lock_irq(phba->host->host_lock);
+ ndlp->nlp_flag &= ~NLP_ACC_REGLOGIN;
+ spin_unlock_irq(phba->host->host_lock);
+ }
+ lpfc_els_free_iocb(phba, cmdiocb);
+ return;
+}
+
+int
+lpfc_els_rsp_acc(struct lpfc_hba * phba, uint32_t flag,
+ struct lpfc_iocbq * oldiocb, struct lpfc_nodelist * ndlp,
+ LPFC_MBOXQ_t * mbox, uint8_t newnode)
+{
+ IOCB_t *icmd;
+ IOCB_t *oldcmd;
+ struct lpfc_iocbq *elsiocb;
+ struct lpfc_sli_ring *pring;
+ struct lpfc_sli *psli;
+ uint8_t *pcmd;
+ uint16_t cmdsize;
+ int rc;
+
+ psli = &phba->sli;
+ pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */
+ oldcmd = &oldiocb->iocb;
+
+ switch (flag) {
+ case ELS_CMD_ACC:
+ cmdsize = sizeof (uint32_t);
+ if ((elsiocb =
+ lpfc_prep_els_iocb(phba, 0, cmdsize, oldiocb->retry,
+ ndlp, ELS_CMD_ACC)) == 0) {
+ return (1);
+ }
+ icmd = &elsiocb->iocb;
+ icmd->ulpContext = oldcmd->ulpContext; /* Xri */
+ pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
+ *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
+ pcmd += sizeof (uint32_t);
+ break;
+ case ELS_CMD_PLOGI:
+ cmdsize = (sizeof (struct serv_parm) + sizeof (uint32_t));
+ if ((elsiocb =
+ lpfc_prep_els_iocb(phba, 0, cmdsize, oldiocb->retry,
+ ndlp, ELS_CMD_ACC)) == 0) {
+ return (1);
+ }
+ icmd = &elsiocb->iocb;
+ icmd->ulpContext = oldcmd->ulpContext; /* Xri */
+ pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
+
+ if (mbox)
+ elsiocb->context_un.mbox = mbox;
+
+ *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
+ pcmd += sizeof (uint32_t);
+ memcpy(pcmd, &phba->fc_sparam, sizeof (struct serv_parm));
+ break;
+ default:
+ return (1);
+ }
+
+ if (newnode)
+ elsiocb->context1 = NULL;
+
+ /* Xmit ELS ACC response tag <ulpIoTag> */
+ lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
+ "%d:0128 Xmit ELS ACC response tag x%x "
+ "Data: x%x x%x x%x x%x x%x\n",
+ phba->brd_no,
+ elsiocb->iocb.ulpIoTag,
+ elsiocb->iocb.ulpContext, ndlp->nlp_DID,
+ ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi);
+
+ if (ndlp->nlp_flag & NLP_LOGO_ACC) {
+ elsiocb->iocb_cmpl = lpfc_cmpl_els_logo_acc;
+ } else {
+ elsiocb->iocb_cmpl = lpfc_cmpl_els_acc;
+ }
+
+ phba->fc_stat.elsXmitACC++;
+ spin_lock_irq(phba->host->host_lock);
+ rc = lpfc_sli_issue_iocb(phba, pring, elsiocb, 0);
+ spin_unlock_irq(phba->host->host_lock);
+ if (rc == IOCB_ERROR) {
+ lpfc_els_free_iocb(phba, elsiocb);
+ return (1);
+ }
+ return (0);
+}
+
+int
+lpfc_els_rsp_reject(struct lpfc_hba * phba, uint32_t rejectError,
+ struct lpfc_iocbq * oldiocb, struct lpfc_nodelist * ndlp)
+{
+ IOCB_t *icmd;
+ IOCB_t *oldcmd;
+ struct lpfc_iocbq *elsiocb;
+ struct lpfc_sli_ring *pring;
+ struct lpfc_sli *psli;
+ uint8_t *pcmd;
+ uint16_t cmdsize;
+ int rc;
+
+ psli = &phba->sli;
+ pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */
+
+ cmdsize = 2 * sizeof (uint32_t);
+ if ((elsiocb = lpfc_prep_els_iocb(phba, 0, cmdsize, oldiocb->retry,
+ ndlp, ELS_CMD_LS_RJT)) == 0) {
+ return (1);
+ }
+
+ icmd = &elsiocb->iocb;
+ oldcmd = &oldiocb->iocb;
+ icmd->ulpContext = oldcmd->ulpContext; /* Xri */
+ pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
+
+ *((uint32_t *) (pcmd)) = ELS_CMD_LS_RJT;
+ pcmd += sizeof (uint32_t);
+ *((uint32_t *) (pcmd)) = rejectError;
+
+ /* Xmit ELS RJT <err> response tag <ulpIoTag> */
+ lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
+ "%d:0129 Xmit ELS RJT x%x response tag x%x "
+ "Data: x%x x%x x%x x%x x%x\n",
+ phba->brd_no,
+ rejectError, elsiocb->iocb.ulpIoTag,
+ elsiocb->iocb.ulpContext, ndlp->nlp_DID,
+ ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi);
+
+ phba->fc_stat.elsXmitLSRJT++;
+ elsiocb->iocb_cmpl = lpfc_cmpl_els_acc;
+ spin_lock_irq(phba->host->host_lock);
+ rc = lpfc_sli_issue_iocb(phba, pring, elsiocb, 0);
+ spin_unlock_irq(phba->host->host_lock);
+ if (rc == IOCB_ERROR) {
+ lpfc_els_free_iocb(phba, elsiocb);
+ return (1);
+ }
+ return (0);
+}
+
+int
+lpfc_els_rsp_adisc_acc(struct lpfc_hba * phba,
+ struct lpfc_iocbq * oldiocb, struct lpfc_nodelist * ndlp)
+{
+ ADISC *ap;
+ IOCB_t *icmd;
+ IOCB_t *oldcmd;
+ struct lpfc_iocbq *elsiocb;
+ struct lpfc_sli_ring *pring;
+ struct lpfc_sli *psli;
+ uint8_t *pcmd;
+ uint16_t cmdsize;
+ int rc;
+
+ psli = &phba->sli;
+ pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */
+
+ cmdsize = sizeof (uint32_t) + sizeof (ADISC);
+ if ((elsiocb = lpfc_prep_els_iocb(phba, 0, cmdsize, oldiocb->retry,
+ ndlp, ELS_CMD_ACC)) == 0) {
+ return (1);
+ }
+
+ /* Xmit ADISC ACC response tag <ulpIoTag> */
+ lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
+ "%d:0130 Xmit ADISC ACC response tag x%x "
+ "Data: x%x x%x x%x x%x x%x\n",
+ phba->brd_no,
+ elsiocb->iocb.ulpIoTag,
+ elsiocb->iocb.ulpContext, ndlp->nlp_DID,
+ ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi);
+
+ icmd = &elsiocb->iocb;
+ oldcmd = &oldiocb->iocb;
+ icmd->ulpContext = oldcmd->ulpContext; /* Xri */
+ pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
+
+ *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
+ pcmd += sizeof (uint32_t);
+
+ ap = (ADISC *) (pcmd);
+ ap->hardAL_PA = phba->fc_pref_ALPA;
+ memcpy(&ap->portName, &phba->fc_portname, sizeof (struct lpfc_name));
+ memcpy(&ap->nodeName, &phba->fc_nodename, sizeof (struct lpfc_name));
+ ap->DID = be32_to_cpu(phba->fc_myDID);
+
+ phba->fc_stat.elsXmitACC++;
+ elsiocb->iocb_cmpl = lpfc_cmpl_els_acc;
+ spin_lock_irq(phba->host->host_lock);
+ rc = lpfc_sli_issue_iocb(phba, pring, elsiocb, 0);
+ spin_unlock_irq(phba->host->host_lock);
+ if (rc == IOCB_ERROR) {
+ lpfc_els_free_iocb(phba, elsiocb);
+ return (1);
+ }
+ return (0);
+}
+
+int
+lpfc_els_rsp_prli_acc(struct lpfc_hba * phba,
+ struct lpfc_iocbq * oldiocb, struct lpfc_nodelist * ndlp)
+{
+ PRLI *npr;
+ lpfc_vpd_t *vpd;
+ IOCB_t *icmd;
+ IOCB_t *oldcmd;
+ struct lpfc_iocbq *elsiocb;
+ struct lpfc_sli_ring *pring;
+ struct lpfc_sli *psli;
+ uint8_t *pcmd;
+ uint16_t cmdsize;
+ int rc;
+
+ psli = &phba->sli;
+ pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */
+
+ cmdsize = sizeof (uint32_t) + sizeof (PRLI);
+ if ((elsiocb = lpfc_prep_els_iocb(phba, 0, cmdsize, oldiocb->retry,
+ ndlp,
+ (ELS_CMD_ACC |
+ (ELS_CMD_PRLI & ~ELS_RSP_MASK)))) ==
+ 0) {
+ return (1);
+ }
+
+ /* Xmit PRLI ACC response tag <ulpIoTag> */
+ lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
+ "%d:0131 Xmit PRLI ACC response tag x%x "
+ "Data: x%x x%x x%x x%x x%x\n",
+ phba->brd_no,
+ elsiocb->iocb.ulpIoTag,
+ elsiocb->iocb.ulpContext, ndlp->nlp_DID,
+ ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi);
+
+ icmd = &elsiocb->iocb;
+ oldcmd = &oldiocb->iocb;
+ icmd->ulpContext = oldcmd->ulpContext; /* Xri */
+ pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
+
+ *((uint32_t *) (pcmd)) = (ELS_CMD_ACC | (ELS_CMD_PRLI & ~ELS_RSP_MASK));
+ pcmd += sizeof (uint32_t);
+
+ /* For PRLI, remainder of payload is PRLI parameter page */
+ memset(pcmd, 0, sizeof (PRLI));
+
+ npr = (PRLI *) pcmd;
+ vpd = &phba->vpd;
+ /*
+ * If our firmware version is 3.20 or later,
+ * set the following bits for FC-TAPE support.
+ */
+ if (vpd->rev.feaLevelHigh >= 0x02) {
+ npr->ConfmComplAllowed = 1;
+ npr->Retry = 1;
+ npr->TaskRetryIdReq = 1;
+ }
+
+ npr->acceptRspCode = PRLI_REQ_EXECUTED;
+ npr->estabImagePair = 1;
+ npr->readXferRdyDis = 1;
+ npr->ConfmComplAllowed = 1;
+
+ npr->prliType = PRLI_FCP_TYPE;
+ npr->initiatorFunc = 1;
+
+ phba->fc_stat.elsXmitACC++;
+ elsiocb->iocb_cmpl = lpfc_cmpl_els_acc;
+
+ spin_lock_irq(phba->host->host_lock);
+ rc = lpfc_sli_issue_iocb(phba, pring, elsiocb, 0);
+ spin_unlock_irq(phba->host->host_lock);
+ if (rc == IOCB_ERROR) {
+ lpfc_els_free_iocb(phba, elsiocb);
+ return (1);
+ }
+ return (0);
+}
+
+static int
+lpfc_els_rsp_rnid_acc(struct lpfc_hba * phba,
+ uint8_t format,
+ struct lpfc_iocbq * oldiocb, struct lpfc_nodelist * ndlp)
+{
+ RNID *rn;
+ IOCB_t *icmd;
+ IOCB_t *oldcmd;
+ struct lpfc_iocbq *elsiocb;
+ struct lpfc_sli_ring *pring;
+ struct lpfc_sli *psli;
+ uint8_t *pcmd;
+ uint16_t cmdsize;
+ int rc;
+
+ psli = &phba->sli;
+ pring = &psli->ring[LPFC_ELS_RING];
+
+ cmdsize = sizeof (uint32_t) + sizeof (uint32_t)
+ + (2 * sizeof (struct lpfc_name));
+ if (format)
+ cmdsize += sizeof (RNID_TOP_DISC);
+
+ if ((elsiocb = lpfc_prep_els_iocb(phba, 0, cmdsize, oldiocb->retry,
+ ndlp, ELS_CMD_ACC)) == 0) {
+ return (1);
+ }
+
+ /* Xmit RNID ACC response tag <ulpIoTag> */
+ lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
+ "%d:0132 Xmit RNID ACC response tag x%x "
+ "Data: x%x\n",
+ phba->brd_no,
+ elsiocb->iocb.ulpIoTag,
+ elsiocb->iocb.ulpContext);
+
+ icmd = &elsiocb->iocb;
+ oldcmd = &oldiocb->iocb;
+ icmd->ulpContext = oldcmd->ulpContext; /* Xri */
+ pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
+
+ *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
+ pcmd += sizeof (uint32_t);
+
+ memset(pcmd, 0, sizeof (RNID));
+ rn = (RNID *) (pcmd);
+ rn->Format = format;
+ rn->CommonLen = (2 * sizeof (struct lpfc_name));
+ memcpy(&rn->portName, &phba->fc_portname, sizeof (struct lpfc_name));
+ memcpy(&rn->nodeName, &phba->fc_nodename, sizeof (struct lpfc_name));
+ switch (format) {
+ case 0:
+ rn->SpecificLen = 0;
+ break;
+ case RNID_TOPOLOGY_DISC:
+ rn->SpecificLen = sizeof (RNID_TOP_DISC);
+ memcpy(&rn->un.topologyDisc.portName,
+ &phba->fc_portname, sizeof (struct lpfc_name));
+ rn->un.topologyDisc.unitType = RNID_HBA;
+ rn->un.topologyDisc.physPort = 0;
+ rn->un.topologyDisc.attachedNodes = 0;
+ break;
+ default:
+ rn->CommonLen = 0;
+ rn->SpecificLen = 0;
+ break;
+ }
+
+ phba->fc_stat.elsXmitACC++;
+ elsiocb->iocb_cmpl = lpfc_cmpl_els_acc;
+ elsiocb->context1 = NULL; /* Don't need ndlp for cmpl,
+ * it could be freed */
+
+ spin_lock_irq(phba->host->host_lock);
+ rc = lpfc_sli_issue_iocb(phba, pring, elsiocb, 0);
+ spin_unlock_irq(phba->host->host_lock);
+ if (rc == IOCB_ERROR) {
+ lpfc_els_free_iocb(phba, elsiocb);
+ return (1);
+ }
+ return (0);
+}
+
+int
+lpfc_els_disc_adisc(struct lpfc_hba * phba)
+{
+ int sentadisc;
+ struct lpfc_nodelist *ndlp, *next_ndlp;
+
+ sentadisc = 0;
+ /* go thru NPR list and issue any remaining ELS ADISCs */
+ list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_npr_list,
+ nlp_listp) {
+ if (ndlp->nlp_flag & NLP_NPR_2B_DISC) {
+ if (ndlp->nlp_flag & NLP_NPR_ADISC) {
+ ndlp->nlp_flag &= ~NLP_NPR_ADISC;
+ ndlp->nlp_state = NLP_STE_ADISC_ISSUE;
+ lpfc_nlp_list(phba, ndlp,
+ NLP_ADISC_LIST);
+ lpfc_issue_els_adisc(phba, ndlp, 0);
+ sentadisc++;
+ phba->num_disc_nodes++;
+ if (phba->num_disc_nodes >=
+ phba->cfg_discovery_threads) {
+ spin_lock_irq(phba->host->host_lock);
+ phba->fc_flag |= FC_NLP_MORE;
+ spin_unlock_irq(phba->host->host_lock);
+ break;
+ }
+ }
+ }
+ }
+ if (sentadisc == 0) {
+ spin_lock_irq(phba->host->host_lock);
+ phba->fc_flag &= ~FC_NLP_MORE;
+ spin_unlock_irq(phba->host->host_lock);
+ }
+ return(sentadisc);
+}
+
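+/*
+ * Companion to lpfc_els_disc_adisc: issue a PLOGI to any remaining NPR
+ * nodes that are not ADISC candidates, skipping nodes whose retry delay
+ * timer (NLP_DELAY_TMO) is still running.  The same cfg_discovery_threads
+ * throttle and FC_NLP_MORE bookkeeping apply.
+ */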
+int
+lpfc_els_disc_plogi(struct lpfc_hba * phba)
+{
+ int sentplogi;
+ struct lpfc_nodelist *ndlp, *next_ndlp;
+
+ sentplogi = 0;
+ /* go thru NPR list and issue any remaining ELS PLOGIs */
+ list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_npr_list,
+ nlp_listp) {
+ if ((ndlp->nlp_flag & NLP_NPR_2B_DISC) &&
+ (!(ndlp->nlp_flag & NLP_DELAY_TMO))) {
+ if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) {
+ ndlp->nlp_state = NLP_STE_PLOGI_ISSUE;
+ lpfc_nlp_list(phba, ndlp, NLP_PLOGI_LIST);
+ lpfc_issue_els_plogi(phba, ndlp, 0);
+ sentplogi++;
+ phba->num_disc_nodes++;
+ if (phba->num_disc_nodes >=
+ phba->cfg_discovery_threads) {
+ spin_lock_irq(phba->host->host_lock);
+ phba->fc_flag |= FC_NLP_MORE;
+ spin_unlock_irq(phba->host->host_lock);
+ break;
+ }
+ }
+ }
+ }
+ if (sentplogi == 0) {
+ spin_lock_irq(phba->host->host_lock);
+ phba->fc_flag &= ~FC_NLP_MORE;
+ spin_unlock_irq(phba->host->host_lock);
+ }
+ return(sentplogi);
+}
+
+int
+lpfc_els_flush_rscn(struct lpfc_hba * phba)
+{
+ struct lpfc_dmabuf *mp;
+ int i;
+
+ for (i = 0; i < phba->fc_rscn_id_cnt; i++) {
+ mp = phba->fc_rscn_id_list[i];
+ lpfc_mbuf_free(phba, mp->virt, mp->phys);
+ kfree(mp);
+ phba->fc_rscn_id_list[i] = NULL;
+ }
+ phba->fc_rscn_id_cnt = 0;
+ spin_lock_irq(phba->host->host_lock);
+ phba->fc_flag &= ~(FC_RSCN_MODE | FC_RSCN_DISCOVERY);
+ spin_unlock_irq(phba->host->host_lock);
+ lpfc_can_disctmo(phba);
+ return (0);
+}
+
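+/*
+ * Check a DID against every page of every deferred RSCN payload.  Each
+ * RSCN entry carries an address-format qualifier (held here in the resv
+ * bits of the D_ID union) that widens the match from a single N_Port ID,
+ * to an area, to a domain, to the whole fabric.  For example, an
+ * area-format entry of 0x01123400 matches any DID of the form 0x1234xx,
+ * such as 0x123456.  Returns the DID on a match, otherwise 0.
+ */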
+int
+lpfc_rscn_payload_check(struct lpfc_hba * phba, uint32_t did)
+{
+ D_ID ns_did;
+ D_ID rscn_did;
+ struct lpfc_dmabuf *mp;
+ uint32_t *lp;
+ uint32_t payload_len, cmd, i, match;
+
+ ns_did.un.word = did;
+ match = 0;
+
+ /* Never match fabric nodes for RSCNs */
+ if ((did & Fabric_DID_MASK) == Fabric_DID_MASK)
+ return(0);
+
+ /* If we are doing a FULL RSCN rediscovery, match everything */
+ if (phba->fc_flag & FC_RSCN_DISCOVERY) {
+ return (did);
+ }
+
+ for (i = 0; i < phba->fc_rscn_id_cnt; i++) {
+ mp = phba->fc_rscn_id_list[i];
+ lp = (uint32_t *) mp->virt;
+ cmd = *lp++;
+ payload_len = be32_to_cpu(cmd) & 0xffff; /* payload length */
+ payload_len -= sizeof (uint32_t); /* take off word 0 */
+ while (payload_len) {
+ rscn_did.un.word = *lp++;
+ rscn_did.un.word = be32_to_cpu(rscn_did.un.word);
+ payload_len -= sizeof (uint32_t);
+ switch (rscn_did.un.b.resv) {
+		case 0:	/* Single N_Port ID affected */
+ if (ns_did.un.word == rscn_did.un.word) {
+ match = did;
+ }
+ break;
+		case 1:	/* Whole N_Port Area affected */
+ if ((ns_did.un.b.domain == rscn_did.un.b.domain)
+ && (ns_did.un.b.area == rscn_did.un.b.area))
+ {
+ match = did;
+ }
+ break;
+		case 2:	/* Whole N_Port Domain affected */
+ if (ns_did.un.b.domain == rscn_did.un.b.domain)
+ {
+ match = did;
+ }
+ break;
+		case 3:	/* Whole Fabric affected */
+ match = did;
+ break;
+ default:
+ /* Unknown Identifier in RSCN list */
+ lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
+ "%d:0217 Unknown Identifier in "
+ "RSCN payload Data: x%x\n",
+ phba->brd_no, rscn_did.un.word);
+ break;
+ }
+ if (match) {
+ break;
+ }
+ }
+ }
+ return (match);
+}
+
+static int
+lpfc_rscn_recovery_check(struct lpfc_hba * phba)
+{
+ struct lpfc_nodelist *ndlp = NULL, *next_ndlp;
+ struct list_head *listp;
+ struct list_head *node_list[7];
+ int i;
+
+	/* Look at all nodes affected by pending RSCNs and move
+ * them to NPR list.
+ */
+ node_list[0] = &phba->fc_npr_list; /* MUST do this list first */
+ node_list[1] = &phba->fc_nlpmap_list;
+ node_list[2] = &phba->fc_nlpunmap_list;
+ node_list[3] = &phba->fc_prli_list;
+ node_list[4] = &phba->fc_reglogin_list;
+ node_list[5] = &phba->fc_adisc_list;
+ node_list[6] = &phba->fc_plogi_list;
+ for (i = 0; i < 7; i++) {
+ listp = node_list[i];
+ if (list_empty(listp))
+ continue;
+
+ list_for_each_entry_safe(ndlp, next_ndlp, listp, nlp_listp) {
+ if (!(lpfc_rscn_payload_check(phba, ndlp->nlp_DID)))
+ continue;
+
+ lpfc_disc_state_machine(phba, ndlp, NULL,
+ NLP_EVT_DEVICE_RECOVERY);
+ if (ndlp->nlp_flag & NLP_DELAY_TMO) {
+ ndlp->nlp_flag &= ~NLP_DELAY_TMO;
+ del_timer_sync(&ndlp->nlp_delayfunc);
+			if (!list_empty(&ndlp->els_retry_evt.evt_listp))
+				list_del_init(&ndlp->els_retry_evt.evt_listp);
+ }
+ }
+ }
+ return (0);
+}
+
+static int
+lpfc_els_rcv_rscn(struct lpfc_hba * phba,
+ struct lpfc_iocbq * cmdiocb,
+ struct lpfc_nodelist * ndlp, uint8_t newnode)
+{
+ struct lpfc_dmabuf *pcmd;
+ uint32_t *lp;
+ IOCB_t *icmd;
+ uint32_t payload_len, cmd;
+
+ icmd = &cmdiocb->iocb;
+ pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
+ lp = (uint32_t *) pcmd->virt;
+
+ cmd = *lp++;
+ payload_len = be32_to_cpu(cmd) & 0xffff; /* payload length */
+ payload_len -= sizeof (uint32_t); /* take off word 0 */
+ cmd &= ELS_CMD_MASK;
+
+ /* RSCN received */
+ lpfc_printf_log(phba,
+ KERN_INFO,
+ LOG_DISCOVERY,
+ "%d:0214 RSCN received Data: x%x x%x x%x x%x\n",
+ phba->brd_no,
+ phba->fc_flag, payload_len, *lp, phba->fc_rscn_id_cnt);
+
+ /* If we are about to begin discovery, just ACC the RSCN.
+ * Discovery processing will satisfy it.
+ */
+ if (phba->hba_state < LPFC_NS_QRY) {
+ lpfc_els_rsp_acc(phba, ELS_CMD_ACC, cmdiocb, ndlp, NULL,
+ newnode);
+ return (0);
+ }
+
+ /* If we are already processing an RSCN, save the received
+ * RSCN payload buffer, cmdiocb->context2 to process later.
+ */
+ if (phba->fc_flag & (FC_RSCN_MODE | FC_NDISC_ACTIVE)) {
+ if ((phba->fc_rscn_id_cnt < FC_MAX_HOLD_RSCN) &&
+ !(phba->fc_flag & FC_RSCN_DISCOVERY)) {
+ spin_lock_irq(phba->host->host_lock);
+ phba->fc_flag |= FC_RSCN_MODE;
+ spin_unlock_irq(phba->host->host_lock);
+ phba->fc_rscn_id_list[phba->fc_rscn_id_cnt++] = pcmd;
+
+			/* If we zero cmdiocb->context2, the calling
+			 * routine will not try to free it.
+			 */
+ cmdiocb->context2 = NULL;
+
+ /* Deferred RSCN */
+ lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
+ "%d:0235 Deferred RSCN "
+ "Data: x%x x%x x%x\n",
+ phba->brd_no, phba->fc_rscn_id_cnt,
+ phba->fc_flag, phba->hba_state);
+ } else {
+ spin_lock_irq(phba->host->host_lock);
+ phba->fc_flag |= FC_RSCN_DISCOVERY;
+ spin_unlock_irq(phba->host->host_lock);
+ /* ReDiscovery RSCN */
+ lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
+ "%d:0234 ReDiscovery RSCN "
+ "Data: x%x x%x x%x\n",
+ phba->brd_no, phba->fc_rscn_id_cnt,
+ phba->fc_flag, phba->hba_state);
+ }
+ /* Send back ACC */
+ lpfc_els_rsp_acc(phba, ELS_CMD_ACC, cmdiocb, ndlp, NULL,
+ newnode);
+
+ /* send RECOVERY event for ALL nodes that match RSCN payload */
+ lpfc_rscn_recovery_check(phba);
+ return (0);
+ }
+
+	spin_lock_irq(phba->host->host_lock);
+	phba->fc_flag |= FC_RSCN_MODE;
+	spin_unlock_irq(phba->host->host_lock);
+	phba->fc_rscn_id_list[phba->fc_rscn_id_cnt++] = pcmd;
+	/*
+	 * If we zero cmdiocb->context2, the calling routine will
+	 * not try to free it.
+	 */
+ cmdiocb->context2 = NULL;
+
+ lpfc_set_disctmo(phba);
+
+ /* Send back ACC */
+ lpfc_els_rsp_acc(phba, ELS_CMD_ACC, cmdiocb, ndlp, NULL, newnode);
+
+ /* send RECOVERY event for ALL nodes that match RSCN payload */
+ lpfc_rscn_recovery_check(phba);
+
+ return (lpfc_els_handle_rscn(phba));
+}
+
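+/*
+ * Act on the RSCNs queued on fc_rscn_id_list.  Discovery is driven
+ * through the NameServer: if an unmapped NameServer login already
+ * exists, a GID_FT query is issued and processing continues from its
+ * completion; otherwise a PLOGI to the NameServer is started (or, if
+ * one is already pending, simply awaited).  Only when none of that is
+ * possible is the deferred RSCN list flushed.  Returns 1 when a
+ * NameServer completion will finish the job, 0 when handling is done.
+ */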
+int
+lpfc_els_handle_rscn(struct lpfc_hba * phba)
+{
+ struct lpfc_nodelist *ndlp;
+
+ /* Start timer for RSCN processing */
+ lpfc_set_disctmo(phba);
+
+ /* RSCN processed */
+ lpfc_printf_log(phba,
+ KERN_INFO,
+ LOG_DISCOVERY,
+ "%d:0215 RSCN processed Data: x%x x%x x%x x%x\n",
+ phba->brd_no,
+ phba->fc_flag, 0, phba->fc_rscn_id_cnt,
+ phba->hba_state);
+
+ /* To process RSCN, first compare RSCN data with NameServer */
+ phba->fc_ns_retry = 0;
+ if ((ndlp = lpfc_findnode_did(phba, NLP_SEARCH_UNMAPPED,
+ NameServer_DID))) {
+ /* Good ndlp, issue CT Request to NameServer */
+ if (lpfc_ns_cmd(phba, ndlp, SLI_CTNS_GID_FT) == 0) {
+ /* Wait for NameServer query cmpl before we can
+ continue */
+ return (1);
+ }
+ } else {
+ /* If login to NameServer does not exist, issue one */
+ /* Good status, issue PLOGI to NameServer */
+ if ((ndlp =
+ lpfc_findnode_did(phba, NLP_SEARCH_ALL, NameServer_DID))) {
+ /* Wait for NameServer login cmpl before we can
+ continue */
+ return (1);
+ }
+ if ((ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL))
+ == 0) {
+ lpfc_els_flush_rscn(phba);
+ return (0);
+ } else {
+ lpfc_nlp_init(phba, ndlp, NameServer_DID);
+ ndlp->nlp_type |= NLP_FABRIC;
+ ndlp->nlp_state = NLP_STE_PLOGI_ISSUE;
+ lpfc_issue_els_plogi(phba, ndlp, 0);
+ /* Wait for NameServer login cmpl before we can
+ continue */
+ return (1);
+ }
+ }
+
+ lpfc_els_flush_rscn(phba);
+ return (0);
+}
+
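+/*
+ * Handle a received FLOGI.  On a point-to-point link both ports send
+ * FLOGI, and by convention the port with the higher portname originates
+ * the PLOGI.  The names are kept in wire (big-endian) byte order, so the
+ * memcmp() below acts as a numeric comparison: equal names most likely
+ * mean our own FLOGI was looped back, so the link is reinitialized;
+ * greater means we set FC_PT2PT_PLOGI and will send the PLOGI ourselves.
+ */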
+static int
+lpfc_els_rcv_flogi(struct lpfc_hba * phba,
+ struct lpfc_iocbq * cmdiocb,
+ struct lpfc_nodelist * ndlp, uint8_t newnode)
+{
+ struct lpfc_dmabuf *pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
+ uint32_t *lp = (uint32_t *) pcmd->virt;
+ IOCB_t *icmd = &cmdiocb->iocb;
+ struct serv_parm *sp;
+ LPFC_MBOXQ_t *mbox;
+ struct ls_rjt stat;
+ uint32_t cmd, did;
+ int rc;
+
+ cmd = *lp++;
+ sp = (struct serv_parm *) lp;
+
+ /* FLOGI received */
+
+ lpfc_set_disctmo(phba);
+
+ if (phba->fc_topology == TOPOLOGY_LOOP) {
+ /* We should never receive a FLOGI in loop mode, ignore it */
+ did = icmd->un.elsreq64.remoteID;
+
+ /* An FLOGI ELS command <elsCmd> was received from DID <did> in
+ Loop Mode */
+ lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
+ "%d:0113 An FLOGI ELS command x%x was received "
+ "from DID x%x in Loop Mode\n",
+ phba->brd_no, cmd, did);
+ return (1);
+ }
+
+ did = Fabric_DID;
+
+ if ((lpfc_check_sparm(phba, ndlp, sp, CLASS3))) {
+		/* For a FLOGI we accept: if our portname is greater
+		 * than the remote portname, we initiate NPort login.
+		 */
+
+ rc = memcmp(&phba->fc_portname, &sp->portName,
+ sizeof (struct lpfc_name));
+
+ if (!rc) {
+ if ((mbox = mempool_alloc(phba->mbox_mem_pool,
+ GFP_KERNEL)) == 0) {
+ return (1);
+ }
+ lpfc_linkdown(phba);
+ lpfc_init_link(phba, mbox,
+ phba->cfg_topology,
+ phba->cfg_link_speed);
+ mbox->mb.un.varInitLnk.lipsr_AL_PA = 0;
+ mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+ rc = lpfc_sli_issue_mbox
+ (phba, mbox, (MBX_NOWAIT | MBX_STOP_IOCB));
+ if (rc == MBX_NOT_FINISHED) {
+ mempool_free( mbox, phba->mbox_mem_pool);
+ }
+ return (1);
+ }
+ else if (rc > 0) { /* greater than */
+ spin_lock_irq(phba->host->host_lock);
+ phba->fc_flag |= FC_PT2PT_PLOGI;
+ spin_unlock_irq(phba->host->host_lock);
+ }
+ phba->fc_flag |= FC_PT2PT;
+ phba->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
+ } else {
+		/* Reject this request because of invalid parameters */
+ stat.un.b.lsRjtRsvd0 = 0;
+ stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
+ stat.un.b.lsRjtRsnCodeExp = LSEXP_SPARM_OPTIONS;
+ stat.un.b.vendorUnique = 0;
+ lpfc_els_rsp_reject(phba, stat.un.lsRjtError, cmdiocb, ndlp);
+ return (1);
+ }
+
+ /* Send back ACC */
+ lpfc_els_rsp_acc(phba, ELS_CMD_PLOGI, cmdiocb, ndlp, NULL, newnode);
+
+ return (0);
+}
+
+static int
+lpfc_els_rcv_rnid(struct lpfc_hba * phba,
+ struct lpfc_iocbq * cmdiocb, struct lpfc_nodelist * ndlp)
+{
+ struct lpfc_dmabuf *pcmd;
+ uint32_t *lp;
+ IOCB_t *icmd;
+ RNID *rn;
+ struct ls_rjt stat;
+ uint32_t cmd, did;
+
+ icmd = &cmdiocb->iocb;
+ did = icmd->un.elsreq64.remoteID;
+ pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
+ lp = (uint32_t *) pcmd->virt;
+
+ cmd = *lp++;
+ rn = (RNID *) lp;
+
+ /* RNID received */
+
+ switch (rn->Format) {
+ case 0:
+ case RNID_TOPOLOGY_DISC:
+ /* Send back ACC */
+ lpfc_els_rsp_rnid_acc(phba, rn->Format, cmdiocb, ndlp);
+ break;
+ default:
+		/* Reject this request because the format is not supported */
+ stat.un.b.lsRjtRsvd0 = 0;
+ stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
+ stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA;
+ stat.un.b.vendorUnique = 0;
+ lpfc_els_rsp_reject(phba, stat.un.lsRjtError, cmdiocb, ndlp);
+ }
+ return (0);
+}
+
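+/*
+ * Handle a received RRQ (Reinstate Recovery Qualifier).  The payload
+ * names the exchange being recovered by its originator S_ID, OX_ID and
+ * RX_ID.  If we originated the exchange (the S_ID is our own DID) it is
+ * aborted by OX_ID, otherwise by RX_ID, and the RRQ is then accepted
+ * unconditionally.
+ */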
+static int
+lpfc_els_rcv_rrq(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
+ struct lpfc_nodelist * ndlp)
+{
+ struct lpfc_dmabuf *pcmd;
+ uint32_t *lp;
+ IOCB_t *icmd;
+ struct lpfc_sli_ring *pring;
+ struct lpfc_sli *psli;
+ RRQ *rrq;
+ uint32_t cmd, did;
+
+ psli = &phba->sli;
+ pring = &psli->ring[LPFC_FCP_RING];
+ icmd = &cmdiocb->iocb;
+ did = icmd->un.elsreq64.remoteID;
+ pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
+ lp = (uint32_t *) pcmd->virt;
+
+ cmd = *lp++;
+ rrq = (RRQ *) lp;
+
+ /* RRQ received */
+ /* Get oxid / rxid from payload and abort it */
+ spin_lock_irq(phba->host->host_lock);
+ if ((rrq->SID == be32_to_cpu(phba->fc_myDID))) {
+ lpfc_sli_abort_iocb(phba, pring, 0, 0, rrq->Oxid,
+ LPFC_CTX_CTX);
+ } else {
+ lpfc_sli_abort_iocb(phba, pring, 0, 0, rrq->Rxid,
+ LPFC_CTX_CTX);
+ }
+
+ spin_unlock_irq(phba->host->host_lock);
+ /* ACCEPT the rrq request */
+ lpfc_els_rsp_acc(phba, ELS_CMD_ACC, cmdiocb, ndlp, NULL, 0);
+
+ return 0;
+}
+
+static int
+lpfc_els_rcv_farp(struct lpfc_hba * phba,
+ struct lpfc_iocbq * cmdiocb, struct lpfc_nodelist * ndlp)
+{
+ struct lpfc_dmabuf *pcmd;
+ uint32_t *lp;
+ IOCB_t *icmd;
+ FARP *fp;
+ uint32_t cmd, cnt, did;
+
+ icmd = &cmdiocb->iocb;
+ did = icmd->un.elsreq64.remoteID;
+ pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
+ lp = (uint32_t *) pcmd->virt;
+
+ cmd = *lp++;
+ fp = (FARP *) lp;
+
+ /* FARP-REQ received from DID <did> */
+ lpfc_printf_log(phba,
+ KERN_INFO,
+ LOG_IP,
+ "%d:0601 FARP-REQ received from DID x%x\n",
+ phba->brd_no, did);
+
+ /* We will only support match on WWPN or WWNN */
+ if (fp->Mflags & ~(FARP_MATCH_NODE | FARP_MATCH_PORT)) {
+ return (0);
+ }
+
+ cnt = 0;
+ /* If this FARP command is searching for my portname */
+ if (fp->Mflags & FARP_MATCH_PORT) {
+ if (memcmp(&fp->RportName, &phba->fc_portname,
+ sizeof (struct lpfc_name)) == 0)
+ cnt = 1;
+ }
+
+ /* If this FARP command is searching for my nodename */
+ if (fp->Mflags & FARP_MATCH_NODE) {
+ if (memcmp(&fp->RnodeName, &phba->fc_nodename,
+ sizeof (struct lpfc_name)) == 0)
+ cnt = 1;
+ }
+
+ if (cnt) {
+ if ((ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) ||
+ (ndlp->nlp_state == NLP_STE_MAPPED_NODE)) {
+ /* Log back into the node before sending the FARP. */
+ if (fp->Rflags & FARP_REQUEST_PLOGI) {
+ ndlp->nlp_state = NLP_STE_PLOGI_ISSUE;
+ lpfc_nlp_list(phba, ndlp, NLP_PLOGI_LIST);
+ lpfc_issue_els_plogi(phba, ndlp, 0);
+ }
+
+ /* Send a FARP response to that node */
+ if (fp->Rflags & FARP_REQUEST_FARPR) {
+ lpfc_issue_els_farpr(phba, did, 0);
+ }
+ }
+ }
+ return (0);
+}
+
+static int
+lpfc_els_rcv_farpr(struct lpfc_hba * phba,
+ struct lpfc_iocbq * cmdiocb, struct lpfc_nodelist * ndlp)
+{
+ struct lpfc_dmabuf *pcmd;
+ uint32_t *lp;
+ IOCB_t *icmd;
+ uint32_t cmd, did;
+
+ icmd = &cmdiocb->iocb;
+ did = icmd->un.elsreq64.remoteID;
+ pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
+ lp = (uint32_t *) pcmd->virt;
+
+ cmd = *lp++;
+ /* FARP-RSP received from DID <did> */
+ lpfc_printf_log(phba,
+ KERN_INFO,
+ LOG_IP,
+ "%d:0600 FARP-RSP received from DID x%x\n",
+ phba->brd_no, did);
+
+ /* ACCEPT the Farp resp request */
+ lpfc_els_rsp_acc(phba, ELS_CMD_ACC, cmdiocb, ndlp, NULL, 0);
+
+ return 0;
+}
+
+static int
+lpfc_els_rcv_fan(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
+ struct lpfc_nodelist * ndlp)
+{
+ struct lpfc_dmabuf *pcmd;
+ uint32_t *lp;
+ IOCB_t *icmd;
+ FAN *fp;
+ uint32_t cmd, did;
+
+ icmd = &cmdiocb->iocb;
+ did = icmd->un.elsreq64.remoteID;
+ pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
+ lp = (uint32_t *) pcmd->virt;
+
+ cmd = *lp++;
+ fp = (FAN *) lp;
+
+ /* FAN received */
+
+ /* ACCEPT the FAN request */
+ lpfc_els_rsp_acc(phba, ELS_CMD_ACC, cmdiocb, ndlp, NULL, 0);
+
+ if (phba->hba_state == LPFC_LOCAL_CFG_LINK) {
+ /* The discovery state machine needs to take a different
+ * action if this node has switched fabrics
+ */
+ if ((memcmp(&fp->FportName, &phba->fc_fabparam.portName,
+ sizeof (struct lpfc_name)) != 0)
+ ||
+ (memcmp(&fp->FnodeName, &phba->fc_fabparam.nodeName,
+ sizeof (struct lpfc_name)) != 0)) {
+ /* This node has switched fabrics. An FLOGI is required
+ * after the timeout
+ */
+ return (0);
+ }
+
+ /* Start discovery */
+ lpfc_disc_start(phba);
+ }
+
+ return (0);
+}
+
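+/*
+ * ELS timer callback.  This runs in timer (softirq) context, so it does
+ * no real work: it only latches WORKER_ELS_TMO into work_hba_events and
+ * wakes the worker thread, which then calls lpfc_els_timeout_handler()
+ * in process context to age and abort the IOCBs on the ELS txcmplq.
+ */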
+void
+lpfc_els_timeout(unsigned long ptr)
+{
+ struct lpfc_hba *phba;
+ unsigned long iflag;
+
+ phba = (struct lpfc_hba *)ptr;
+	if (!phba)
+ return;
+ spin_lock_irqsave(phba->host->host_lock, iflag);
+ if (!(phba->work_hba_events & WORKER_ELS_TMO)) {
+ phba->work_hba_events |= WORKER_ELS_TMO;
+ if (phba->work_wait)
+ wake_up(phba->work_wait);
+ }
+ spin_unlock_irqrestore(phba->host->host_lock, iflag);
+ return;
+}
+
+void
+lpfc_els_timeout_handler(struct lpfc_hba *phba)
+{
+ struct lpfc_sli_ring *pring;
+ struct lpfc_iocbq *tmp_iocb, *piocb;
+ IOCB_t *cmd = NULL;
+ struct lpfc_dmabuf *pcmd;
+ struct list_head *dlp;
+ uint32_t *elscmd;
+ uint32_t els_command;
+ uint32_t timeout;
+ uint32_t remote_ID;
+
+	if (!phba)
+ return;
+ spin_lock_irq(phba->host->host_lock);
+ /* If the timer is already canceled do nothing */
+ if (!(phba->work_hba_events & WORKER_ELS_TMO)) {
+ spin_unlock_irq(phba->host->host_lock);
+ return;
+ }
+ timeout = (uint32_t)(phba->fc_ratov << 1);
+
+ pring = &phba->sli.ring[LPFC_ELS_RING];
+ dlp = &pring->txcmplq;
+
+ list_for_each_entry_safe(piocb, tmp_iocb, &pring->txcmplq, list) {
+ cmd = &piocb->iocb;
+
+ if (piocb->iocb_flag & LPFC_IO_LIBDFC) {
+ continue;
+ }
+ pcmd = (struct lpfc_dmabuf *) piocb->context2;
+ elscmd = (uint32_t *) (pcmd->virt);
+ els_command = *elscmd;
+
+ if ((els_command == ELS_CMD_FARP)
+ || (els_command == ELS_CMD_FARPR)) {
+ continue;
+ }
+
+ if (piocb->drvrTimeout > 0) {
+ if (piocb->drvrTimeout >= timeout) {
+ piocb->drvrTimeout -= timeout;
+ } else {
+ piocb->drvrTimeout = 0;
+ }
+ continue;
+ }
+
+ list_del(&piocb->list);
+ pring->txcmplq_cnt--;
+
+ if (cmd->ulpCommand == CMD_GEN_REQUEST64_CR) {
+ struct lpfc_nodelist *ndlp;
+
+ ndlp = lpfc_findnode_rpi(phba, cmd->ulpContext);
+			remote_ID = ndlp ? ndlp->nlp_DID : 0;
+ if (cmd->un.elsreq64.bdl.ulpIoTag32) {
+ lpfc_sli_issue_abort_iotag32(phba,
+ pring, piocb);
+ }
+ } else {
+ remote_ID = cmd->un.elsreq64.remoteID;
+ }
+
+ lpfc_printf_log(phba,
+ KERN_ERR,
+ LOG_ELS,
+ "%d:0127 ELS timeout Data: x%x x%x x%x x%x\n",
+ phba->brd_no, els_command,
+ remote_ID, cmd->ulpCommand, cmd->ulpIoTag);
+
+ /*
+ * The iocb has timed out; abort it.
+ */
+ if (piocb->iocb_cmpl) {
+ cmd->ulpStatus = IOSTAT_LOCAL_REJECT;
+ cmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
+ spin_unlock_irq(phba->host->host_lock);
+ (piocb->iocb_cmpl) (phba, piocb, piocb);
+ spin_lock_irq(phba->host->host_lock);
+ } else {
+ list_add_tail(&piocb->list, &phba->lpfc_iocb_list);
+ }
+ }
+ if (phba->sli.ring[LPFC_ELS_RING].txcmplq_cnt) {
+ phba->els_tmofunc.expires = jiffies + HZ * timeout;
+ add_timer(&phba->els_tmofunc);
+ }
+ spin_unlock_irq(phba->host->host_lock);
+}
+
+void
+lpfc_els_flush_cmd(struct lpfc_hba * phba)
+{
+ struct lpfc_sli_ring *pring;
+ struct lpfc_iocbq *tmp_iocb, *piocb;
+ IOCB_t *cmd = NULL;
+ struct lpfc_dmabuf *pcmd;
+ uint32_t *elscmd;
+ uint32_t els_command;
+ uint32_t remote_ID;
+
+ pring = &phba->sli.ring[LPFC_ELS_RING];
+ spin_lock_irq(phba->host->host_lock);
+ list_for_each_entry_safe(piocb, tmp_iocb, &pring->txq, list) {
+ cmd = &piocb->iocb;
+
+ if (piocb->iocb_flag & LPFC_IO_LIBDFC) {
+ continue;
+ }
+
+ /* Do not flush out the QUE_RING and ABORT/CLOSE iocbs */
+ if ((cmd->ulpCommand == CMD_QUE_RING_BUF_CN) ||
+ (cmd->ulpCommand == CMD_QUE_RING_BUF64_CN) ||
+ (cmd->ulpCommand == CMD_CLOSE_XRI_CN) ||
+ (cmd->ulpCommand == CMD_ABORT_XRI_CN)) {
+ continue;
+ }
+
+ pcmd = (struct lpfc_dmabuf *) piocb->context2;
+ elscmd = (uint32_t *) (pcmd->virt);
+ els_command = *elscmd;
+
+ if (cmd->ulpCommand == CMD_GEN_REQUEST64_CR) {
+ struct lpfc_nodelist *ndlp;
+
+ ndlp = lpfc_findnode_rpi(phba, cmd->ulpContext);
+			remote_ID = ndlp ? ndlp->nlp_DID : 0;
+ if (phba->hba_state == LPFC_HBA_READY) {
+ continue;
+ }
+ } else {
+ remote_ID = cmd->un.elsreq64.remoteID;
+ }
+
+ list_del(&piocb->list);
+		pring->txq_cnt--;
+
+ cmd->ulpStatus = IOSTAT_LOCAL_REJECT;
+ cmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
+
+ if (piocb->iocb_cmpl) {
+ spin_unlock_irq(phba->host->host_lock);
+ (piocb->iocb_cmpl) (phba, piocb, piocb);
+ spin_lock_irq(phba->host->host_lock);
+ }
+ else
+ list_add_tail(&piocb->list, &phba->lpfc_iocb_list);
+ }
+
+ list_for_each_entry_safe(piocb, tmp_iocb, &pring->txcmplq, list) {
+ cmd = &piocb->iocb;
+
+ if (piocb->iocb_flag & LPFC_IO_LIBDFC) {
+ continue;
+ }
+ pcmd = (struct lpfc_dmabuf *) piocb->context2;
+ elscmd = (uint32_t *) (pcmd->virt);
+ els_command = *elscmd;
+
+ if (cmd->ulpCommand == CMD_GEN_REQUEST64_CR) {
+ struct lpfc_nodelist *ndlp;
+
+ ndlp = lpfc_findnode_rpi(phba, cmd->ulpContext);
+			remote_ID = ndlp ? ndlp->nlp_DID : 0;
+ if (phba->hba_state == LPFC_HBA_READY) {
+ continue;
+ }
+ } else {
+ remote_ID = cmd->un.elsreq64.remoteID;
+ }
+
+ list_del(&piocb->list);
+ pring->txcmplq_cnt--;
+
+ cmd->ulpStatus = IOSTAT_LOCAL_REJECT;
+ cmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
+
+ if (piocb->iocb_cmpl) {
+ spin_unlock_irq(phba->host->host_lock);
+ (piocb->iocb_cmpl) (phba, piocb, piocb);
+ spin_lock_irq(phba->host->host_lock);
+ }
+ else
+ list_add_tail(&piocb->list, &phba->lpfc_iocb_list);
+ }
+ spin_unlock_irq(phba->host->host_lock);
+ return;
+}
+
+void
+lpfc_els_unsol_event(struct lpfc_hba * phba,
+ struct lpfc_sli_ring * pring, struct lpfc_iocbq * elsiocb)
+{
+ struct lpfc_sli *psli;
+ struct lpfc_nodelist *ndlp;
+ struct lpfc_dmabuf *mp;
+ uint32_t *lp;
+ IOCB_t *icmd;
+ struct ls_rjt stat;
+ uint32_t cmd;
+ uint32_t did;
+ uint32_t newnode;
+ uint32_t drop_cmd = 0; /* by default do NOT drop received cmd */
+ uint32_t rjt_err = 0;
+
+ psli = &phba->sli;
+ icmd = &elsiocb->iocb;
+
+ if ((icmd->ulpStatus == IOSTAT_LOCAL_REJECT) &&
+ ((icmd->un.ulpWord[4] & 0xff) == IOERR_RCV_BUFFER_WAITING)) {
+ /* Not enough posted buffers; Try posting more buffers */
+ phba->fc_stat.NoRcvBuf++;
+ lpfc_post_buffer(phba, pring, 0, 1);
+ return;
+ }
+
+ /* If there are no BDEs associated with this IOCB,
+ * there is nothing to do.
+ */
+ if (icmd->ulpBdeCount == 0)
+ return;
+
+ /* type of ELS cmd is first 32bit word in packet */
+	mp = lpfc_sli_ringpostbuf_get(phba, pring,
+				      getPaddr(icmd->un.cont64[0].addrHigh,
+					       icmd->un.cont64[0].addrLow));
+	if (!mp) {
+ drop_cmd = 1;
+ goto dropit;
+ }
+
+ newnode = 0;
+ lp = (uint32_t *) mp->virt;
+ cmd = *lp++;
+ lpfc_post_buffer(phba, &psli->ring[LPFC_ELS_RING], 1, 1);
+
+ if (icmd->ulpStatus) {
+ lpfc_mbuf_free(phba, mp->virt, mp->phys);
+ kfree(mp);
+ drop_cmd = 1;
+ goto dropit;
+ }
+
+ /* Check to see if link went down during discovery */
+ if (lpfc_els_chk_latt(phba)) {
+ lpfc_mbuf_free(phba, mp->virt, mp->phys);
+ kfree(mp);
+ drop_cmd = 1;
+ goto dropit;
+ }
+
+ did = icmd->un.rcvels.remoteID;
+ if ((ndlp = lpfc_findnode_did(phba, NLP_SEARCH_ALL, did)) == 0) {
+ /* Cannot find existing Fabric ndlp, so allocate a new one */
+ if ((ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL))
+ == 0) {
+ lpfc_mbuf_free(phba, mp->virt, mp->phys);
+ kfree(mp);
+ drop_cmd = 1;
+ goto dropit;
+ }
+
+ lpfc_nlp_init(phba, ndlp, did);
+ newnode = 1;
+ if ((did & Fabric_DID_MASK) == Fabric_DID_MASK) {
+ ndlp->nlp_type |= NLP_FABRIC;
+ }
+ }
+
+ phba->fc_stat.elsRcvFrame++;
+ elsiocb->context1 = ndlp;
+ elsiocb->context2 = mp;
+
+ if ((cmd & ELS_CMD_MASK) == ELS_CMD_RSCN) {
+ cmd &= ELS_CMD_MASK;
+ }
+ /* ELS command <elsCmd> received from NPORT <did> */
+ lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
+ "%d:0112 ELS command x%x received from NPORT x%x "
+ "Data: x%x\n", phba->brd_no, cmd, did, phba->hba_state);
+
+ switch (cmd) {
+ case ELS_CMD_PLOGI:
+ phba->fc_stat.elsRcvPLOGI++;
+ if (phba->hba_state < LPFC_DISC_AUTH) {
+ rjt_err = LSEXP_NOTHING_MORE;
+ break;
+ }
+ lpfc_disc_state_machine(phba, ndlp, elsiocb, NLP_EVT_RCV_PLOGI);
+ break;
+ case ELS_CMD_FLOGI:
+ phba->fc_stat.elsRcvFLOGI++;
+ lpfc_els_rcv_flogi(phba, elsiocb, ndlp, newnode);
+ if (newnode) {
+ mempool_free( ndlp, phba->nlp_mem_pool);
+ }
+ break;
+ case ELS_CMD_LOGO:
+ phba->fc_stat.elsRcvLOGO++;
+ if (phba->hba_state < LPFC_DISC_AUTH) {
+ rjt_err = LSEXP_NOTHING_MORE;
+ break;
+ }
+ lpfc_disc_state_machine(phba, ndlp, elsiocb, NLP_EVT_RCV_LOGO);
+ break;
+ case ELS_CMD_PRLO:
+ phba->fc_stat.elsRcvPRLO++;
+ if (phba->hba_state < LPFC_DISC_AUTH) {
+ rjt_err = LSEXP_NOTHING_MORE;
+ break;
+ }
+ lpfc_disc_state_machine(phba, ndlp, elsiocb, NLP_EVT_RCV_PRLO);
+ break;
+ case ELS_CMD_RSCN:
+ phba->fc_stat.elsRcvRSCN++;
+ lpfc_els_rcv_rscn(phba, elsiocb, ndlp, newnode);
+ if (newnode) {
+ mempool_free( ndlp, phba->nlp_mem_pool);
+ }
+ break;
+ case ELS_CMD_ADISC:
+ phba->fc_stat.elsRcvADISC++;
+ if (phba->hba_state < LPFC_DISC_AUTH) {
+ rjt_err = LSEXP_NOTHING_MORE;
+ break;
+ }
+ lpfc_disc_state_machine(phba, ndlp, elsiocb, NLP_EVT_RCV_ADISC);
+ break;
+ case ELS_CMD_PDISC:
+ phba->fc_stat.elsRcvPDISC++;
+ if (phba->hba_state < LPFC_DISC_AUTH) {
+ rjt_err = LSEXP_NOTHING_MORE;
+ break;
+ }
+ lpfc_disc_state_machine(phba, ndlp, elsiocb, NLP_EVT_RCV_PDISC);
+ break;
+ case ELS_CMD_FARPR:
+ phba->fc_stat.elsRcvFARPR++;
+ lpfc_els_rcv_farpr(phba, elsiocb, ndlp);
+ break;
+ case ELS_CMD_FARP:
+ phba->fc_stat.elsRcvFARP++;
+ lpfc_els_rcv_farp(phba, elsiocb, ndlp);
+ break;
+ case ELS_CMD_FAN:
+ phba->fc_stat.elsRcvFAN++;
+ lpfc_els_rcv_fan(phba, elsiocb, ndlp);
+ break;
+ case ELS_CMD_RRQ:
+ phba->fc_stat.elsRcvRRQ++;
+ lpfc_els_rcv_rrq(phba, elsiocb, ndlp);
+ break;
+ case ELS_CMD_PRLI:
+ phba->fc_stat.elsRcvPRLI++;
+ if (phba->hba_state < LPFC_DISC_AUTH) {
+ rjt_err = LSEXP_NOTHING_MORE;
+ break;
+ }
+ lpfc_disc_state_machine(phba, ndlp, elsiocb, NLP_EVT_RCV_PRLI);
+ break;
+ case ELS_CMD_RNID:
+ phba->fc_stat.elsRcvRNID++;
+ lpfc_els_rcv_rnid(phba, elsiocb, ndlp);
+ break;
+ default:
+ /* Unsupported ELS command, reject */
+ rjt_err = LSEXP_NOTHING_MORE;
+
+ /* Unknown ELS command <elsCmd> received from NPORT <did> */
+ lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
+ "%d:0115 Unknown ELS command x%x received from "
+ "NPORT x%x\n", phba->brd_no, cmd, did);
+ if (newnode) {
+ mempool_free( ndlp, phba->nlp_mem_pool);
+ }
+ break;
+ }
+
+	/* Check whether we need to LS_RJT the received ELS cmd */
+ if (rjt_err) {
+ stat.un.b.lsRjtRsvd0 = 0;
+ stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
+ stat.un.b.lsRjtRsnCodeExp = rjt_err;
+ stat.un.b.vendorUnique = 0;
+ lpfc_els_rsp_reject(phba, stat.un.lsRjtError, elsiocb, ndlp);
+ }
+
+ if (elsiocb->context2) {
+ lpfc_mbuf_free(phba, mp->virt, mp->phys);
+ kfree(mp);
+ }
+dropit:
+	/* Check whether we need to drop the received ELS cmd */
+ if (drop_cmd == 1) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
+ "%d:0111 Dropping received ELS cmd "
+ "Data: x%x x%x\n", phba->brd_no,
+ icmd->ulpStatus, icmd->un.ulpWord[4]);
+ phba->fc_stat.elsRcvDrop++;
+ }
+ return;
+}
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
new file mode 100644
index 00000000000..d546206038b
--- /dev/null
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -0,0 +1,2537 @@
+/*******************************************************************
+ * This file is part of the Emulex Linux Device Driver for *
+ * Enterprise Fibre Channel Host Bus Adapters. *
+ * Refer to the README file included with this package for *
+ * driver version and adapter support. *
+ * Copyright (C) 2004 Emulex Corporation. *
+ * www.emulex.com *
+ * *
+ * This program is free software; you can redistribute it and/or *
+ * modify it under the terms of the GNU General Public License *
+ * as published by the Free Software Foundation; either version 2 *
+ * of the License, or (at your option) any later version. *
+ * *
+ * This program is distributed in the hope that it will be useful, *
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of *
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
+ * GNU General Public License for more details, a copy of which *
+ * can be found in the file COPYING included with this package. *
+ *******************************************************************/
+
+/*
+ * $Id: lpfc_hbadisc.c 1.266 2005/04/13 11:59:06EDT sf_support Exp $
+ */
+
+#include <linux/blkdev.h>
+#include <linux/pci.h>
+#include <linux/kthread.h>
+#include <linux/interrupt.h>
+
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_transport_fc.h>
+
+#include "lpfc_hw.h"
+#include "lpfc_disc.h"
+#include "lpfc_sli.h"
+#include "lpfc_scsi.h"
+#include "lpfc.h"
+#include "lpfc_logmsg.h"
+#include "lpfc_crtn.h"
+
+/* AlpaArray for assignment of SCSI ids for scan-down and bind_method */
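+/*
+ * These are the 126 valid AL_PA values, in descending numerical order.
+ * Indexing the table positionally yields a stable ordering from which
+ * scan-down SCSI ids can be assigned.
+ */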
+static uint8_t lpfcAlpaArray[] = {
+ 0xEF, 0xE8, 0xE4, 0xE2, 0xE1, 0xE0, 0xDC, 0xDA, 0xD9, 0xD6,
+ 0xD5, 0xD4, 0xD3, 0xD2, 0xD1, 0xCE, 0xCD, 0xCC, 0xCB, 0xCA,
+ 0xC9, 0xC7, 0xC6, 0xC5, 0xC3, 0xBC, 0xBA, 0xB9, 0xB6, 0xB5,
+ 0xB4, 0xB3, 0xB2, 0xB1, 0xAE, 0xAD, 0xAC, 0xAB, 0xAA, 0xA9,
+ 0xA7, 0xA6, 0xA5, 0xA3, 0x9F, 0x9E, 0x9D, 0x9B, 0x98, 0x97,
+ 0x90, 0x8F, 0x88, 0x84, 0x82, 0x81, 0x80, 0x7C, 0x7A, 0x79,
+ 0x76, 0x75, 0x74, 0x73, 0x72, 0x71, 0x6E, 0x6D, 0x6C, 0x6B,
+ 0x6A, 0x69, 0x67, 0x66, 0x65, 0x63, 0x5C, 0x5A, 0x59, 0x56,
+ 0x55, 0x54, 0x53, 0x52, 0x51, 0x4E, 0x4D, 0x4C, 0x4B, 0x4A,
+ 0x49, 0x47, 0x46, 0x45, 0x43, 0x3C, 0x3A, 0x39, 0x36, 0x35,
+ 0x34, 0x33, 0x32, 0x31, 0x2E, 0x2D, 0x2C, 0x2B, 0x2A, 0x29,
+ 0x27, 0x26, 0x25, 0x23, 0x1F, 0x1E, 0x1D, 0x1B, 0x18, 0x17,
+ 0x10, 0x0F, 0x08, 0x04, 0x02, 0x01
+};
+
+static void lpfc_disc_timeout_handler(struct lpfc_hba *);
+
+static void
+lpfc_process_nodev_timeout(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
+{
+ if (!(ndlp->nlp_type & NLP_FABRIC)) {
+ /* Nodev timeout on NPort <nlp_DID> */
+ lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
+ "%d:0203 Nodev timeout on NPort x%x "
+ "Data: x%x x%x x%x\n",
+ phba->brd_no, ndlp->nlp_DID, ndlp->nlp_flag,
+ ndlp->nlp_state, ndlp->nlp_rpi);
+ }
+
+ spin_lock_irq(phba->host->host_lock);
+ if (!(ndlp->nlp_flag & NLP_NODEV_TMO)) {
+ spin_unlock_irq(phba->host->host_lock);
+ return;
+ }
+
+ ndlp->nlp_flag &= ~NLP_NODEV_TMO;
+
+ if (ndlp->nlp_sid != NLP_NO_SID) {
+ /* flush the target */
+ lpfc_sli_abort_iocb(phba, &phba->sli.ring[phba->sli.fcp_ring],
+ ndlp->nlp_sid, 0, 0, LPFC_CTX_TGT);
+ }
+ spin_unlock_irq(phba->host->host_lock);
+
+ lpfc_disc_state_machine(phba, ndlp, NULL, NLP_EVT_DEVICE_RM);
+ return;
+}
+
+static void
+lpfc_work_list_done(struct lpfc_hba * phba)
+{
+ struct lpfc_work_evt *evtp = NULL;
+ struct lpfc_nodelist *ndlp;
+ int free_evt;
+
+ spin_lock_irq(phba->host->host_lock);
+	while (!list_empty(&phba->work_list)) {
+ list_remove_head((&phba->work_list), evtp, typeof(*evtp),
+ evt_listp);
+ spin_unlock_irq(phba->host->host_lock);
+ free_evt = 1;
+		switch (evtp->evt) {
+ case LPFC_EVT_NODEV_TMO:
+ ndlp = (struct lpfc_nodelist *)(evtp->evt_arg1);
+ lpfc_process_nodev_timeout(phba, ndlp);
+ free_evt = 0;
+ break;
+ case LPFC_EVT_ELS_RETRY:
+ ndlp = (struct lpfc_nodelist *)(evtp->evt_arg1);
+ lpfc_els_retry_delay_handler(ndlp);
+ free_evt = 0;
+ break;
+ case LPFC_EVT_ONLINE:
+ *(int *)(evtp->evt_arg1) = lpfc_online(phba);
+ complete((struct completion *)(evtp->evt_arg2));
+ break;
+ case LPFC_EVT_OFFLINE:
+ *(int *)(evtp->evt_arg1) = lpfc_offline(phba);
+ complete((struct completion *)(evtp->evt_arg2));
+ break;
+ }
+ if (free_evt)
+ kfree(evtp);
+ spin_lock_irq(phba->host->host_lock);
+ }
+ spin_unlock_irq(phba->host->host_lock);
+
+}
+
+static void
+lpfc_work_done(struct lpfc_hba * phba)
+{
+ struct lpfc_sli_ring *pring;
+ int i;
+ uint32_t ha_copy;
+ uint32_t control;
+ uint32_t work_hba_events;
+
+ spin_lock_irq(phba->host->host_lock);
+ ha_copy = phba->work_ha;
+ phba->work_ha = 0;
+	work_hba_events = phba->work_hba_events;
+ spin_unlock_irq(phba->host->host_lock);
+
+	if (ha_copy & HA_ERATT)
+		lpfc_handle_eratt(phba);
+
+	if (ha_copy & HA_MBATT)
+		lpfc_sli_handle_mb_event(phba);
+
+	if (ha_copy & HA_LATT)
+		lpfc_handle_latt(phba);
+
+ if (work_hba_events & WORKER_DISC_TMO)
+ lpfc_disc_timeout_handler(phba);
+
+ if (work_hba_events & WORKER_ELS_TMO)
+ lpfc_els_timeout_handler(phba);
+
+ if (work_hba_events & WORKER_MBOX_TMO)
+ lpfc_mbox_timeout_handler(phba);
+
+ if (work_hba_events & WORKER_FDMI_TMO)
+ lpfc_fdmi_tmo_handler(phba);
+
+ spin_lock_irq(phba->host->host_lock);
+ phba->work_hba_events &= ~work_hba_events;
+ spin_unlock_irq(phba->host->host_lock);
+
+ for (i = 0; i < phba->sli.num_rings; i++, ha_copy >>= 4) {
+ pring = &phba->sli.ring[i];
+ if ((ha_copy & HA_RXATT)
+ || (pring->flag & LPFC_DEFERRED_RING_EVENT)) {
+ if (pring->flag & LPFC_STOP_IOCB_MASK) {
+ pring->flag |= LPFC_DEFERRED_RING_EVENT;
+ } else {
+ lpfc_sli_handle_slow_ring_event(phba, pring,
+ (ha_copy &
+ HA_RXMASK));
+ pring->flag &= ~LPFC_DEFERRED_RING_EVENT;
+ }
+ /*
+ * Turn on Ring interrupts
+ */
+ spin_lock_irq(phba->host->host_lock);
+ control = readl(phba->HCregaddr);
+ control |= (HC_R0INT_ENA << i);
+ writel(control, phba->HCregaddr);
+ readl(phba->HCregaddr); /* flush */
+ spin_unlock_irq(phba->host->host_lock);
+ }
+ }
+
+	lpfc_work_list_done(phba);
+
+}
+
+static int
+check_work_wait_done(struct lpfc_hba *phba)
+{
+ spin_lock_irq(phba->host->host_lock);
+ if (phba->work_ha ||
+ phba->work_hba_events ||
+ (!list_empty(&phba->work_list)) ||
+ kthread_should_stop()) {
+ spin_unlock_irq(phba->host->host_lock);
+ return 1;
+ } else {
+ spin_unlock_irq(phba->host->host_lock);
+ return 0;
+ }
+}
+
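+/*
+ * Main loop of the per-HBA worker thread.  It parks on a local wait
+ * queue, published through phba->work_wait so event posters can wake it,
+ * and check_work_wait_done() re-checks under the host lock for pending
+ * attention bits, HBA events, or queued work entries before each pass
+ * of lpfc_work_done().  kthread_stop() terminates the loop.
+ */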
+int
+lpfc_do_work(void *p)
+{
+ struct lpfc_hba *phba = p;
+ int rc;
+ DECLARE_WAIT_QUEUE_HEAD(work_waitq);
+
+ set_user_nice(current, -20);
+ phba->work_wait = &work_waitq;
+
+ while (1) {
+
+ rc = wait_event_interruptible(work_waitq,
+ check_work_wait_done(phba));
+ BUG_ON(rc);
+
+ if (kthread_should_stop())
+ break;
+
+ lpfc_work_done(phba);
+
+ }
+ phba->work_wait = NULL;
+ return 0;
+}
+
+/*
+ * This is only called to handle FC worker events. Since this is a rare
+ * occurrence, we allocate a struct lpfc_work_evt structure here instead of
+ * embedding it in the IOCB.
+ */
+int
+lpfc_workq_post_event(struct lpfc_hba * phba, void *arg1, void *arg2,
+ uint32_t evt)
+{
+ struct lpfc_work_evt *evtp;
+
+ /*
+ * All Mailbox completions and LPFC_ELS_RING rcv ring IOCB events will
+ * be queued to worker thread for processing
+ */
+ evtp = kmalloc(sizeof(struct lpfc_work_evt), GFP_KERNEL);
+ if (!evtp)
+ return 0;
+
+ evtp->evt_arg1 = arg1;
+ evtp->evt_arg2 = arg2;
+ evtp->evt = evt;
+
+	/* work_list is walked under the host lock, so queue under it too */
+	spin_lock_irq(phba->host->host_lock);
+	list_add_tail(&evtp->evt_listp, &phba->work_list);
+	if (phba->work_wait)
+		wake_up(phba->work_wait);
+	spin_unlock_irq(phba->host->host_lock);
+
+ return 1;
+}
+
+int
+lpfc_linkdown(struct lpfc_hba * phba)
+{
+ struct lpfc_sli *psli;
+ struct lpfc_nodelist *ndlp, *next_ndlp;
+ struct list_head *listp;
+ struct list_head *node_list[7];
+ LPFC_MBOXQ_t *mb;
+ int rc, i;
+
+ psli = &phba->sli;
+
+ spin_lock_irq(phba->host->host_lock);
+ phba->hba_state = LPFC_LINK_DOWN;
+ spin_unlock_irq(phba->host->host_lock);
+
+ /* Clean up any firmware default rpi's */
+ if ((mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL))) {
+ lpfc_unreg_did(phba, 0xffffffff, mb);
+		mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+ if (lpfc_sli_issue_mbox(phba, mb, (MBX_NOWAIT | MBX_STOP_IOCB))
+ == MBX_NOT_FINISHED) {
+ mempool_free( mb, phba->mbox_mem_pool);
+ }
+ }
+
+ /* Cleanup any outstanding RSCN activity */
+ lpfc_els_flush_rscn(phba);
+
+ /* Cleanup any outstanding ELS commands */
+ lpfc_els_flush_cmd(phba);
+
+ /* Issue a LINK DOWN event to all nodes */
+ node_list[0] = &phba->fc_npr_list; /* MUST do this list first */
+ node_list[1] = &phba->fc_nlpmap_list;
+ node_list[2] = &phba->fc_nlpunmap_list;
+ node_list[3] = &phba->fc_prli_list;
+ node_list[4] = &phba->fc_reglogin_list;
+ node_list[5] = &phba->fc_adisc_list;
+ node_list[6] = &phba->fc_plogi_list;
+ for (i = 0; i < 7; i++) {
+ listp = node_list[i];
+ if (list_empty(listp))
+ continue;
+
+ list_for_each_entry_safe(ndlp, next_ndlp, listp, nlp_listp) {
+ /* Fabric nodes are not handled thru state machine for
+ link down */
+ if (ndlp->nlp_type & NLP_FABRIC) {
+ /* Remove ALL Fabric nodes except Fabric_DID */
+ if (ndlp->nlp_DID != Fabric_DID) {
+ /* Take it off current list and free */
+ lpfc_nlp_list(phba, ndlp,
+ NLP_NO_LIST);
+ }
+ }
+ else {
+
+ rc = lpfc_disc_state_machine(phba, ndlp, NULL,
+ NLP_EVT_DEVICE_RECOVERY);
+
+ /* Check config parameter use-adisc or FCP-2 */
+ if ((rc != NLP_STE_FREED_NODE) &&
+ (phba->cfg_use_adisc == 0) &&
+ !(ndlp->nlp_fcp_info &
+ NLP_FCP_2_DEVICE)) {
+ /* We know we will have to relogin, so
+ * unreglogin the rpi right now to fail
+ * any outstanding I/Os quickly.
+ */
+ lpfc_unreg_rpi(phba, ndlp);
+ }
+ }
+ }
+ }
+
+ /* free any ndlp's on unused list */
+ list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_unused_list,
+ nlp_listp) {
+ lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
+ }
+
+ /* Setup myDID for link up if we are in pt2pt mode */
+ if (phba->fc_flag & FC_PT2PT) {
+ phba->fc_myDID = 0;
+ if ((mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL))) {
+ lpfc_config_link(phba, mb);
+			mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+ if (lpfc_sli_issue_mbox
+ (phba, mb, (MBX_NOWAIT | MBX_STOP_IOCB))
+ == MBX_NOT_FINISHED) {
+ mempool_free( mb, phba->mbox_mem_pool);
+ }
+ }
+ spin_lock_irq(phba->host->host_lock);
+ phba->fc_flag &= ~(FC_PT2PT | FC_PT2PT_PLOGI);
+ spin_unlock_irq(phba->host->host_lock);
+ }
+ spin_lock_irq(phba->host->host_lock);
+ phba->fc_flag &= ~FC_LBIT;
+ spin_unlock_irq(phba->host->host_lock);
+
+ /* Turn off discovery timer if its running */
+ lpfc_can_disctmo(phba);
+
+ /* Must process IOCBs on all rings to handle ABORTed I/Os */
+ return (0);
+}
+
+static int
+lpfc_linkup(struct lpfc_hba * phba)
+{
+ struct lpfc_nodelist *ndlp, *next_ndlp;
+
+ spin_lock_irq(phba->host->host_lock);
+ phba->hba_state = LPFC_LINK_UP;
+ phba->fc_flag &= ~(FC_PT2PT | FC_PT2PT_PLOGI | FC_ABORT_DISCOVERY |
+ FC_RSCN_MODE | FC_NLP_MORE | FC_RSCN_DISCOVERY);
+ phba->fc_flag |= FC_NDISC_ACTIVE;
+ phba->fc_ns_retry = 0;
+ spin_unlock_irq(phba->host->host_lock);
+
+
+ /*
+ * Clean up old Fabric NLP_FABRIC logins.
+ */
+ list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_nlpunmap_list,
+ nlp_listp) {
+ if (ndlp->nlp_DID == Fabric_DID) {
+ /* Take it off current list and free */
+ lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
+ }
+ }
+
+ /* free any ndlp's on unused list */
+ list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_unused_list,
+ nlp_listp) {
+ lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
+ }
+
+ return 0;
+}
+
+/*
+ * This routine handles processing a CLEAR_LA mailbox
+ * command upon completion. It is setup in the LPFC_MBOXQ
+ * as the completion routine when the command is
+ * handed off to the SLI layer.
+ */
+void
+lpfc_mbx_cmpl_clear_la(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
+{
+ struct lpfc_sli *psli;
+ MAILBOX_t *mb;
+ uint32_t control;
+
+ psli = &phba->sli;
+ mb = &pmb->mb;
+ /* Since we don't do discovery right now, turn these off here */
+ psli->ring[psli->ip_ring].flag &= ~LPFC_STOP_IOCB_EVENT;
+ psli->ring[psli->fcp_ring].flag &= ~LPFC_STOP_IOCB_EVENT;
+ psli->ring[psli->next_ring].flag &= ~LPFC_STOP_IOCB_EVENT;
+
+ /* Check for error */
+ if ((mb->mbxStatus) && (mb->mbxStatus != 0x1601)) {
+ /* CLEAR_LA mbox error <mbxStatus> state <hba_state> */
+ lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
+ "%d:0320 CLEAR_LA mbxStatus error x%x hba "
+ "state x%x\n",
+ phba->brd_no, mb->mbxStatus, phba->hba_state);
+
+ phba->hba_state = LPFC_HBA_ERROR;
+ goto out;
+ }
+
+ if (phba->fc_flag & FC_ABORT_DISCOVERY)
+ goto out;
+
+ phba->num_disc_nodes = 0;
+ /* go thru NPR list and issue ELS PLOGIs */
+ if (phba->fc_npr_cnt) {
+ lpfc_els_disc_plogi(phba);
+ }
+
+	if (!phba->num_disc_nodes) {
+ spin_lock_irq(phba->host->host_lock);
+ phba->fc_flag &= ~FC_NDISC_ACTIVE;
+ spin_unlock_irq(phba->host->host_lock);
+ }
+
+ phba->hba_state = LPFC_HBA_READY;
+
+out:
+ /* Device Discovery completes */
+ lpfc_printf_log(phba,
+ KERN_INFO,
+ LOG_DISCOVERY,
+ "%d:0225 Device Discovery completes\n",
+ phba->brd_no);
+
+ mempool_free( pmb, phba->mbox_mem_pool);
+
+ spin_lock_irq(phba->host->host_lock);
+ phba->fc_flag &= ~FC_ABORT_DISCOVERY;
+ if (phba->fc_flag & FC_ESTABLISH_LINK) {
+ phba->fc_flag &= ~FC_ESTABLISH_LINK;
+ }
+ spin_unlock_irq(phba->host->host_lock);
+
+ del_timer_sync(&phba->fc_estabtmo);
+
+ lpfc_can_disctmo(phba);
+
+ /* turn on Link Attention interrupts */
+ spin_lock_irq(phba->host->host_lock);
+ psli->sli_flag |= LPFC_PROCESS_LA;
+ control = readl(phba->HCregaddr);
+ control |= HC_LAINT_ENA;
+ writel(control, phba->HCregaddr);
+ readl(phba->HCregaddr); /* flush */
+ spin_unlock_irq(phba->host->host_lock);
+
+ return;
+}
+
+static void
+lpfc_mbx_cmpl_config_link(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
+{
+ struct lpfc_sli *psli;
+ MAILBOX_t *mb;
+
+ psli = &phba->sli;
+ mb = &pmb->mb;
+ /* Check for error */
+ if (mb->mbxStatus) {
+ /* CONFIG_LINK mbox error <mbxStatus> state <hba_state> */
+ lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
+ "%d:0306 CONFIG_LINK mbxStatus error x%x "
+ "HBA state x%x\n",
+ phba->brd_no, mb->mbxStatus, phba->hba_state);
+
+ lpfc_linkdown(phba);
+ phba->hba_state = LPFC_HBA_ERROR;
+ goto out;
+ }
+
+ if (phba->hba_state == LPFC_LOCAL_CFG_LINK) {
+ if (phba->fc_topology == TOPOLOGY_LOOP) {
+			/* If we are public loop and the L bit was not set */
+ if ((phba->fc_flag & FC_PUBLIC_LOOP) &&
+ !(phba->fc_flag & FC_LBIT)) {
+				/* Need to wait for FAN - use discovery timer
+				 * for timeout. hba_state remains
+				 * LPFC_LOCAL_CFG_LINK while waiting for FAN.
+				 */
+ lpfc_set_disctmo(phba);
+ mempool_free( pmb, phba->mbox_mem_pool);
+ return;
+ }
+ }
+
+		/* Start discovery by sending a FLOGI. hba_state remains
+		 * LPFC_FLOGI while waiting for FLOGI cmpl.
+		 */
+ phba->hba_state = LPFC_FLOGI;
+ lpfc_set_disctmo(phba);
+ lpfc_initial_flogi(phba);
+ mempool_free( pmb, phba->mbox_mem_pool);
+ return;
+ }
+ if (phba->hba_state == LPFC_FABRIC_CFG_LINK) {
+ mempool_free( pmb, phba->mbox_mem_pool);
+ return;
+ }
+
+out:
+ /* CONFIG_LINK bad hba state <hba_state> */
+ lpfc_printf_log(phba,
+ KERN_ERR,
+ LOG_DISCOVERY,
+ "%d:0200 CONFIG_LINK bad hba state x%x\n",
+ phba->brd_no, phba->hba_state);
+
+ if (phba->hba_state != LPFC_CLEAR_LA) {
+ lpfc_clear_la(phba, pmb);
+ pmb->mbox_cmpl = lpfc_mbx_cmpl_clear_la;
+ if (lpfc_sli_issue_mbox(phba, pmb, (MBX_NOWAIT | MBX_STOP_IOCB))
+ == MBX_NOT_FINISHED) {
+ mempool_free( pmb, phba->mbox_mem_pool);
+ lpfc_disc_flush_list(phba);
+ psli->ring[(psli->ip_ring)].flag &=
+ ~LPFC_STOP_IOCB_EVENT;
+ psli->ring[(psli->fcp_ring)].flag &=
+ ~LPFC_STOP_IOCB_EVENT;
+ psli->ring[(psli->next_ring)].flag &=
+ ~LPFC_STOP_IOCB_EVENT;
+ phba->hba_state = LPFC_HBA_READY;
+ }
+ } else {
+ mempool_free( pmb, phba->mbox_mem_pool);
+ }
+ return;
+}
+
+static void
+lpfc_mbx_cmpl_read_sparam(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
+{
+ struct lpfc_sli *psli = &phba->sli;
+ MAILBOX_t *mb = &pmb->mb;
+ struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) pmb->context1;
+
+
+ /* Check for error */
+ if (mb->mbxStatus) {
+ /* READ_SPARAM mbox error <mbxStatus> state <hba_state> */
+ lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
+ "%d:0319 READ_SPARAM mbxStatus error x%x "
+ "hba state x%x>\n",
+ phba->brd_no, mb->mbxStatus, phba->hba_state);
+
+ lpfc_linkdown(phba);
+ phba->hba_state = LPFC_HBA_ERROR;
+ goto out;
+ }
+
+ memcpy((uint8_t *) & phba->fc_sparam, (uint8_t *) mp->virt,
+ sizeof (struct serv_parm));
+ memcpy((uint8_t *) & phba->fc_nodename,
+ (uint8_t *) & phba->fc_sparam.nodeName,
+ sizeof (struct lpfc_name));
+ memcpy((uint8_t *) & phba->fc_portname,
+ (uint8_t *) & phba->fc_sparam.portName,
+ sizeof (struct lpfc_name));
+ lpfc_mbuf_free(phba, mp->virt, mp->phys);
+ kfree(mp);
+ mempool_free( pmb, phba->mbox_mem_pool);
+ return;
+
+out:
+ pmb->context1 = NULL;
+ lpfc_mbuf_free(phba, mp->virt, mp->phys);
+ kfree(mp);
+ if (phba->hba_state != LPFC_CLEAR_LA) {
+ lpfc_clear_la(phba, pmb);
+ pmb->mbox_cmpl = lpfc_mbx_cmpl_clear_la;
+ if (lpfc_sli_issue_mbox(phba, pmb, (MBX_NOWAIT | MBX_STOP_IOCB))
+ == MBX_NOT_FINISHED) {
+ mempool_free( pmb, phba->mbox_mem_pool);
+ lpfc_disc_flush_list(phba);
+ psli->ring[(psli->ip_ring)].flag &=
+ ~LPFC_STOP_IOCB_EVENT;
+ psli->ring[(psli->fcp_ring)].flag &=
+ ~LPFC_STOP_IOCB_EVENT;
+ psli->ring[(psli->next_ring)].flag &=
+ ~LPFC_STOP_IOCB_EVENT;
+ phba->hba_state = LPFC_HBA_READY;
+ }
+ } else {
+ mempool_free( pmb, phba->mbox_mem_pool);
+ }
+ return;
+}
+
+static void
+lpfc_mbx_process_link_up(struct lpfc_hba *phba, READ_LA_VAR *la)
+{
+ int i;
+ LPFC_MBOXQ_t *sparam_mbox, *cfglink_mbox;
+ sparam_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ cfglink_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+
+ spin_lock_irq(phba->host->host_lock);
+	switch (la->UlnkSpeed) {
+ case LA_1GHZ_LINK:
+ phba->fc_linkspeed = LA_1GHZ_LINK;
+ break;
+ case LA_2GHZ_LINK:
+ phba->fc_linkspeed = LA_2GHZ_LINK;
+ break;
+ case LA_4GHZ_LINK:
+ phba->fc_linkspeed = LA_4GHZ_LINK;
+ break;
+ default:
+ phba->fc_linkspeed = LA_UNKNW_LINK;
+ break;
+ }
+
+ phba->fc_topology = la->topology;
+
+ if (phba->fc_topology == TOPOLOGY_LOOP) {
+ /* Get Loop Map information */
+
+ if (la->il)
+ phba->fc_flag |= FC_LBIT;
+
+ phba->fc_myDID = la->granted_AL_PA;
+ i = la->un.lilpBde64.tus.f.bdeSize;
+
+ if (i == 0) {
+ phba->alpa_map[0] = 0;
+ } else {
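+			/*
+			 * alpa_map[0] holds the number of AL_PAs in the
+			 * positional map (the entries follow it), so the
+			 * verbose logging below dumps the map 16 bytes at
+			 * a time, packed into four words per message.
+			 */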
+ if (phba->cfg_log_verbose & LOG_LINK_EVENT) {
+ int numalpa, j, k;
+ union {
+ uint8_t pamap[16];
+ struct {
+ uint32_t wd1;
+ uint32_t wd2;
+ uint32_t wd3;
+ uint32_t wd4;
+ } pa;
+ } un;
+ numalpa = phba->alpa_map[0];
+ j = 0;
+ while (j < numalpa) {
+ memset(un.pamap, 0, 16);
+ for (k = 1; j < numalpa; k++) {
+ un.pamap[k - 1] =
+ phba->alpa_map[j + 1];
+ j++;
+ if (k == 16)
+ break;
+ }
+ /* Link Up Event ALPA map */
+ lpfc_printf_log(phba,
+ KERN_WARNING,
+ LOG_LINK_EVENT,
+ "%d:1304 Link Up Event "
+ "ALPA map Data: x%x "
+ "x%x x%x x%x\n",
+ phba->brd_no,
+ un.pa.wd1, un.pa.wd2,
+ un.pa.wd3, un.pa.wd4);
+ }
+ }
+ }
+ } else {
+ phba->fc_myDID = phba->fc_pref_DID;
+ phba->fc_flag |= FC_LBIT;
+ }
+ spin_unlock_irq(phba->host->host_lock);
+
+ lpfc_linkup(phba);
+ if (sparam_mbox) {
+ lpfc_read_sparam(phba, sparam_mbox);
+ sparam_mbox->mbox_cmpl = lpfc_mbx_cmpl_read_sparam;
+ lpfc_sli_issue_mbox(phba, sparam_mbox,
+ (MBX_NOWAIT | MBX_STOP_IOCB));
+ }
+
+ if (cfglink_mbox) {
+ phba->hba_state = LPFC_LOCAL_CFG_LINK;
+ lpfc_config_link(phba, cfglink_mbox);
+ cfglink_mbox->mbox_cmpl = lpfc_mbx_cmpl_config_link;
+ lpfc_sli_issue_mbox(phba, cfglink_mbox,
+ (MBX_NOWAIT | MBX_STOP_IOCB));
+ }
+}
+
+static void
+lpfc_mbx_issue_link_down(struct lpfc_hba *phba)
+{
+ uint32_t control;
+ struct lpfc_sli *psli = &phba->sli;
+
+ lpfc_linkdown(phba);
+
+ /* turn on Link Attention interrupts - no CLEAR_LA needed */
+ spin_lock_irq(phba->host->host_lock);
+ psli->sli_flag |= LPFC_PROCESS_LA;
+ control = readl(phba->HCregaddr);
+ control |= HC_LAINT_ENA;
+ writel(control, phba->HCregaddr);
+ readl(phba->HCregaddr); /* flush */
+ spin_unlock_irq(phba->host->host_lock);
+}
+
+/*
+ * This routine handles processing a READ_LA mailbox
+ * command upon completion. It is setup in the LPFC_MBOXQ
+ * as the completion routine when the command is
+ * handed off to the SLI layer.
+ */
+void
+lpfc_mbx_cmpl_read_la(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
+{
+ READ_LA_VAR *la;
+ MAILBOX_t *mb = &pmb->mb;
+ struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
+
+ /* Check for error */
+ if (mb->mbxStatus) {
+ lpfc_printf_log(phba,
+ KERN_INFO,
+ LOG_LINK_EVENT,
+ "%d:1307 READ_LA mbox error x%x state x%x\n",
+ phba->brd_no,
+ mb->mbxStatus, phba->hba_state);
+ lpfc_mbx_issue_link_down(phba);
+ phba->hba_state = LPFC_HBA_ERROR;
+ goto lpfc_mbx_cmpl_read_la_free_mbuf;
+ }
+
+ la = (READ_LA_VAR *) & pmb->mb.un.varReadLA;
+
+ memcpy(&phba->alpa_map[0], mp->virt, 128);
+
+ if (((phba->fc_eventTag + 1) < la->eventTag) ||
+ (phba->fc_eventTag == la->eventTag)) {
+ phba->fc_stat.LinkMultiEvent++;
+ if (la->attType == AT_LINK_UP) {
+ if (phba->fc_eventTag != 0)
+ lpfc_linkdown(phba);
+ }
+ }
+
+ phba->fc_eventTag = la->eventTag;
+
+ if (la->attType == AT_LINK_UP) {
+ phba->fc_stat.LinkUp++;
+ lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
+ "%d:1303 Link Up Event x%x received "
+ "Data: x%x x%x x%x x%x\n",
+ phba->brd_no, la->eventTag, phba->fc_eventTag,
+ la->granted_AL_PA, la->UlnkSpeed,
+ phba->alpa_map[0]);
+ lpfc_mbx_process_link_up(phba, la);
+ } else {
+ phba->fc_stat.LinkDown++;
+ lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
+ "%d:1305 Link Down Event x%x received "
+ "Data: x%x x%x x%x\n",
+ phba->brd_no, la->eventTag, phba->fc_eventTag,
+ phba->hba_state, phba->fc_flag);
+ lpfc_mbx_issue_link_down(phba);
+ }
+
+lpfc_mbx_cmpl_read_la_free_mbuf:
+ lpfc_mbuf_free(phba, mp->virt, mp->phys);
+ kfree(mp);
+ mempool_free(pmb, phba->mbox_mem_pool);
+ return;
+}
+
+/*
+ * This routine handles processing a REG_LOGIN mailbox
+ * command upon completion. It is setup in the LPFC_MBOXQ
+ * as the completion routine when the command is
+ * handed off to the SLI layer.
+ */
+void
+lpfc_mbx_cmpl_reg_login(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
+{
+ struct lpfc_sli *psli;
+ MAILBOX_t *mb;
+ struct lpfc_dmabuf *mp;
+ struct lpfc_nodelist *ndlp;
+
+ psli = &phba->sli;
+ mb = &pmb->mb;
+
+ ndlp = (struct lpfc_nodelist *) pmb->context2;
+ mp = (struct lpfc_dmabuf *) (pmb->context1);
+
+ pmb->context1 = NULL;
+
+ /* Good status, call state machine */
+ lpfc_disc_state_machine(phba, ndlp, pmb, NLP_EVT_CMPL_REG_LOGIN);
+ lpfc_mbuf_free(phba, mp->virt, mp->phys);
+ kfree(mp);
+ mempool_free( pmb, phba->mbox_mem_pool);
+
+ return;
+}
+
+/*
+ * This routine handles processing a Fabric REG_LOGIN mailbox
+ * command upon completion. It is setup in the LPFC_MBOXQ
+ * as the completion routine when the command is
+ * handed off to the SLI layer.
+ */
+void
+lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
+{
+ struct lpfc_sli *psli;
+ MAILBOX_t *mb;
+ struct lpfc_dmabuf *mp;
+ struct lpfc_nodelist *ndlp;
+ struct lpfc_nodelist *ndlp_fdmi;
+
+
+ psli = &phba->sli;
+ mb = &pmb->mb;
+
+ ndlp = (struct lpfc_nodelist *) pmb->context2;
+ mp = (struct lpfc_dmabuf *) (pmb->context1);
+
+ if (mb->mbxStatus) {
+ lpfc_mbuf_free(phba, mp->virt, mp->phys);
+ kfree(mp);
+ mempool_free( pmb, phba->mbox_mem_pool);
+ mempool_free( ndlp, phba->nlp_mem_pool);
+
+ /* FLOGI failed, so just use loop map to make discovery list */
+ lpfc_disc_list_loopmap(phba);
+
+ /* Start discovery */
+ lpfc_disc_start(phba);
+ return;
+ }
+
+ pmb->context1 = NULL;
+
+ if (ndlp->nlp_rpi != 0)
+ lpfc_findnode_remove_rpi(phba, ndlp->nlp_rpi);
+ ndlp->nlp_rpi = mb->un.varWords[0];
+ lpfc_addnode_rpi(phba, ndlp, ndlp->nlp_rpi);
+ ndlp->nlp_type |= NLP_FABRIC;
+ ndlp->nlp_state = NLP_STE_UNMAPPED_NODE;
+ lpfc_nlp_list(phba, ndlp, NLP_UNMAPPED_LIST);
+
+ if (phba->hba_state == LPFC_FABRIC_CFG_LINK) {
+ /* This NPort has been assigned an NPort_ID by the fabric as a
+ * result of the completed fabric login. Issue a State Change
+ * Registration (SCR) ELS request to the fabric controller
+ * (SCR_DID) so that this NPort gets RSCN events from the
+ * fabric.
+ */
+ lpfc_issue_els_scr(phba, SCR_DID, 0);
+
+ /* Allocate a new node instance. If the pool is empty, just
+ * start the discovery process and skip the Nameserver login
+ * process. This is attempted again later on. Otherwise, issue
+ * a Port Login (PLOGI) to the NameServer
+ */
+ if ((ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL))
+ == 0) {
+ lpfc_disc_start(phba);
+ } else {
+ lpfc_nlp_init(phba, ndlp, NameServer_DID);
+ ndlp->nlp_type |= NLP_FABRIC;
+ ndlp->nlp_state = NLP_STE_PLOGI_ISSUE;
+ lpfc_nlp_list(phba, ndlp, NLP_PLOGI_LIST);
+ lpfc_issue_els_plogi(phba, ndlp, 0);
+ if (phba->cfg_fdmi_on) {
+ if ((ndlp_fdmi = mempool_alloc(
+ phba->nlp_mem_pool,
+ GFP_KERNEL))) {
+ lpfc_nlp_init(phba, ndlp_fdmi,
+ FDMI_DID);
+ ndlp_fdmi->nlp_type |= NLP_FABRIC;
+ ndlp_fdmi->nlp_state =
+ NLP_STE_PLOGI_ISSUE;
+ lpfc_issue_els_plogi(phba, ndlp_fdmi,
+ 0);
+ }
+ }
+ }
+ }
+
+ lpfc_mbuf_free(phba, mp->virt, mp->phys);
+ kfree(mp);
+ mempool_free( pmb, phba->mbox_mem_pool);
+
+ return;
+}
+
+/*
+ * This routine handles processing a NameServer REG_LOGIN mailbox
+ * command upon completion. It is setup in the LPFC_MBOXQ
+ * as the completion routine when the command is
+ * handed off to the SLI layer.
+ */
+void
+lpfc_mbx_cmpl_ns_reg_login(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
+{
+ struct lpfc_sli *psli;
+ MAILBOX_t *mb;
+ struct lpfc_dmabuf *mp;
+ struct lpfc_nodelist *ndlp;
+
+ psli = &phba->sli;
+ mb = &pmb->mb;
+
+ ndlp = (struct lpfc_nodelist *) pmb->context2;
+ mp = (struct lpfc_dmabuf *) (pmb->context1);
+
+ if (mb->mbxStatus) {
+ lpfc_mbuf_free(phba, mp->virt, mp->phys);
+ kfree(mp);
+ mempool_free( pmb, phba->mbox_mem_pool);
+ lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
+
+ /* RegLogin failed, so just use loop map to make discovery
+ list */
+ lpfc_disc_list_loopmap(phba);
+
+ /* Start discovery */
+ lpfc_disc_start(phba);
+ return;
+ }
+
+ pmb->context1 = NULL;
+
+ if (ndlp->nlp_rpi != 0)
+ lpfc_findnode_remove_rpi(phba, ndlp->nlp_rpi);
+ ndlp->nlp_rpi = mb->un.varWords[0];
+ lpfc_addnode_rpi(phba, ndlp, ndlp->nlp_rpi);
+ ndlp->nlp_type |= NLP_FABRIC;
+ ndlp->nlp_state = NLP_STE_UNMAPPED_NODE;
+ lpfc_nlp_list(phba, ndlp, NLP_UNMAPPED_LIST);
+
+ if (phba->hba_state < LPFC_HBA_READY) {
+		/* Link up discovery requires Fabric registration. */
+ lpfc_ns_cmd(phba, ndlp, SLI_CTNS_RNN_ID);
+ lpfc_ns_cmd(phba, ndlp, SLI_CTNS_RSNN_NN);
+ lpfc_ns_cmd(phba, ndlp, SLI_CTNS_RFT_ID);
+ }
+
+ phba->fc_ns_retry = 0;
+ /* Good status, issue CT Request to NameServer */
+ if (lpfc_ns_cmd(phba, ndlp, SLI_CTNS_GID_FT)) {
+ /* Cannot issue NameServer Query, so finish up discovery */
+ lpfc_disc_start(phba);
+ }
+
+ lpfc_mbuf_free(phba, mp->virt, mp->phys);
+ kfree(mp);
+ mempool_free( pmb, phba->mbox_mem_pool);
+
+ return;
+}
+
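+/*
+ * (Re)register a discovered node with the FC transport class.  The
+ * cached names are stored in wire (big-endian) order, hence the
+ * be64_to_cpu() conversions into the fc_rport_identifiers.  Roles are
+ * derived from nlp_type, and the scsi_target_id the transport assigns
+ * is captured in nlp_sid for use by the FCP path.
+ */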
+static void
+lpfc_register_remote_port(struct lpfc_hba * phba,
+ struct lpfc_nodelist * ndlp)
+{
+ struct fc_rport *rport;
+ struct lpfc_rport_data *rdata;
+ struct fc_rport_identifiers rport_ids;
+ uint64_t wwn;
+
+ /* Remote port has reappeared. Re-register w/ FC transport */
+ memcpy(&wwn, &ndlp->nlp_nodename, sizeof(uint64_t));
+ rport_ids.node_name = be64_to_cpu(wwn);
+ memcpy(&wwn, &ndlp->nlp_portname, sizeof(uint64_t));
+ rport_ids.port_name = be64_to_cpu(wwn);
+ rport_ids.port_id = ndlp->nlp_DID;
+ rport_ids.roles = FC_RPORT_ROLE_UNKNOWN;
+ if (ndlp->nlp_type & NLP_FCP_TARGET)
+ rport_ids.roles |= FC_RPORT_ROLE_FCP_TARGET;
+ if (ndlp->nlp_type & NLP_FCP_INITIATOR)
+ rport_ids.roles |= FC_RPORT_ROLE_FCP_INITIATOR;
+
+ ndlp->rport = rport = fc_remote_port_add(phba->host, 0, &rport_ids);
+ if (!rport) {
+ dev_printk(KERN_WARNING, &phba->pcidev->dev,
+ "Warning: fc_remote_port_add failed\n");
+ return;
+ }
+
+ /* initialize static port data */
+ rport->maxframe_size = ndlp->nlp_maxframe;
+ rport->supported_classes = ndlp->nlp_class_sup;
+ if ((rport->scsi_target_id != -1) &&
+ (rport->scsi_target_id < MAX_FCP_TARGET)) {
+ ndlp->nlp_sid = rport->scsi_target_id;
+ }
+ rdata = rport->dd_data;
+ rdata->pnode = ndlp;
+
+ return;
+}
+
+int
+lpfc_nlp_list(struct lpfc_hba * phba, struct lpfc_nodelist * nlp, int list)
+{
+ enum { none, unmapped, mapped } rport_add = none, rport_del = none;
+ struct lpfc_sli *psli;
+
+ psli = &phba->sli;
+ /* Sanity check to ensure we are not moving to / from the same list */
+ if ((nlp->nlp_flag & NLP_LIST_MASK) == list) {
+ if (list != NLP_NO_LIST)
+ return(0);
+ }
+
+	switch (nlp->nlp_flag & NLP_LIST_MASK) {
+ case NLP_NO_LIST: /* Not on any list */
+ break;
+ case NLP_UNUSED_LIST:
+ phba->fc_unused_cnt--;
+ list_del(&nlp->nlp_listp);
+ break;
+ case NLP_PLOGI_LIST:
+ phba->fc_plogi_cnt--;
+ list_del(&nlp->nlp_listp);
+ break;
+ case NLP_ADISC_LIST:
+ phba->fc_adisc_cnt--;
+ list_del(&nlp->nlp_listp);
+ break;
+ case NLP_REGLOGIN_LIST:
+ phba->fc_reglogin_cnt--;
+ list_del(&nlp->nlp_listp);
+ break;
+ case NLP_PRLI_LIST:
+ phba->fc_prli_cnt--;
+ list_del(&nlp->nlp_listp);
+ break;
+ case NLP_UNMAPPED_LIST:
+ phba->fc_unmap_cnt--;
+ list_del(&nlp->nlp_listp);
+ spin_lock_irq(phba->host->host_lock);
+ nlp->nlp_flag &= ~NLP_TGT_NO_SCSIID;
+ nlp->nlp_type &= ~NLP_FC_NODE;
+ spin_unlock_irq(phba->host->host_lock);
+ phba->nport_event_cnt++;
+ if (nlp->rport)
+ rport_del = unmapped;
+ break;
+ case NLP_MAPPED_LIST:
+ phba->fc_map_cnt--;
+ list_del(&nlp->nlp_listp);
+ phba->nport_event_cnt++;
+ if (nlp->rport)
+ rport_del = mapped;
+ break;
+ case NLP_NPR_LIST:
+ phba->fc_npr_cnt--;
+ list_del(&nlp->nlp_listp);
+ /* Stop delay tmo if taking node off NPR list */
+ if ((nlp->nlp_flag & NLP_DELAY_TMO) &&
+ (list != NLP_NPR_LIST)) {
+ spin_lock_irq(phba->host->host_lock);
+ nlp->nlp_flag &= ~NLP_DELAY_TMO;
+ spin_unlock_irq(phba->host->host_lock);
+ del_timer_sync(&nlp->nlp_delayfunc);
+ if (!list_empty(&nlp->els_retry_evt.evt_listp))
+ list_del_init(&nlp->els_retry_evt.evt_listp);
+ }
+ break;
+ }
+
+ spin_lock_irq(phba->host->host_lock);
+ nlp->nlp_flag &= ~NLP_LIST_MASK;
+ spin_unlock_irq(phba->host->host_lock);
+
+ /* Add NPort <did> to <num> list */
+ lpfc_printf_log(phba,
+ KERN_INFO,
+ LOG_NODE,
+ "%d:0904 Add NPort x%x to %d list Data: x%x\n",
+ phba->brd_no,
+ nlp->nlp_DID, list, nlp->nlp_flag);
+
+ switch(list) {
+ case NLP_NO_LIST: /* No list, just remove it */
+ lpfc_nlp_remove(phba, nlp);
+ break;
+ case NLP_UNUSED_LIST:
+ spin_lock_irq(phba->host->host_lock);
+ nlp->nlp_flag |= list;
+ spin_unlock_irq(phba->host->host_lock);
+ /* Put it at the end of the unused list */
+ list_add_tail(&nlp->nlp_listp, &phba->fc_unused_list);
+ phba->fc_unused_cnt++;
+ break;
+ case NLP_PLOGI_LIST:
+ spin_lock_irq(phba->host->host_lock);
+ nlp->nlp_flag |= list;
+ spin_unlock_irq(phba->host->host_lock);
+ /* Put it at the end of the plogi list */
+ list_add_tail(&nlp->nlp_listp, &phba->fc_plogi_list);
+ phba->fc_plogi_cnt++;
+ break;
+ case NLP_ADISC_LIST:
+ spin_lock_irq(phba->host->host_lock);
+ nlp->nlp_flag |= list;
+ spin_unlock_irq(phba->host->host_lock);
+ /* Put it at the end of the adisc list */
+ list_add_tail(&nlp->nlp_listp, &phba->fc_adisc_list);
+ phba->fc_adisc_cnt++;
+ break;
+ case NLP_REGLOGIN_LIST:
+ spin_lock_irq(phba->host->host_lock);
+ nlp->nlp_flag |= list;
+ spin_unlock_irq(phba->host->host_lock);
+ /* Put it at the end of the reglogin list */
+ list_add_tail(&nlp->nlp_listp, &phba->fc_reglogin_list);
+ phba->fc_reglogin_cnt++;
+ break;
+ case NLP_PRLI_LIST:
+ spin_lock_irq(phba->host->host_lock);
+ nlp->nlp_flag |= list;
+ spin_unlock_irq(phba->host->host_lock);
+ /* Put it at the end of the prli list */
+ list_add_tail(&nlp->nlp_listp, &phba->fc_prli_list);
+ phba->fc_prli_cnt++;
+ break;
+ case NLP_UNMAPPED_LIST:
+ rport_add = unmapped;
+ /* ensure all vestiges of "mapped" significance are gone */
+ nlp->nlp_type &= ~(NLP_FCP_TARGET | NLP_FCP_INITIATOR);
+ spin_lock_irq(phba->host->host_lock);
+ nlp->nlp_flag |= list;
+ spin_unlock_irq(phba->host->host_lock);
+ /* Put it at the end of the unmap list */
+ list_add_tail(&nlp->nlp_listp, &phba->fc_nlpunmap_list);
+ phba->fc_unmap_cnt++;
+ phba->nport_event_cnt++;
+ /* stop nodev tmo if running */
+ if (nlp->nlp_flag & NLP_NODEV_TMO) {
+ spin_lock_irq(phba->host->host_lock);
+ nlp->nlp_flag &= ~NLP_NODEV_TMO;
+ spin_unlock_irq(phba->host->host_lock);
+ del_timer_sync(&nlp->nlp_tmofunc);
+ if (!list_empty(&nlp->nodev_timeout_evt.evt_listp))
+ list_del_init(&nlp->nodev_timeout_evt.
+ evt_listp);
+
+ }
+ nlp->nlp_type |= NLP_FC_NODE;
+ break;
+ case NLP_MAPPED_LIST:
+ rport_add = mapped;
+ spin_lock_irq(phba->host->host_lock);
+ nlp->nlp_flag |= list;
+ spin_unlock_irq(phba->host->host_lock);
+ /* Put it at the end of the map list */
+ list_add_tail(&nlp->nlp_listp, &phba->fc_nlpmap_list);
+ phba->fc_map_cnt++;
+ phba->nport_event_cnt++;
+ /* stop nodev tmo if running */
+ if (nlp->nlp_flag & NLP_NODEV_TMO) {
+ nlp->nlp_flag &= ~NLP_NODEV_TMO;
+ del_timer_sync(&nlp->nlp_tmofunc);
+ if (!list_empty(&nlp->nodev_timeout_evt.evt_listp))
+ list_del_init(&nlp->nodev_timeout_evt.
+ evt_listp);
+
+ }
+ break;
+ case NLP_NPR_LIST:
+ spin_lock_irq(phba->host->host_lock);
+ nlp->nlp_flag |= list;
+ spin_unlock_irq(phba->host->host_lock);
+ /* Put it at the end of the npr list */
+ list_add_tail(&nlp->nlp_listp, &phba->fc_npr_list);
+ phba->fc_npr_cnt++;
+
+ /*
+ * Sanity check for Fabric entity.
+ * Set nodev_tmo for NPR state, for Fabric use 1 sec.
+ */
+ if (nlp->nlp_type & NLP_FABRIC) {
+ mod_timer(&nlp->nlp_tmofunc, jiffies + HZ);
+ }
+ else {
+ mod_timer(&nlp->nlp_tmofunc,
+ jiffies + HZ * phba->cfg_nodev_tmo);
+ }
+ spin_lock_irq(phba->host->host_lock);
+ nlp->nlp_flag |= NLP_NODEV_TMO;
+ nlp->nlp_flag &= ~NLP_RCV_PLOGI;
+ spin_unlock_irq(phba->host->host_lock);
+ break;
+ case NLP_JUST_DQ:
+ break;
+ }
+
+ /*
+ * We make all the calls into the transport after we have
+	 * moved the node between lists. This is so that we don't
+ * release the lock while in-between lists.
+ */
+
+ /* Don't upcall midlayer if we're unloading */
+ if (!(phba->fc_flag & FC_UNLOADING)) {
+ /*
+ * We revalidate the rport pointer as the "add" function
+ * may have removed the remote port.
+ */
+ if ((rport_del != none) && nlp->rport)
+ fc_remote_port_block(nlp->rport);
+
+ if (rport_add != none) {
+ /*
+ * Tell the fc transport about the port, if we haven't
+ * already. If we have, and it's a scsi entity, be
+ * sure to unblock any attached scsi devices
+ */
+ if (!nlp->rport)
+ lpfc_register_remote_port(phba, nlp);
+ else
+ fc_remote_port_unblock(nlp->rport);
+
+ /*
+ * if we added to Mapped list, but the remote port
+ * registration failed or assigned a target id outside
+ * our presentable range - move the node to the
+ * Unmapped List
+ */
+ if ((rport_add == mapped) &&
+ ((!nlp->rport) ||
+ (nlp->rport->scsi_target_id == -1) ||
+ (nlp->rport->scsi_target_id >= MAX_FCP_TARGET))) {
+ nlp->nlp_state = NLP_STE_UNMAPPED_NODE;
+ spin_lock_irq(phba->host->host_lock);
+ nlp->nlp_flag |= NLP_TGT_NO_SCSIID;
+ spin_unlock_irq(phba->host->host_lock);
+ lpfc_nlp_list(phba, nlp, NLP_UNMAPPED_LIST);
+ }
+ }
+ }
+ return (0);
+}
+
+/*
+ * Start / ReStart rescue timer for Discovery / RSCN handling
+ */
+void
+lpfc_set_disctmo(struct lpfc_hba * phba)
+{
+ uint32_t tmo;
+
+ tmo = ((phba->fc_ratov * 2) + 1);
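+	/* e.g. with the default RA_TOV of 2 seconds (FF_DEF_RATOV),
+	 * tmo = (2 * 2) + 1 = 5 seconds.
+	 */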
+
+ mod_timer(&phba->fc_disctmo, jiffies + HZ * tmo);
+ spin_lock_irq(phba->host->host_lock);
+ phba->fc_flag |= FC_DISC_TMO;
+ spin_unlock_irq(phba->host->host_lock);
+
+ /* Start Discovery Timer state <hba_state> */
+ lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
+ "%d:0247 Start Discovery Timer state x%x "
+ "Data: x%x x%lx x%x x%x\n",
+ phba->brd_no,
+ phba->hba_state, tmo, (unsigned long)&phba->fc_disctmo,
+ phba->fc_plogi_cnt, phba->fc_adisc_cnt);
+
+ return;
+}
+
+/*
+ * Cancel rescue timer for Discovery / RSCN handling
+ */
+int
+lpfc_can_disctmo(struct lpfc_hba * phba)
+{
+	/* Turn off discovery timer if it's running */
+ if (phba->fc_flag & FC_DISC_TMO) {
+ spin_lock_irq(phba->host->host_lock);
+ phba->fc_flag &= ~FC_DISC_TMO;
+ spin_unlock_irq(phba->host->host_lock);
+ del_timer_sync(&phba->fc_disctmo);
+ phba->work_hba_events &= ~WORKER_DISC_TMO;
+ }
+
+ /* Cancel Discovery Timer state <hba_state> */
+ lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
+ "%d:0248 Cancel Discovery Timer state x%x "
+ "Data: x%x x%x x%x\n",
+ phba->brd_no, phba->hba_state, phba->fc_flag,
+ phba->fc_plogi_cnt, phba->fc_adisc_cnt);
+
+ return (0);
+}
+
+/*
+ * Check specified ring for outstanding IOCB on the SLI queue
+ * Return true if iocb matches the specified nport
+ */
+int
+lpfc_check_sli_ndlp(struct lpfc_hba * phba,
+ struct lpfc_sli_ring * pring,
+ struct lpfc_iocbq * iocb, struct lpfc_nodelist * ndlp)
+{
+ struct lpfc_sli *psli;
+ IOCB_t *icmd;
+
+ psli = &phba->sli;
+ icmd = &iocb->iocb;
+ if (pring->ringno == LPFC_ELS_RING) {
+ switch (icmd->ulpCommand) {
+ case CMD_GEN_REQUEST64_CR:
+ if (icmd->ulpContext == (volatile ushort)ndlp->nlp_rpi)
+ return (1);
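+			/* no break here: a GEN_REQUEST whose context does
+			 * not match the rpi falls through to the context1
+			 * checks used for the ELS commands below.
+			 */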
+ case CMD_ELS_REQUEST64_CR:
+ case CMD_XMIT_ELS_RSP64_CX:
+ if (iocb->context1 == (uint8_t *) ndlp)
+ return (1);
+ }
+ } else if (pring->ringno == psli->ip_ring) {
+
+ } else if (pring->ringno == psli->fcp_ring) {
+ /* Skip match check if waiting to relogin to FCP target */
+ if ((ndlp->nlp_type & NLP_FCP_TARGET) &&
+ (ndlp->nlp_flag & NLP_DELAY_TMO)) {
+ return (0);
+ }
+ if (icmd->ulpContext == (volatile ushort)ndlp->nlp_rpi) {
+ return (1);
+ }
+ } else if (pring->ringno == psli->next_ring) {
+
+ }
+ return (0);
+}
+
+/*
+ * Free resources / clean up outstanding I/Os
+ * associated with nlp_rpi in the LPFC_NODELIST entry.
+ */
+static int
+lpfc_no_rpi(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp)
+{
+ struct lpfc_sli *psli;
+ struct lpfc_sli_ring *pring;
+ struct lpfc_iocbq *iocb, *next_iocb;
+ IOCB_t *icmd;
+ uint32_t rpi, i;
+
+ /*
+ * Everything that matches on txcmplq will be returned
+ * by firmware with a no rpi error.
+ */
+ psli = &phba->sli;
+ rpi = ndlp->nlp_rpi;
+ if (rpi) {
+ /* Now process each ring */
+ for (i = 0; i < psli->num_rings; i++) {
+ pring = &psli->ring[i];
+
+ spin_lock_irq(phba->host->host_lock);
+ list_for_each_entry_safe(iocb, next_iocb, &pring->txq,
+ list) {
+ /*
+ * Check to see if iocb matches the nport we are
+ * looking for
+ */
+ if ((lpfc_check_sli_ndlp
+ (phba, pring, iocb, ndlp))) {
+					/* It matches, so dequeue it and call
+					   its completion handler with an
+					   error */
+ list_del(&iocb->list);
+ pring->txq_cnt--;
+ if (iocb->iocb_cmpl) {
+ icmd = &iocb->iocb;
+ icmd->ulpStatus =
+ IOSTAT_LOCAL_REJECT;
+ icmd->un.ulpWord[4] =
+ IOERR_SLI_ABORTED;
+ spin_unlock_irq(phba->host->
+ host_lock);
+ (iocb->iocb_cmpl) (phba,
+ iocb, iocb);
+ spin_lock_irq(phba->host->
+ host_lock);
+ } else {
+ list_add_tail(&iocb->list,
+ &phba->lpfc_iocb_list);
+ }
+ }
+ }
+ spin_unlock_irq(phba->host->host_lock);
+
+ }
+ }
+ return (0);
+}
+
+/*
+ * Free rpi associated with LPFC_NODELIST entry.
+ * This routine is called from lpfc_freenode(), when we are removing
+ * a LPFC_NODELIST entry. It is also called if the driver initiates a
+ * LOGO that completes successfully, and we are waiting to PLOGI back
+ * to the remote NPort. In addition, it is called after we receive
+ * and unsolicated ELS cmd, send back a rsp, the rsp completes and
+ * we are waiting to PLOGI back to the remote NPort.
+ */
+int
+lpfc_unreg_rpi(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp)
+{
+ LPFC_MBOXQ_t *mbox;
+ int rc;
+
+ if (ndlp->nlp_rpi) {
+ if ((mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL))) {
+ lpfc_unreg_login(phba, ndlp->nlp_rpi, mbox);
+ mbox->mbox_cmpl=lpfc_sli_def_mbox_cmpl;
+ rc = lpfc_sli_issue_mbox
+ (phba, mbox, (MBX_NOWAIT | MBX_STOP_IOCB));
+ if (rc == MBX_NOT_FINISHED)
+ mempool_free( mbox, phba->mbox_mem_pool);
+ }
+ lpfc_findnode_remove_rpi(phba, ndlp->nlp_rpi);
+ lpfc_no_rpi(phba, ndlp);
+ ndlp->nlp_rpi = 0;
+ return 1;
+ }
+ return 0;
+}
+
+/*
+ * Free resources associated with LPFC_NODELIST entry
+ * so it can be freed.
+ */
+static int
+lpfc_freenode(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp)
+{
+ LPFC_MBOXQ_t *mb;
+ LPFC_MBOXQ_t *nextmb;
+ struct lpfc_dmabuf *mp;
+ struct fc_rport *rport;
+
+ /* Cleanup node for NPort <nlp_DID> */
+ lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
+ "%d:0900 Cleanup node for NPort x%x "
+ "Data: x%x x%x x%x\n",
+ phba->brd_no, ndlp->nlp_DID, ndlp->nlp_flag,
+ ndlp->nlp_state, ndlp->nlp_rpi);
+
+ lpfc_nlp_list(phba, ndlp, NLP_JUST_DQ);
+
+ /*
+ * if unloading the driver - just leave the remote port in place.
+ * The driver unload will force the attached devices to detach
+ * and flush cache's w/o generating flush errors.
+ */
+ if ((ndlp->rport) && !(phba->fc_flag & FC_UNLOADING)) {
+ rport = ndlp->rport;
+ ndlp->rport = NULL;
+ fc_remote_port_unblock(rport);
+ fc_remote_port_delete(rport);
+ ndlp->nlp_sid = NLP_NO_SID;
+ }
+
+ /* cleanup any ndlp on mbox q waiting for reglogin cmpl */
+ if ((mb = phba->sli.mbox_active)) {
+ if ((mb->mb.mbxCommand == MBX_REG_LOGIN64) &&
+ (ndlp == (struct lpfc_nodelist *) mb->context2)) {
+ mb->context2 = NULL;
+ mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+ }
+ }
+ list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
+ if ((mb->mb.mbxCommand == MBX_REG_LOGIN64) &&
+ (ndlp == (struct lpfc_nodelist *) mb->context2)) {
+ mp = (struct lpfc_dmabuf *) (mb->context1);
+ if (mp) {
+ lpfc_mbuf_free(phba, mp->virt, mp->phys);
+ kfree(mp);
+ }
+ list_del(&mb->list);
+ mempool_free(mb, phba->mbox_mem_pool);
+ }
+ }
+
+ lpfc_els_abort(phba,ndlp,0);
+ spin_lock_irq(phba->host->host_lock);
+ ndlp->nlp_flag &= ~(NLP_NODEV_TMO|NLP_DELAY_TMO);
+ spin_unlock_irq(phba->host->host_lock);
+ del_timer_sync(&ndlp->nlp_tmofunc);
+
+ del_timer_sync(&ndlp->nlp_delayfunc);
+
+ if (!list_empty(&ndlp->nodev_timeout_evt.evt_listp))
+ list_del_init(&ndlp->nodev_timeout_evt.evt_listp);
+ if (!list_empty(&ndlp->els_retry_evt.evt_listp))
+ list_del_init(&ndlp->els_retry_evt.evt_listp);
+
+ lpfc_unreg_rpi(phba, ndlp);
+
+ return (0);
+}
+
+/*
+ * Check to see if we can free the nlp back to the freelist.
+ * If we are in the middle of using the nlp in the discovery state
+ * machine, defer the free till we reach the end of the state machine.
+ */
+int
+lpfc_nlp_remove(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp)
+{
+ if (ndlp->nlp_flag & NLP_NODEV_TMO) {
+ spin_lock_irq(phba->host->host_lock);
+ ndlp->nlp_flag &= ~NLP_NODEV_TMO;
+ spin_unlock_irq(phba->host->host_lock);
+ del_timer_sync(&ndlp->nlp_tmofunc);
+ if (!list_empty(&ndlp->nodev_timeout_evt.evt_listp))
+ list_del_init(&ndlp->nodev_timeout_evt.evt_listp);
+
+ }
+
+
+ if (ndlp->nlp_flag & NLP_DELAY_TMO) {
+ spin_lock_irq(phba->host->host_lock);
+ ndlp->nlp_flag &= ~NLP_DELAY_TMO;
+ spin_unlock_irq(phba->host->host_lock);
+ del_timer_sync(&ndlp->nlp_delayfunc);
+ if (!list_empty(&ndlp->els_retry_evt.evt_listp))
+ list_del_init(&ndlp->els_retry_evt.evt_listp);
+ }
+
+ if (ndlp->nlp_disc_refcnt) {
+ spin_lock_irq(phba->host->host_lock);
+ ndlp->nlp_flag |= NLP_DELAY_REMOVE;
+ spin_unlock_irq(phba->host->host_lock);
+ }
+ else {
+ lpfc_freenode(phba, ndlp);
+ mempool_free( ndlp, phba->nlp_mem_pool);
+ }
+ return(0);
+}
+
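+/*
+ * Match a nodelist entry against a DID. Beyond the direct compare,
+ * this appears to treat a zero area/domain as a wildcard so that a
+ * node known by its private loop AL_PA can still match the same
+ * device addressed through a fabric-assigned DID, and vice versa.
+ */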
+static int
+lpfc_matchdid(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp, uint32_t did)
+{
+ D_ID mydid;
+ D_ID ndlpdid;
+ D_ID matchdid;
+
+ if (did == Bcast_DID)
+ return (0);
+
+ if (ndlp->nlp_DID == 0) {
+ return (0);
+ }
+
+ /* First check for Direct match */
+ if (ndlp->nlp_DID == did)
+ return (1);
+
+ /* Next check for area/domain identically equals 0 match */
+ mydid.un.word = phba->fc_myDID;
+ if ((mydid.un.b.domain == 0) && (mydid.un.b.area == 0)) {
+ return (0);
+ }
+
+ matchdid.un.word = did;
+ ndlpdid.un.word = ndlp->nlp_DID;
+ if (matchdid.un.b.id == ndlpdid.un.b.id) {
+ if ((mydid.un.b.domain == matchdid.un.b.domain) &&
+ (mydid.un.b.area == matchdid.un.b.area)) {
+ if ((ndlpdid.un.b.domain == 0) &&
+ (ndlpdid.un.b.area == 0)) {
+ if (ndlpdid.un.b.id)
+ return (1);
+ }
+ return (0);
+ }
+
+ matchdid.un.word = ndlp->nlp_DID;
+ if ((mydid.un.b.domain == ndlpdid.un.b.domain) &&
+ (mydid.un.b.area == ndlpdid.un.b.area)) {
+ if ((matchdid.un.b.domain == 0) &&
+ (matchdid.un.b.area == 0)) {
+ if (matchdid.un.b.id)
+ return (1);
+ }
+ }
+ }
+ return (0);
+}
+
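+/*
+ * The 'order' argument is a bitmask of NLP_SEARCH_* flags selecting
+ * which lists to walk. On a hit, the logged data1 word packs
+ * state<<24 | xri<<16 | type<<8 | rpi for debugging.
+ */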
+/* Search for a nodelist entry on a specific list */
+struct lpfc_nodelist *
+lpfc_findnode_did(struct lpfc_hba * phba, uint32_t order, uint32_t did)
+{
+ struct lpfc_nodelist *ndlp, *next_ndlp;
+ uint32_t data1;
+
+ if (order & NLP_SEARCH_UNMAPPED) {
+ list_for_each_entry_safe(ndlp, next_ndlp,
+ &phba->fc_nlpunmap_list, nlp_listp) {
+ if (lpfc_matchdid(phba, ndlp, did)) {
+ data1 = (((uint32_t) ndlp->nlp_state << 24) |
+ ((uint32_t) ndlp->nlp_xri << 16) |
+ ((uint32_t) ndlp->nlp_type << 8) |
+ ((uint32_t) ndlp->nlp_rpi & 0xff));
+ /* FIND node DID unmapped */
+ lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
+ "%d:0929 FIND node DID unmapped"
+ " Data: x%p x%x x%x x%x\n",
+ phba->brd_no,
+ ndlp, ndlp->nlp_DID,
+ ndlp->nlp_flag, data1);
+ return (ndlp);
+ }
+ }
+ }
+
+ if (order & NLP_SEARCH_MAPPED) {
+ list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_nlpmap_list,
+ nlp_listp) {
+ if (lpfc_matchdid(phba, ndlp, did)) {
+
+ data1 = (((uint32_t) ndlp->nlp_state << 24) |
+ ((uint32_t) ndlp->nlp_xri << 16) |
+ ((uint32_t) ndlp->nlp_type << 8) |
+ ((uint32_t) ndlp->nlp_rpi & 0xff));
+ /* FIND node DID mapped */
+ lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
+ "%d:0930 FIND node DID mapped "
+ "Data: x%p x%x x%x x%x\n",
+ phba->brd_no,
+ ndlp, ndlp->nlp_DID,
+ ndlp->nlp_flag, data1);
+ return (ndlp);
+ }
+ }
+ }
+
+ if (order & NLP_SEARCH_PLOGI) {
+ list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_plogi_list,
+ nlp_listp) {
+ if (lpfc_matchdid(phba, ndlp, did)) {
+
+ data1 = (((uint32_t) ndlp->nlp_state << 24) |
+ ((uint32_t) ndlp->nlp_xri << 16) |
+ ((uint32_t) ndlp->nlp_type << 8) |
+ ((uint32_t) ndlp->nlp_rpi & 0xff));
+ /* LOG change to PLOGI */
+ /* FIND node DID plogi */
+ lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
+ "%d:0908 FIND node DID plogi "
+ "Data: x%p x%x x%x x%x\n",
+ phba->brd_no,
+ ndlp, ndlp->nlp_DID,
+ ndlp->nlp_flag, data1);
+ return (ndlp);
+ }
+ }
+ }
+
+ if (order & NLP_SEARCH_ADISC) {
+ list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_adisc_list,
+ nlp_listp) {
+ if (lpfc_matchdid(phba, ndlp, did)) {
+
+ data1 = (((uint32_t) ndlp->nlp_state << 24) |
+ ((uint32_t) ndlp->nlp_xri << 16) |
+ ((uint32_t) ndlp->nlp_type << 8) |
+ ((uint32_t) ndlp->nlp_rpi & 0xff));
+ /* LOG change to ADISC */
+ /* FIND node DID adisc */
+ lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
+ "%d:0931 FIND node DID adisc "
+ "Data: x%p x%x x%x x%x\n",
+ phba->brd_no,
+ ndlp, ndlp->nlp_DID,
+ ndlp->nlp_flag, data1);
+ return (ndlp);
+ }
+ }
+ }
+
+ if (order & NLP_SEARCH_REGLOGIN) {
+ list_for_each_entry_safe(ndlp, next_ndlp,
+ &phba->fc_reglogin_list, nlp_listp) {
+ if (lpfc_matchdid(phba, ndlp, did)) {
+
+ data1 = (((uint32_t) ndlp->nlp_state << 24) |
+ ((uint32_t) ndlp->nlp_xri << 16) |
+ ((uint32_t) ndlp->nlp_type << 8) |
+ ((uint32_t) ndlp->nlp_rpi & 0xff));
+ /* LOG change to REGLOGIN */
+ /* FIND node DID reglogin */
+ lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
+ "%d:0931 FIND node DID reglogin"
+ " Data: x%p x%x x%x x%x\n",
+ phba->brd_no,
+ ndlp, ndlp->nlp_DID,
+ ndlp->nlp_flag, data1);
+ return (ndlp);
+ }
+ }
+ }
+
+ if (order & NLP_SEARCH_PRLI) {
+ list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_prli_list,
+ nlp_listp) {
+ if (lpfc_matchdid(phba, ndlp, did)) {
+
+ data1 = (((uint32_t) ndlp->nlp_state << 24) |
+ ((uint32_t) ndlp->nlp_xri << 16) |
+ ((uint32_t) ndlp->nlp_type << 8) |
+ ((uint32_t) ndlp->nlp_rpi & 0xff));
+ /* LOG change to PRLI */
+ /* FIND node DID prli */
+ lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
+ "%d:0931 FIND node DID prli "
+ "Data: x%p x%x x%x x%x\n",
+ phba->brd_no,
+ ndlp, ndlp->nlp_DID,
+ ndlp->nlp_flag, data1);
+ return (ndlp);
+ }
+ }
+ }
+
+ if (order & NLP_SEARCH_NPR) {
+ list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_npr_list,
+ nlp_listp) {
+ if (lpfc_matchdid(phba, ndlp, did)) {
+
+ data1 = (((uint32_t) ndlp->nlp_state << 24) |
+ ((uint32_t) ndlp->nlp_xri << 16) |
+ ((uint32_t) ndlp->nlp_type << 8) |
+ ((uint32_t) ndlp->nlp_rpi & 0xff));
+ /* LOG change to NPR */
+ /* FIND node DID npr */
+ lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
+ "%d:0931 FIND node DID npr "
+ "Data: x%p x%x x%x x%x\n",
+ phba->brd_no,
+ ndlp, ndlp->nlp_DID,
+ ndlp->nlp_flag, data1);
+ return (ndlp);
+ }
+ }
+ }
+
+ if (order & NLP_SEARCH_UNUSED) {
+		list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_unused_list,
+ nlp_listp) {
+ if (lpfc_matchdid(phba, ndlp, did)) {
+
+ data1 = (((uint32_t) ndlp->nlp_state << 24) |
+ ((uint32_t) ndlp->nlp_xri << 16) |
+ ((uint32_t) ndlp->nlp_type << 8) |
+ ((uint32_t) ndlp->nlp_rpi & 0xff));
+ /* LOG change to UNUSED */
+ /* FIND node DID unused */
+ lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
+ "%d:0931 FIND node DID unused "
+ "Data: x%p x%x x%x x%x\n",
+ phba->brd_no,
+ ndlp, ndlp->nlp_DID,
+ ndlp->nlp_flag, data1);
+ return (ndlp);
+ }
+ }
+ }
+
+ /* FIND node did <did> NOT FOUND */
+ lpfc_printf_log(phba,
+ KERN_INFO,
+ LOG_NODE,
+ "%d:0932 FIND node did x%x NOT FOUND Data: x%x\n",
+ phba->brd_no, did, order);
+
+ /* no match found */
+ return NULL;
+}
+
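+/*
+ * Find or create a node to discover for the given DID. Returns NULL
+ * when no discovery is needed (e.g. the DID is outside the current
+ * RSCN payload, or a PLOGI/ADISC is already pending); otherwise the
+ * node is put on the NPR list and marked NLP_NPR_2B_DISC.
+ */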
+struct lpfc_nodelist *
+lpfc_setup_disc_node(struct lpfc_hba * phba, uint32_t did)
+{
+ struct lpfc_nodelist *ndlp;
+ uint32_t flg;
+
+ if ((ndlp = lpfc_findnode_did(phba, NLP_SEARCH_ALL, did)) == 0) {
+ if ((phba->hba_state == LPFC_HBA_READY) &&
+ ((lpfc_rscn_payload_check(phba, did) == 0)))
+ return NULL;
+ ndlp = (struct lpfc_nodelist *)
+ mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
+ if (!ndlp)
+ return NULL;
+ lpfc_nlp_init(phba, ndlp, did);
+ ndlp->nlp_state = NLP_STE_NPR_NODE;
+ lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST);
+ ndlp->nlp_flag |= NLP_NPR_2B_DISC;
+ return ndlp;
+ }
+ if ((phba->hba_state == LPFC_HBA_READY) &&
+ (phba->fc_flag & FC_RSCN_MODE)) {
+ if (lpfc_rscn_payload_check(phba, did)) {
+ ndlp->nlp_flag |= NLP_NPR_2B_DISC;
+ }
+ else {
+ ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
+ ndlp = NULL;
+ }
+ }
+ else {
+ flg = ndlp->nlp_flag & NLP_LIST_MASK;
+ if ((flg == NLP_ADISC_LIST) ||
+ (flg == NLP_PLOGI_LIST)) {
+ return NULL;
+ }
+ ndlp->nlp_state = NLP_STE_NPR_NODE;
+ lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST);
+ ndlp->nlp_flag |= NLP_NPR_2B_DISC;
+ }
+ return ndlp;
+}
+
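+/*
+ * alpa_map[0] holds the number of valid entries in the loop map
+ * reported by the adapter; entries 1..alpa_map[0] are the AL_PAs
+ * present on the loop.
+ */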
+/* Build a list of nodes to discover based on the loopmap */
+void
+lpfc_disc_list_loopmap(struct lpfc_hba * phba)
+{
+ int j;
+ uint32_t alpa, index;
+
+ if (phba->hba_state <= LPFC_LINK_DOWN) {
+ return;
+ }
+ if (phba->fc_topology != TOPOLOGY_LOOP) {
+ return;
+ }
+
+ /* Check for loop map present or not */
+ if (phba->alpa_map[0]) {
+ for (j = 1; j <= phba->alpa_map[0]; j++) {
+ alpa = phba->alpa_map[j];
+
+ if (((phba->fc_myDID & 0xff) == alpa) || (alpa == 0)) {
+ continue;
+ }
+ lpfc_setup_disc_node(phba, alpa);
+ }
+ } else {
+ /* No alpamap, so try all alpa's */
+ for (j = 0; j < FC_MAXLOOP; j++) {
+ /* If cfg_scan_down is set, start from highest
+ * ALPA (0xef) to lowest (0x1).
+ */
+ if (phba->cfg_scan_down)
+ index = j;
+ else
+ index = FC_MAXLOOP - j - 1;
+ alpa = lpfcAlpaArray[index];
+ if ((phba->fc_myDID & 0xff) == alpa) {
+ continue;
+ }
+
+ lpfc_setup_disc_node(phba, alpa);
+ }
+ }
+ return;
+}
+
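+/*
+ * Discovery ordering: ADISCs go out first for nodes still holding a
+ * valid login; if none were sent, either CLEAR_LA is issued (link up
+ * before the HBA is ready) or PLOGIs go out, followed by any pending
+ * RSCN processing.
+ */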
+/* Start Link up / RSCN discovery on NPR list */
+void
+lpfc_disc_start(struct lpfc_hba * phba)
+{
+ struct lpfc_sli *psli;
+ LPFC_MBOXQ_t *mbox;
+ struct lpfc_nodelist *ndlp, *next_ndlp;
+ uint32_t did_changed, num_sent;
+ uint32_t clear_la_pending;
+ int rc;
+
+ psli = &phba->sli;
+
+ if (phba->hba_state <= LPFC_LINK_DOWN) {
+ return;
+ }
+ if (phba->hba_state == LPFC_CLEAR_LA)
+ clear_la_pending = 1;
+ else
+ clear_la_pending = 0;
+
+ if (phba->hba_state < LPFC_HBA_READY) {
+ phba->hba_state = LPFC_DISC_AUTH;
+ }
+ lpfc_set_disctmo(phba);
+
+ if (phba->fc_prevDID == phba->fc_myDID) {
+ did_changed = 0;
+ } else {
+ did_changed = 1;
+ }
+ phba->fc_prevDID = phba->fc_myDID;
+ phba->num_disc_nodes = 0;
+
+ /* Start Discovery state <hba_state> */
+ lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
+ "%d:0202 Start Discovery hba state x%x "
+ "Data: x%x x%x x%x\n",
+ phba->brd_no, phba->hba_state, phba->fc_flag,
+ phba->fc_plogi_cnt, phba->fc_adisc_cnt);
+
+ /* If our did changed, we MUST do PLOGI */
+ list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_npr_list,
+ nlp_listp) {
+ if (ndlp->nlp_flag & NLP_NPR_2B_DISC) {
+ if (did_changed) {
+ spin_lock_irq(phba->host->host_lock);
+ ndlp->nlp_flag &= ~NLP_NPR_ADISC;
+ spin_unlock_irq(phba->host->host_lock);
+ }
+ }
+ }
+
+ /* First do ADISCs - if any */
+ num_sent = lpfc_els_disc_adisc(phba);
+
+ if (num_sent)
+ return;
+
+ if ((phba->hba_state < LPFC_HBA_READY) && (!clear_la_pending)) {
+ /* If we get here, there is nothing to ADISC */
+ if ((mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL))) {
+ phba->hba_state = LPFC_CLEAR_LA;
+ lpfc_clear_la(phba, mbox);
+ mbox->mbox_cmpl = lpfc_mbx_cmpl_clear_la;
+ rc = lpfc_sli_issue_mbox(phba, mbox,
+ (MBX_NOWAIT | MBX_STOP_IOCB));
+ if (rc == MBX_NOT_FINISHED) {
+ mempool_free( mbox, phba->mbox_mem_pool);
+ lpfc_disc_flush_list(phba);
+ psli->ring[(psli->ip_ring)].flag &=
+ ~LPFC_STOP_IOCB_EVENT;
+ psli->ring[(psli->fcp_ring)].flag &=
+ ~LPFC_STOP_IOCB_EVENT;
+ psli->ring[(psli->next_ring)].flag &=
+ ~LPFC_STOP_IOCB_EVENT;
+ phba->hba_state = LPFC_HBA_READY;
+ }
+ }
+ } else {
+ /* Next do PLOGIs - if any */
+ num_sent = lpfc_els_disc_plogi(phba);
+
+ if (num_sent)
+ return;
+
+ if (phba->fc_flag & FC_RSCN_MODE) {
+ /* Check to see if more RSCNs came in while we
+ * were processing this one.
+ */
+ if ((phba->fc_rscn_id_cnt == 0) &&
+ (!(phba->fc_flag & FC_RSCN_DISCOVERY))) {
+ spin_lock_irq(phba->host->host_lock);
+ phba->fc_flag &= ~FC_RSCN_MODE;
+ spin_unlock_irq(phba->host->host_lock);
+ }
+ else
+ lpfc_els_handle_rscn(phba);
+ }
+ }
+ return;
+}
+
+/*
+ * Ignore completion for all IOCBs on the tx and txcmpl queues of the
+ * ELS ring that match the specified nodelist.
+ */
+static void
+lpfc_free_tx(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp)
+{
+ struct lpfc_sli *psli;
+ IOCB_t *icmd;
+ struct lpfc_iocbq *iocb, *next_iocb;
+ struct lpfc_sli_ring *pring;
+ struct lpfc_dmabuf *mp;
+
+ psli = &phba->sli;
+ pring = &psli->ring[LPFC_ELS_RING];
+
+	/* Complete matching iocbs on the txq and txcmplq with an error.
+ * First check the txq.
+ */
+ list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
+ if (iocb->context1 != ndlp) {
+ continue;
+ }
+ icmd = &iocb->iocb;
+ if ((icmd->ulpCommand == CMD_ELS_REQUEST64_CR) ||
+ (icmd->ulpCommand == CMD_XMIT_ELS_RSP64_CX)) {
+
+ list_del(&iocb->list);
+ pring->txq_cnt--;
+ lpfc_els_free_iocb(phba, iocb);
+ }
+ }
+
+ /* Next check the txcmplq */
+ list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) {
+ if (iocb->context1 != ndlp) {
+ continue;
+ }
+ icmd = &iocb->iocb;
+ if ((icmd->ulpCommand == CMD_ELS_REQUEST64_CR) ||
+ (icmd->ulpCommand == CMD_XMIT_ELS_RSP64_CX)) {
+
+ iocb->iocb_cmpl = NULL;
+ /* context2 = cmd, context2->next = rsp, context3 =
+ bpl */
+ if (iocb->context2) {
+ /* Free the response IOCB before handling the
+ command. */
+
+ mp = (struct lpfc_dmabuf *) (iocb->context2);
+ mp = list_get_first(&mp->list,
+ struct lpfc_dmabuf,
+ list);
+ if (mp) {
+ /* Delay before releasing rsp buffer to
+ * give UNREG mbox a chance to take
+ * effect.
+ */
+ list_add(&mp->list,
+ &phba->freebufList);
+ }
+ lpfc_mbuf_free(phba,
+ ((struct lpfc_dmabuf *)
+ iocb->context2)->virt,
+ ((struct lpfc_dmabuf *)
+ iocb->context2)->phys);
+ kfree(iocb->context2);
+ }
+
+ if (iocb->context3) {
+ lpfc_mbuf_free(phba,
+ ((struct lpfc_dmabuf *)
+ iocb->context3)->virt,
+ ((struct lpfc_dmabuf *)
+ iocb->context3)->phys);
+ kfree(iocb->context3);
+ }
+ }
+ }
+
+ return;
+}
+
+void
+lpfc_disc_flush_list(struct lpfc_hba * phba)
+{
+ struct lpfc_nodelist *ndlp, *next_ndlp;
+
+ if (phba->fc_plogi_cnt) {
+ list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_plogi_list,
+ nlp_listp) {
+ lpfc_free_tx(phba, ndlp);
+ lpfc_nlp_remove(phba, ndlp);
+ }
+ }
+ if (phba->fc_adisc_cnt) {
+ list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_adisc_list,
+ nlp_listp) {
+ lpfc_free_tx(phba, ndlp);
+ lpfc_nlp_remove(phba, ndlp);
+ }
+ }
+ return;
+}
+
+/*****************************************************************************/
+/*
+ * NAME: lpfc_disc_timeout
+ *
+ * FUNCTION: Fibre Channel driver discovery timeout routine.
+ *
+ * EXECUTION ENVIRONMENT: interrupt only
+ *
+ * CALLED FROM:
+ * Timer function
+ *
+ * RETURNS:
+ * none
+ */
+/*****************************************************************************/
+void
+lpfc_disc_timeout(unsigned long ptr)
+{
+ struct lpfc_hba *phba = (struct lpfc_hba *)ptr;
+ unsigned long flags = 0;
+
+ if (unlikely(!phba))
+ return;
+
+ spin_lock_irqsave(phba->host->host_lock, flags);
+ if (!(phba->work_hba_events & WORKER_DISC_TMO)) {
+ phba->work_hba_events |= WORKER_DISC_TMO;
+ if (phba->work_wait)
+ wake_up(phba->work_wait);
+ }
+ spin_unlock_irqrestore(phba->host->host_lock, flags);
+ return;
+}
+
+static void
+lpfc_disc_timeout_handler(struct lpfc_hba *phba)
+{
+ struct lpfc_sli *psli;
+ struct lpfc_nodelist *ndlp;
+ LPFC_MBOXQ_t *clearlambox, *initlinkmbox;
+ int rc, clrlaerr = 0;
+
+ if (unlikely(!phba))
+ return;
+
+ if (!(phba->fc_flag & FC_DISC_TMO))
+ return;
+
+ psli = &phba->sli;
+
+ spin_lock_irq(phba->host->host_lock);
+ phba->fc_flag &= ~FC_DISC_TMO;
+ spin_unlock_irq(phba->host->host_lock);
+
+ switch (phba->hba_state) {
+
+ case LPFC_LOCAL_CFG_LINK:
+ /* hba_state is identically LPFC_LOCAL_CFG_LINK while waiting for FAN */
+ /* FAN timeout */
+ lpfc_printf_log(phba,
+ KERN_WARNING,
+ LOG_DISCOVERY,
+ "%d:0221 FAN timeout\n",
+ phba->brd_no);
+
+ /* Forget about FAN, Start discovery by sending a FLOGI
+ * hba_state is identically LPFC_FLOGI while waiting for FLOGI
+ * cmpl
+ */
+ phba->hba_state = LPFC_FLOGI;
+ lpfc_set_disctmo(phba);
+ lpfc_initial_flogi(phba);
+ break;
+
+ case LPFC_FLOGI:
+ /* hba_state is identically LPFC_FLOGI while waiting for FLOGI cmpl */
+ /* Initial FLOGI timeout */
+ lpfc_printf_log(phba,
+ KERN_ERR,
+ LOG_DISCOVERY,
+ "%d:0222 Initial FLOGI timeout\n",
+ phba->brd_no);
+
+ /* Assume no Fabric and go on with discovery.
+ * Check for outstanding ELS FLOGI to abort.
+ */
+
+ /* FLOGI failed, so just use loop map to make discovery list */
+ lpfc_disc_list_loopmap(phba);
+
+ /* Start discovery */
+ lpfc_disc_start(phba);
+ break;
+
+ case LPFC_FABRIC_CFG_LINK:
+ /* hba_state is identically LPFC_FABRIC_CFG_LINK while waiting for
+ NameServer login */
+ lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
+ "%d:0223 Timeout while waiting for NameServer "
+ "login\n", phba->brd_no);
+
+ /* Next look for NameServer ndlp */
+ ndlp = lpfc_findnode_did(phba, NLP_SEARCH_ALL, NameServer_DID);
+ if (ndlp)
+ lpfc_nlp_remove(phba, ndlp);
+ /* Start discovery */
+ lpfc_disc_start(phba);
+ break;
+
+ case LPFC_NS_QRY:
+ /* Check for wait for NameServer Rsp timeout */
+ lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
+ "%d:0224 NameServer Query timeout "
+ "Data: x%x x%x\n",
+ phba->brd_no,
+ phba->fc_ns_retry, LPFC_MAX_NS_RETRY);
+
+ ndlp = lpfc_findnode_did(phba, NLP_SEARCH_UNMAPPED,
+ NameServer_DID);
+ if (ndlp) {
+ if (phba->fc_ns_retry < LPFC_MAX_NS_RETRY) {
+ /* Try it one more time */
+ rc = lpfc_ns_cmd(phba, ndlp, SLI_CTNS_GID_FT);
+ if (rc == 0)
+ break;
+ }
+ phba->fc_ns_retry = 0;
+ }
+
+ /* Nothing to authenticate, so CLEAR_LA right now */
+ clearlambox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!clearlambox) {
+ clrlaerr = 1;
+ lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
+ "%d:0226 Device Discovery "
+ "completion error\n",
+ phba->brd_no);
+ phba->hba_state = LPFC_HBA_ERROR;
+ break;
+ }
+
+ phba->hba_state = LPFC_CLEAR_LA;
+ lpfc_clear_la(phba, clearlambox);
+ clearlambox->mbox_cmpl = lpfc_mbx_cmpl_clear_la;
+ rc = lpfc_sli_issue_mbox(phba, clearlambox,
+ (MBX_NOWAIT | MBX_STOP_IOCB));
+ if (rc == MBX_NOT_FINISHED) {
+ mempool_free(clearlambox, phba->mbox_mem_pool);
+ clrlaerr = 1;
+ break;
+ }
+
+ /* Setup and issue mailbox INITIALIZE LINK command */
+ initlinkmbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!initlinkmbox) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
+ "%d:0226 Device Discovery "
+ "completion error\n",
+ phba->brd_no);
+ phba->hba_state = LPFC_HBA_ERROR;
+ break;
+ }
+
+ lpfc_linkdown(phba);
+ lpfc_init_link(phba, initlinkmbox, phba->cfg_topology,
+ phba->cfg_link_speed);
+ initlinkmbox->mb.un.varInitLnk.lipsr_AL_PA = 0;
+ rc = lpfc_sli_issue_mbox(phba, initlinkmbox,
+ (MBX_NOWAIT | MBX_STOP_IOCB));
+ if (rc == MBX_NOT_FINISHED)
+ mempool_free(initlinkmbox, phba->mbox_mem_pool);
+
+ break;
+
+ case LPFC_DISC_AUTH:
+ /* Node Authentication timeout */
+ lpfc_printf_log(phba,
+ KERN_ERR,
+ LOG_DISCOVERY,
+ "%d:0227 Node Authentication timeout\n",
+ phba->brd_no);
+ lpfc_disc_flush_list(phba);
+ clearlambox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!clearlambox) {
+ clrlaerr = 1;
+ lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
+ "%d:0226 Device Discovery "
+ "completion error\n",
+ phba->brd_no);
+ phba->hba_state = LPFC_HBA_ERROR;
+ break;
+ }
+ phba->hba_state = LPFC_CLEAR_LA;
+ lpfc_clear_la(phba, clearlambox);
+ clearlambox->mbox_cmpl = lpfc_mbx_cmpl_clear_la;
+ rc = lpfc_sli_issue_mbox(phba, clearlambox,
+ (MBX_NOWAIT | MBX_STOP_IOCB));
+ if (rc == MBX_NOT_FINISHED) {
+ mempool_free(clearlambox, phba->mbox_mem_pool);
+ clrlaerr = 1;
+ }
+ break;
+
+ case LPFC_CLEAR_LA:
+ /* CLEAR LA timeout */
+ lpfc_printf_log(phba,
+ KERN_ERR,
+ LOG_DISCOVERY,
+ "%d:0228 CLEAR LA timeout\n",
+ phba->brd_no);
+ clrlaerr = 1;
+ break;
+
+ case LPFC_HBA_READY:
+ if (phba->fc_flag & FC_RSCN_MODE) {
+ lpfc_printf_log(phba,
+ KERN_ERR,
+ LOG_DISCOVERY,
+ "%d:0231 RSCN timeout Data: x%x x%x\n",
+ phba->brd_no,
+ phba->fc_ns_retry, LPFC_MAX_NS_RETRY);
+
+ /* Cleanup any outstanding ELS commands */
+ lpfc_els_flush_cmd(phba);
+
+ lpfc_els_flush_rscn(phba);
+ lpfc_disc_flush_list(phba);
+ }
+ break;
+ }
+
+ if (clrlaerr) {
+ lpfc_disc_flush_list(phba);
+ psli->ring[(psli->ip_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
+ psli->ring[(psli->fcp_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
+ psli->ring[(psli->next_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
+ phba->hba_state = LPFC_HBA_READY;
+ }
+
+ return;
+}
+
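+/*
+ * Timer callback for the per-node nodev timeout. It runs in timer
+ * context, so it only queues an LPFC_EVT_NODEV_TMO event on the work
+ * list and wakes the worker thread, which does the real processing.
+ */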
+static void
+lpfc_nodev_timeout(unsigned long ptr)
+{
+ struct lpfc_hba *phba;
+ struct lpfc_nodelist *ndlp;
+ unsigned long iflag;
+ struct lpfc_work_evt *evtp;
+
+ ndlp = (struct lpfc_nodelist *)ptr;
+ phba = ndlp->nlp_phba;
+ evtp = &ndlp->nodev_timeout_evt;
+ spin_lock_irqsave(phba->host->host_lock, iflag);
+
+ if (!list_empty(&evtp->evt_listp)) {
+ spin_unlock_irqrestore(phba->host->host_lock, iflag);
+ return;
+ }
+ evtp->evt_arg1 = ndlp;
+ evtp->evt = LPFC_EVT_NODEV_TMO;
+ list_add_tail(&evtp->evt_listp, &phba->work_list);
+ if (phba->work_wait)
+ wake_up(phba->work_wait);
+
+ spin_unlock_irqrestore(phba->host->host_lock, iflag);
+ return;
+}
+
+
+/*
+ * This routine handles processing an FDMI REG_LOGIN mailbox
+ * command upon completion. It is setup in the LPFC_MBOXQ
+ * as the completion routine when the command is
+ * handed off to the SLI layer.
+ */
+void
+lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
+{
+ struct lpfc_sli *psli;
+ MAILBOX_t *mb;
+ struct lpfc_dmabuf *mp;
+ struct lpfc_nodelist *ndlp;
+
+ psli = &phba->sli;
+ mb = &pmb->mb;
+
+ ndlp = (struct lpfc_nodelist *) pmb->context2;
+ mp = (struct lpfc_dmabuf *) (pmb->context1);
+
+ pmb->context1 = NULL;
+
+ if (ndlp->nlp_rpi != 0)
+ lpfc_findnode_remove_rpi(phba, ndlp->nlp_rpi);
+ ndlp->nlp_rpi = mb->un.varWords[0];
+ lpfc_addnode_rpi(phba, ndlp, ndlp->nlp_rpi);
+ ndlp->nlp_type |= NLP_FABRIC;
+ ndlp->nlp_state = NLP_STE_UNMAPPED_NODE;
+ lpfc_nlp_list(phba, ndlp, NLP_UNMAPPED_LIST);
+
+ /* Start issuing Fabric-Device Management Interface (FDMI)
+ * command to 0xfffffa (FDMI well known port)
+ */
+ if (phba->cfg_fdmi_on == 1) {
+ lpfc_fdmi_cmd(phba, ndlp, SLI_MGMT_DHBA);
+ } else {
+ /*
+ * Delay issuing FDMI command if fdmi-on=2
+		 * (supporting RPA/hostname)
+ */
+ mod_timer(&phba->fc_fdmitmo, jiffies + HZ * 60);
+ }
+
+ lpfc_mbuf_free(phba, mp->virt, mp->phys);
+ kfree(mp);
+ mempool_free( pmb, phba->mbox_mem_pool);
+
+ return;
+}
+
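+/*
+ * The RPI lookup table is a simple hash: each fc_nlplookup[] bucket
+ * anchors a singly linked chain through nlp_rpi_hash_next, and
+ * LPFC_RPI_HASH_FUNC() maps an RPI to its bucket index.
+ */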
+/*
+ * This routine looks up the ndlp hash table
+ * for the given RPI. If the RPI is found it
+ * returns the node list pointer, else it
+ * returns NULL.
+ */
+struct lpfc_nodelist *
+lpfc_findnode_rpi(struct lpfc_hba * phba, uint16_t rpi)
+{
+ struct lpfc_nodelist *ret;
+
+ ret = phba->fc_nlplookup[LPFC_RPI_HASH_FUNC(rpi)];
+ while ((ret != 0) && (ret->nlp_rpi != rpi)) {
+ ret = ret->nlp_rpi_hash_next;
+ }
+ return ret;
+}
+
+/*
+ * This routine looks up the ndlp hash table for the
+ * given RPI. If the RPI is found, the entry is removed
+ * from the hash table and the node list pointer is
+ * returned; otherwise NULL is returned.
+ */
+struct lpfc_nodelist *
+lpfc_findnode_remove_rpi(struct lpfc_hba * phba, uint16_t rpi)
+{
+	struct lpfc_nodelist *ret, *temp;
+
+ ret = phba->fc_nlplookup[LPFC_RPI_HASH_FUNC(rpi)];
+ if (ret == 0)
+ return NULL;
+
+ if (ret->nlp_rpi == rpi) {
+ phba->fc_nlplookup[LPFC_RPI_HASH_FUNC(rpi)] =
+ ret->nlp_rpi_hash_next;
+ ret->nlp_rpi_hash_next = NULL;
+ return ret;
+ }
+
+ while ((ret->nlp_rpi_hash_next != 0) &&
+ (ret->nlp_rpi_hash_next->nlp_rpi != rpi)) {
+ ret = ret->nlp_rpi_hash_next;
+ }
+
+ if (ret->nlp_rpi_hash_next != 0) {
+ temp = ret->nlp_rpi_hash_next;
+ ret->nlp_rpi_hash_next = temp->nlp_rpi_hash_next;
+ temp->nlp_rpi_hash_next = NULL;
+ return temp;
+ } else {
+ return NULL;
+ }
+}
+
+/*
+ * This routine adds the node list entry to the
+ * ndlp hash table.
+ */
+void
+lpfc_addnode_rpi(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp,
+ uint16_t rpi)
+{
+
+ uint32_t index;
+
+ index = LPFC_RPI_HASH_FUNC(rpi);
+ ndlp->nlp_rpi_hash_next = phba->fc_nlplookup[index];
+ phba->fc_nlplookup[index] = ndlp;
+ return;
+}
+
+void
+lpfc_nlp_init(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp,
+ uint32_t did)
+{
+ memset(ndlp, 0, sizeof (struct lpfc_nodelist));
+ INIT_LIST_HEAD(&ndlp->nodev_timeout_evt.evt_listp);
+ INIT_LIST_HEAD(&ndlp->els_retry_evt.evt_listp);
+ init_timer(&ndlp->nlp_tmofunc);
+ ndlp->nlp_tmofunc.function = lpfc_nodev_timeout;
+ ndlp->nlp_tmofunc.data = (unsigned long)ndlp;
+ init_timer(&ndlp->nlp_delayfunc);
+ ndlp->nlp_delayfunc.function = lpfc_els_retry_delay;
+ ndlp->nlp_delayfunc.data = (unsigned long)ndlp;
+ ndlp->nlp_DID = did;
+ ndlp->nlp_phba = phba;
+ ndlp->nlp_sid = NLP_NO_SID;
+ return;
+}
diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h
new file mode 100644
index 00000000000..fc958a99dad
--- /dev/null
+++ b/drivers/scsi/lpfc/lpfc_hw.h
@@ -0,0 +1,2687 @@
+/*******************************************************************
+ * This file is part of the Emulex Linux Device Driver for *
+ * Enterprise Fibre Channel Host Bus Adapters. *
+ * Refer to the README file included with this package for *
+ * driver version and adapter support. *
+ * Copyright (C) 2004 Emulex Corporation. *
+ * www.emulex.com *
+ * *
+ * This program is free software; you can redistribute it and/or *
+ * modify it under the terms of the GNU General Public License *
+ * as published by the Free Software Foundation; either version 2 *
+ * of the License, or (at your option) any later version. *
+ * *
+ * This program is distributed in the hope that it will be useful, *
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of *
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
+ * GNU General Public License for more details, a copy of which *
+ * can be found in the file COPYING included with this package. *
+ *******************************************************************/
+
+/*
+ * $Id: lpfc_hw.h 1.37 2005/03/29 19:51:45EST sf_support Exp $
+ */
+
+#define FDMI_DID 0xfffffaU
+#define NameServer_DID 0xfffffcU
+#define SCR_DID 0xfffffdU
+#define Fabric_DID 0xfffffeU
+#define Bcast_DID 0xffffffU
+#define Mask_DID 0xffffffU
+#define CT_DID_MASK 0xffff00U
+#define Fabric_DID_MASK 0xfff000U
+#define WELL_KNOWN_DID_MASK 0xfffff0U
+
+#define PT2PT_LocalID 1
+#define PT2PT_RemoteID 2
+
+#define FF_DEF_EDTOV 2000 /* Default E_D_TOV (2000ms) */
+#define FF_DEF_ALTOV 15 /* Default AL_TIME (15ms) */
+#define FF_DEF_RATOV 2 /* Default RA_TOV (2s) */
+#define FF_DEF_ARBTOV 1900 /* Default ARB_TOV (1900ms) */
+
+#define LPFC_BUF_RING0 64 /* Number of buffers to post to RING
+ 0 */
+
+#define FCELSSIZE 1024 /* maximum ELS transfer size */
+
+#define LPFC_FCP_RING 0 /* ring 0 for FCP initiator commands */
+#define LPFC_IP_RING 1 /* ring 1 for IP commands */
+#define LPFC_ELS_RING 2 /* ring 2 for ELS commands */
+#define LPFC_FCP_NEXT_RING 3
+
+#define SLI2_IOCB_CMD_R0_ENTRIES 172 /* SLI-2 FCP command ring entries */
+#define SLI2_IOCB_RSP_R0_ENTRIES 134 /* SLI-2 FCP response ring entries */
+#define SLI2_IOCB_CMD_R1_ENTRIES 4 /* SLI-2 IP command ring entries */
+#define SLI2_IOCB_RSP_R1_ENTRIES 4 /* SLI-2 IP response ring entries */
+#define SLI2_IOCB_CMD_R1XTRA_ENTRIES 36 /* SLI-2 extra FCP cmd ring entries */
+#define SLI2_IOCB_RSP_R1XTRA_ENTRIES 52 /* SLI-2 extra FCP rsp ring entries */
+#define SLI2_IOCB_CMD_R2_ENTRIES 20 /* SLI-2 ELS command ring entries */
+#define SLI2_IOCB_RSP_R2_ENTRIES 20 /* SLI-2 ELS response ring entries */
+#define SLI2_IOCB_CMD_R3_ENTRIES 0
+#define SLI2_IOCB_RSP_R3_ENTRIES 0
+#define SLI2_IOCB_CMD_R3XTRA_ENTRIES 24
+#define SLI2_IOCB_RSP_R3XTRA_ENTRIES 32
+
+/* Common Transport structures and definitions */
+
+union CtRevisionId {
+ /* Structure is in Big Endian format */
+ struct {
+ uint32_t Revision:8;
+ uint32_t InId:24;
+ } bits;
+ uint32_t word;
+};
+
+union CtCommandResponse {
+ /* Structure is in Big Endian format */
+ struct {
+ uint32_t CmdRsp:16;
+ uint32_t Size:16;
+ } bits;
+ uint32_t word;
+};
+
+struct lpfc_sli_ct_request {
+ /* Structure is in Big Endian format */
+ union CtRevisionId RevisionId;
+ uint8_t FsType;
+ uint8_t FsSubType;
+ uint8_t Options;
+ uint8_t Rsrvd1;
+ union CtCommandResponse CommandResponse;
+ uint8_t Rsrvd2;
+ uint8_t ReasonCode;
+ uint8_t Explanation;
+ uint8_t VendorUnique;
+
+ union {
+ uint32_t PortID;
+ struct gid {
+ uint8_t PortType; /* for GID_PT requests */
+ uint8_t DomainScope;
+ uint8_t AreaScope;
+ uint8_t Fc4Type; /* for GID_FT requests */
+ } gid;
+ struct rft {
+ uint32_t PortId; /* For RFT_ID requests */
+
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t rsvd0:16;
+ uint32_t rsvd1:7;
+ uint32_t fcpReg:1; /* Type 8 */
+ uint32_t rsvd2:2;
+ uint32_t ipReg:1; /* Type 5 */
+ uint32_t rsvd3:5;
+#else /* __LITTLE_ENDIAN_BITFIELD */
+ uint32_t rsvd0:16;
+ uint32_t fcpReg:1; /* Type 8 */
+ uint32_t rsvd1:7;
+ uint32_t rsvd3:5;
+ uint32_t ipReg:1; /* Type 5 */
+ uint32_t rsvd2:2;
+#endif
+
+ uint32_t rsvd[7];
+ } rft;
+ struct rnn {
+ uint32_t PortId; /* For RNN_ID requests */
+ uint8_t wwnn[8];
+ } rnn;
+ struct rsnn { /* For RSNN_ID requests */
+ uint8_t wwnn[8];
+ uint8_t len;
+ uint8_t symbname[255];
+ } rsnn;
+ } un;
+};
+
+#define SLI_CT_REVISION 1
+#define GID_REQUEST_SZ (sizeof(struct lpfc_sli_ct_request) - 260)
+#define RFT_REQUEST_SZ (sizeof(struct lpfc_sli_ct_request) - 228)
+#define RNN_REQUEST_SZ (sizeof(struct lpfc_sli_ct_request) - 252)
+#define RSNN_REQUEST_SZ (sizeof(struct lpfc_sli_ct_request))
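+/*
+ * Each request size above is sizeof(struct lpfc_sli_ct_request) with
+ * the unused tail of the 'un' payload union trimmed off, so only the
+ * bytes meaningful to that command are transmitted.
+ */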
+
+/*
+ * FsType Definitions
+ */
+
+#define SLI_CT_MANAGEMENT_SERVICE 0xFA
+#define SLI_CT_TIME_SERVICE 0xFB
+#define SLI_CT_DIRECTORY_SERVICE 0xFC
+#define SLI_CT_FABRIC_CONTROLLER_SERVICE 0xFD
+
+/*
+ * Directory Service Subtypes
+ */
+
+#define SLI_CT_DIRECTORY_NAME_SERVER 0x02
+
+/*
+ * Response Codes
+ */
+
+#define SLI_CT_RESPONSE_FS_RJT 0x8001
+#define SLI_CT_RESPONSE_FS_ACC 0x8002
+
+/*
+ * Reason Codes
+ */
+
+#define SLI_CT_NO_ADDITIONAL_EXPL 0x0
+#define SLI_CT_INVALID_COMMAND 0x01
+#define SLI_CT_INVALID_VERSION 0x02
+#define SLI_CT_LOGICAL_ERROR 0x03
+#define SLI_CT_INVALID_IU_SIZE 0x04
+#define SLI_CT_LOGICAL_BUSY 0x05
+#define SLI_CT_PROTOCOL_ERROR 0x07
+#define SLI_CT_UNABLE_TO_PERFORM_REQ 0x09
+#define SLI_CT_REQ_NOT_SUPPORTED 0x0b
+#define SLI_CT_HBA_INFO_NOT_REGISTERED 0x10
+#define SLI_CT_MULTIPLE_HBA_ATTR_OF_SAME_TYPE 0x11
+#define SLI_CT_INVALID_HBA_ATTR_BLOCK_LEN 0x12
+#define SLI_CT_HBA_ATTR_NOT_PRESENT 0x13
+#define SLI_CT_PORT_INFO_NOT_REGISTERED 0x20
+#define SLI_CT_MULTIPLE_PORT_ATTR_OF_SAME_TYPE 0x21
+#define SLI_CT_INVALID_PORT_ATTR_BLOCK_LEN 0x22
+#define SLI_CT_VENDOR_UNIQUE 0xff
+
+/*
+ * Name Server SLI_CT_UNABLE_TO_PERFORM_REQ Explanations
+ */
+
+#define SLI_CT_NO_PORT_ID 0x01
+#define SLI_CT_NO_PORT_NAME 0x02
+#define SLI_CT_NO_NODE_NAME 0x03
+#define SLI_CT_NO_CLASS_OF_SERVICE 0x04
+#define SLI_CT_NO_IP_ADDRESS 0x05
+#define SLI_CT_NO_IPA 0x06
+#define SLI_CT_NO_FC4_TYPES 0x07
+#define SLI_CT_NO_SYMBOLIC_PORT_NAME 0x08
+#define SLI_CT_NO_SYMBOLIC_NODE_NAME 0x09
+#define SLI_CT_NO_PORT_TYPE 0x0A
+#define SLI_CT_ACCESS_DENIED 0x10
+#define SLI_CT_INVALID_PORT_ID 0x11
+#define SLI_CT_DATABASE_EMPTY 0x12
+
+/*
+ * Name Server Command Codes
+ */
+
+#define SLI_CTNS_GA_NXT 0x0100
+#define SLI_CTNS_GPN_ID 0x0112
+#define SLI_CTNS_GNN_ID 0x0113
+#define SLI_CTNS_GCS_ID 0x0114
+#define SLI_CTNS_GFT_ID 0x0117
+#define SLI_CTNS_GSPN_ID 0x0118
+#define SLI_CTNS_GPT_ID 0x011A
+#define SLI_CTNS_GID_PN 0x0121
+#define SLI_CTNS_GID_NN 0x0131
+#define SLI_CTNS_GIP_NN 0x0135
+#define SLI_CTNS_GIPA_NN 0x0136
+#define SLI_CTNS_GSNN_NN 0x0139
+#define SLI_CTNS_GNN_IP 0x0153
+#define SLI_CTNS_GIPA_IP 0x0156
+#define SLI_CTNS_GID_FT 0x0171
+#define SLI_CTNS_GID_PT 0x01A1
+#define SLI_CTNS_RPN_ID 0x0212
+#define SLI_CTNS_RNN_ID 0x0213
+#define SLI_CTNS_RCS_ID 0x0214
+#define SLI_CTNS_RFT_ID 0x0217
+#define SLI_CTNS_RSPN_ID 0x0218
+#define SLI_CTNS_RPT_ID 0x021A
+#define SLI_CTNS_RIP_NN 0x0235
+#define SLI_CTNS_RIPA_NN 0x0236
+#define SLI_CTNS_RSNN_NN 0x0239
+#define SLI_CTNS_DA_ID 0x0300
+
+/*
+ * Port Types
+ */
+
+#define SLI_CTPT_N_PORT 0x01
+#define SLI_CTPT_NL_PORT 0x02
+#define SLI_CTPT_FNL_PORT 0x03
+#define SLI_CTPT_IP 0x04
+#define SLI_CTPT_FCP 0x08
+#define SLI_CTPT_NX_PORT 0x7F
+#define SLI_CTPT_F_PORT 0x81
+#define SLI_CTPT_FL_PORT 0x82
+#define SLI_CTPT_E_PORT 0x84
+
+#define SLI_CT_LAST_ENTRY 0x80000000
+
+/* Fibre Channel Service Parameter definitions */
+
+#define FC_PH_4_0 6 /* FC-PH version 4.0 */
+#define FC_PH_4_1 7 /* FC-PH version 4.1 */
+#define FC_PH_4_2 8 /* FC-PH version 4.2 */
+#define FC_PH_4_3 9 /* FC-PH version 4.3 */
+
+#define FC_PH_LOW 8 /* Lowest supported FC-PH version */
+#define FC_PH_HIGH 9 /* Highest supported FC-PH version */
+#define FC_PH3 0x20 /* FC-PH-3 version */
+
+#define FF_FRAME_SIZE 2048
+
+struct lpfc_name {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint8_t nameType:4; /* FC Word 0, bit 28:31 */
+ uint8_t IEEEextMsn:4; /* FC Word 0, bit 24:27, bit 8:11 of IEEE ext */
+#else /* __LITTLE_ENDIAN_BITFIELD */
+ uint8_t IEEEextMsn:4; /* FC Word 0, bit 24:27, bit 8:11 of IEEE ext */
+ uint8_t nameType:4; /* FC Word 0, bit 28:31 */
+#endif
+
+#define NAME_IEEE 0x1 /* IEEE name - nameType */
+#define NAME_IEEE_EXT 0x2 /* IEEE extended name */
+#define NAME_FC_TYPE 0x3 /* FC native name type */
+#define NAME_IP_TYPE 0x4 /* IP address */
+#define NAME_CCITT_TYPE 0xC
+#define NAME_CCITT_GR_TYPE 0xE
+ uint8_t IEEEextLsb; /* FC Word 0, bit 16:23, IEEE extended Lsb */
+ uint8_t IEEE[6]; /* FC IEEE address */
+};
+
+struct csp {
+ uint8_t fcphHigh; /* FC Word 0, byte 0 */
+ uint8_t fcphLow;
+ uint8_t bbCreditMsb;
+ uint8_t bbCreditlsb; /* FC Word 0, byte 3 */
+
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint16_t increasingOffset:1; /* FC Word 1, bit 31 */
+ uint16_t randomOffset:1; /* FC Word 1, bit 30 */
+ uint16_t word1Reserved2:1; /* FC Word 1, bit 29 */
+ uint16_t fPort:1; /* FC Word 1, bit 28 */
+ uint16_t altBbCredit:1; /* FC Word 1, bit 27 */
+ uint16_t edtovResolution:1; /* FC Word 1, bit 26 */
+ uint16_t multicast:1; /* FC Word 1, bit 25 */
+ uint16_t broadcast:1; /* FC Word 1, bit 24 */
+
+ uint16_t huntgroup:1; /* FC Word 1, bit 23 */
+ uint16_t simplex:1; /* FC Word 1, bit 22 */
+ uint16_t word1Reserved1:3; /* FC Word 1, bit 21:19 */
+ uint16_t dhd:1; /* FC Word 1, bit 18 */
+ uint16_t contIncSeqCnt:1; /* FC Word 1, bit 17 */
+ uint16_t payloadlength:1; /* FC Word 1, bit 16 */
+#else /* __LITTLE_ENDIAN_BITFIELD */
+ uint16_t broadcast:1; /* FC Word 1, bit 24 */
+ uint16_t multicast:1; /* FC Word 1, bit 25 */
+ uint16_t edtovResolution:1; /* FC Word 1, bit 26 */
+ uint16_t altBbCredit:1; /* FC Word 1, bit 27 */
+ uint16_t fPort:1; /* FC Word 1, bit 28 */
+ uint16_t word1Reserved2:1; /* FC Word 1, bit 29 */
+ uint16_t randomOffset:1; /* FC Word 1, bit 30 */
+ uint16_t increasingOffset:1; /* FC Word 1, bit 31 */
+
+ uint16_t payloadlength:1; /* FC Word 1, bit 16 */
+ uint16_t contIncSeqCnt:1; /* FC Word 1, bit 17 */
+ uint16_t dhd:1; /* FC Word 1, bit 18 */
+ uint16_t word1Reserved1:3; /* FC Word 1, bit 21:19 */
+ uint16_t simplex:1; /* FC Word 1, bit 22 */
+ uint16_t huntgroup:1; /* FC Word 1, bit 23 */
+#endif
+
+ uint8_t bbRcvSizeMsb; /* Upper nibble is reserved */
+ uint8_t bbRcvSizeLsb; /* FC Word 1, byte 3 */
+ union {
+ struct {
+ uint8_t word2Reserved1; /* FC Word 2 byte 0 */
+
+ uint8_t totalConcurrSeq; /* FC Word 2 byte 1 */
+ uint8_t roByCategoryMsb; /* FC Word 2 byte 2 */
+
+ uint8_t roByCategoryLsb; /* FC Word 2 byte 3 */
+ } nPort;
+ uint32_t r_a_tov; /* R_A_TOV must be in B.E. format */
+ } w2;
+
+ uint32_t e_d_tov; /* E_D_TOV must be in B.E. format */
+};
+
+struct class_parms {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint8_t classValid:1; /* FC Word 0, bit 31 */
+ uint8_t intermix:1; /* FC Word 0, bit 30 */
+ uint8_t stackedXparent:1; /* FC Word 0, bit 29 */
+ uint8_t stackedLockDown:1; /* FC Word 0, bit 28 */
+ uint8_t seqDelivery:1; /* FC Word 0, bit 27 */
+ uint8_t word0Reserved1:3; /* FC Word 0, bit 24:26 */
+#else /* __LITTLE_ENDIAN_BITFIELD */
+ uint8_t word0Reserved1:3; /* FC Word 0, bit 24:26 */
+ uint8_t seqDelivery:1; /* FC Word 0, bit 27 */
+ uint8_t stackedLockDown:1; /* FC Word 0, bit 28 */
+ uint8_t stackedXparent:1; /* FC Word 0, bit 29 */
+ uint8_t intermix:1; /* FC Word 0, bit 30 */
+ uint8_t classValid:1; /* FC Word 0, bit 31 */
+
+#endif
+
+ uint8_t word0Reserved2; /* FC Word 0, bit 16:23 */
+
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint8_t iCtlXidReAssgn:2; /* FC Word 0, Bit 14:15 */
+ uint8_t iCtlInitialPa:2; /* FC Word 0, bit 12:13 */
+ uint8_t iCtlAck0capable:1; /* FC Word 0, bit 11 */
+ uint8_t iCtlAckNcapable:1; /* FC Word 0, bit 10 */
+ uint8_t word0Reserved3:2; /* FC Word 0, bit 8: 9 */
+#else /* __LITTLE_ENDIAN_BITFIELD */
+ uint8_t word0Reserved3:2; /* FC Word 0, bit 8: 9 */
+ uint8_t iCtlAckNcapable:1; /* FC Word 0, bit 10 */
+ uint8_t iCtlAck0capable:1; /* FC Word 0, bit 11 */
+ uint8_t iCtlInitialPa:2; /* FC Word 0, bit 12:13 */
+ uint8_t iCtlXidReAssgn:2; /* FC Word 0, Bit 14:15 */
+#endif
+
+ uint8_t word0Reserved4; /* FC Word 0, bit 0: 7 */
+
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint8_t rCtlAck0capable:1; /* FC Word 1, bit 31 */
+ uint8_t rCtlAckNcapable:1; /* FC Word 1, bit 30 */
+ uint8_t rCtlXidInterlck:1; /* FC Word 1, bit 29 */
+ uint8_t rCtlErrorPolicy:2; /* FC Word 1, bit 27:28 */
+ uint8_t word1Reserved1:1; /* FC Word 1, bit 26 */
+ uint8_t rCtlCatPerSeq:2; /* FC Word 1, bit 24:25 */
+#else /* __LITTLE_ENDIAN_BITFIELD */
+ uint8_t rCtlCatPerSeq:2; /* FC Word 1, bit 24:25 */
+ uint8_t word1Reserved1:1; /* FC Word 1, bit 26 */
+ uint8_t rCtlErrorPolicy:2; /* FC Word 1, bit 27:28 */
+ uint8_t rCtlXidInterlck:1; /* FC Word 1, bit 29 */
+ uint8_t rCtlAckNcapable:1; /* FC Word 1, bit 30 */
+ uint8_t rCtlAck0capable:1; /* FC Word 1, bit 31 */
+#endif
+
+ uint8_t word1Reserved2; /* FC Word 1, bit 16:23 */
+ uint8_t rcvDataSizeMsb; /* FC Word 1, bit 8:15 */
+ uint8_t rcvDataSizeLsb; /* FC Word 1, bit 0: 7 */
+
+ uint8_t concurrentSeqMsb; /* FC Word 2, bit 24:31 */
+ uint8_t concurrentSeqLsb; /* FC Word 2, bit 16:23 */
+ uint8_t EeCreditSeqMsb; /* FC Word 2, bit 8:15 */
+ uint8_t EeCreditSeqLsb; /* FC Word 2, bit 0: 7 */
+
+ uint8_t openSeqPerXchgMsb; /* FC Word 3, bit 24:31 */
+ uint8_t openSeqPerXchgLsb; /* FC Word 3, bit 16:23 */
+ uint8_t word3Reserved1; /* Fc Word 3, bit 8:15 */
+ uint8_t word3Reserved2; /* Fc Word 3, bit 0: 7 */
+};
+
+struct serv_parm { /* Structure is in Big Endian format */
+ struct csp cmn;
+ struct lpfc_name portName;
+ struct lpfc_name nodeName;
+ struct class_parms cls1;
+ struct class_parms cls2;
+ struct class_parms cls3;
+ struct class_parms cls4;
+ uint8_t vendorVersion[16];
+};
+
+/*
+ * Extended Link Service LS_COMMAND codes (Payload Word 0)
+ */
+#ifdef __BIG_ENDIAN_BITFIELD
+#define ELS_CMD_MASK 0xffff0000
+#define ELS_RSP_MASK 0xff000000
+#define ELS_CMD_LS_RJT 0x01000000
+#define ELS_CMD_ACC 0x02000000
+#define ELS_CMD_PLOGI 0x03000000
+#define ELS_CMD_FLOGI 0x04000000
+#define ELS_CMD_LOGO 0x05000000
+#define ELS_CMD_ABTX 0x06000000
+#define ELS_CMD_RCS 0x07000000
+#define ELS_CMD_RES 0x08000000
+#define ELS_CMD_RSS 0x09000000
+#define ELS_CMD_RSI 0x0A000000
+#define ELS_CMD_ESTS 0x0B000000
+#define ELS_CMD_ESTC 0x0C000000
+#define ELS_CMD_ADVC 0x0D000000
+#define ELS_CMD_RTV 0x0E000000
+#define ELS_CMD_RLS 0x0F000000
+#define ELS_CMD_ECHO 0x10000000
+#define ELS_CMD_TEST 0x11000000
+#define ELS_CMD_RRQ 0x12000000
+#define ELS_CMD_PRLI 0x20100014
+#define ELS_CMD_PRLO 0x21100014
+#define ELS_CMD_PDISC 0x50000000
+#define ELS_CMD_FDISC 0x51000000
+#define ELS_CMD_ADISC 0x52000000
+#define ELS_CMD_FARP 0x54000000
+#define ELS_CMD_FARPR 0x55000000
+#define ELS_CMD_FAN 0x60000000
+#define ELS_CMD_RSCN 0x61040000
+#define ELS_CMD_SCR 0x62000000
+#define ELS_CMD_RNID 0x78000000
+#else /* __LITTLE_ENDIAN_BITFIELD */
+#define ELS_CMD_MASK 0xffff
+#define ELS_RSP_MASK 0xff
+#define ELS_CMD_LS_RJT 0x01
+#define ELS_CMD_ACC 0x02
+#define ELS_CMD_PLOGI 0x03
+#define ELS_CMD_FLOGI 0x04
+#define ELS_CMD_LOGO 0x05
+#define ELS_CMD_ABTX 0x06
+#define ELS_CMD_RCS 0x07
+#define ELS_CMD_RES 0x08
+#define ELS_CMD_RSS 0x09
+#define ELS_CMD_RSI 0x0A
+#define ELS_CMD_ESTS 0x0B
+#define ELS_CMD_ESTC 0x0C
+#define ELS_CMD_ADVC 0x0D
+#define ELS_CMD_RTV 0x0E
+#define ELS_CMD_RLS 0x0F
+#define ELS_CMD_ECHO 0x10
+#define ELS_CMD_TEST 0x11
+#define ELS_CMD_RRQ 0x12
+#define ELS_CMD_PRLI 0x14001020
+#define ELS_CMD_PRLO 0x14001021
+#define ELS_CMD_PDISC 0x50
+#define ELS_CMD_FDISC 0x51
+#define ELS_CMD_ADISC 0x52
+#define ELS_CMD_FARP 0x54
+#define ELS_CMD_FARPR 0x55
+#define ELS_CMD_FAN 0x60
+#define ELS_CMD_RSCN 0x0461
+#define ELS_CMD_SCR 0x62
+#define ELS_CMD_RNID 0x78
+#endif
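+/*
+ * Note that the little-endian definitions above are byte-swapped
+ * images of the big-endian ones (e.g. PRLI 0x20100014 vs 0x14001020),
+ * so the first payload word of an ELS frame, which is big-endian on
+ * the wire, can be compared against these constants without an
+ * explicit swap.
+ */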
+
+/*
+ * LS_RJT Payload Definition
+ */
+
+struct ls_rjt { /* Structure is in Big Endian format */
+ union {
+ uint32_t lsRjtError;
+ struct {
+ uint8_t lsRjtRsvd0; /* FC Word 0, bit 24:31 */
+
+ uint8_t lsRjtRsnCode; /* FC Word 0, bit 16:23 */
+ /* LS_RJT reason codes */
+#define LSRJT_INVALID_CMD 0x01
+#define LSRJT_LOGICAL_ERR 0x03
+#define LSRJT_LOGICAL_BSY 0x05
+#define LSRJT_PROTOCOL_ERR 0x07
+#define LSRJT_UNABLE_TPC 0x09 /* Unable to perform command */
+#define LSRJT_CMD_UNSUPPORTED 0x0B
+#define LSRJT_VENDOR_UNIQUE 0xFF /* See Byte 3 */
+
+ uint8_t lsRjtRsnCodeExp; /* FC Word 0, bit 8:15 */
+ /* LS_RJT reason explanation */
+#define LSEXP_NOTHING_MORE 0x00
+#define LSEXP_SPARM_OPTIONS 0x01
+#define LSEXP_SPARM_ICTL 0x03
+#define LSEXP_SPARM_RCTL 0x05
+#define LSEXP_SPARM_RCV_SIZE 0x07
+#define LSEXP_SPARM_CONCUR_SEQ 0x09
+#define LSEXP_SPARM_CREDIT 0x0B
+#define LSEXP_INVALID_PNAME 0x0D
+#define LSEXP_INVALID_NNAME 0x0E
+#define LSEXP_INVALID_CSP 0x0F
+#define LSEXP_INVALID_ASSOC_HDR 0x11
+#define LSEXP_ASSOC_HDR_REQ 0x13
+#define LSEXP_INVALID_O_SID 0x15
+#define LSEXP_INVALID_OX_RX 0x17
+#define LSEXP_CMD_IN_PROGRESS 0x19
+#define LSEXP_INVALID_NPORT_ID 0x1F
+#define LSEXP_INVALID_SEQ_ID 0x21
+#define LSEXP_INVALID_XCHG 0x23
+#define LSEXP_INACTIVE_XCHG 0x25
+#define LSEXP_RQ_REQUIRED 0x27
+#define LSEXP_OUT_OF_RESOURCE 0x29
+#define LSEXP_CANT_GIVE_DATA 0x2A
+#define LSEXP_REQ_UNSUPPORTED 0x2C
+ uint8_t vendorUnique; /* FC Word 0, bit 0: 7 */
+ } b;
+ } un;
+};
+
+/*
+ * N_Port Login (FLOGO/PLOGO Request) Payload Definition
+ */
+
+typedef struct _LOGO { /* Structure is in Big Endian format */
+ union {
+ uint32_t nPortId32; /* Access nPortId as a word */
+ struct {
+ uint8_t word1Reserved1; /* FC Word 1, bit 31:24 */
+ uint8_t nPortIdByte0; /* N_port ID bit 16:23 */
+ uint8_t nPortIdByte1; /* N_port ID bit 8:15 */
+ uint8_t nPortIdByte2; /* N_port ID bit 0: 7 */
+ } b;
+ } un;
+ struct lpfc_name portName; /* N_port name field */
+} LOGO;
+
+/*
+ * FCP Login (PRLI Request / ACC) Payload Definition
+ */
+
+#define PRLX_PAGE_LEN 0x10
+#define TPRLO_PAGE_LEN 0x14
+
+typedef struct _PRLI { /* Structure is in Big Endian format */
+ uint8_t prliType; /* FC Parm Word 0, bit 24:31 */
+
+#define PRLI_FCP_TYPE 0x08
+ uint8_t word0Reserved1; /* FC Parm Word 0, bit 16:23 */
+
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint8_t origProcAssocV:1; /* FC Parm Word 0, bit 15 */
+ uint8_t respProcAssocV:1; /* FC Parm Word 0, bit 14 */
+ uint8_t estabImagePair:1; /* FC Parm Word 0, bit 13 */
+
+ /* ACC = imagePairEstablished */
+ uint8_t word0Reserved2:1; /* FC Parm Word 0, bit 12 */
+ uint8_t acceptRspCode:4; /* FC Parm Word 0, bit 8:11, ACC ONLY */
+#else /* __LITTLE_ENDIAN_BITFIELD */
+ uint8_t acceptRspCode:4; /* FC Parm Word 0, bit 8:11, ACC ONLY */
+ uint8_t word0Reserved2:1; /* FC Parm Word 0, bit 12 */
+ uint8_t estabImagePair:1; /* FC Parm Word 0, bit 13 */
+ uint8_t respProcAssocV:1; /* FC Parm Word 0, bit 14 */
+ uint8_t origProcAssocV:1; /* FC Parm Word 0, bit 15 */
+ /* ACC = imagePairEstablished */
+#endif
+
+#define PRLI_REQ_EXECUTED 0x1 /* acceptRspCode */
+#define PRLI_NO_RESOURCES 0x2
+#define PRLI_INIT_INCOMPLETE 0x3
+#define PRLI_NO_SUCH_PA 0x4
+#define PRLI_PREDEF_CONFIG 0x5
+#define PRLI_PARTIAL_SUCCESS 0x6
+#define PRLI_INVALID_PAGE_CNT 0x7
+ uint8_t word0Reserved3; /* FC Parm Word 0, bit 0:7 */
+
+ uint32_t origProcAssoc; /* FC Parm Word 1, bit 0:31 */
+
+ uint32_t respProcAssoc; /* FC Parm Word 2, bit 0:31 */
+
+ uint8_t word3Reserved1; /* FC Parm Word 3, bit 24:31 */
+ uint8_t word3Reserved2; /* FC Parm Word 3, bit 16:23 */
+
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint16_t Word3bit15Resved:1; /* FC Parm Word 3, bit 15 */
+ uint16_t Word3bit14Resved:1; /* FC Parm Word 3, bit 14 */
+ uint16_t Word3bit13Resved:1; /* FC Parm Word 3, bit 13 */
+ uint16_t Word3bit12Resved:1; /* FC Parm Word 3, bit 12 */
+ uint16_t Word3bit11Resved:1; /* FC Parm Word 3, bit 11 */
+ uint16_t Word3bit10Resved:1; /* FC Parm Word 3, bit 10 */
+ uint16_t TaskRetryIdReq:1; /* FC Parm Word 3, bit 9 */
+ uint16_t Retry:1; /* FC Parm Word 3, bit 8 */
+ uint16_t ConfmComplAllowed:1; /* FC Parm Word 3, bit 7 */
+ uint16_t dataOverLay:1; /* FC Parm Word 3, bit 6 */
+ uint16_t initiatorFunc:1; /* FC Parm Word 3, bit 5 */
+ uint16_t targetFunc:1; /* FC Parm Word 3, bit 4 */
+ uint16_t cmdDataMixEna:1; /* FC Parm Word 3, bit 3 */
+ uint16_t dataRspMixEna:1; /* FC Parm Word 3, bit 2 */
+ uint16_t readXferRdyDis:1; /* FC Parm Word 3, bit 1 */
+ uint16_t writeXferRdyDis:1; /* FC Parm Word 3, bit 0 */
+#else /* __LITTLE_ENDIAN_BITFIELD */
+ uint16_t Retry:1; /* FC Parm Word 3, bit 8 */
+ uint16_t TaskRetryIdReq:1; /* FC Parm Word 3, bit 9 */
+ uint16_t Word3bit10Resved:1; /* FC Parm Word 3, bit 10 */
+ uint16_t Word3bit11Resved:1; /* FC Parm Word 3, bit 11 */
+ uint16_t Word3bit12Resved:1; /* FC Parm Word 3, bit 12 */
+ uint16_t Word3bit13Resved:1; /* FC Parm Word 3, bit 13 */
+ uint16_t Word3bit14Resved:1; /* FC Parm Word 3, bit 14 */
+ uint16_t Word3bit15Resved:1; /* FC Parm Word 3, bit 15 */
+ uint16_t writeXferRdyDis:1; /* FC Parm Word 3, bit 0 */
+ uint16_t readXferRdyDis:1; /* FC Parm Word 3, bit 1 */
+ uint16_t dataRspMixEna:1; /* FC Parm Word 3, bit 2 */
+ uint16_t cmdDataMixEna:1; /* FC Parm Word 3, bit 3 */
+ uint16_t targetFunc:1; /* FC Parm Word 3, bit 4 */
+ uint16_t initiatorFunc:1; /* FC Parm Word 3, bit 5 */
+ uint16_t dataOverLay:1; /* FC Parm Word 3, bit 6 */
+ uint16_t ConfmComplAllowed:1; /* FC Parm Word 3, bit 7 */
+#endif
+} PRLI;
+
+/*
+ * FCP Logout (PRLO Request / ACC) Payload Definition
+ */
+
+typedef struct _PRLO { /* Structure is in Big Endian format */
+ uint8_t prloType; /* FC Parm Word 0, bit 24:31 */
+
+#define PRLO_FCP_TYPE 0x08
+ uint8_t word0Reserved1; /* FC Parm Word 0, bit 16:23 */
+
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint8_t origProcAssocV:1; /* FC Parm Word 0, bit 15 */
+ uint8_t respProcAssocV:1; /* FC Parm Word 0, bit 14 */
+ uint8_t word0Reserved2:2; /* FC Parm Word 0, bit 12:13 */
+ uint8_t acceptRspCode:4; /* FC Parm Word 0, bit 8:11, ACC ONLY */
+#else /* __LITTLE_ENDIAN_BITFIELD */
+ uint8_t acceptRspCode:4; /* FC Parm Word 0, bit 8:11, ACC ONLY */
+ uint8_t word0Reserved2:2; /* FC Parm Word 0, bit 12:13 */
+ uint8_t respProcAssocV:1; /* FC Parm Word 0, bit 14 */
+ uint8_t origProcAssocV:1; /* FC Parm Word 0, bit 15 */
+#endif
+
+#define PRLO_REQ_EXECUTED 0x1 /* acceptRspCode */
+#define PRLO_NO_SUCH_IMAGE 0x4
+#define PRLO_INVALID_PAGE_CNT 0x7
+
+ uint8_t word0Reserved3; /* FC Parm Word 0, bit 0:7 */
+
+ uint32_t origProcAssoc; /* FC Parm Word 1, bit 0:31 */
+
+ uint32_t respProcAssoc; /* FC Parm Word 2, bit 0:31 */
+
+ uint32_t word3Reserved1; /* FC Parm Word 3, bit 0:31 */
+} PRLO;
+
+typedef struct _ADISC { /* Structure is in Big Endian format */
+ uint32_t hardAL_PA;
+ struct lpfc_name portName;
+ struct lpfc_name nodeName;
+ uint32_t DID;
+} ADISC;
+
+typedef struct _FARP { /* Structure is in Big Endian format */
+ uint32_t Mflags:8;
+ uint32_t Odid:24;
+#define FARP_NO_ACTION 0 /* FARP information enclosed, no
+ action */
+#define FARP_MATCH_PORT 0x1 /* Match on Responder Port Name */
+#define FARP_MATCH_NODE 0x2 /* Match on Responder Node Name */
+#define FARP_MATCH_IP 0x4 /* Match on IP address, not supported */
+#define FARP_MATCH_IPV4 0x5 /* Match on IPV4 address, not
+ supported */
+#define FARP_MATCH_IPV6 0x6 /* Match on IPV6 address, not
+ supported */
+ uint32_t Rflags:8;
+ uint32_t Rdid:24;
+#define FARP_REQUEST_PLOGI 0x1 /* Request for PLOGI */
+#define FARP_REQUEST_FARPR 0x2 /* Request for FARP Response */
+ struct lpfc_name OportName;
+ struct lpfc_name OnodeName;
+ struct lpfc_name RportName;
+ struct lpfc_name RnodeName;
+ uint8_t Oipaddr[16];
+ uint8_t Ripaddr[16];
+} FARP;
+
+typedef struct _FAN { /* Structure is in Big Endian format */
+ uint32_t Fdid;
+ struct lpfc_name FportName;
+ struct lpfc_name FnodeName;
+} FAN;
+
+typedef struct _SCR { /* Structure is in Big Endian format */
+ uint8_t resvd1;
+ uint8_t resvd2;
+ uint8_t resvd3;
+ uint8_t Function;
+#define SCR_FUNC_FABRIC 0x01
+#define SCR_FUNC_NPORT 0x02
+#define SCR_FUNC_FULL 0x03
+#define SCR_CLEAR 0xff
+} SCR;
+
+typedef struct _RNID_TOP_DISC {
+ struct lpfc_name portName;
+ uint8_t resvd[8];
+ uint32_t unitType;
+#define RNID_HBA 0x7
+#define RNID_HOST 0xa
+#define RNID_DRIVER 0xd
+ uint32_t physPort;
+ uint32_t attachedNodes;
+ uint16_t ipVersion;
+#define RNID_IPV4 0x1
+#define RNID_IPV6 0x2
+ uint16_t UDPport;
+ uint8_t ipAddr[16];
+ uint16_t resvd1;
+ uint16_t flags;
+#define RNID_TD_SUPPORT 0x1
+#define RNID_LP_VALID 0x2
+} RNID_TOP_DISC;
+
+typedef struct _RNID { /* Structure is in Big Endian format */
+ uint8_t Format;
+#define RNID_TOPOLOGY_DISC 0xdf
+ uint8_t CommonLen;
+ uint8_t resvd1;
+ uint8_t SpecificLen;
+ struct lpfc_name portName;
+ struct lpfc_name nodeName;
+ union {
+ RNID_TOP_DISC topologyDisc; /* topology disc (0xdf) */
+ } un;
+} RNID;
+
+typedef struct _RRQ { /* Structure is in Big Endian format */
+ uint32_t SID;
+ uint16_t Oxid;
+ uint16_t Rxid;
+ uint8_t resv[32]; /* optional association hdr */
+} RRQ;
+
+/* This is used for RSCN command */
+typedef struct _D_ID { /* Structure is in Big Endian format */
+ union {
+ uint32_t word;
+ struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint8_t resv;
+ uint8_t domain;
+ uint8_t area;
+ uint8_t id;
+#else /* __LITTLE_ENDIAN_BITFIELD */
+ uint8_t id;
+ uint8_t area;
+ uint8_t domain;
+ uint8_t resv;
+#endif
+ } b;
+ } un;
+} D_ID;
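+
+/*
+ * Illustrative sketch (not from the original source): each affected
+ * address word in an RSCN payload can be decoded through this union
+ * once it has been converted to host order, e.g.:
+ *
+ *	D_ID rscn_did;
+ *
+ *	rscn_did.un.word = be32_to_cpu(payload_word);
+ *	(rscn_did.un.b.domain, .area and .id then hold the address octets)
+ */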
+
+/*
+ * Structure to define all ELS Payload types
+ */
+
+typedef struct _ELS_PKT { /* Structure is in Big Endian format */
+ uint8_t elsCode; /* FC Word 0, bit 24:31 */
+ uint8_t elsByte1;
+ uint8_t elsByte2;
+ uint8_t elsByte3;
+ union {
+ struct ls_rjt lsRjt; /* Payload for LS_RJT ELS response */
+ struct serv_parm logi; /* Payload for PLOGI/FLOGI/PDISC/ACC */
+ LOGO logo; /* Payload for PLOGO/FLOGO/ACC */
+ PRLI prli; /* Payload for PRLI/ACC */
+ PRLO prlo; /* Payload for PRLO/ACC */
+ ADISC adisc; /* Payload for ADISC/ACC */
+ FARP farp; /* Payload for FARP/ACC */
+ FAN fan; /* Payload for FAN */
+ SCR scr; /* Payload for SCR/ACC */
+ RRQ rrq; /* Payload for RRQ */
+ RNID rnid; /* Payload for RNID */
+ uint8_t pad[128 - 4]; /* Pad out to payload of 128 bytes */
+ } un;
+} ELS_PKT;
+
+/*
+ * FDMI
+ * HBA Management Operations Command Codes
+ */
+#define SLI_MGMT_GRHL 0x100 /* Get registered HBA list */
+#define SLI_MGMT_GHAT 0x101 /* Get HBA attributes */
+#define SLI_MGMT_GRPL 0x102 /* Get registered Port list */
+#define SLI_MGMT_GPAT 0x110 /* Get Port attributes */
+#define SLI_MGMT_RHBA 0x200 /* Register HBA */
+#define SLI_MGMT_RHAT 0x201 /* Register HBA attributes */
+#define SLI_MGMT_RPRT 0x210 /* Register Port */
+#define SLI_MGMT_RPA 0x211 /* Register Port attributes */
+#define SLI_MGMT_DHBA 0x300 /* De-register HBA */
+#define SLI_MGMT_DPRT 0x310 /* De-register Port */
+
+/*
+ * Management Service Subtypes
+ */
+#define SLI_CT_FDMI_Subtypes 0x10
+
+/*
+ * HBA Management Service Reject Code
+ */
+#define REJECT_CODE 0x9 /* Unable to perform command request */
+
+/*
+ * HBA Management Service Reject Reason Code
+ * Please refer to the Reason Codes above
+ */
+
+/*
+ * HBA Attribute Types
+ */
+#define NODE_NAME 0x1
+#define MANUFACTURER 0x2
+#define SERIAL_NUMBER 0x3
+#define MODEL 0x4
+#define MODEL_DESCRIPTION 0x5
+#define HARDWARE_VERSION 0x6
+#define DRIVER_VERSION 0x7
+#define OPTION_ROM_VERSION 0x8
+#define FIRMWARE_VERSION 0x9
+#define OS_NAME_VERSION 0xa
+#define MAX_CT_PAYLOAD_LEN 0xb
+
+/*
+ * Port Attribute Types
+ */
+#define SUPPORTED_FC4_TYPES 0x1
+#define SUPPORTED_SPEED 0x2
+#define PORT_SPEED 0x3
+#define MAX_FRAME_SIZE 0x4
+#define OS_DEVICE_NAME 0x5
+#define HOST_NAME 0x6
+
+union AttributesDef {
+ /* Structure is in Big Endian format */
+ struct {
+ uint32_t AttrType:16;
+ uint32_t AttrLen:16;
+ } bits;
+ uint32_t word;
+};
+
+
+/*
+ * HBA Attribute Entry (8 - 260 bytes)
+ */
+typedef struct {
+ union AttributesDef ad;
+ union {
+ uint32_t VendorSpecific;
+ uint8_t Manufacturer[64];
+ uint8_t SerialNumber[64];
+ uint8_t Model[256];
+ uint8_t ModelDescription[256];
+ uint8_t HardwareVersion[256];
+ uint8_t DriverVersion[256];
+ uint8_t OptionROMVersion[256];
+ uint8_t FirmwareVersion[256];
+ struct lpfc_name NodeName;
+ uint8_t SupportFC4Types[32];
+ uint32_t SupportSpeed;
+ uint32_t PortSpeed;
+ uint32_t MaxFrameSize;
+ uint8_t OsDeviceName[256];
+ uint8_t OsNameVersion[256];
+ uint32_t MaxCTPayloadLen;
+ uint8_t HostName[256];
+ } un;
+} ATTRIBUTE_ENTRY;
+
+/*
+ * HBA Attribute Block
+ */
+typedef struct {
+ uint32_t EntryCnt; /* Number of HBA attribute entries */
+ ATTRIBUTE_ENTRY Entry; /* Variable-length array */
+} ATTRIBUTE_BLOCK;
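+
+/*
+ * Illustrative sketch (an assumption, not from the original source):
+ * attribute entries are variable length, so a block would be walked
+ * by stepping AttrLen bytes at a time, assuming AttrLen covers the
+ * 4-byte AttributesDef header plus the padded value:
+ *
+ *	ATTRIBUTE_ENTRY *ae = &ab->Entry;
+ *	uint32_t i;
+ *
+ *	for (i = 0; i < ab->EntryCnt; i++)
+ *		ae = (ATTRIBUTE_ENTRY *)((uint8_t *)ae +
+ *					 ae->ad.bits.AttrLen);
+ */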
+
+/*
+ * Port Entry
+ */
+typedef struct {
+ struct lpfc_name PortName;
+} PORT_ENTRY;
+
+/*
+ * HBA Identifier
+ */
+typedef struct {
+ struct lpfc_name PortName;
+} HBA_IDENTIFIER;
+
+/*
+ * Registered Port List Format
+ */
+typedef struct {
+ uint32_t EntryCnt;
+ PORT_ENTRY pe; /* Variable-length array */
+} REG_PORT_LIST;
+
+/*
+ * Register HBA(RHBA)
+ */
+typedef struct {
+ HBA_IDENTIFIER hi;
+ REG_PORT_LIST rpl; /* variable-length array */
+/* ATTRIBUTE_BLOCK ab; */
+} REG_HBA;
+
+/*
+ * Register HBA Attributes (RHAT)
+ */
+typedef struct {
+ struct lpfc_name HBA_PortName;
+ ATTRIBUTE_BLOCK ab;
+} REG_HBA_ATTRIBUTE;
+
+/*
+ * Register Port Attributes (RPA)
+ */
+typedef struct {
+ struct lpfc_name PortName;
+ ATTRIBUTE_BLOCK ab;
+} REG_PORT_ATTRIBUTE;
+
+/*
+ * Get Registered HBA List (GRHL) Accept Payload Format
+ */
+typedef struct {
+ uint32_t HBA__Entry_Cnt; /* Number of Registered HBA Identifiers */
+ struct lpfc_name HBA_PortName; /* Variable-length array */
+} GRHL_ACC_PAYLOAD;
+
+/*
+ * Get Registered Port List (GRPL) Accept Payload Format
+ */
+typedef struct {
+ uint32_t RPL_Entry_Cnt; /* Number of Registered Port Entries */
+ PORT_ENTRY Reg_Port_Entry[1]; /* Variable-length array */
+} GRPL_ACC_PAYLOAD;
+
+/*
+ * Get Port Attributes (GPAT) Accept Payload Format
+ */
+
+typedef struct {
+ ATTRIBUTE_BLOCK pab;
+} GPAT_ACC_PAYLOAD;
+
+
+/*
+ * Begin HBA configuration parameters.
+ * The PCI configuration register BAR assignments are:
+ * BAR0, offset 0x10 - SLIM base memory address
+ * BAR1, offset 0x14 - SLIM base memory high address
+ * BAR2, offset 0x18 - REGISTER base memory address
+ * BAR3, offset 0x1c - REGISTER base memory high address
+ * BAR4, offset 0x20 - BIU I/O registers
+ * BAR5, offset 0x24 - REGISTER base io high address
+ */
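+
+/*
+ * Illustrative sketch (not part of the original driver): a PCI probe
+ * routine would typically map the SLIM and register BARs before
+ * touching any of the registers defined below, e.g.:
+ *
+ *	unsigned long slim_base = pci_resource_start(pdev, 0);
+ *	unsigned long reg_base = pci_resource_start(pdev, 2);
+ *	void __iomem *slim = ioremap_nocache(slim_base, SLI2_SLIM_SIZE);
+ *	void __iomem *regs = ioremap_nocache(reg_base, FF_REG_AREA_SIZE);
+ */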
+
+/* Number of rings currently used and available. */
+#define MAX_CONFIGURED_RINGS 3
+#define MAX_RINGS 4
+
+/* IOCB / Mailbox is owned by FireFly */
+#define OWN_CHIP 1
+
+/* IOCB / Mailbox is owned by Host */
+#define OWN_HOST 0
+
+/* Number of 4-byte words in an IOCB. */
+#define IOCB_WORD_SZ 8
+
+/* defines for type field in fc header */
+#define FC_ELS_DATA 0x1
+#define FC_LLC_SNAP 0x5
+#define FC_FCP_DATA 0x8
+#define FC_COMMON_TRANSPORT_ULP 0x20
+
+/* defines for rctl field in fc header */
+#define FC_DEV_DATA 0x0
+#define FC_UNSOL_CTL 0x2
+#define FC_SOL_CTL 0x3
+#define FC_UNSOL_DATA 0x4
+#define FC_FCP_CMND 0x6
+#define FC_ELS_REQ 0x22
+#define FC_ELS_RSP 0x23
+
+/* network headers for Dfctl field */
+#define FC_NET_HDR 0x20
+
+/* Start FireFly Register definitions */
+#define PCI_VENDOR_ID_EMULEX 0x10df
+#define PCI_DEVICE_ID_FIREFLY 0x1ae5
+#define PCI_DEVICE_ID_SUPERFLY 0xf700
+#define PCI_DEVICE_ID_DRAGONFLY 0xf800
+#define PCI_DEVICE_ID_RFLY 0xf095
+#define PCI_DEVICE_ID_PFLY 0xf098
+#define PCI_DEVICE_ID_TFLY 0xf0a5
+#define PCI_DEVICE_ID_CENTAUR 0xf900
+#define PCI_DEVICE_ID_PEGASUS 0xf980
+#define PCI_DEVICE_ID_THOR 0xfa00
+#define PCI_DEVICE_ID_VIPER 0xfb00
+#define PCI_DEVICE_ID_HELIOS 0xfd00
+#define PCI_DEVICE_ID_BMID 0xf0d5
+#define PCI_DEVICE_ID_BSMB 0xf0d1
+#define PCI_DEVICE_ID_ZEPHYR 0xfe00
+#define PCI_DEVICE_ID_ZMID 0xf0e5
+#define PCI_DEVICE_ID_ZSMB 0xf0e1
+#define PCI_DEVICE_ID_LP101 0xf0a1
+#define PCI_DEVICE_ID_LP10000S 0xfc00
+
+#define JEDEC_ID_ADDRESS 0x0080001c
+#define FIREFLY_JEDEC_ID 0x1ACC
+#define SUPERFLY_JEDEC_ID 0x0020
+#define DRAGONFLY_JEDEC_ID 0x0021
+#define DRAGONFLY_V2_JEDEC_ID 0x0025
+#define CENTAUR_2G_JEDEC_ID 0x0026
+#define CENTAUR_1G_JEDEC_ID 0x0028
+#define PEGASUS_ORION_JEDEC_ID 0x0036
+#define PEGASUS_JEDEC_ID 0x0038
+#define THOR_JEDEC_ID 0x0012
+#define HELIOS_JEDEC_ID 0x0364
+#define ZEPHYR_JEDEC_ID 0x0577
+#define VIPER_JEDEC_ID 0x4838
+
+#define JEDEC_ID_MASK 0x0FFFF000
+#define JEDEC_ID_SHIFT 12
+#define FC_JEDEC_ID(id) ((id & JEDEC_ID_MASK) >> JEDEC_ID_SHIFT)
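+
+/*
+ * Illustrative use (a sketch, not from the original source): the word
+ * read from JEDEC_ID_ADDRESS carries the chip ID in bits 27:12, which
+ * FC_JEDEC_ID() masks and shifts out, e.g.:
+ *
+ *	if (FC_JEDEC_ID(jedec_word) == HELIOS_JEDEC_ID)
+ *		(apply Helios-specific handling)
+ */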
+
+typedef struct { /* FireFly BIU registers */
+ uint32_t hostAtt; /* See definitions for Host Attention
+ register */
+ uint32_t chipAtt; /* See definitions for Chip Attention
+ register */
+ uint32_t hostStatus; /* See definitions for Host Status register */
+ uint32_t hostControl; /* See definitions for Host Control register */
+ uint32_t buiConfig; /* See definitions for BIU configuration
+ register */
+} FF_REGS;
+
+/* IO Register size in bytes */
+#define FF_REG_AREA_SIZE 256
+
+/* Host Attention Register */
+
+#define HA_REG_OFFSET 0 /* Byte offset from register base address */
+
+#define HA_R0RE_REQ 0x00000001 /* Bit 0 */
+#define HA_R0CE_RSP 0x00000002 /* Bit 1 */
+#define HA_R0ATT 0x00000008 /* Bit 3 */
+#define HA_R1RE_REQ 0x00000010 /* Bit 4 */
+#define HA_R1CE_RSP 0x00000020 /* Bit 5 */
+#define HA_R1ATT 0x00000080 /* Bit 7 */
+#define HA_R2RE_REQ 0x00000100 /* Bit 8 */
+#define HA_R2CE_RSP 0x00000200 /* Bit 9 */
+#define HA_R2ATT 0x00000800 /* Bit 11 */
+#define HA_R3RE_REQ 0x00001000 /* Bit 12 */
+#define HA_R3CE_RSP 0x00002000 /* Bit 13 */
+#define HA_R3ATT 0x00008000 /* Bit 15 */
+#define HA_LATT 0x20000000 /* Bit 29 */
+#define HA_MBATT 0x40000000 /* Bit 30 */
+#define HA_ERATT 0x80000000 /* Bit 31 */
+
+#define HA_RXRE_REQ 0x00000001 /* Bit 0 */
+#define HA_RXCE_RSP 0x00000002 /* Bit 1 */
+#define HA_RXATT 0x00000008 /* Bit 3 */
+#define HA_RXMASK 0x0000000f
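+
+/*
+ * Illustrative note (not from the original source): the HA_RX* defines
+ * above are the per-ring template for bits 0:3; each successive ring's
+ * attention bits sit 4 bits higher, so one ring's state can be
+ * extracted with:
+ *
+ *	uint32_t ring_att = (ha_copy >> (ring_no * 4)) & HA_RXMASK;
+ */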
+
+/* Chip Attention Register */
+
+#define CA_REG_OFFSET 4 /* Byte offset from register base address */
+
+#define CA_R0CE_REQ 0x00000001 /* Bit 0 */
+#define CA_R0RE_RSP 0x00000002 /* Bit 1 */
+#define CA_R0ATT 0x00000008 /* Bit 3 */
+#define CA_R1CE_REQ 0x00000010 /* Bit 4 */
+#define CA_R1RE_RSP 0x00000020 /* Bit 5 */
+#define CA_R1ATT 0x00000080 /* Bit 7 */
+#define CA_R2CE_REQ 0x00000100 /* Bit 8 */
+#define CA_R2RE_RSP 0x00000200 /* Bit 9 */
+#define CA_R2ATT 0x00000800 /* Bit 11 */
+#define CA_R3CE_REQ 0x00001000 /* Bit 12 */
+#define CA_R3RE_RSP 0x00002000 /* Bit 13 */
+#define CA_R3ATT 0x00008000 /* Bit 15 */
+#define CA_MBATT 0x40000000 /* Bit 30 */
+
+/* Host Status Register */
+
+#define HS_REG_OFFSET 8 /* Byte offset from register base address */
+
+#define HS_MBRDY 0x00400000 /* Bit 22 */
+#define HS_FFRDY 0x00800000 /* Bit 23 */
+#define HS_FFER8 0x01000000 /* Bit 24 */
+#define HS_FFER7 0x02000000 /* Bit 25 */
+#define HS_FFER6 0x04000000 /* Bit 26 */
+#define HS_FFER5 0x08000000 /* Bit 27 */
+#define HS_FFER4 0x10000000 /* Bit 28 */
+#define HS_FFER3 0x20000000 /* Bit 29 */
+#define HS_FFER2 0x40000000 /* Bit 30 */
+#define HS_FFER1 0x80000000 /* Bit 31 */
+#define HS_FFERM 0xFF000000 /* Mask for error bits 31:24 */
+
+/* Host Control Register */
+
+#define HC_REG_OFFSET 12 /* Byte offset from register base address */
+
+#define HC_MBINT_ENA 0x00000001 /* Bit 0 */
+#define HC_R0INT_ENA 0x00000002 /* Bit 1 */
+#define HC_R1INT_ENA 0x00000004 /* Bit 2 */
+#define HC_R2INT_ENA 0x00000008 /* Bit 3 */
+#define HC_R3INT_ENA 0x00000010 /* Bit 4 */
+#define HC_INITHBI 0x02000000 /* Bit 25 */
+#define HC_INITMB 0x04000000 /* Bit 26 */
+#define HC_INITFF 0x08000000 /* Bit 27 */
+#define HC_LAINT_ENA 0x20000000 /* Bit 29 */
+#define HC_ERINT_ENA 0x80000000 /* Bit 31 */
+
+/* Mailbox Commands */
+#define MBX_SHUTDOWN 0x00 /* terminate testing */
+#define MBX_LOAD_SM 0x01
+#define MBX_READ_NV 0x02
+#define MBX_WRITE_NV 0x03
+#define MBX_RUN_BIU_DIAG 0x04
+#define MBX_INIT_LINK 0x05
+#define MBX_DOWN_LINK 0x06
+#define MBX_CONFIG_LINK 0x07
+#define MBX_CONFIG_RING 0x09
+#define MBX_RESET_RING 0x0A
+#define MBX_READ_CONFIG 0x0B
+#define MBX_READ_RCONFIG 0x0C
+#define MBX_READ_SPARM 0x0D
+#define MBX_READ_STATUS 0x0E
+#define MBX_READ_RPI 0x0F
+#define MBX_READ_XRI 0x10
+#define MBX_READ_REV 0x11
+#define MBX_READ_LNK_STAT 0x12
+#define MBX_REG_LOGIN 0x13
+#define MBX_UNREG_LOGIN 0x14
+#define MBX_READ_LA 0x15
+#define MBX_CLEAR_LA 0x16
+#define MBX_DUMP_MEMORY 0x17
+#define MBX_DUMP_CONTEXT 0x18
+#define MBX_RUN_DIAGS 0x19
+#define MBX_RESTART 0x1A
+#define MBX_UPDATE_CFG 0x1B
+#define MBX_DOWN_LOAD 0x1C
+#define MBX_DEL_LD_ENTRY 0x1D
+#define MBX_RUN_PROGRAM 0x1E
+#define MBX_SET_MASK 0x20
+#define MBX_SET_SLIM 0x21
+#define MBX_UNREG_D_ID 0x23
+#define MBX_CONFIG_FARP 0x25
+
+#define MBX_LOAD_AREA 0x81
+#define MBX_RUN_BIU_DIAG64 0x84
+#define MBX_CONFIG_PORT 0x88
+#define MBX_READ_SPARM64 0x8D
+#define MBX_READ_RPI64 0x8F
+#define MBX_REG_LOGIN64 0x93
+#define MBX_READ_LA64 0x95
+
+#define MBX_FLASH_WR_ULA 0x98
+#define MBX_SET_DEBUG 0x99
+#define MBX_LOAD_EXP_ROM 0x9C
+
+#define MBX_MAX_CMDS 0x9D
+#define MBX_SLI2_CMD_MASK 0x80
+
+/* IOCB Commands */
+
+#define CMD_RCV_SEQUENCE_CX 0x01
+#define CMD_XMIT_SEQUENCE_CR 0x02
+#define CMD_XMIT_SEQUENCE_CX 0x03
+#define CMD_XMIT_BCAST_CN 0x04
+#define CMD_XMIT_BCAST_CX 0x05
+#define CMD_QUE_RING_BUF_CN 0x06
+#define CMD_QUE_XRI_BUF_CX 0x07
+#define CMD_IOCB_CONTINUE_CN 0x08
+#define CMD_RET_XRI_BUF_CX 0x09
+#define CMD_ELS_REQUEST_CR 0x0A
+#define CMD_ELS_REQUEST_CX 0x0B
+#define CMD_RCV_ELS_REQ_CX 0x0D
+#define CMD_ABORT_XRI_CN 0x0E
+#define CMD_ABORT_XRI_CX 0x0F
+#define CMD_CLOSE_XRI_CN 0x10
+#define CMD_CLOSE_XRI_CX 0x11
+#define CMD_CREATE_XRI_CR 0x12
+#define CMD_CREATE_XRI_CX 0x13
+#define CMD_GET_RPI_CN 0x14
+#define CMD_XMIT_ELS_RSP_CX 0x15
+#define CMD_GET_RPI_CR 0x16
+#define CMD_XRI_ABORTED_CX 0x17
+#define CMD_FCP_IWRITE_CR 0x18
+#define CMD_FCP_IWRITE_CX 0x19
+#define CMD_FCP_IREAD_CR 0x1A
+#define CMD_FCP_IREAD_CX 0x1B
+#define CMD_FCP_ICMND_CR 0x1C
+#define CMD_FCP_ICMND_CX 0x1D
+
+#define CMD_ADAPTER_MSG 0x20
+#define CMD_ADAPTER_DUMP 0x22
+
+/* SLI_2 IOCB Command Set */
+
+#define CMD_RCV_SEQUENCE64_CX 0x81
+#define CMD_XMIT_SEQUENCE64_CR 0x82
+#define CMD_XMIT_SEQUENCE64_CX 0x83
+#define CMD_XMIT_BCAST64_CN 0x84
+#define CMD_XMIT_BCAST64_CX 0x85
+#define CMD_QUE_RING_BUF64_CN 0x86
+#define CMD_QUE_XRI_BUF64_CX 0x87
+#define CMD_IOCB_CONTINUE64_CN 0x88
+#define CMD_RET_XRI_BUF64_CX 0x89
+#define CMD_ELS_REQUEST64_CR 0x8A
+#define CMD_ELS_REQUEST64_CX 0x8B
+#define CMD_ABORT_MXRI64_CN 0x8C
+#define CMD_RCV_ELS_REQ64_CX 0x8D
+#define CMD_XMIT_ELS_RSP64_CX 0x95
+#define CMD_FCP_IWRITE64_CR 0x98
+#define CMD_FCP_IWRITE64_CX 0x99
+#define CMD_FCP_IREAD64_CR 0x9A
+#define CMD_FCP_IREAD64_CX 0x9B
+#define CMD_FCP_ICMND64_CR 0x9C
+#define CMD_FCP_ICMND64_CX 0x9D
+
+#define CMD_GEN_REQUEST64_CR 0xC2
+#define CMD_GEN_REQUEST64_CX 0xC3
+
+#define CMD_MAX_IOCB_CMD 0xE6
+#define CMD_IOCB_MASK 0xff
+
+#define MAX_MSG_DATA 28 /* max msg data in CMD_ADAPTER_MSG
+ iocb */
+#define LPFC_MAX_ADPTMSG 32 /* max msg data */
+/*
+ * Define Status
+ */
+#define MBX_SUCCESS 0
+#define MBXERR_NUM_RINGS 1
+#define MBXERR_NUM_IOCBS 2
+#define MBXERR_IOCBS_EXCEEDED 3
+#define MBXERR_BAD_RING_NUMBER 4
+#define MBXERR_MASK_ENTRIES_RANGE 5
+#define MBXERR_MASKS_EXCEEDED 6
+#define MBXERR_BAD_PROFILE 7
+#define MBXERR_BAD_DEF_CLASS 8
+#define MBXERR_BAD_MAX_RESPONDER 9
+#define MBXERR_BAD_MAX_ORIGINATOR 10
+#define MBXERR_RPI_REGISTERED 11
+#define MBXERR_RPI_FULL 12
+#define MBXERR_NO_RESOURCES 13
+#define MBXERR_BAD_RCV_LENGTH 14
+#define MBXERR_DMA_ERROR 15
+#define MBXERR_ERROR 16
+#define MBX_NOT_FINISHED 255
+
+#define MBX_BUSY 0xffffff /* Attempted cmd to busy Mailbox */
+#define MBX_TIMEOUT 0xfffffe /* time-out expired waiting for mailbox */
+
+/*
+ * Begin Structure Definitions for Mailbox Commands
+ */
+
+typedef struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint8_t tval;
+ uint8_t tmask;
+ uint8_t rval;
+ uint8_t rmask;
+#else /* __LITTLE_ENDIAN_BITFIELD */
+ uint8_t rmask;
+ uint8_t rval;
+ uint8_t tmask;
+ uint8_t tval;
+#endif
+} RR_REG;
+
+struct ulp_bde {
+ uint32_t bdeAddress;
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t bdeReserved:4;
+ uint32_t bdeAddrHigh:4;
+ uint32_t bdeSize:24;
+#else /* __LITTLE_ENDIAN_BITFIELD */
+ uint32_t bdeSize:24;
+ uint32_t bdeAddrHigh:4;
+ uint32_t bdeReserved:4;
+#endif
+};
+
+struct ulp_bde64 { /* SLI-2 */
+ union ULP_BDE_TUS {
+ uint32_t w;
+ struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t bdeFlags:8; /* BDE Flags 0 IS A SUPPORTED
+ VALUE !! */
+ uint32_t bdeSize:24; /* Size of buffer (in bytes) */
+#else /* __LITTLE_ENDIAN_BITFIELD */
+ uint32_t bdeSize:24; /* Size of buffer (in bytes) */
+ uint32_t bdeFlags:8; /* BDE Flags 0 IS A SUPPORTED
+ VALUE !! */
+#endif
+
+#define BUFF_USE_RSVD 0x01 /* bdeFlags */
+#define BUFF_USE_INTRPT 0x02 /* Not Implemented with LP6000 */
+#define BUFF_USE_CMND 0x04 /* Optional, 1=cmd/rsp 0=data buffer */
+#define BUFF_USE_RCV 0x08 /* "" "", 1=rcv buffer, 0=xmit
+ buffer */
+#define BUFF_TYPE_32BIT 0x10 /* "" "", 1=32 bit addr 0=64 bit
+ addr */
+#define BUFF_TYPE_SPECIAL 0x20 /* Not Implemented with LP6000 */
+#define BUFF_TYPE_BDL 0x40 /* Optional, may be set in BDL */
+#define BUFF_TYPE_INVALID 0x80 /* "" "" */
+ } f;
+ } tus;
+ uint32_t addrLow;
+ uint32_t addrHigh;
+};
+#define BDE64_SIZE_WORD 0
+#define BPL64_SIZE_WORD 0x40
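+
+/*
+ * Illustrative sketch (not part of the original driver): a 64 bit BDE
+ * would typically be filled from a dma_addr_t by splitting the bus
+ * address and recording the buffer length, e.g.:
+ *
+ *	struct ulp_bde64 bde;
+ *
+ *	bde.addrLow = (uint32_t)(physaddr & 0xffffffff);
+ *	bde.addrHigh = (uint32_t)(((uint64_t)physaddr) >> 32);
+ *	bde.tus.f.bdeSize = buflen;
+ *	bde.tus.f.bdeFlags = 0;		/* 0 is a supported value */
+ */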
+
+typedef struct ULP_BDL { /* SLI-2 */
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t bdeFlags:8; /* BDL Flags */
+ uint32_t bdeSize:24; /* Size of BDL array in host memory (bytes) */
+#else /* __LITTLE_ENDIAN_BITFIELD */
+ uint32_t bdeSize:24; /* Size of BDL array in host memory (bytes) */
+ uint32_t bdeFlags:8; /* BDL Flags */
+#endif
+
+ uint32_t addrLow; /* Address 0:31 */
+ uint32_t addrHigh; /* Address 32:63 */
+ uint32_t ulpIoTag32; /* Can be used for 32 bit I/O Tag */
+} ULP_BDL;
+
+/* Structure for MB Command LOAD_SM and DOWN_LOAD */
+
+typedef struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t rsvd2:25;
+ uint32_t acknowledgment:1;
+ uint32_t version:1;
+ uint32_t erase_or_prog:1;
+ uint32_t update_flash:1;
+ uint32_t update_ram:1;
+ uint32_t method:1;
+ uint32_t load_cmplt:1;
+#else /* __LITTLE_ENDIAN_BITFIELD */
+ uint32_t load_cmplt:1;
+ uint32_t method:1;
+ uint32_t update_ram:1;
+ uint32_t update_flash:1;
+ uint32_t erase_or_prog:1;
+ uint32_t version:1;
+ uint32_t acknowledgment:1;
+ uint32_t rsvd2:25;
+#endif
+
+ uint32_t dl_to_adr_low;
+ uint32_t dl_to_adr_high;
+ uint32_t dl_len;
+ union {
+ uint32_t dl_from_mbx_offset;
+ struct ulp_bde dl_from_bde;
+ struct ulp_bde64 dl_from_bde64;
+ } un;
+
+} LOAD_SM_VAR;
+
+/* Structure for MB Command READ_NVPARM (02) */
+
+typedef struct {
+ uint32_t rsvd1[3]; /* Read as all one's */
+ uint32_t rsvd2; /* Read as all zero's */
+ uint32_t portname[2]; /* N_PORT name */
+ uint32_t nodename[2]; /* NODE name */
+
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t pref_DID:24;
+ uint32_t hardAL_PA:8;
+#else /* __LITTLE_ENDIAN_BITFIELD */
+ uint32_t hardAL_PA:8;
+ uint32_t pref_DID:24;
+#endif
+
+ uint32_t rsvd3[21]; /* Read as all one's */
+} READ_NV_VAR;
+
+/* Structure for MB Command WRITE_NVPARMS (03) */
+
+typedef struct {
+ uint32_t rsvd1[3]; /* Must be all one's */
+ uint32_t rsvd2; /* Must be all zero's */
+ uint32_t portname[2]; /* N_PORT name */
+ uint32_t nodename[2]; /* NODE name */
+
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t pref_DID:24;
+ uint32_t hardAL_PA:8;
+#else /* __LITTLE_ENDIAN_BITFIELD */
+ uint32_t hardAL_PA:8;
+ uint32_t pref_DID:24;
+#endif
+
+ uint32_t rsvd3[21]; /* Must be all one's */
+} WRITE_NV_VAR;
+
+/* Structure for MB Command RUN_BIU_DIAG (04) */
+/* Structure for MB Command RUN_BIU_DIAG64 (0x84) */
+
+typedef struct {
+ uint32_t rsvd1;
+ union {
+ struct {
+ struct ulp_bde xmit_bde;
+ struct ulp_bde rcv_bde;
+ } s1;
+ struct {
+ struct ulp_bde64 xmit_bde64;
+ struct ulp_bde64 rcv_bde64;
+ } s2;
+ } un;
+} BIU_DIAG_VAR;
+
+/* Structure for MB Command INIT_LINK (05) */
+
+typedef struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t rsvd1:24;
+ uint32_t lipsr_AL_PA:8; /* AL_PA to issue Lip Selective Reset to */
+#else /* __LITTLE_ENDIAN_BITFIELD */
+ uint32_t lipsr_AL_PA:8; /* AL_PA to issue Lip Selective Reset to */
+ uint32_t rsvd1:24;
+#endif
+
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint8_t fabric_AL_PA; /* If using a Fabric Assigned AL_PA */
+ uint8_t rsvd2;
+ uint16_t link_flags;
+#else /* __LITTLE_ENDIAN_BITFIELD */
+ uint16_t link_flags;
+ uint8_t rsvd2;
+ uint8_t fabric_AL_PA; /* If using a Fabric Assigned AL_PA */
+#endif
+
+#define FLAGS_LOCAL_LB 0x01 /* link_flags (=1) ENDEC loopback */
+#define FLAGS_TOPOLOGY_MODE_LOOP_PT 0x00 /* Attempt loop then pt-pt */
+#define FLAGS_TOPOLOGY_MODE_PT_PT 0x02 /* Attempt pt-pt only */
+#define FLAGS_TOPOLOGY_MODE_LOOP 0x04 /* Attempt loop only */
+#define FLAGS_TOPOLOGY_MODE_PT_LOOP 0x06 /* Attempt pt-pt then loop */
+#define FLAGS_LIRP_LILP 0x80 /* LIRP / LILP is disabled */
+
+#define FLAGS_TOPOLOGY_FAILOVER 0x0400 /* Bit 10 */
+#define FLAGS_LINK_SPEED 0x0800 /* Bit 11 */
+
+ uint32_t link_speed;
+#define LINK_SPEED_AUTO 0 /* Auto selection */
+#define LINK_SPEED_1G 1 /* 1 Gigabaud */
+#define LINK_SPEED_2G 2 /* 2 Gigabaud */
+#define LINK_SPEED_4G 4 /* 4 Gigabaud */
+#define LINK_SPEED_8G 8 /* 8 Gigabaud */
+#define LINK_SPEED_10G 16 /* 10 Gigabaud */
+
+} INIT_LINK_VAR;
+
+/* Structure for MB Command DOWN_LINK (06) */
+
+typedef struct {
+ uint32_t rsvd1;
+} DOWN_LINK_VAR;
+
+/* Structure for MB Command CONFIG_LINK (07) */
+
+typedef struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t cr:1;
+ uint32_t ci:1;
+ uint32_t cr_delay:6;
+ uint32_t cr_count:8;
+ uint32_t rsvd1:8;
+ uint32_t MaxBBC:8;
+#else /* __LITTLE_ENDIAN_BITFIELD */
+ uint32_t MaxBBC:8;
+ uint32_t rsvd1:8;
+ uint32_t cr_count:8;
+ uint32_t cr_delay:6;
+ uint32_t ci:1;
+ uint32_t cr:1;
+#endif
+
+ uint32_t myId;
+ uint32_t rsvd2;
+ uint32_t edtov;
+ uint32_t arbtov;
+ uint32_t ratov;
+ uint32_t rttov;
+ uint32_t altov;
+ uint32_t crtov;
+ uint32_t citov;
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t rrq_enable:1;
+ uint32_t rrq_immed:1;
+ uint32_t rsvd4:29;
+ uint32_t ack0_enable:1;
+#else /* __LITTLE_ENDIAN_BITFIELD */
+ uint32_t ack0_enable:1;
+ uint32_t rsvd4:29;
+ uint32_t rrq_immed:1;
+ uint32_t rrq_enable:1;
+#endif
+} CONFIG_LINK;
+
+/* Structure for MB Command PART_SLIM (08)
+ * will be removed since SLI1 is no longer supported!
+ */
+typedef struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint16_t offCiocb;
+ uint16_t numCiocb;
+ uint16_t offRiocb;
+ uint16_t numRiocb;
+#else /* __LITTLE_ENDIAN_BITFIELD */
+ uint16_t numCiocb;
+ uint16_t offCiocb;
+ uint16_t numRiocb;
+ uint16_t offRiocb;
+#endif
+} RING_DEF;
+
+typedef struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t unused1:24;
+ uint32_t numRing:8;
+#else /* __LITTLE_ENDIAN_BITFIELD */
+ uint32_t numRing:8;
+ uint32_t unused1:24;
+#endif
+
+ RING_DEF ringdef[4];
+ uint32_t hbainit;
+} PART_SLIM_VAR;
+
+/* Structure for MB Command CONFIG_RING (09) */
+
+typedef struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t unused2:6;
+ uint32_t recvSeq:1;
+ uint32_t recvNotify:1;
+ uint32_t numMask:8;
+ uint32_t profile:8;
+ uint32_t unused1:4;
+ uint32_t ring:4;
+#else /* __LITTLE_ENDIAN_BITFIELD */
+ uint32_t ring:4;
+ uint32_t unused1:4;
+ uint32_t profile:8;
+ uint32_t numMask:8;
+ uint32_t recvNotify:1;
+ uint32_t recvSeq:1;
+ uint32_t unused2:6;
+#endif
+
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint16_t maxRespXchg;
+ uint16_t maxOrigXchg;
+#else /* __LITTLE_ENDIAN_BITFIELD */
+ uint16_t maxOrigXchg;
+ uint16_t maxRespXchg;
+#endif
+
+ RR_REG rrRegs[6];
+} CONFIG_RING_VAR;
+
+/* Structure for MB Command RESET_RING (10) */
+
+typedef struct {
+ uint32_t ring_no;
+} RESET_RING_VAR;
+
+/* Structure for MB Command READ_CONFIG (11) */
+
+typedef struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t cr:1;
+ uint32_t ci:1;
+ uint32_t cr_delay:6;
+ uint32_t cr_count:8;
+ uint32_t InitBBC:8;
+ uint32_t MaxBBC:8;
+#else /* __LITTLE_ENDIAN_BITFIELD */
+ uint32_t MaxBBC:8;
+ uint32_t InitBBC:8;
+ uint32_t cr_count:8;
+ uint32_t cr_delay:6;
+ uint32_t ci:1;
+ uint32_t cr:1;
+#endif
+
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t topology:8;
+ uint32_t myDid:24;
+#else /* __LITTLE_ENDIAN_BITFIELD */
+ uint32_t myDid:24;
+ uint32_t topology:8;
+#endif
+
+ /* Defines for topology (defined previously) */
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t AR:1;
+ uint32_t IR:1;
+ uint32_t rsvd1:29;
+ uint32_t ack0:1;
+#else /* __LITTLE_ENDIAN_BITFIELD */
+ uint32_t ack0:1;
+ uint32_t rsvd1:29;
+ uint32_t IR:1;
+ uint32_t AR:1;
+#endif
+
+ uint32_t edtov;
+ uint32_t arbtov;
+ uint32_t ratov;
+ uint32_t rttov;
+ uint32_t altov;
+ uint32_t lmt;
+#define LMT_RESERVED 0x0 /* Not used */
+#define LMT_266_10bit 0x1 /* 265.625 Mbaud 10 bit iface */
+#define LMT_532_10bit 0x2 /* 531.25 Mbaud 10 bit iface */
+#define LMT_1063_20bit 0x3 /* 1062.5 Mbaud 20 bit iface */
+#define LMT_1063_10bit 0x4 /* 1062.5 Mbaud 10 bit iface */
+#define LMT_2125_10bit 0x8 /* 2125 Mbaud 10 bit iface */
+#define LMT_4250_10bit 0x40 /* 4250 Mbaud 10 bit iface */
+
+ uint32_t rsvd2;
+ uint32_t rsvd3;
+ uint32_t max_xri;
+ uint32_t max_iocb;
+ uint32_t max_rpi;
+ uint32_t avail_xri;
+ uint32_t avail_iocb;
+ uint32_t avail_rpi;
+ uint32_t default_rpi;
+} READ_CONFIG_VAR;
+
+/* Structure for MB Command READ_RCONFIG (12) */
+
+typedef struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t rsvd2:7;
+ uint32_t recvNotify:1;
+ uint32_t numMask:8;
+ uint32_t profile:8;
+ uint32_t rsvd1:4;
+ uint32_t ring:4;
+#else /* __LITTLE_ENDIAN_BITFIELD */
+ uint32_t ring:4;
+ uint32_t rsvd1:4;
+ uint32_t profile:8;
+ uint32_t numMask:8;
+ uint32_t recvNotify:1;
+ uint32_t rsvd2:7;
+#endif
+
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint16_t maxResp;
+ uint16_t maxOrig;
+#else /* __LITTLE_ENDIAN_BITFIELD */
+ uint16_t maxOrig;
+ uint16_t maxResp;
+#endif
+
+ RR_REG rrRegs[6];
+
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint16_t cmdRingOffset;
+ uint16_t cmdEntryCnt;
+ uint16_t rspRingOffset;
+ uint16_t rspEntryCnt;
+ uint16_t nextCmdOffset;
+ uint16_t rsvd3;
+ uint16_t nextRspOffset;
+ uint16_t rsvd4;
+#else /* __LITTLE_ENDIAN_BITFIELD */
+ uint16_t cmdEntryCnt;
+ uint16_t cmdRingOffset;
+ uint16_t rspEntryCnt;
+ uint16_t rspRingOffset;
+ uint16_t rsvd3;
+ uint16_t nextCmdOffset;
+ uint16_t rsvd4;
+ uint16_t nextRspOffset;
+#endif
+} READ_RCONF_VAR;
+
+/* Structure for MB Command READ_SPARM (13) */
+/* Structure for MB Command READ_SPARM64 (0x8D) */
+
+typedef struct {
+ uint32_t rsvd1;
+ uint32_t rsvd2;
+ union {
+ struct ulp_bde sp; /* This BDE points to struct serv_parm
+ structure */
+ struct ulp_bde64 sp64;
+ } un;
+} READ_SPARM_VAR;
+
+/* Structure for MB Command READ_STATUS (14) */
+
+typedef struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t rsvd1:31;
+ uint32_t clrCounters:1;
+ uint16_t activeXriCnt;
+ uint16_t activeRpiCnt;
+#else /* __LITTLE_ENDIAN_BITFIELD */
+ uint32_t clrCounters:1;
+ uint32_t rsvd1:31;
+ uint16_t activeRpiCnt;
+ uint16_t activeXriCnt;
+#endif
+
+ uint32_t xmitByteCnt;
+ uint32_t rcvByteCnt;
+ uint32_t xmitFrameCnt;
+ uint32_t rcvFrameCnt;
+ uint32_t xmitSeqCnt;
+ uint32_t rcvSeqCnt;
+ uint32_t totalOrigExchanges;
+ uint32_t totalRespExchanges;
+ uint32_t rcvPbsyCnt;
+ uint32_t rcvFbsyCnt;
+} READ_STATUS_VAR;
+
+/* Structure for MB Command READ_RPI (15) */
+/* Structure for MB Command READ_RPI64 (0x8F) */
+
+typedef struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint16_t nextRpi;
+ uint16_t reqRpi;
+ uint32_t rsvd2:8;
+ uint32_t DID:24;
+#else /* __LITTLE_ENDIAN_BITFIELD */
+ uint16_t reqRpi;
+ uint16_t nextRpi;
+ uint32_t DID:24;
+ uint32_t rsvd2:8;
+#endif
+
+ union {
+ struct ulp_bde sp;
+ struct ulp_bde64 sp64;
+ } un;
+
+} READ_RPI_VAR;
+
+/* Structure for MB Command READ_XRI (16) */
+
+typedef struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint16_t nextXri;
+ uint16_t reqXri;
+ uint16_t rsvd1;
+ uint16_t rpi;
+ uint32_t rsvd2:8;
+ uint32_t DID:24;
+ uint32_t rsvd3:8;
+ uint32_t SID:24;
+ uint32_t rsvd4;
+ uint8_t seqId;
+ uint8_t rsvd5;
+ uint16_t seqCount;
+ uint16_t oxId;
+ uint16_t rxId;
+ uint32_t rsvd6:30;
+ uint32_t si:1;
+ uint32_t exchOrig:1;
+#else /* __LITTLE_ENDIAN_BITFIELD */
+ uint16_t reqXri;
+ uint16_t nextXri;
+ uint16_t rpi;
+ uint16_t rsvd1;
+ uint32_t DID:24;
+ uint32_t rsvd2:8;
+ uint32_t SID:24;
+ uint32_t rsvd3:8;
+ uint32_t rsvd4;
+ uint16_t seqCount;
+ uint8_t rsvd5;
+ uint8_t seqId;
+ uint16_t rxId;
+ uint16_t oxId;
+ uint32_t exchOrig:1;
+ uint32_t si:1;
+ uint32_t rsvd6:30;
+#endif
+} READ_XRI_VAR;
+
+/* Structure for MB Command READ_REV (17) */
+
+typedef struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t cv:1;
+ uint32_t rr:1;
+ uint32_t rsvd1:29;
+ uint32_t rv:1;
+#else /* __LITTLE_ENDIAN_BITFIELD */
+ uint32_t rv:1;
+ uint32_t rsvd1:29;
+ uint32_t rr:1;
+ uint32_t cv:1;
+#endif
+
+ uint32_t biuRev;
+ uint32_t smRev;
+ union {
+ uint32_t smFwRev;
+ struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint8_t ProgType;
+ uint8_t ProgId;
+ uint16_t ProgVer:4;
+ uint16_t ProgRev:4;
+ uint16_t ProgFixLvl:2;
+ uint16_t ProgDistType:2;
+ uint16_t DistCnt:4;
+#else /* __LITTLE_ENDIAN_BITFIELD */
+ uint16_t DistCnt:4;
+ uint16_t ProgDistType:2;
+ uint16_t ProgFixLvl:2;
+ uint16_t ProgRev:4;
+ uint16_t ProgVer:4;
+ uint8_t ProgId;
+ uint8_t ProgType;
+#endif
+
+ } b;
+ } un;
+ uint32_t endecRev;
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint8_t feaLevelHigh;
+ uint8_t feaLevelLow;
+ uint8_t fcphHigh;
+ uint8_t fcphLow;
+#else /* __LITTLE_ENDIAN_BITFIELD */
+ uint8_t fcphLow;
+ uint8_t fcphHigh;
+ uint8_t feaLevelLow;
+ uint8_t feaLevelHigh;
+#endif
+
+ uint32_t postKernRev;
+ uint32_t opFwRev;
+ uint8_t opFwName[16];
+ uint32_t sli1FwRev;
+ uint8_t sli1FwName[16];
+ uint32_t sli2FwRev;
+ uint8_t sli2FwName[16];
+ uint32_t rsvd2;
+ uint32_t RandomData[7];
+} READ_REV_VAR;
+
+/* Structure for MB Command READ_LINK_STAT (18) */
+
+typedef struct {
+ uint32_t rsvd1;
+ uint32_t linkFailureCnt;
+ uint32_t lossSyncCnt;
+
+ uint32_t lossSignalCnt;
+ uint32_t primSeqErrCnt;
+ uint32_t invalidXmitWord;
+ uint32_t crcCnt;
+ uint32_t primSeqTimeout;
+ uint32_t elasticOverrun;
+ uint32_t arbTimeout;
+} READ_LNK_VAR;
+
+/* Structure for MB Command REG_LOGIN (19) */
+/* Structure for MB Command REG_LOGIN64 (0x93) */
+
+typedef struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint16_t rsvd1;
+ uint16_t rpi;
+ uint32_t rsvd2:8;
+ uint32_t did:24;
+#else /* __LITTLE_ENDIAN_BITFIELD */
+ uint16_t rpi;
+ uint16_t rsvd1;
+ uint32_t did:24;
+ uint32_t rsvd2:8;
+#endif
+
+ union {
+ struct ulp_bde sp;
+ struct ulp_bde64 sp64;
+ } un;
+
+} REG_LOGIN_VAR;
+
+/* Word 30 contents for REG_LOGIN */
+typedef union {
+ struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint16_t rsvd1:12;
+ uint16_t wd30_class:4;
+ uint16_t xri;
+#else /* __LITTLE_ENDIAN_BITFIELD */
+ uint16_t xri;
+ uint16_t wd30_class:4;
+ uint16_t rsvd1:12;
+#endif
+ } f;
+ uint32_t word;
+} REG_WD30;
+
+/* Structure for MB Command UNREG_LOGIN (20) */
+
+typedef struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint16_t rsvd1;
+ uint16_t rpi;
+#else /* __LITTLE_ENDIAN_BITFIELD */
+ uint16_t rpi;
+ uint16_t rsvd1;
+#endif
+} UNREG_LOGIN_VAR;
+
+/* Structure for MB Command UNREG_D_ID (0x23) */
+
+typedef struct {
+ uint32_t did;
+} UNREG_D_ID_VAR;
+
+/* Structure for MB Command READ_LA (21) */
+/* Structure for MB Command READ_LA64 (0x95) */
+
+typedef struct {
+ uint32_t eventTag; /* Event tag */
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t rsvd1:22;
+ uint32_t pb:1;
+ uint32_t il:1;
+ uint32_t attType:8;
+#else /* __LITTLE_ENDIAN_BITFIELD */
+ uint32_t attType:8;
+ uint32_t il:1;
+ uint32_t pb:1;
+ uint32_t rsvd1:22;
+#endif
+
+#define AT_RESERVED 0x00 /* Reserved - attType */
+#define AT_LINK_UP 0x01 /* Link is up */
+#define AT_LINK_DOWN 0x02 /* Link is down */
+
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint8_t granted_AL_PA;
+ uint8_t lipAlPs;
+ uint8_t lipType;
+ uint8_t topology;
+#else /* __LITTLE_ENDIAN_BITFIELD */
+ uint8_t topology;
+ uint8_t lipType;
+ uint8_t lipAlPs;
+ uint8_t granted_AL_PA;
+#endif
+
+#define TOPOLOGY_PT_PT 0x01 /* Topology is pt-pt / pt-fabric */
+#define TOPOLOGY_LOOP 0x02 /* Topology is FC-AL */
+
+ union {
+		struct ulp_bde lilpBde; /* This BDE points to a 128 byte
+					   buffer used to store the LILP
+					   AL_PA position map */
+ struct ulp_bde64 lilpBde64;
+ } un;
+
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t Dlu:1;
+ uint32_t Dtf:1;
+ uint32_t Drsvd2:14;
+ uint32_t DlnkSpeed:8;
+ uint32_t DnlPort:4;
+ uint32_t Dtx:2;
+ uint32_t Drx:2;
+#else /* __LITTLE_ENDIAN_BITFIELD */
+ uint32_t Drx:2;
+ uint32_t Dtx:2;
+ uint32_t DnlPort:4;
+ uint32_t DlnkSpeed:8;
+ uint32_t Drsvd2:14;
+ uint32_t Dtf:1;
+ uint32_t Dlu:1;
+#endif
+
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t Ulu:1;
+ uint32_t Utf:1;
+ uint32_t Ursvd2:14;
+ uint32_t UlnkSpeed:8;
+ uint32_t UnlPort:4;
+ uint32_t Utx:2;
+ uint32_t Urx:2;
+#else /* __LITTLE_ENDIAN_BITFIELD */
+ uint32_t Urx:2;
+ uint32_t Utx:2;
+ uint32_t UnlPort:4;
+ uint32_t UlnkSpeed:8;
+ uint32_t Ursvd2:14;
+ uint32_t Utf:1;
+ uint32_t Ulu:1;
+#endif
+
+#define LA_UNKNW_LINK 0x0 /* lnkSpeed */
+#define LA_1GHZ_LINK 0x04 /* lnkSpeed */
+#define LA_2GHZ_LINK 0x08 /* lnkSpeed */
+#define LA_4GHZ_LINK 0x10 /* lnkSpeed */
+#define LA_8GHZ_LINK 0x20 /* lnkSpeed */
+#define LA_10GHZ_LINK 0x40 /* lnkSpeed */
+
+} READ_LA_VAR;
+
+/* Structure for MB Command CLEAR_LA (22) */
+
+typedef struct {
+ uint32_t eventTag; /* Event tag */
+ uint32_t rsvd1;
+} CLEAR_LA_VAR;
+
+/* Structure for MB Command DUMP */
+
+typedef struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t rsvd:25;
+ uint32_t ra:1;
+ uint32_t co:1;
+ uint32_t cv:1;
+ uint32_t type:4;
+ uint32_t entry_index:16;
+ uint32_t region_id:16;
+#else /* __LITTLE_ENDIAN_BITFIELD */
+ uint32_t type:4;
+ uint32_t cv:1;
+ uint32_t co:1;
+ uint32_t ra:1;
+ uint32_t rsvd:25;
+ uint32_t region_id:16;
+ uint32_t entry_index:16;
+#endif
+
+ uint32_t rsvd1;
+ uint32_t word_cnt;
+ uint32_t resp_offset;
+} DUMP_VAR;
+
+#define DMP_MEM_REG 0x1
+#define DMP_NV_PARAMS 0x2
+
+#define DMP_REGION_VPD 0xe
+#define DMP_VPD_SIZE 0x400 /* maximum amount of VPD */
+#define DMP_RSP_OFFSET 0x14 /* word 5 contains first word of rsp */
+#define DMP_RSP_SIZE 0x6C /* maximum of 27 words of rsp data */
+
+/* Structure for MB Command CONFIG_PORT (0x88) */
+
+typedef struct {
+ uint32_t pcbLen;
+ uint32_t pcbLow; /* bit 31:0 of memory based port config block */
+ uint32_t pcbHigh; /* bit 63:32 of memory based port config block */
+ uint32_t hbainit[5];
+} CONFIG_PORT_VAR;
+
+/* SLI-2 Port Control Block */
+
+/* SLIM POINTER */
+#define SLIMOFF 0x30 /* WORD */
+
+typedef struct _SLI2_RDSC {
+ uint32_t cmdEntries;
+ uint32_t cmdAddrLow;
+ uint32_t cmdAddrHigh;
+
+ uint32_t rspEntries;
+ uint32_t rspAddrLow;
+ uint32_t rspAddrHigh;
+} SLI2_RDSC;
+
+typedef struct _PCB {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t type:8;
+#define TYPE_NATIVE_SLI2 0x01
+ uint32_t feature:8;
+#define FEATURE_INITIAL_SLI2 0x01
+ uint32_t rsvd:12;
+ uint32_t maxRing:4;
+#else /* __LITTLE_ENDIAN_BITFIELD */
+ uint32_t maxRing:4;
+ uint32_t rsvd:12;
+ uint32_t feature:8;
+#define FEATURE_INITIAL_SLI2 0x01
+ uint32_t type:8;
+#define TYPE_NATIVE_SLI2 0x01
+#endif
+
+ uint32_t mailBoxSize;
+ uint32_t mbAddrLow;
+ uint32_t mbAddrHigh;
+
+ uint32_t hgpAddrLow;
+ uint32_t hgpAddrHigh;
+
+ uint32_t pgpAddrLow;
+ uint32_t pgpAddrHigh;
+ SLI2_RDSC rdsc[MAX_RINGS];
+} PCB_t;
+
+/* NEW_FEATURE */
+typedef struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t rsvd0:27;
+ uint32_t discardFarp:1;
+ uint32_t IPEnable:1;
+ uint32_t nodeName:1;
+ uint32_t portName:1;
+ uint32_t filterEnable:1;
+#else /* __LITTLE_ENDIAN_BITFIELD */
+ uint32_t filterEnable:1;
+ uint32_t portName:1;
+ uint32_t nodeName:1;
+ uint32_t IPEnable:1;
+ uint32_t discardFarp:1;
+	uint32_t rsvd0:27;
+#endif
+
+ uint8_t portname[8]; /* Used to be struct lpfc_name */
+ uint8_t nodename[8];
+ uint32_t rsvd1;
+ uint32_t rsvd2;
+ uint32_t rsvd3;
+ uint32_t IPAddress;
+} CONFIG_FARP_VAR;
+
+/* Union of all Mailbox Command types */
+#define MAILBOX_CMD_WSIZE 32
+#define MAILBOX_CMD_SIZE (MAILBOX_CMD_WSIZE * sizeof(uint32_t))
+
+typedef union {
+ uint32_t varWords[MAILBOX_CMD_WSIZE - 1];
+ LOAD_SM_VAR varLdSM; /* cmd = 1 (LOAD_SM) */
+ READ_NV_VAR varRDnvp; /* cmd = 2 (READ_NVPARMS) */
+ WRITE_NV_VAR varWTnvp; /* cmd = 3 (WRITE_NVPARMS) */
+ BIU_DIAG_VAR varBIUdiag; /* cmd = 4 (RUN_BIU_DIAG) */
+ INIT_LINK_VAR varInitLnk; /* cmd = 5 (INIT_LINK) */
+ DOWN_LINK_VAR varDwnLnk; /* cmd = 6 (DOWN_LINK) */
+ CONFIG_LINK varCfgLnk; /* cmd = 7 (CONFIG_LINK) */
+ PART_SLIM_VAR varSlim; /* cmd = 8 (PART_SLIM) */
+ CONFIG_RING_VAR varCfgRing; /* cmd = 9 (CONFIG_RING) */
+ RESET_RING_VAR varRstRing; /* cmd = 10 (RESET_RING) */
+ READ_CONFIG_VAR varRdConfig; /* cmd = 11 (READ_CONFIG) */
+ READ_RCONF_VAR varRdRConfig; /* cmd = 12 (READ_RCONFIG) */
+ READ_SPARM_VAR varRdSparm; /* cmd = 13 (READ_SPARM(64)) */
+ READ_STATUS_VAR varRdStatus; /* cmd = 14 (READ_STATUS) */
+ READ_RPI_VAR varRdRPI; /* cmd = 15 (READ_RPI(64)) */
+ READ_XRI_VAR varRdXRI; /* cmd = 16 (READ_XRI) */
+ READ_REV_VAR varRdRev; /* cmd = 17 (READ_REV) */
+ READ_LNK_VAR varRdLnk; /* cmd = 18 (READ_LNK_STAT) */
+ REG_LOGIN_VAR varRegLogin; /* cmd = 19 (REG_LOGIN(64)) */
+ UNREG_LOGIN_VAR varUnregLogin; /* cmd = 20 (UNREG_LOGIN) */
+ READ_LA_VAR varReadLA; /* cmd = 21 (READ_LA(64)) */
+ CLEAR_LA_VAR varClearLA; /* cmd = 22 (CLEAR_LA) */
+ DUMP_VAR varDmp; /* Warm Start DUMP mbx cmd */
+ UNREG_D_ID_VAR varUnregDID; /* cmd = 0x23 (UNREG_D_ID) */
+ CONFIG_FARP_VAR varCfgFarp; /* cmd = 0x25 (CONFIG_FARP) NEW_FEATURE */
+ CONFIG_PORT_VAR varCfgPort; /* cmd = 0x88 (CONFIG_PORT) */
+} MAILVARIANTS;
+
+/*
+ * SLI-2 specific structures
+ */
+
+typedef struct {
+ uint32_t cmdPutInx;
+ uint32_t rspGetInx;
+} HGP;
+
+typedef struct {
+ uint32_t cmdGetInx;
+ uint32_t rspPutInx;
+} PGP;
+
+typedef struct _SLI2_DESC {
+ HGP host[MAX_RINGS];
+ uint32_t unused1[16];
+ PGP port[MAX_RINGS];
+} SLI2_DESC;
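+
+/*
+ * Illustrative note (an assumption, not from the original source): the
+ * host advances cmdPutInx as it queues IOCBs and the port advances
+ * cmdGetInx as it consumes them, so a command ring is full when the
+ * next put index would catch up with the get index:
+ *
+ *	if (((putinx + 1) % numCiocb) == getinx)
+ *		(ring full, hold the IOCB until entries drain)
+ */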
+
+typedef union {
+ SLI2_DESC s2;
+} SLI_VAR;
+
+typedef struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint16_t mbxStatus;
+ uint8_t mbxCommand;
+ uint8_t mbxReserved:6;
+ uint8_t mbxHc:1;
+ uint8_t mbxOwner:1; /* Low order bit first word */
+#else /* __LITTLE_ENDIAN_BITFIELD */
+ uint8_t mbxOwner:1; /* Low order bit first word */
+ uint8_t mbxHc:1;
+ uint8_t mbxReserved:6;
+ uint8_t mbxCommand;
+ uint16_t mbxStatus;
+#endif
+
+ MAILVARIANTS un;
+ SLI_VAR us;
+} MAILBOX_t;
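+
+/*
+ * Illustrative sketch (not part of the original driver; mb and
+ * event_tag are placeholders): a mailbox command is typically prepared
+ * by zeroing the MAILBOX_t, filling in the variant for the command and
+ * setting mbxCommand, e.g.:
+ *
+ *	memset(mb, 0, MAILBOX_CMD_SIZE);
+ *	mb->un.varClearLA.eventTag = event_tag;
+ *	mb->mbxCommand = MBX_CLEAR_LA;
+ */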
+
+/*
+ * Begin Structure Definitions for IOCB Commands
+ */
+
+typedef struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint8_t statAction;
+ uint8_t statRsn;
+ uint8_t statBaExp;
+ uint8_t statLocalError;
+#else /* __LITTLE_ENDIAN_BITFIELD */
+ uint8_t statLocalError;
+ uint8_t statBaExp;
+ uint8_t statRsn;
+ uint8_t statAction;
+#endif
+ /* statRsn P/F_RJT reason codes */
+#define RJT_BAD_D_ID 0x01 /* Invalid D_ID field */
+#define RJT_BAD_S_ID 0x02 /* Invalid S_ID field */
+#define RJT_UNAVAIL_TEMP 0x03 /* N_Port unavailable temp. */
+#define RJT_UNAVAIL_PERM 0x04 /* N_Port unavailable perm. */
+#define RJT_UNSUP_CLASS 0x05 /* Class not supported */
+#define RJT_DELIM_ERR 0x06 /* Delimiter usage error */
+#define RJT_UNSUP_TYPE 0x07 /* Type not supported */
+#define RJT_BAD_CONTROL 0x08 /* Invalid link control */
+#define RJT_BAD_RCTL 0x09 /* R_CTL invalid */
+#define RJT_BAD_FCTL 0x0A /* F_CTL invalid */
+#define RJT_BAD_OXID 0x0B /* OX_ID invalid */
+#define RJT_BAD_RXID 0x0C /* RX_ID invalid */
+#define RJT_BAD_SEQID 0x0D /* SEQ_ID invalid */
+#define RJT_BAD_DFCTL 0x0E /* DF_CTL invalid */
+#define RJT_BAD_SEQCNT 0x0F /* SEQ_CNT invalid */
+#define RJT_BAD_PARM 0x10 /* Param. field invalid */
+#define RJT_XCHG_ERR 0x11 /* Exchange error */
+#define RJT_PROT_ERR 0x12 /* Protocol error */
+#define RJT_BAD_LENGTH 0x13 /* Invalid Length */
+#define RJT_UNEXPECTED_ACK 0x14 /* Unexpected ACK */
+#define RJT_LOGIN_REQUIRED 0x16 /* Login required */
+#define RJT_TOO_MANY_SEQ 0x17 /* Excessive sequences */
+#define RJT_XCHG_NOT_STRT 0x18 /* Exchange not started */
+#define RJT_UNSUP_SEC_HDR 0x19 /* Security hdr not supported */
+#define RJT_UNAVAIL_PATH 0x1A /* Fabric Path not available */
+#define RJT_VENDOR_UNIQUE 0xFF /* Vendor unique error */
+
+#define IOERR_SUCCESS 0x00 /* statLocalError */
+#define IOERR_MISSING_CONTINUE 0x01
+#define IOERR_SEQUENCE_TIMEOUT 0x02
+#define IOERR_INTERNAL_ERROR 0x03
+#define IOERR_INVALID_RPI 0x04
+#define IOERR_NO_XRI 0x05
+#define IOERR_ILLEGAL_COMMAND 0x06
+#define IOERR_XCHG_DROPPED 0x07
+#define IOERR_ILLEGAL_FIELD 0x08
+#define IOERR_BAD_CONTINUE 0x09
+#define IOERR_TOO_MANY_BUFFERS 0x0A
+#define IOERR_RCV_BUFFER_WAITING 0x0B
+#define IOERR_NO_CONNECTION 0x0C
+#define IOERR_TX_DMA_FAILED 0x0D
+#define IOERR_RX_DMA_FAILED 0x0E
+#define IOERR_ILLEGAL_FRAME 0x0F
+#define IOERR_EXTRA_DATA 0x10
+#define IOERR_NO_RESOURCES 0x11
+#define IOERR_RESERVED 0x12
+#define IOERR_ILLEGAL_LENGTH 0x13
+#define IOERR_UNSUPPORTED_FEATURE 0x14
+#define IOERR_ABORT_IN_PROGRESS 0x15
+#define IOERR_ABORT_REQUESTED 0x16
+#define IOERR_RECEIVE_BUFFER_TIMEOUT 0x17
+#define IOERR_LOOP_OPEN_FAILURE 0x18
+#define IOERR_RING_RESET 0x19
+#define IOERR_LINK_DOWN 0x1A
+#define IOERR_CORRUPTED_DATA 0x1B
+#define IOERR_CORRUPTED_RPI 0x1C
+#define IOERR_OUT_OF_ORDER_DATA 0x1D
+#define IOERR_OUT_OF_ORDER_ACK 0x1E
+#define IOERR_DUP_FRAME 0x1F
+#define IOERR_LINK_CONTROL_FRAME 0x20 /* ACK_N received */
+#define IOERR_BAD_HOST_ADDRESS 0x21
+#define IOERR_RCV_HDRBUF_WAITING 0x22
+#define IOERR_MISSING_HDR_BUFFER 0x23
+#define IOERR_MSEQ_CHAIN_CORRUPTED 0x24
+#define IOERR_ABORTMULT_REQUESTED 0x25
+#define IOERR_BUFFER_SHORTAGE 0x28
+#define IOERR_DEFAULT 0x29
+#define IOERR_CNT 0x2A
+
+#define IOERR_DRVR_MASK 0x100
+#define IOERR_SLI_DOWN 0x101 /* ulpStatus - Driver defined */
+#define IOERR_SLI_BRESET 0x102
+#define IOERR_SLI_ABORTED 0x103
+} PARM_ERR;
+
+typedef union {
+ struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint8_t Rctl; /* R_CTL field */
+ uint8_t Type; /* TYPE field */
+ uint8_t Dfctl; /* DF_CTL field */
+ uint8_t Fctl; /* Bits 0-7 of IOCB word 5 */
+#else /* __LITTLE_ENDIAN_BITFIELD */
+ uint8_t Fctl; /* Bits 0-7 of IOCB word 5 */
+ uint8_t Dfctl; /* DF_CTL field */
+ uint8_t Type; /* TYPE field */
+ uint8_t Rctl; /* R_CTL field */
+#endif
+
+#define BC 0x02 /* Broadcast Received - Fctl */
+#define SI 0x04 /* Sequence Initiative */
+#define LA 0x08 /* Ignore Link Attention state */
+#define LS 0x80 /* Last Sequence */
+ } hcsw;
+ uint32_t reserved;
+} WORD5;
+
+/* IOCB Command template for a generic response */
+typedef struct {
+ uint32_t reserved[4];
+ PARM_ERR perr;
+} GENERIC_RSP;
+
+/* IOCB Command template for XMIT / XMIT_BCAST / RCV_SEQUENCE / XMIT_ELS */
+typedef struct {
+ struct ulp_bde xrsqbde[2];
+ uint32_t xrsqRo; /* Starting Relative Offset */
+ WORD5 w5; /* Header control/status word */
+} XR_SEQ_FIELDS;
+
+/* IOCB Command template for ELS_REQUEST */
+typedef struct {
+ struct ulp_bde elsReq;
+ struct ulp_bde elsRsp;
+
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t word4Rsvd:7;
+ uint32_t fl:1;
+ uint32_t myID:24;
+ uint32_t word5Rsvd:8;
+ uint32_t remoteID:24;
+#else /* __LITTLE_ENDIAN_BITFIELD */
+ uint32_t myID:24;
+ uint32_t fl:1;
+ uint32_t word4Rsvd:7;
+ uint32_t remoteID:24;
+ uint32_t word5Rsvd:8;
+#endif
+} ELS_REQUEST;
+
+/* IOCB Command template for RCV_ELS_REQ */
+typedef struct {
+ struct ulp_bde elsReq[2];
+ uint32_t parmRo;
+
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t word5Rsvd:8;
+ uint32_t remoteID:24;
+#else /* __LITTLE_ENDIAN_BITFIELD */
+ uint32_t remoteID:24;
+ uint32_t word5Rsvd:8;
+#endif
+} RCV_ELS_REQ;
+
+/* IOCB Command template for ABORT / CLOSE_XRI */
+typedef struct {
+ uint32_t rsvd[3];
+ uint32_t abortType;
+#define ABORT_TYPE_ABTX 0x00000000
+#define ABORT_TYPE_ABTS 0x00000001
+ uint32_t parm;
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint16_t abortContextTag; /* ulpContext from command to abort/close */
+ uint16_t abortIoTag; /* ulpIoTag from command to abort/close */
+#else /* __LITTLE_ENDIAN_BITFIELD */
+ uint16_t abortIoTag; /* ulpIoTag from command to abort/close */
+ uint16_t abortContextTag; /* ulpContext from command to abort/close */
+#endif
+} AC_XRI;
+
+/* IOCB Command template for ABORT_MXRI64 */
+typedef struct {
+ uint32_t rsvd[3];
+ uint32_t abortType;
+ uint32_t parm;
+ uint32_t iotag32;
+} A_MXRI64;
+
+/* IOCB Command template for GET_RPI */
+typedef struct {
+ uint32_t rsvd[4];
+ uint32_t parmRo;
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t word5Rsvd:8;
+ uint32_t remoteID:24;
+#else /* __LITTLE_ENDIAN_BITFIELD */
+ uint32_t remoteID:24;
+ uint32_t word5Rsvd:8;
+#endif
+} GET_RPI;
+
+/* IOCB Command template for all FCP Initiator commands */
+typedef struct {
+ struct ulp_bde fcpi_cmnd; /* FCP_CMND payload descriptor */
+ struct ulp_bde fcpi_rsp; /* Rcv buffer */
+ uint32_t fcpi_parm;
+ uint32_t fcpi_XRdy; /* transfer ready for IWRITE */
+} FCPI_FIELDS;
+
+/* IOCB Command template for all FCP Target commands */
+typedef struct {
+ struct ulp_bde fcpt_Buffer[2]; /* FCP_CMND payload descriptor */
+ uint32_t fcpt_Offset;
+ uint32_t fcpt_Length; /* transfer ready for IWRITE */
+} FCPT_FIELDS;
+
+/* SLI-2 IOCB structure definitions */
+
+/* IOCB Command template for 64 bit XMIT / XMIT_BCAST / XMIT_ELS */
+typedef struct {
+ ULP_BDL bdl;
+ uint32_t xrsqRo; /* Starting Relative Offset */
+ WORD5 w5; /* Header control/status word */
+} XMT_SEQ_FIELDS64;
+
+/* IOCB Command template for 64 bit RCV_SEQUENCE64 */
+typedef struct {
+ struct ulp_bde64 rcvBde;
+ uint32_t rsvd1;
+ uint32_t xrsqRo; /* Starting Relative Offset */
+ WORD5 w5; /* Header control/status word */
+} RCV_SEQ_FIELDS64;
+
+/* IOCB Command template for ELS_REQUEST64 */
+typedef struct {
+ ULP_BDL bdl;
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t word4Rsvd:7;
+ uint32_t fl:1;
+ uint32_t myID:24;
+ uint32_t word5Rsvd:8;
+ uint32_t remoteID:24;
+#else /* __LITTLE_ENDIAN_BITFIELD */
+ uint32_t myID:24;
+ uint32_t fl:1;
+ uint32_t word4Rsvd:7;
+ uint32_t remoteID:24;
+ uint32_t word5Rsvd:8;
+#endif
+} ELS_REQUEST64;
+
+/* IOCB Command template for GEN_REQUEST64 */
+typedef struct {
+ ULP_BDL bdl;
+ uint32_t xrsqRo; /* Starting Relative Offset */
+ WORD5 w5; /* Header control/status word */
+} GEN_REQUEST64;
+
+/* IOCB Command template for RCV_ELS_REQ64 */
+typedef struct {
+ struct ulp_bde64 elsReq;
+ uint32_t rcvd1;
+ uint32_t parmRo;
+
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t word5Rsvd:8;
+ uint32_t remoteID:24;
+#else /* __LITTLE_ENDIAN_BITFIELD */
+ uint32_t remoteID:24;
+ uint32_t word5Rsvd:8;
+#endif
+} RCV_ELS_REQ64;
+
+/* IOCB Command template for all 64 bit FCP Initiator commands */
+typedef struct {
+ ULP_BDL bdl;
+ uint32_t fcpi_parm;
+ uint32_t fcpi_XRdy; /* transfer ready for IWRITE */
+} FCPI_FIELDS64;
+
+/* IOCB Command template for all 64 bit FCP Target commands */
+typedef struct {
+ ULP_BDL bdl;
+ uint32_t fcpt_Offset;
+ uint32_t fcpt_Length; /* transfer ready for IWRITE */
+} FCPT_FIELDS64;
+
+typedef struct _IOCB { /* IOCB structure */
+ union {
+ GENERIC_RSP grsp; /* Generic response */
+ XR_SEQ_FIELDS xrseq; /* XMIT / BCAST / RCV_SEQUENCE cmd */
+ struct ulp_bde cont[3]; /* up to 3 continuation bdes */
+ RCV_ELS_REQ rcvels; /* RCV_ELS_REQ template */
+ AC_XRI acxri; /* ABORT / CLOSE_XRI template */
+ A_MXRI64 amxri; /* abort multiple xri command overlay */
+ GET_RPI getrpi; /* GET_RPI template */
+ FCPI_FIELDS fcpi; /* FCP Initiator template */
+ FCPT_FIELDS fcpt; /* FCP target template */
+
+ /* SLI-2 structures */
+
+ struct ulp_bde64 cont64[2]; /* up to 2 64 bit continuation
+ bde_64s */
+ ELS_REQUEST64 elsreq64; /* ELS_REQUEST template */
+ GEN_REQUEST64 genreq64; /* GEN_REQUEST template */
+ RCV_ELS_REQ64 rcvels64; /* RCV_ELS_REQ template */
+ XMT_SEQ_FIELDS64 xseq64; /* XMIT / BCAST cmd */
+ FCPI_FIELDS64 fcpi64; /* FCP 64 bit Initiator template */
+ FCPT_FIELDS64 fcpt64; /* FCP 64 bit target template */
+
+ uint32_t ulpWord[IOCB_WORD_SZ - 2]; /* generic 6 'words' */
+ } un;
+ union {
+ struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint16_t ulpContext; /* High order bits word 6 */
+ uint16_t ulpIoTag; /* Low order bits word 6 */
+#else /* __LITTLE_ENDIAN_BITFIELD */
+ uint16_t ulpIoTag; /* Low order bits word 6 */
+ uint16_t ulpContext; /* High order bits word 6 */
+#endif
+ } t1;
+ struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint16_t ulpContext; /* High order bits word 6 */
+ uint16_t ulpIoTag1:2; /* Low order bits word 6 */
+ uint16_t ulpIoTag0:14; /* Low order bits word 6 */
+#else /* __LITTLE_ENDIAN_BITFIELD */
+ uint16_t ulpIoTag0:14; /* Low order bits word 6 */
+ uint16_t ulpIoTag1:2; /* Low order bits word 6 */
+ uint16_t ulpContext; /* High order bits word 6 */
+#endif
+ } t2;
+ } un1;
+#define ulpContext un1.t1.ulpContext
+#define ulpIoTag un1.t1.ulpIoTag
+#define ulpIoTag0 un1.t2.ulpIoTag0
+
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t ulpTimeout:8;
+ uint32_t ulpXS:1;
+ uint32_t ulpFCP2Rcvy:1;
+ uint32_t ulpPU:2;
+ uint32_t ulpIr:1;
+ uint32_t ulpClass:3;
+ uint32_t ulpCommand:8;
+ uint32_t ulpStatus:4;
+ uint32_t ulpBdeCount:2;
+ uint32_t ulpLe:1;
+ uint32_t ulpOwner:1; /* Low order bit word 7 */
+#else /* __LITTLE_ENDIAN_BITFIELD */
+ uint32_t ulpOwner:1; /* Low order bit word 7 */
+ uint32_t ulpLe:1;
+ uint32_t ulpBdeCount:2;
+ uint32_t ulpStatus:4;
+ uint32_t ulpCommand:8;
+ uint32_t ulpClass:3;
+ uint32_t ulpIr:1;
+ uint32_t ulpPU:2;
+ uint32_t ulpFCP2Rcvy:1;
+ uint32_t ulpXS:1;
+ uint32_t ulpTimeout:8;
+#endif
+
+#define PARM_UNUSED 0 /* PU field (Word 4) not used */
+#define PARM_REL_OFF 1 /* PU field (Word 4) = R. O. */
+#define PARM_READ_CHECK 2 /* PU field (Word 4) = Data Transfer Length */
+#define CLASS1 0 /* Class 1 */
+#define CLASS2 1 /* Class 2 */
+#define CLASS3 2 /* Class 3 */
+#define CLASS_FCP_INTERMIX 7 /* FCP Data->Cls 1, all else->Cls 2 */
+
+#define IOSTAT_SUCCESS 0x0 /* ulpStatus - HBA defined */
+#define IOSTAT_FCP_RSP_ERROR 0x1
+#define IOSTAT_REMOTE_STOP 0x2
+#define IOSTAT_LOCAL_REJECT 0x3
+#define IOSTAT_NPORT_RJT 0x4
+#define IOSTAT_FABRIC_RJT 0x5
+#define IOSTAT_NPORT_BSY 0x6
+#define IOSTAT_FABRIC_BSY 0x7
+#define IOSTAT_INTERMED_RSP 0x8
+#define IOSTAT_LS_RJT 0x9
+#define IOSTAT_BA_RJT 0xA
+#define IOSTAT_RSVD1 0xB
+#define IOSTAT_RSVD2 0xC
+#define IOSTAT_RSVD3 0xD
+#define IOSTAT_RSVD4 0xE
+#define IOSTAT_RSVD5 0xF
+#define IOSTAT_DRIVER_REJECT 0x10 /* ulpStatus - Driver defined */
+#define IOSTAT_DEFAULT 0xF /* Same as rsvd5 for now */
+#define IOSTAT_CNT 0x11
+
+} IOCB_t;
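+
+/*
+ * Illustrative sketch (not from the original source): on completion,
+ * ulpStatus selects how the IOCB payload is interpreted; a local
+ * reject carries its detail in the PARM_ERR overlay of word 4, e.g.:
+ *
+ *	if (iocb->ulpStatus == IOSTAT_LOCAL_REJECT &&
+ *	    iocb->un.grsp.perr.statLocalError == IOERR_LINK_DOWN)
+ *		(defer and retry once the link comes back up)
+ */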
+
+
+#define SLI1_SLIM_SIZE (4 * 1024)
+
+/* Up to 498 IOCBs will fit into 16k
+ * 256 (MAILBOX_t) + 140 (PCB_t) + (32 (IOCB_t) * 498) < 16384
+ */
+#define SLI2_SLIM_SIZE (16 * 1024)
+
+/* Maximum IOCBs that will fit in SLI2 slim */
+#define MAX_SLI2_IOCB 498
+
+struct lpfc_sli2_slim {
+ MAILBOX_t mbx;
+ PCB_t pcb;
+ IOCB_t IOCBs[MAX_SLI2_IOCB];
+};
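+
+/*
+ * Illustrative compile-time check (not in the original driver), using
+ * the classic negative-array-size trick to confirm that the mailbox,
+ * PCB and IOCB regions really fit in SLI2 SLIM:
+ *
+ *	typedef char lpfc_slim2_fits[(sizeof(struct lpfc_sli2_slim) <=
+ *				      SLI2_SLIM_SIZE) ? 1 : -1];
+ */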
+
+/*******************************************************************
+This routine checks the PCI device ID to allow special handling for LC HBAs.
+
+Parameters:
+device : struct pci_dev 's device field
+
+return 1 => TRUE
+ 0 => FALSE
+ *******************************************************************/
+static inline int
+lpfc_is_LC_HBA(unsigned short device)
+{
+ if ((device == PCI_DEVICE_ID_TFLY) ||
+ (device == PCI_DEVICE_ID_PFLY) ||
+ (device == PCI_DEVICE_ID_LP101) ||
+ (device == PCI_DEVICE_ID_BMID) ||
+ (device == PCI_DEVICE_ID_BSMB) ||
+ (device == PCI_DEVICE_ID_ZMID) ||
+ (device == PCI_DEVICE_ID_ZSMB) ||
+ (device == PCI_DEVICE_ID_RFLY))
+ return 1;
+ else
+ return 0;
+}
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
new file mode 100644
index 00000000000..233c912b63c
--- /dev/null
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -0,0 +1,1739 @@
+/*******************************************************************
+ * This file is part of the Emulex Linux Device Driver for *
+ * Enterprise Fibre Channel Host Bus Adapters. *
+ * Refer to the README file included with this package for *
+ * driver version and adapter support. *
+ * Copyright (C) 2004 Emulex Corporation. *
+ * www.emulex.com *
+ * *
+ * This program is free software; you can redistribute it and/or *
+ * modify it under the terms of the GNU General Public License *
+ * as published by the Free Software Foundation; either version 2 *
+ * of the License, or (at your option) any later version. *
+ * *
+ * This program is distributed in the hope that it will be useful, *
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of *
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
+ * GNU General Public License for more details, a copy of which *
+ * can be found in the file COPYING included with this package. *
+ *******************************************************************/
+
+/*
+ * $Id: lpfc_init.c 1.233 2005/04/13 11:59:09EDT sf_support Exp $
+ */
+
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/idr.h>
+#include <linux/interrupt.h>
+#include <linux/kthread.h>
+#include <linux/pci.h>
+#include <linux/spinlock.h>
+
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_transport_fc.h>
+
+#include "lpfc_hw.h"
+#include "lpfc_sli.h"
+#include "lpfc_disc.h"
+#include "lpfc_scsi.h"
+#include "lpfc.h"
+#include "lpfc_logmsg.h"
+#include "lpfc_crtn.h"
+#include "lpfc_version.h"
+
+static int lpfc_parse_vpd(struct lpfc_hba *, uint8_t *);
+static void lpfc_get_hba_model_desc(struct lpfc_hba *, uint8_t *, uint8_t *);
+static int lpfc_post_rcv_buf(struct lpfc_hba *);
+
+static struct scsi_transport_template *lpfc_transport_template = NULL;
+static DEFINE_IDR(lpfc_hba_index);
+
+/************************************************************************/
+/* */
+/* lpfc_config_port_prep */
+/* This routine will do LPFC initialization prior to the */
+/* CONFIG_PORT mailbox command. This will be initialized */
+/* as a SLI layer callback routine. */
+/* This routine returns 0 on success or -ERESTART if it wants */
+/* the SLI layer to reset the HBA and try again. Any */
+/* other return value indicates an error. */
+/* */
+/************************************************************************/
+int
+lpfc_config_port_prep(struct lpfc_hba * phba)
+{
+ lpfc_vpd_t *vp = &phba->vpd;
+ int i = 0, rc;
+ LPFC_MBOXQ_t *pmb;
+ MAILBOX_t *mb;
+ char *lpfc_vpd_data = NULL;
+ uint16_t offset = 0;
+ static char licensed[56] =
+ "key unlock for use with gnu public licensed code only\0";
+
+ pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!pmb) {
+ phba->hba_state = LPFC_HBA_ERROR;
+ return -ENOMEM;
+ }
+
+ mb = &pmb->mb;
+ phba->hba_state = LPFC_INIT_MBX_CMDS;
+
+ if (lpfc_is_LC_HBA(phba->pcidev->device)) {
+ uint32_t *ptext = (uint32_t *) licensed;
+
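+		/* Byte-swap the license key string into big-endian order
+		 * before it is copied into the READ_NVPARM mailbox below.
+		 */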
+ for (i = 0; i < 56; i += sizeof (uint32_t), ptext++)
+ *ptext = cpu_to_be32(*ptext);
+
+ lpfc_read_nv(phba, pmb);
+ memset((char*)mb->un.varRDnvp.rsvd3, 0,
+ sizeof (mb->un.varRDnvp.rsvd3));
+ memcpy((char*)mb->un.varRDnvp.rsvd3, licensed,
+ sizeof (licensed));
+
+ rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
+
+ if (rc != MBX_SUCCESS) {
+ lpfc_printf_log(phba,
+ KERN_ERR,
+ LOG_MBOX,
+ "%d:0324 Config Port initialization "
+ "error, mbxCmd x%x READ_NVPARM, "
+ "mbxStatus x%x\n",
+ phba->brd_no,
+ mb->mbxCommand, mb->mbxStatus);
+ mempool_free(pmb, phba->mbox_mem_pool);
+ return -ERESTART;
+ }
+ memcpy(phba->wwnn, (char *)mb->un.varRDnvp.nodename,
+ sizeof (mb->un.varRDnvp.nodename));
+ }
+
+ /* Setup and issue mailbox READ REV command */
+ lpfc_read_rev(phba, pmb);
+ rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
+ if (rc != MBX_SUCCESS) {
+ lpfc_printf_log(phba,
+ KERN_ERR,
+ LOG_INIT,
+ "%d:0439 Adapter failed to init, mbxCmd x%x "
+ "READ_REV, mbxStatus x%x\n",
+ phba->brd_no,
+ mb->mbxCommand, mb->mbxStatus);
+ mempool_free( pmb, phba->mbox_mem_pool);
+ return -ERESTART;
+ }
+
+ /* The HBA's current state is provided by the ProgType and rr fields.
+ * Read and check the value of these fields before continuing to config
+ * this port.
+ */
+ if (mb->un.varRdRev.rr == 0 || mb->un.varRdRev.un.b.ProgType != 2) {
+ /* Old firmware */
+ vp->rev.rBit = 0;
+ lpfc_printf_log(phba,
+ KERN_ERR,
+ LOG_INIT,
+ "%d:0440 Adapter failed to init, mbxCmd x%x "
+				"READ_REV detected outdated firmware "
+ "Data: x%x\n",
+ phba->brd_no,
+ mb->mbxCommand, 0);
+ mempool_free(pmb, phba->mbox_mem_pool);
+ return -ERESTART;
+ } else {
+ vp->rev.rBit = 1;
+ vp->rev.sli1FwRev = mb->un.varRdRev.sli1FwRev;
+ memcpy(vp->rev.sli1FwName,
+ (char*)mb->un.varRdRev.sli1FwName, 16);
+ vp->rev.sli2FwRev = mb->un.varRdRev.sli2FwRev;
+ memcpy(vp->rev.sli2FwName,
+ (char *)mb->un.varRdRev.sli2FwName, 16);
+ }
+
+ /* Save information as VPD data */
+ vp->rev.biuRev = mb->un.varRdRev.biuRev;
+ vp->rev.smRev = mb->un.varRdRev.smRev;
+ vp->rev.smFwRev = mb->un.varRdRev.un.smFwRev;
+ vp->rev.endecRev = mb->un.varRdRev.endecRev;
+ vp->rev.fcphHigh = mb->un.varRdRev.fcphHigh;
+ vp->rev.fcphLow = mb->un.varRdRev.fcphLow;
+ vp->rev.feaLevelHigh = mb->un.varRdRev.feaLevelHigh;
+ vp->rev.feaLevelLow = mb->un.varRdRev.feaLevelLow;
+ vp->rev.postKernRev = mb->un.varRdRev.postKernRev;
+ vp->rev.opFwRev = mb->un.varRdRev.opFwRev;
+
+ if (lpfc_is_LC_HBA(phba->pcidev->device))
+ memcpy(phba->RandomData, (char *)&mb->un.varWords[24],
+ sizeof (phba->RandomData));
+
+ /* Get the default values for Model Name and Description */
+ lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);
+
+ /* Get adapter VPD information */
+ pmb->context2 = kmalloc(DMP_RSP_SIZE, GFP_KERNEL);
+ if (!pmb->context2)
+ goto out_free_mbox;
+ lpfc_vpd_data = kmalloc(DMP_VPD_SIZE, GFP_KERNEL);
+ if (!lpfc_vpd_data)
+ goto out_free_context2;
+
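+	/* Dump the VPD region a chunk at a time until the adapter
+	 * reports a zero word count.
+	 */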
+ do {
+ lpfc_dump_mem(phba, pmb, offset);
+ rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
+
+ if (rc != MBX_SUCCESS) {
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "%d:0441 VPD not present on adapter, "
+ "mbxCmd x%x DUMP VPD, mbxStatus x%x\n",
+ phba->brd_no,
+ mb->mbxCommand, mb->mbxStatus);
+ kfree(lpfc_vpd_data);
+ lpfc_vpd_data = NULL;
+ break;
+ }
+
+ lpfc_sli_pcimem_bcopy(pmb->context2, lpfc_vpd_data + offset,
+ mb->un.varDmp.word_cnt);
+ offset += mb->un.varDmp.word_cnt;
+ } while (mb->un.varDmp.word_cnt);
+ lpfc_parse_vpd(phba, lpfc_vpd_data);
+
+ kfree(lpfc_vpd_data);
+out_free_context2:
+ kfree(pmb->context2);
+out_free_mbox:
+ mempool_free(pmb, phba->mbox_mem_pool);
+ return 0;
+}
+
+/************************************************************************/
+/* */
+/* lpfc_config_port_post */
+/* This routine will do LPFC initialization after the */
+/* CONFIG_PORT mailbox command. This will be initialized */
+/* as a SLI layer callback routine. */
+/* This routine returns 0 on success. Any other return value */
+/* indicates an error. */
+/* */
+/************************************************************************/
+int
+lpfc_config_port_post(struct lpfc_hba * phba)
+{
+ LPFC_MBOXQ_t *pmb;
+ MAILBOX_t *mb;
+ struct lpfc_dmabuf *mp;
+ struct lpfc_sli *psli = &phba->sli;
+ uint32_t status, timeout;
+ int i, j, rc;
+
+ pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!pmb) {
+ phba->hba_state = LPFC_HBA_ERROR;
+ return -ENOMEM;
+ }
+ mb = &pmb->mb;
+
+ lpfc_config_link(phba, pmb);
+ rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
+ if (rc != MBX_SUCCESS) {
+ lpfc_printf_log(phba,
+ KERN_ERR,
+ LOG_INIT,
+ "%d:0447 Adapter failed init, mbxCmd x%x "
+ "CONFIG_LINK mbxStatus x%x\n",
+ phba->brd_no,
+ mb->mbxCommand, mb->mbxStatus);
+ phba->hba_state = LPFC_HBA_ERROR;
+ mempool_free( pmb, phba->mbox_mem_pool);
+ return -EIO;
+ }
+
+ /* Get login parameters for NID. */
+ lpfc_read_sparam(phba, pmb);
+ if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
+ lpfc_printf_log(phba,
+ KERN_ERR,
+ LOG_INIT,
+ "%d:0448 Adapter failed init, mbxCmd x%x "
+ "READ_SPARM mbxStatus x%x\n",
+ phba->brd_no,
+ mb->mbxCommand, mb->mbxStatus);
+ phba->hba_state = LPFC_HBA_ERROR;
+ mp = (struct lpfc_dmabuf *) pmb->context1;
+ mempool_free( pmb, phba->mbox_mem_pool);
+ lpfc_mbuf_free(phba, mp->virt, mp->phys);
+ kfree(mp);
+ return -EIO;
+ }
+
+ mp = (struct lpfc_dmabuf *) pmb->context1;
+
+ memcpy(&phba->fc_sparam, mp->virt, sizeof (struct serv_parm));
+ lpfc_mbuf_free(phba, mp->virt, mp->phys);
+ kfree(mp);
+ pmb->context1 = NULL;
+
+ memcpy(&phba->fc_nodename, &phba->fc_sparam.nodeName,
+ sizeof (struct lpfc_name));
+ memcpy(&phba->fc_portname, &phba->fc_sparam.portName,
+ sizeof (struct lpfc_name));
+ /* If no serial number in VPD data, use low 6 bytes of WWNN */
+ /* This should be consolidated into parse_vpd ? - mr */
+ if (phba->SerialNumber[0] == 0) {
+ uint8_t *outptr;
+
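+		/* Convert each nibble of the low 6 IEEE bytes to a
+		 * lowercase hex digit, giving a 12 character serial number.
+		 */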
+ outptr = (uint8_t *) & phba->fc_nodename.IEEE[0];
+ for (i = 0; i < 12; i++) {
+ status = *outptr++;
+ j = ((status & 0xf0) >> 4);
+ if (j <= 9)
+ phba->SerialNumber[i] =
+ (char)((uint8_t) 0x30 + (uint8_t) j);
+ else
+ phba->SerialNumber[i] =
+ (char)((uint8_t) 0x61 + (uint8_t) (j - 10));
+ i++;
+ j = (status & 0xf);
+ if (j <= 9)
+ phba->SerialNumber[i] =
+ (char)((uint8_t) 0x30 + (uint8_t) j);
+ else
+ phba->SerialNumber[i] =
+ (char)((uint8_t) 0x61 + (uint8_t) (j - 10));
+ }
+ }
+
+ /* This should turn on DELAYED ABTS for ELS timeouts */
+ lpfc_set_slim(phba, pmb, 0x052198, 0x1);
+ if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
+ phba->hba_state = LPFC_HBA_ERROR;
+ mempool_free( pmb, phba->mbox_mem_pool);
+ return -EIO;
+ }
+
+
+ lpfc_read_config(phba, pmb);
+ if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
+ lpfc_printf_log(phba,
+ KERN_ERR,
+ LOG_INIT,
+ "%d:0453 Adapter failed to init, mbxCmd x%x "
+ "READ_CONFIG, mbxStatus x%x\n",
+ phba->brd_no,
+ mb->mbxCommand, mb->mbxStatus);
+ phba->hba_state = LPFC_HBA_ERROR;
+ mempool_free( pmb, phba->mbox_mem_pool);
+ return -EIO;
+ }
+
+ /* Reset the DFT_HBA_Q_DEPTH to the max xri */
+ if (phba->cfg_hba_queue_depth > (mb->un.varRdConfig.max_xri+1))
+ phba->cfg_hba_queue_depth =
+ mb->un.varRdConfig.max_xri + 1;
+
+ phba->lmt = mb->un.varRdConfig.lmt;
+	/* If the HBA is not 4GB or 2GB capable, don't let the
+	   configured link speed ask for more than the HBA supports */
+ if ((((phba->lmt & LMT_4250_10bit) != LMT_4250_10bit) &&
+ (phba->cfg_link_speed > LINK_SPEED_2G)) ||
+ (((phba->lmt & LMT_2125_10bit) != LMT_2125_10bit) &&
+ (phba->cfg_link_speed > LINK_SPEED_1G))) {
+		/* Reset link speed to auto: 1G/2G HBA cfg'd for a higher speed */
+ lpfc_printf_log(phba,
+ KERN_WARNING,
+ LOG_LINK_EVENT,
+ "%d:1302 Invalid speed for this board: "
+ "Reset link speed to auto: x%x\n",
+ phba->brd_no,
+ phba->cfg_link_speed);
+ phba->cfg_link_speed = LINK_SPEED_AUTO;
+ }
+
+ phba->hba_state = LPFC_LINK_DOWN;
+
+ /* Only process IOCBs on ring 0 till hba_state is READY */
+ if (psli->ring[psli->ip_ring].cmdringaddr)
+ psli->ring[psli->ip_ring].flag |= LPFC_STOP_IOCB_EVENT;
+ if (psli->ring[psli->fcp_ring].cmdringaddr)
+ psli->ring[psli->fcp_ring].flag |= LPFC_STOP_IOCB_EVENT;
+ if (psli->ring[psli->next_ring].cmdringaddr)
+ psli->ring[psli->next_ring].flag |= LPFC_STOP_IOCB_EVENT;
+
+ /* Post receive buffers for desired rings */
+ lpfc_post_rcv_buf(phba);
+
+ /* Enable appropriate host interrupts */
+ spin_lock_irq(phba->host->host_lock);
+ status = readl(phba->HCregaddr);
+ status |= HC_MBINT_ENA | HC_ERINT_ENA | HC_LAINT_ENA;
+ if (psli->num_rings > 0)
+ status |= HC_R0INT_ENA;
+ if (psli->num_rings > 1)
+ status |= HC_R1INT_ENA;
+ if (psli->num_rings > 2)
+ status |= HC_R2INT_ENA;
+ if (psli->num_rings > 3)
+ status |= HC_R3INT_ENA;
+
+ writel(status, phba->HCregaddr);
+ readl(phba->HCregaddr); /* flush */
+ spin_unlock_irq(phba->host->host_lock);
+
+ /*
+ * Setup the ring 0 (els) timeout handler
+ */
+ timeout = phba->fc_ratov << 1;
+ phba->els_tmofunc.expires = jiffies + HZ * timeout;
+ add_timer(&phba->els_tmofunc);
+
+ lpfc_init_link(phba, pmb, phba->cfg_topology, phba->cfg_link_speed);
+ pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+ if (lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT) != MBX_SUCCESS) {
+ lpfc_printf_log(phba,
+ KERN_ERR,
+ LOG_INIT,
+ "%d:0454 Adapter failed to init, mbxCmd x%x "
+ "INIT_LINK, mbxStatus x%x\n",
+ phba->brd_no,
+ mb->mbxCommand, mb->mbxStatus);
+
+ /* Clear all interrupt enable conditions */
+ writel(0, phba->HCregaddr);
+ readl(phba->HCregaddr); /* flush */
+ /* Clear all pending interrupts */
+ writel(0xffffffff, phba->HAregaddr);
+ readl(phba->HAregaddr); /* flush */
+
+ phba->hba_state = LPFC_HBA_ERROR;
+ mempool_free(pmb, phba->mbox_mem_pool);
+ return -EIO;
+ }
+ /* MBOX buffer will be freed in mbox compl */
+
+ i = 0;
+ while ((phba->hba_state != LPFC_HBA_READY) ||
+ (phba->num_disc_nodes) || (phba->fc_prli_sent) ||
+ ((phba->fc_map_cnt == 0) && (i<2)) ||
+ (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE)) {
+ /* Check every second for 30 retries. */
+ i++;
+ if (i > 30) {
+ break;
+ }
+ if ((i >= 15) && (phba->hba_state <= LPFC_LINK_DOWN)) {
+ /* The link is down. Set linkdown timeout */
+ break;
+ }
+
+ /* Delay for 1 second to give discovery time to complete. */
+ msleep(1000);
+
+ }
+
+ /* Since num_disc_nodes keys off of PLOGI, delay a bit to let
+	 * any potential PRLIs flush through the SLI sub-system.
+ */
+ msleep(50);
+
+ return (0);
+}
+
+/************************************************************************/
+/* */
+/* lpfc_hba_down_prep */
+/* This routine will do LPFC uninitialization before the */
+/* HBA is reset when bringing down the SLI Layer. This will be */
+/* initialized as a SLI layer callback routine. */
+/* This routine returns 0 on success. Any other return value */
+/* indicates an error. */
+/* */
+/************************************************************************/
+int
+lpfc_hba_down_prep(struct lpfc_hba * phba)
+{
+ /* Disable interrupts */
+ writel(0, phba->HCregaddr);
+ readl(phba->HCregaddr); /* flush */
+
+ /* Cleanup potential discovery resources */
+ lpfc_els_flush_rscn(phba);
+ lpfc_els_flush_cmd(phba);
+ lpfc_disc_flush_list(phba);
+
+ return (0);
+}
+
+/************************************************************************/
+/* */
+/* lpfc_handle_eratt */
+/* This routine will handle processing a Host Attention */
+/* Error Status event. This will be initialized */
+/* as a SLI layer callback routine. */
+/* */
+/************************************************************************/
+void
+lpfc_handle_eratt(struct lpfc_hba * phba)
+{
+ struct lpfc_sli *psli = &phba->sli;
+ struct lpfc_sli_ring *pring;
+
+ /*
+	 * If a reset is sent to the HBA, restore the PCI configuration
+	 * registers.
+	 */
+	if (phba->hba_state == LPFC_INIT_START) {
+ mdelay(1);
+ readl(phba->HCregaddr); /* flush */
+ writel(0, phba->HCregaddr);
+ readl(phba->HCregaddr); /* flush */
+
+ /* Restore PCI cmd register */
+ pci_write_config_word(phba->pcidev,
+ PCI_COMMAND, phba->pci_cfg_value);
+ }
+
+ if (phba->work_hs & HS_FFER6) {
+ /* Re-establishing Link */
+ lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
+ "%d:1301 Re-establishing Link "
+ "Data: x%x x%x x%x\n",
+ phba->brd_no, phba->work_hs,
+ phba->work_status[0], phba->work_status[1]);
+ spin_lock_irq(phba->host->host_lock);
+ phba->fc_flag |= FC_ESTABLISH_LINK;
+ spin_unlock_irq(phba->host->host_lock);
+
+ /*
+		 * Firmware stops when it triggers an error attention with
+		 * HS_FFER6, which can cause I/Os to be dropped by the
+		 * firmware.  Error out the iocbs (I/Os) on the txcmplq and
+		 * let the SCSI layer retry them after the link is
+		 * re-established.
+ */
+ pring = &psli->ring[psli->fcp_ring];
+ lpfc_sli_abort_iocb_ring(phba, pring);
+
+
+ /*
+ * There was a firmware error. Take the hba offline and then
+ * attempt to restart it.
+ */
+ lpfc_offline(phba);
+ if (lpfc_online(phba) == 0) { /* Initialize the HBA */
+ mod_timer(&phba->fc_estabtmo, jiffies + HZ * 60);
+ return;
+ }
+ } else {
+ /* The if clause above forces this code path when the status
+		 * failure is a value other than FFER6, so lpfc_offline() is
+		 * not called twice.  This is the adapter hardware error path.
+ */
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "%d:0457 Adapter Hardware Error "
+ "Data: x%x x%x x%x\n",
+ phba->brd_no, phba->work_hs,
+ phba->work_status[0], phba->work_status[1]);
+
+ lpfc_offline(phba);
+
+ /*
+ * Restart all traffic to this host. Since the fc_transport
+ * block functions (future) were not called in lpfc_offline,
+ * don't call them here.
+ */
+ scsi_unblock_requests(phba->host);
+ }
+}
+
+/************************************************************************/
+/* */
+/* lpfc_handle_latt */
+/* This routine will handle processing a Host Attention */
+/* Link Status event. This will be initialized */
+/* as a SLI layer callback routine. */
+/* */
+/************************************************************************/
+void
+lpfc_handle_latt(struct lpfc_hba * phba)
+{
+ struct lpfc_sli *psli = &phba->sli;
+ LPFC_MBOXQ_t *pmb;
+ volatile uint32_t control;
+ struct lpfc_dmabuf *mp;
+ int rc = -ENOMEM;
+
+ pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!pmb)
+ goto lpfc_handle_latt_err_exit;
+
+ mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
+ if (!mp)
+ goto lpfc_handle_latt_free_pmb;
+
+ mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
+ if (!mp->virt)
+ goto lpfc_handle_latt_free_mp;
+
+ rc = -EIO;
+
+
+ psli->slistat.link_event++;
+ lpfc_read_la(phba, pmb, mp);
+ pmb->mbox_cmpl = lpfc_mbx_cmpl_read_la;
+ rc = lpfc_sli_issue_mbox (phba, pmb, (MBX_NOWAIT | MBX_STOP_IOCB));
+ if (rc == MBX_NOT_FINISHED)
+		goto lpfc_handle_latt_free_mbuf;
+
+ /* Clear Link Attention in HA REG */
+ spin_lock_irq(phba->host->host_lock);
+ writel(HA_LATT, phba->HAregaddr);
+ readl(phba->HAregaddr); /* flush */
+ spin_unlock_irq(phba->host->host_lock);
+
+ return;
+
+lpfc_handle_latt_free_mp:
+ kfree(mp);
+lpfc_handle_latt_free_pmb:
+ kfree(pmb);
+lpfc_handle_latt_err_exit:
+ /* Enable Link attention interrupts */
+ spin_lock_irq(phba->host->host_lock);
+ psli->sli_flag |= LPFC_PROCESS_LA;
+ control = readl(phba->HCregaddr);
+ control |= HC_LAINT_ENA;
+ writel(control, phba->HCregaddr);
+ readl(phba->HCregaddr); /* flush */
+
+ /* Clear Link Attention in HA REG */
+ writel(HA_LATT, phba->HAregaddr);
+ readl(phba->HAregaddr); /* flush */
+ spin_unlock_irq(phba->host->host_lock);
+ lpfc_linkdown(phba);
+ phba->hba_state = LPFC_HBA_ERROR;
+
+ /* The other case is an error from issue_mbox */
+ if (rc == -ENOMEM)
+ lpfc_printf_log(phba,
+ KERN_WARNING,
+ LOG_MBOX,
+ "%d:0300 READ_LA: no buffers\n",
+ phba->brd_no);
+
+ return;
+}
+
+/************************************************************************/
+/* */
+/* lpfc_parse_vpd */
+/* This routine will parse the VPD data */
+/* */
+/************************************************************************/
+static int
+lpfc_parse_vpd(struct lpfc_hba * phba, uint8_t * vpd)
+{
+ uint8_t lenlo, lenhi;
+ uint32_t Length;
+ int i, j;
+ int finished = 0;
+ int index = 0;
+
+ if (!vpd)
+ return 0;
+
+ /* Vital Product */
+ lpfc_printf_log(phba,
+ KERN_INFO,
+ LOG_INIT,
+ "%d:0455 Vital Product Data: x%x x%x x%x x%x\n",
+ phba->brd_no,
+ (uint32_t) vpd[0], (uint32_t) vpd[1], (uint32_t) vpd[2],
+ (uint32_t) vpd[3]);
+ do {
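+		/* PCI VPD resource tags: 0x82 = Identifier String,
+		 * 0x90 = read-only data (VPD-R), 0x78 = end tag.
+		 * Keyword SN is the serial number; V1-V4 carry the
+		 * Emulex model description, model name, program type
+		 * and port number.
+		 */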
+ switch (vpd[index]) {
+ case 0x82:
+ index += 1;
+ lenlo = vpd[index];
+ index += 1;
+ lenhi = vpd[index];
+ index += 1;
+ i = ((((unsigned short)lenhi) << 8) + lenlo);
+ index += i;
+ break;
+ case 0x90:
+ index += 1;
+ lenlo = vpd[index];
+ index += 1;
+ lenhi = vpd[index];
+ index += 1;
+ Length = ((((unsigned short)lenhi) << 8) + lenlo);
+
+ while (Length > 0) {
+ /* Look for Serial Number */
+ if ((vpd[index] == 'S') && (vpd[index+1] == 'N')) {
+ index += 2;
+ i = vpd[index];
+ index += 1;
+ j = 0;
+ Length -= (3+i);
+ while(i--) {
+ phba->SerialNumber[j++] = vpd[index++];
+ if (j == 31)
+ break;
+ }
+ phba->SerialNumber[j] = 0;
+ continue;
+ }
+ else if ((vpd[index] == 'V') && (vpd[index+1] == '1')) {
+ phba->vpd_flag |= VPD_MODEL_DESC;
+ index += 2;
+ i = vpd[index];
+ index += 1;
+ j = 0;
+ Length -= (3+i);
+ while(i--) {
+ phba->ModelDesc[j++] = vpd[index++];
+ if (j == 255)
+ break;
+ }
+ phba->ModelDesc[j] = 0;
+ continue;
+ }
+ else if ((vpd[index] == 'V') && (vpd[index+1] == '2')) {
+ phba->vpd_flag |= VPD_MODEL_NAME;
+ index += 2;
+ i = vpd[index];
+ index += 1;
+ j = 0;
+ Length -= (3+i);
+ while(i--) {
+ phba->ModelName[j++] = vpd[index++];
+ if (j == 79)
+ break;
+ }
+ phba->ModelName[j] = 0;
+ continue;
+ }
+ else if ((vpd[index] == 'V') && (vpd[index+1] == '3')) {
+ phba->vpd_flag |= VPD_PROGRAM_TYPE;
+ index += 2;
+ i = vpd[index];
+ index += 1;
+ j = 0;
+ Length -= (3+i);
+ while(i--) {
+ phba->ProgramType[j++] = vpd[index++];
+ if (j == 255)
+ break;
+ }
+ phba->ProgramType[j] = 0;
+ continue;
+ }
+ else if ((vpd[index] == 'V') && (vpd[index+1] == '4')) {
+ phba->vpd_flag |= VPD_PORT;
+ index += 2;
+ i = vpd[index];
+ index += 1;
+ j = 0;
+ Length -= (3+i);
+ while(i--) {
+ phba->Port[j++] = vpd[index++];
+ if (j == 19)
+ break;
+ }
+ phba->Port[j] = 0;
+ continue;
+ }
+ else {
+ index += 2;
+ i = vpd[index];
+ index += 1;
+ index += i;
+ Length -= (3 + i);
+ }
+ }
+ finished = 0;
+ break;
+ case 0x78:
+ finished = 1;
+ break;
+ default:
+ index ++;
+ break;
+ }
+ } while (!finished && (index < 108));
+
+ return(1);
+}
+
+static void
+lpfc_get_hba_model_desc(struct lpfc_hba * phba, uint8_t * mdp, uint8_t * descp)
+{
+ lpfc_vpd_t *vp;
+ uint32_t id;
+ char str[16];
+
+ vp = &phba->vpd;
+ pci_read_config_dword(phba->pcidev, PCI_VENDOR_ID, &id);
+
+ switch ((id >> 16) & 0xffff) {
+ case PCI_DEVICE_ID_SUPERFLY:
+ if (vp->rev.biuRev >= 1 && vp->rev.biuRev <= 3)
+ strcpy(str, "LP7000 1");
+ else
+ strcpy(str, "LP7000E 1");
+ break;
+ case PCI_DEVICE_ID_DRAGONFLY:
+ strcpy(str, "LP8000 1");
+ break;
+ case PCI_DEVICE_ID_CENTAUR:
+ if (FC_JEDEC_ID(vp->rev.biuRev) == CENTAUR_2G_JEDEC_ID)
+ strcpy(str, "LP9002 2");
+ else
+ strcpy(str, "LP9000 1");
+ break;
+ case PCI_DEVICE_ID_RFLY:
+ strcpy(str, "LP952 2");
+ break;
+ case PCI_DEVICE_ID_PEGASUS:
+ strcpy(str, "LP9802 2");
+ break;
+ case PCI_DEVICE_ID_THOR:
+ strcpy(str, "LP10000 2");
+ break;
+ case PCI_DEVICE_ID_VIPER:
+ strcpy(str, "LPX1000 10");
+ break;
+ case PCI_DEVICE_ID_PFLY:
+ strcpy(str, "LP982 2");
+ break;
+ case PCI_DEVICE_ID_TFLY:
+ strcpy(str, "LP1050 2");
+ break;
+ case PCI_DEVICE_ID_HELIOS:
+ strcpy(str, "LP11000 4");
+ break;
+ case PCI_DEVICE_ID_BMID:
+ strcpy(str, "LP1150 4");
+ break;
+ case PCI_DEVICE_ID_BSMB:
+ strcpy(str, "LP111 4");
+ break;
+ case PCI_DEVICE_ID_ZEPHYR:
+ strcpy(str, "LP11000e 4");
+ break;
+ case PCI_DEVICE_ID_ZMID:
+ strcpy(str, "LP1150e 4");
+ break;
+ case PCI_DEVICE_ID_ZSMB:
+ strcpy(str, "LP111e 4");
+ break;
+ case PCI_DEVICE_ID_LP101:
+ strcpy(str, "LP101 2");
+ break;
+ case PCI_DEVICE_ID_LP10000S:
+ strcpy(str, "LP10000-S 2");
+ break;
+ }
+ if (mdp)
+ sscanf(str, "%s", mdp);
+ if (descp)
+ sprintf(descp, "Emulex LightPulse %s Gigabit PCI Fibre "
+ "Channel Adapter", str);
+}
+
+/**************************************************/
+/* lpfc_post_buffer */
+/* */
+/*  This routine posts cnt buffers to the ring    */
+/*  with the QUE_RING_BUF_CN command; up to 2     */
+/*  buffers can be posted per command.            */
+/* Returns the number of buffers NOT posted. */
+/**************************************************/
+int
+lpfc_post_buffer(struct lpfc_hba * phba, struct lpfc_sli_ring * pring, int cnt,
+ int type)
+{
+ IOCB_t *icmd;
+ struct list_head *lpfc_iocb_list = &phba->lpfc_iocb_list;
+ struct lpfc_iocbq *iocb = NULL;
+ struct lpfc_dmabuf *mp1, *mp2;
+
+ cnt += pring->missbufcnt;
+
+ /* While there are buffers to post */
+ while (cnt > 0) {
+ /* Allocate buffer for command iocb */
+ spin_lock_irq(phba->host->host_lock);
+ list_remove_head(lpfc_iocb_list, iocb, struct lpfc_iocbq, list);
+ spin_unlock_irq(phba->host->host_lock);
+ if (iocb == NULL) {
+ pring->missbufcnt = cnt;
+ return cnt;
+ }
+ memset(iocb, 0, sizeof (struct lpfc_iocbq));
+ icmd = &iocb->iocb;
+
+ /* 2 buffers can be posted per command */
+ /* Allocate buffer to post */
+ mp1 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
+ if (mp1)
+ mp1->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
+ &mp1->phys);
+		if (!mp1 || !mp1->virt) {
+ if (mp1)
+ kfree(mp1);
+ spin_lock_irq(phba->host->host_lock);
+ list_add_tail(&iocb->list, lpfc_iocb_list);
+ spin_unlock_irq(phba->host->host_lock);
+ pring->missbufcnt = cnt;
+ return cnt;
+ }
+
+ INIT_LIST_HEAD(&mp1->list);
+ /* Allocate buffer to post */
+ if (cnt > 1) {
+ mp2 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
+ if (mp2)
+ mp2->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
+ &mp2->phys);
+			if (!mp2 || !mp2->virt) {
+ if (mp2)
+ kfree(mp2);
+ lpfc_mbuf_free(phba, mp1->virt, mp1->phys);
+ kfree(mp1);
+ spin_lock_irq(phba->host->host_lock);
+ list_add_tail(&iocb->list, lpfc_iocb_list);
+ spin_unlock_irq(phba->host->host_lock);
+ pring->missbufcnt = cnt;
+ return cnt;
+ }
+
+ INIT_LIST_HEAD(&mp2->list);
+ } else {
+ mp2 = NULL;
+ }
+
+ icmd->un.cont64[0].addrHigh = putPaddrHigh(mp1->phys);
+ icmd->un.cont64[0].addrLow = putPaddrLow(mp1->phys);
+ icmd->un.cont64[0].tus.f.bdeSize = FCELSSIZE;
+ icmd->ulpBdeCount = 1;
+ cnt--;
+ if (mp2) {
+ icmd->un.cont64[1].addrHigh = putPaddrHigh(mp2->phys);
+ icmd->un.cont64[1].addrLow = putPaddrLow(mp2->phys);
+ icmd->un.cont64[1].tus.f.bdeSize = FCELSSIZE;
+ cnt--;
+ icmd->ulpBdeCount = 2;
+ }
+
+ icmd->ulpCommand = CMD_QUE_RING_BUF64_CN;
+ icmd->ulpLe = 1;
+
+ spin_lock_irq(phba->host->host_lock);
+ if (lpfc_sli_issue_iocb(phba, pring, iocb, 0) == IOCB_ERROR) {
+ lpfc_mbuf_free(phba, mp1->virt, mp1->phys);
+ kfree(mp1);
+ cnt++;
+ if (mp2) {
+ lpfc_mbuf_free(phba, mp2->virt, mp2->phys);
+ kfree(mp2);
+ cnt++;
+ }
+ list_add_tail(&iocb->list, lpfc_iocb_list);
+ pring->missbufcnt = cnt;
+ spin_unlock_irq(phba->host->host_lock);
+ return cnt;
+ }
+ spin_unlock_irq(phba->host->host_lock);
+ lpfc_sli_ringpostbuf_put(phba, pring, mp1);
+ if (mp2) {
+ lpfc_sli_ringpostbuf_put(phba, pring, mp2);
+ }
+ }
+ pring->missbufcnt = 0;
+ return 0;
+}
+
+/************************************************************************/
+/* */
+/* lpfc_post_rcv_buf */
+/*   This routine posts initial rcv buffers to the configured rings    */
+/* */
+/************************************************************************/
+static int
+lpfc_post_rcv_buf(struct lpfc_hba * phba)
+{
+ struct lpfc_sli *psli = &phba->sli;
+
+ /* Ring 0, ELS / CT buffers */
+ lpfc_post_buffer(phba, &psli->ring[LPFC_ELS_RING], LPFC_BUF_RING0, 1);
+ /* Ring 2 - FCP no buffers needed */
+
+ return 0;
+}
+
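+/* Rotate the 32-bit value V left by N bits (the SHA-1 rotl primitive) */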
+#define S(N,V) (((V)<<(N))|((V)>>(32-(N))))
+
+/************************************************************************/
+/* */
+/* lpfc_sha_init */
+/* */
+/************************************************************************/
+static void
+lpfc_sha_init(uint32_t * HashResultPointer)
+{
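+	/* Standard SHA-1 initial hash values H0-H4 */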
+ HashResultPointer[0] = 0x67452301;
+ HashResultPointer[1] = 0xEFCDAB89;
+ HashResultPointer[2] = 0x98BADCFE;
+ HashResultPointer[3] = 0x10325476;
+ HashResultPointer[4] = 0xC3D2E1F0;
+}
+
+/************************************************************************/
+/* */
+/* lpfc_sha_iterate */
+/* */
+/************************************************************************/
+static void
+lpfc_sha_iterate(uint32_t * HashResultPointer, uint32_t * HashWorkingPointer)
+{
+ int t;
+ uint32_t TEMP;
+ uint32_t A, B, C, D, E;
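+
+	/* Expand the 16 input words into the 80-word SHA-1 message
+	 * schedule: W[t] = rotl1(W[t-3] ^ W[t-8] ^ W[t-14] ^ W[t-16]).
+	 */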
+ t = 16;
+ do {
+ HashWorkingPointer[t] =
+ S(1,
+ HashWorkingPointer[t - 3] ^ HashWorkingPointer[t -
+ 8] ^
+ HashWorkingPointer[t - 14] ^ HashWorkingPointer[t - 16]);
+ } while (++t <= 79);
+ t = 0;
+ A = HashResultPointer[0];
+ B = HashResultPointer[1];
+ C = HashResultPointer[2];
+ D = HashResultPointer[3];
+ E = HashResultPointer[4];
+
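+	/* 80 SHA-1 rounds using the standard round functions and constants */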
+ do {
+ if (t < 20) {
+ TEMP = ((B & C) | ((~B) & D)) + 0x5A827999;
+ } else if (t < 40) {
+ TEMP = (B ^ C ^ D) + 0x6ED9EBA1;
+ } else if (t < 60) {
+ TEMP = ((B & C) | (B & D) | (C & D)) + 0x8F1BBCDC;
+ } else {
+ TEMP = (B ^ C ^ D) + 0xCA62C1D6;
+ }
+ TEMP += S(5, A) + E + HashWorkingPointer[t];
+ E = D;
+ D = C;
+ C = S(30, B);
+ B = A;
+ A = TEMP;
+ } while (++t <= 79);
+
+ HashResultPointer[0] += A;
+ HashResultPointer[1] += B;
+ HashResultPointer[2] += C;
+ HashResultPointer[3] += D;
+ HashResultPointer[4] += E;
+
+}
+
+/************************************************************************/
+/* */
+/* lpfc_challenge_key */
+/* */
+/************************************************************************/
+static void
+lpfc_challenge_key(uint32_t * RandomChallenge, uint32_t * HashWorking)
+{
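+	/* Fold one word of the adapter's random challenge into the hash input */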
+ *HashWorking = (*RandomChallenge ^ *HashWorking);
+}
+
+/************************************************************************/
+/* */
+/* lpfc_hba_init */
+/* */
+/************************************************************************/
+void
+lpfc_hba_init(struct lpfc_hba *phba, uint32_t *hbainit)
+{
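+	/* Compute the response to the adapter's challenge: seed an 80-word
+	 * working buffer with the WWNN, XOR in the RandomData challenge
+	 * words, and run one SHA-1 pass; the digest is returned in hbainit.
+	 */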
+ int t;
+ uint32_t *HashWorking;
+ uint32_t *pwwnn = phba->wwnn;
+
+ HashWorking = kmalloc(80 * sizeof(uint32_t), GFP_KERNEL);
+ if (!HashWorking)
+ return;
+
+ memset(HashWorking, 0, (80 * sizeof(uint32_t)));
+ HashWorking[0] = HashWorking[78] = *pwwnn++;
+ HashWorking[1] = HashWorking[79] = *pwwnn;
+
+ for (t = 0; t < 7; t++)
+ lpfc_challenge_key(phba->RandomData + t, HashWorking + t);
+
+ lpfc_sha_init(hbainit);
+ lpfc_sha_iterate(hbainit, HashWorking);
+ kfree(HashWorking);
+}
+
+static void
+lpfc_cleanup(struct lpfc_hba * phba, uint32_t save_bind)
+{
+ struct lpfc_nodelist *ndlp, *next_ndlp;
+
+ /* clean up phba - lpfc specific */
+ lpfc_can_disctmo(phba);
+ list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_nlpunmap_list,
+ nlp_listp) {
+ lpfc_nlp_remove(phba, ndlp);
+ }
+
+ list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_nlpmap_list,
+ nlp_listp) {
+ lpfc_nlp_remove(phba, ndlp);
+ }
+
+ list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_unused_list,
+ nlp_listp) {
+ lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
+ }
+
+ list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_plogi_list,
+ nlp_listp) {
+ lpfc_nlp_remove(phba, ndlp);
+ }
+
+ list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_adisc_list,
+ nlp_listp) {
+ lpfc_nlp_remove(phba, ndlp);
+ }
+
+ list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_reglogin_list,
+ nlp_listp) {
+ lpfc_nlp_remove(phba, ndlp);
+ }
+
+ list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_prli_list,
+ nlp_listp) {
+ lpfc_nlp_remove(phba, ndlp);
+ }
+
+ list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_npr_list,
+ nlp_listp) {
+ lpfc_nlp_remove(phba, ndlp);
+ }
+
+ INIT_LIST_HEAD(&phba->fc_nlpmap_list);
+ INIT_LIST_HEAD(&phba->fc_nlpunmap_list);
+ INIT_LIST_HEAD(&phba->fc_unused_list);
+ INIT_LIST_HEAD(&phba->fc_plogi_list);
+ INIT_LIST_HEAD(&phba->fc_adisc_list);
+ INIT_LIST_HEAD(&phba->fc_reglogin_list);
+ INIT_LIST_HEAD(&phba->fc_prli_list);
+ INIT_LIST_HEAD(&phba->fc_npr_list);
+
+ phba->fc_map_cnt = 0;
+ phba->fc_unmap_cnt = 0;
+ phba->fc_plogi_cnt = 0;
+ phba->fc_adisc_cnt = 0;
+ phba->fc_reglogin_cnt = 0;
+ phba->fc_prli_cnt = 0;
+ phba->fc_npr_cnt = 0;
+ phba->fc_unused_cnt= 0;
+ return;
+}
+
+static void
+lpfc_establish_link_tmo(unsigned long ptr)
+{
+ struct lpfc_hba *phba = (struct lpfc_hba *)ptr;
+ unsigned long iflag;
+
+
+ /* Re-establishing Link, timer expired */
+ lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
+ "%d:1300 Re-establishing Link, timer expired "
+ "Data: x%x x%x\n",
+ phba->brd_no, phba->fc_flag, phba->hba_state);
+ spin_lock_irqsave(phba->host->host_lock, iflag);
+ phba->fc_flag &= ~FC_ESTABLISH_LINK;
+ spin_unlock_irqrestore(phba->host->host_lock, iflag);
+}
+
+static int
+lpfc_stop_timer(struct lpfc_hba * phba)
+{
+ struct lpfc_sli *psli = &phba->sli;
+
+ /* Instead of a timer, this has been converted to a
+	 * deferred processing list.
+ */
+ while (!list_empty(&phba->freebufList)) {
+
+ struct lpfc_dmabuf *mp = NULL;
+
+ list_remove_head((&phba->freebufList), mp,
+ struct lpfc_dmabuf, list);
+ if (mp) {
+ lpfc_mbuf_free(phba, mp->virt, mp->phys);
+ kfree(mp);
+ }
+ }
+
+ del_timer_sync(&phba->fc_estabtmo);
+ del_timer_sync(&phba->fc_disctmo);
+ del_timer_sync(&phba->fc_fdmitmo);
+ del_timer_sync(&phba->els_tmofunc);
+ psli = &phba->sli;
+ del_timer_sync(&psli->mbox_tmo);
+ return(1);
+}
+
+int
+lpfc_online(struct lpfc_hba * phba)
+{
+ if (!phba)
+ return 0;
+
+ if (!(phba->fc_flag & FC_OFFLINE_MODE))
+ return 0;
+
+ lpfc_printf_log(phba,
+ KERN_WARNING,
+ LOG_INIT,
+ "%d:0458 Bring Adapter online\n",
+ phba->brd_no);
+
+ if (!lpfc_sli_queue_setup(phba))
+ return 1;
+
+ if (lpfc_sli_hba_setup(phba)) /* Initialize the HBA */
+ return 1;
+
+ spin_lock_irq(phba->host->host_lock);
+ phba->fc_flag &= ~FC_OFFLINE_MODE;
+ spin_unlock_irq(phba->host->host_lock);
+
+ /*
+ * Restart all traffic to this host. Since the fc_transport block
+ * functions (future) were not called in lpfc_offline, don't call them
+ * here.
+ */
+ scsi_unblock_requests(phba->host);
+ return 0;
+}
+
+int
+lpfc_offline(struct lpfc_hba * phba)
+{
+ struct lpfc_sli_ring *pring;
+ struct lpfc_sli *psli;
+ unsigned long iflag;
+ int i = 0;
+
+ if (!phba)
+ return 0;
+
+ if (phba->fc_flag & FC_OFFLINE_MODE)
+ return 0;
+
+ /*
+ * Don't call the fc_transport block api (future). The device is
+ * going offline and causing a timer to fire in the midlayer is
+ * unproductive. Just block all new requests until the driver
+ * comes back online.
+ */
+ scsi_block_requests(phba->host);
+ psli = &phba->sli;
+ pring = &psli->ring[psli->fcp_ring];
+
+ lpfc_linkdown(phba);
+
+ /* The linkdown event takes 30 seconds to timeout. */
+ while (pring->txcmplq_cnt) {
+ mdelay(10);
+ if (i++ > 3000)
+ break;
+ }
+
+ /* stop all timers associated with this hba */
+ lpfc_stop_timer(phba);
+ phba->work_hba_events = 0;
+
+ lpfc_printf_log(phba,
+ KERN_WARNING,
+ LOG_INIT,
+ "%d:0460 Bring Adapter offline\n",
+ phba->brd_no);
+
+	/* Bring down the SLI Layer and cleanup.  The HBA is offline now. */
+ lpfc_sli_hba_down(phba);
+ lpfc_cleanup(phba, 1);
+ spin_lock_irqsave(phba->host->host_lock, iflag);
+ phba->fc_flag |= FC_OFFLINE_MODE;
+ spin_unlock_irqrestore(phba->host->host_lock, iflag);
+ return 0;
+}
+
+/******************************************************************************
+* Function name: lpfc_scsi_free
+*
+* Description: Called from lpfc_pci_remove_one to free internal driver resources
+*
+******************************************************************************/
+static int
+lpfc_scsi_free(struct lpfc_hba * phba)
+{
+ struct lpfc_scsi_buf *sb, *sb_next;
+ struct lpfc_iocbq *io, *io_next;
+
+ spin_lock_irq(phba->host->host_lock);
+ /* Release all the lpfc_scsi_bufs maintained by this host. */
+ list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list, list) {
+ list_del(&sb->list);
+ pci_pool_free(phba->lpfc_scsi_dma_buf_pool, sb->data,
+ sb->dma_handle);
+ kfree(sb);
+ phba->total_scsi_bufs--;
+ }
+
+ /* Release all the lpfc_iocbq entries maintained by this host. */
+ list_for_each_entry_safe(io, io_next, &phba->lpfc_iocb_list, list) {
+ list_del(&io->list);
+ kfree(io);
+ phba->total_iocbq_bufs--;
+ }
+
+ spin_unlock_irq(phba->host->host_lock);
+
+ return 0;
+}
+
+
+static int __devinit
+lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
+{
+ struct Scsi_Host *host;
+ struct lpfc_hba *phba;
+ struct lpfc_sli *psli;
+ struct lpfc_iocbq *iocbq_entry = NULL, *iocbq_next = NULL;
+ unsigned long bar0map_len, bar2map_len;
+ int error = -ENODEV, retval;
+ int i;
+ u64 wwname;
+
+ if (pci_enable_device(pdev))
+ goto out;
+ if (pci_request_regions(pdev, LPFC_DRIVER_NAME))
+ goto out_disable_device;
+
+ host = scsi_host_alloc(&lpfc_template,
+ sizeof (struct lpfc_hba) + sizeof (unsigned long));
+ if (!host)
+ goto out_release_regions;
+
+ phba = (struct lpfc_hba*)host->hostdata;
+ memset(phba, 0, sizeof (struct lpfc_hba));
+ phba->link_stats = (void *)&phba[1];
+ phba->host = host;
+
+ phba->fc_flag |= FC_LOADING;
+ phba->pcidev = pdev;
+
+ /* Assign an unused board number */
+ if (!idr_pre_get(&lpfc_hba_index, GFP_KERNEL))
+ goto out_put_host;
+
+ error = idr_get_new(&lpfc_hba_index, NULL, &phba->brd_no);
+ if (error)
+ goto out_put_host;
+
+ host->unique_id = phba->brd_no;
+
+ INIT_LIST_HEAD(&phba->ctrspbuflist);
+ INIT_LIST_HEAD(&phba->rnidrspbuflist);
+ INIT_LIST_HEAD(&phba->freebufList);
+
+ /* Initialize timers used by driver */
+ init_timer(&phba->fc_estabtmo);
+ phba->fc_estabtmo.function = lpfc_establish_link_tmo;
+ phba->fc_estabtmo.data = (unsigned long)phba;
+ init_timer(&phba->fc_disctmo);
+ phba->fc_disctmo.function = lpfc_disc_timeout;
+ phba->fc_disctmo.data = (unsigned long)phba;
+
+ init_timer(&phba->fc_fdmitmo);
+ phba->fc_fdmitmo.function = lpfc_fdmi_tmo;
+ phba->fc_fdmitmo.data = (unsigned long)phba;
+ init_timer(&phba->els_tmofunc);
+ phba->els_tmofunc.function = lpfc_els_timeout;
+ phba->els_tmofunc.data = (unsigned long)phba;
+ psli = &phba->sli;
+ init_timer(&psli->mbox_tmo);
+ psli->mbox_tmo.function = lpfc_mbox_timeout;
+ psli->mbox_tmo.data = (unsigned long)phba;
+
+ /*
+ * Get all the module params for configuring this host and then
+ * establish the host parameters.
+ */
+ lpfc_get_cfgparam(phba);
+
+ host->max_id = LPFC_MAX_TARGET;
+ host->max_lun = phba->cfg_max_luns;
+ host->this_id = -1;
+
+ /* Initialize all internally managed lists. */
+ INIT_LIST_HEAD(&phba->fc_nlpmap_list);
+ INIT_LIST_HEAD(&phba->fc_nlpunmap_list);
+ INIT_LIST_HEAD(&phba->fc_unused_list);
+ INIT_LIST_HEAD(&phba->fc_plogi_list);
+ INIT_LIST_HEAD(&phba->fc_adisc_list);
+ INIT_LIST_HEAD(&phba->fc_reglogin_list);
+ INIT_LIST_HEAD(&phba->fc_prli_list);
+ INIT_LIST_HEAD(&phba->fc_npr_list);
+
+
+ pci_set_master(pdev);
+ retval = pci_set_mwi(pdev);
+ if (retval)
+ dev_printk(KERN_WARNING, &pdev->dev,
+ "Warning: pci_set_mwi returned %d\n", retval);
+
+ if (pci_set_dma_mask(phba->pcidev, DMA_64BIT_MASK) != 0)
+ if (pci_set_dma_mask(phba->pcidev, DMA_32BIT_MASK) != 0)
+ goto out_idr_remove;
+
+ /*
+ * Get the bus address of Bar0 and Bar2 and the number of bytes
+ * required by each mapping.
+ */
+ phba->pci_bar0_map = pci_resource_start(phba->pcidev, 0);
+ bar0map_len = pci_resource_len(phba->pcidev, 0);
+
+ phba->pci_bar2_map = pci_resource_start(phba->pcidev, 2);
+ bar2map_len = pci_resource_len(phba->pcidev, 2);
+
+ /* Map HBA SLIM and Control Registers to a kernel virtual address. */
+ phba->slim_memmap_p = ioremap(phba->pci_bar0_map, bar0map_len);
+ phba->ctrl_regs_memmap_p = ioremap(phba->pci_bar2_map, bar2map_len);
+
+ /* Allocate memory for SLI-2 structures */
+ phba->slim2p = dma_alloc_coherent(&phba->pcidev->dev, SLI2_SLIM_SIZE,
+ &phba->slim2p_mapping, GFP_KERNEL);
+ if (!phba->slim2p)
+ goto out_iounmap;
+
+
+ /* Initialize the SLI Layer to run with lpfc HBAs. */
+ lpfc_sli_setup(phba);
+ lpfc_sli_queue_setup(phba);
+
+ error = lpfc_mem_alloc(phba);
+ if (error)
+ goto out_free_slim;
+
+ /* Initialize and populate the iocb list per host. */
+ INIT_LIST_HEAD(&phba->lpfc_iocb_list);
+ for (i = 0; i < LPFC_IOCB_LIST_CNT; i++) {
+ iocbq_entry = kmalloc(sizeof(struct lpfc_iocbq), GFP_KERNEL);
+ if (iocbq_entry == NULL) {
+ printk(KERN_ERR "%s: only allocated %d iocbs of "
+ "expected %d count. Unloading driver.\n",
+ __FUNCTION__, i, LPFC_IOCB_LIST_CNT);
+ error = -ENOMEM;
+ goto out_free_iocbq;
+ }
+
+ memset(iocbq_entry, 0, sizeof(struct lpfc_iocbq));
+ spin_lock_irq(phba->host->host_lock);
+ list_add(&iocbq_entry->list, &phba->lpfc_iocb_list);
+ phba->total_iocbq_bufs++;
+ spin_unlock_irq(phba->host->host_lock);
+ }
+
+ /* Initialize HBA structure */
+ phba->fc_edtov = FF_DEF_EDTOV;
+ phba->fc_ratov = FF_DEF_RATOV;
+ phba->fc_altov = FF_DEF_ALTOV;
+ phba->fc_arbtov = FF_DEF_ARBTOV;
+
+ INIT_LIST_HEAD(&phba->work_list);
+ phba->work_ha_mask = (HA_ERATT|HA_MBATT|HA_LATT);
+ phba->work_ha_mask |= (HA_RXMASK << (LPFC_ELS_RING * 4));
+
+ /* Startup the kernel thread for this host adapter. */
+ phba->worker_thread = kthread_run(lpfc_do_work, phba,
+ "lpfc_worker_%d", phba->brd_no);
+ if (IS_ERR(phba->worker_thread)) {
+ error = PTR_ERR(phba->worker_thread);
+ goto out_free_iocbq;
+ }
+
+ /* We can rely on a queue depth attribute only after SLI HBA setup */
+ host->can_queue = phba->cfg_hba_queue_depth - 10;
+
+ /* Tell the midlayer we support 16 byte commands */
+ host->max_cmd_len = 16;
+
+ /* Initialize the list of scsi buffers used by driver for scsi IO. */
+ INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list);
+
+ host->transportt = lpfc_transport_template;
+ host->hostdata[0] = (unsigned long)phba;
+ pci_set_drvdata(pdev, host);
+ error = scsi_add_host(host, &pdev->dev);
+ if (error)
+ goto out_kthread_stop;
+
+ error = lpfc_alloc_sysfs_attr(phba);
+ if (error)
+ goto out_kthread_stop;
+
+ error = request_irq(phba->pcidev->irq, lpfc_intr_handler, SA_SHIRQ,
+ LPFC_DRIVER_NAME, phba);
+ if (error) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "%d:0451 Enable interrupt handler failed\n",
+ phba->brd_no);
+ goto out_free_sysfs_attr;
+ }
+ phba->MBslimaddr = phba->slim_memmap_p;
+ phba->HAregaddr = phba->ctrl_regs_memmap_p + HA_REG_OFFSET;
+ phba->CAregaddr = phba->ctrl_regs_memmap_p + CA_REG_OFFSET;
+ phba->HSregaddr = phba->ctrl_regs_memmap_p + HS_REG_OFFSET;
+ phba->HCregaddr = phba->ctrl_regs_memmap_p + HC_REG_OFFSET;
+
+ error = lpfc_sli_hba_setup(phba);
+ if (error)
+ goto out_free_irq;
+
+ /*
+	 * Set fixed host attributes.
+	 * Must be done after lpfc_sli_hba_setup().
+ */
+
+ memcpy(&wwname, &phba->fc_nodename, sizeof(u64));
+ fc_host_node_name(host) = be64_to_cpu(wwname);
+ memcpy(&wwname, &phba->fc_portname, sizeof(u64));
+ fc_host_port_name(host) = be64_to_cpu(wwname);
+ fc_host_supported_classes(host) = FC_COS_CLASS3;
+
+ memset(fc_host_supported_fc4s(host), 0,
+ sizeof(fc_host_supported_fc4s(host)));
+ fc_host_supported_fc4s(host)[2] = 1;
+ fc_host_supported_fc4s(host)[7] = 1;
+
+ lpfc_get_hba_sym_node_name(phba, fc_host_symbolic_name(host));
+
+ fc_host_supported_speeds(host) = 0;
+ switch (FC_JEDEC_ID(phba->vpd.rev.biuRev)) {
+ case VIPER_JEDEC_ID:
+ fc_host_supported_speeds(host) |= FC_PORTSPEED_10GBIT;
+ break;
+ case HELIOS_JEDEC_ID:
+ fc_host_supported_speeds(host) |= FC_PORTSPEED_4GBIT;
+ /* Fall through */
+ case CENTAUR_2G_JEDEC_ID:
+ case PEGASUS_JEDEC_ID:
+ case THOR_JEDEC_ID:
+ fc_host_supported_speeds(host) |= FC_PORTSPEED_2GBIT;
+ /* Fall through */
+ default:
+		fc_host_supported_speeds(host) |= FC_PORTSPEED_1GBIT;
+ }
+
+ fc_host_maxframe_size(host) =
+ ((((uint32_t) phba->fc_sparam.cmn.bbRcvSizeMsb & 0x0F) << 8) |
+ (uint32_t) phba->fc_sparam.cmn.bbRcvSizeLsb);
+
+ /* This value is also unchanging */
+ memset(fc_host_active_fc4s(host), 0,
+ sizeof(fc_host_active_fc4s(host)));
+ fc_host_active_fc4s(host)[2] = 1;
+ fc_host_active_fc4s(host)[7] = 1;
+
+ spin_lock_irq(phba->host->host_lock);
+ phba->fc_flag &= ~FC_LOADING;
+ spin_unlock_irq(phba->host->host_lock);
+ return 0;
+
+out_free_irq:
+ lpfc_stop_timer(phba);
+ phba->work_hba_events = 0;
+ free_irq(phba->pcidev->irq, phba);
+out_free_sysfs_attr:
+ lpfc_free_sysfs_attr(phba);
+out_kthread_stop:
+ kthread_stop(phba->worker_thread);
+out_free_iocbq:
+ list_for_each_entry_safe(iocbq_entry, iocbq_next,
+ &phba->lpfc_iocb_list, list) {
+ spin_lock_irq(phba->host->host_lock);
+ kfree(iocbq_entry);
+ phba->total_iocbq_bufs--;
+ spin_unlock_irq(phba->host->host_lock);
+ }
+ lpfc_mem_free(phba);
+out_free_slim:
+ dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE, phba->slim2p,
+ phba->slim2p_mapping);
+out_iounmap:
+ iounmap(phba->ctrl_regs_memmap_p);
+ iounmap(phba->slim_memmap_p);
+out_idr_remove:
+ idr_remove(&lpfc_hba_index, phba->brd_no);
+out_put_host:
+ scsi_host_put(host);
+out_release_regions:
+ pci_release_regions(pdev);
+out_disable_device:
+ pci_disable_device(pdev);
+out:
+ return error;
+}
+
+static void __devexit
+lpfc_pci_remove_one(struct pci_dev *pdev)
+{
+ struct Scsi_Host *host = pci_get_drvdata(pdev);
+ struct lpfc_hba *phba = (struct lpfc_hba *)host->hostdata[0];
+ unsigned long iflag;
+
+ lpfc_free_sysfs_attr(phba);
+
+ spin_lock_irqsave(phba->host->host_lock, iflag);
+ phba->fc_flag |= FC_UNLOADING;
+
+ spin_unlock_irqrestore(phba->host->host_lock, iflag);
+
+ fc_remove_host(phba->host);
+ scsi_remove_host(phba->host);
+
+ kthread_stop(phba->worker_thread);
+
+ /*
+ * Bring down the SLI Layer. This step disable all interrupts,
+ * clears the rings, discards all mailbox commands, and resets
+ * the HBA.
+ */
+ lpfc_sli_hba_down(phba);
+
+ /* Release the irq reservation */
+ free_irq(phba->pcidev->irq, phba);
+
+ lpfc_cleanup(phba, 0);
+ lpfc_stop_timer(phba);
+ phba->work_hba_events = 0;
+
+ /*
+ * Call scsi_free before mem_free since scsi bufs are released to their
+ * corresponding pools here.
+ */
+ lpfc_scsi_free(phba);
+ lpfc_mem_free(phba);
+
+ /* Free resources associated with SLI2 interface */
+ dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
+ phba->slim2p, phba->slim2p_mapping);
+
+ /* unmap adapter SLIM and Control Registers */
+ iounmap(phba->ctrl_regs_memmap_p);
+ iounmap(phba->slim_memmap_p);
+
+ pci_release_regions(phba->pcidev);
+ pci_disable_device(phba->pcidev);
+
+ idr_remove(&lpfc_hba_index, phba->brd_no);
+ scsi_host_put(phba->host);
+
+ pci_set_drvdata(pdev, NULL);
+}
+
+static struct pci_device_id lpfc_id_table[] = {
+ {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_VIPER,
+ PCI_ANY_ID, PCI_ANY_ID, },
+ {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_THOR,
+ PCI_ANY_ID, PCI_ANY_ID, },
+ {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PEGASUS,
+ PCI_ANY_ID, PCI_ANY_ID, },
+ {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_CENTAUR,
+ PCI_ANY_ID, PCI_ANY_ID, },
+ {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_DRAGONFLY,
+ PCI_ANY_ID, PCI_ANY_ID, },
+ {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SUPERFLY,
+ PCI_ANY_ID, PCI_ANY_ID, },
+ {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_RFLY,
+ PCI_ANY_ID, PCI_ANY_ID, },
+ {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PFLY,
+ PCI_ANY_ID, PCI_ANY_ID, },
+ {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS,
+ PCI_ANY_ID, PCI_ANY_ID, },
+ {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_BMID,
+ PCI_ANY_ID, PCI_ANY_ID, },
+ {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_BSMB,
+ PCI_ANY_ID, PCI_ANY_ID, },
+ {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR,
+ PCI_ANY_ID, PCI_ANY_ID, },
+ {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZMID,
+ PCI_ANY_ID, PCI_ANY_ID, },
+ {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZSMB,
+ PCI_ANY_ID, PCI_ANY_ID, },
+ {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_TFLY,
+ PCI_ANY_ID, PCI_ANY_ID, },
+ {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP101,
+ PCI_ANY_ID, PCI_ANY_ID, },
+ {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP10000S,
+ PCI_ANY_ID, PCI_ANY_ID, },
+ { 0 }
+};
+
+MODULE_DEVICE_TABLE(pci, lpfc_id_table);
+
+static struct pci_driver lpfc_driver = {
+ .name = LPFC_DRIVER_NAME,
+ .id_table = lpfc_id_table,
+ .probe = lpfc_pci_probe_one,
+ .remove = __devexit_p(lpfc_pci_remove_one),
+};
+
+static int __init
+lpfc_init(void)
+{
+ int error = 0;
+
+ printk(LPFC_MODULE_DESC "\n");
+
+ lpfc_transport_template =
+ fc_attach_transport(&lpfc_transport_functions);
+ if (!lpfc_transport_template)
+ return -ENOMEM;
+ error = pci_register_driver(&lpfc_driver);
+ if (error)
+ fc_release_transport(lpfc_transport_template);
+
+ return error;
+}
+
+static void __exit
+lpfc_exit(void)
+{
+ pci_unregister_driver(&lpfc_driver);
+ fc_release_transport(lpfc_transport_template);
+}
+
+module_init(lpfc_init);
+module_exit(lpfc_exit);
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION(LPFC_MODULE_DESC);
+MODULE_AUTHOR("Emulex Corporation - tech.support@emulex.com");
+MODULE_VERSION("0:" LPFC_DRIVER_VERSION);
diff --git a/drivers/scsi/lpfc/lpfc_logmsg.h b/drivers/scsi/lpfc/lpfc_logmsg.h
new file mode 100644
index 00000000000..a85268880fa
--- /dev/null
+++ b/drivers/scsi/lpfc/lpfc_logmsg.h
@@ -0,0 +1,41 @@
+/*******************************************************************
+ * This file is part of the Emulex Linux Device Driver for *
+ * Enterprise Fibre Channel Host Bus Adapters. *
+ * Refer to the README file included with this package for *
+ * driver version and adapter support. *
+ * Copyright (C) 2004 Emulex Corporation. *
+ * www.emulex.com *
+ * *
+ * This program is free software; you can redistribute it and/or *
+ * modify it under the terms of the GNU General Public License *
+ * as published by the Free Software Foundation; either version 2 *
+ * of the License, or (at your option) any later version. *
+ * *
+ * This program is distributed in the hope that it will be useful, *
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of *
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
+ * GNU General Public License for more details, a copy of which *
+ * can be found in the file COPYING included with this package. *
+ *******************************************************************/
+
+/*
+ * $Id: lpfc_logmsg.h 1.32 2005/01/25 17:52:01EST sf_support Exp $
+ */
+
+#define LOG_ELS 0x1 /* ELS events */
+#define LOG_DISCOVERY 0x2 /* Link discovery events */
+#define LOG_MBOX 0x4 /* Mailbox events */
+#define LOG_INIT 0x8 /* Initialization events */
+#define LOG_LINK_EVENT 0x10 /* Link events */
+#define LOG_IP 0x20 /* IP traffic history */
+#define LOG_FCP 0x40 /* FCP traffic history */
+#define LOG_NODE 0x80 /* Node table events */
+#define LOG_MISC 0x400 /* Miscellaneous events */
+#define LOG_SLI 0x800 /* SLI events */
+#define LOG_CHK_COND 0x1000 /* FCP Check condition flag */
+#define LOG_LIBDFC 0x2000 /* Libdfc events */
+#define LOG_ALL_MSG 0xffff /* LOG all messages */
+
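+/* Log when the message's mask bit is set in cfg_log_verbose, or
+ * unconditionally for severities at or above KERN_ERR ("<3>");
+ * level[1] is the printk level digit.
+ */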
+#define lpfc_printf_log(phba, level, mask, fmt, arg...) \
+	{ if (((mask) & (phba)->cfg_log_verbose) || (level[1] <= '3')) \
+ dev_printk(level, &((phba)->pcidev)->dev, fmt, ##arg); }
diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c
new file mode 100644
index 00000000000..8712a80fe74
--- /dev/null
+++ b/drivers/scsi/lpfc/lpfc_mbox.c
@@ -0,0 +1,646 @@
+/*******************************************************************
+ * This file is part of the Emulex Linux Device Driver for *
+ * Enterprise Fibre Channel Host Bus Adapters. *
+ * Refer to the README file included with this package for *
+ * driver version and adapter support. *
+ * Copyright (C) 2004 Emulex Corporation. *
+ * www.emulex.com *
+ * *
+ * This program is free software; you can redistribute it and/or *
+ * modify it under the terms of the GNU General Public License *
+ * as published by the Free Software Foundation; either version 2 *
+ * of the License, or (at your option) any later version. *
+ * *
+ * This program is distributed in the hope that it will be useful, *
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of *
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
+ * GNU General Public License for more details, a copy of which *
+ * can be found in the file COPYING included with this package. *
+ *******************************************************************/
+
+/*
+ * $Id: lpfc_mbox.c 1.85 2005/04/13 11:59:11EDT sf_support Exp $
+ */
+
+#include <linux/blkdev.h>
+#include <linux/pci.h>
+#include <linux/interrupt.h>
+
+#include "lpfc_hw.h"
+#include "lpfc_sli.h"
+#include "lpfc_disc.h"
+#include "lpfc_scsi.h"
+#include "lpfc.h"
+#include "lpfc_logmsg.h"
+#include "lpfc_crtn.h"
+#include "lpfc_compat.h"
+
+/**********************************************/
+/* lpfc_dump_mem     Issue a DUMP MEMORY      */
+/*                   mailbox command          */
+/**********************************************/
+void
+lpfc_dump_mem(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb, uint16_t offset)
+{
+ MAILBOX_t *mb;
+ void *ctx;
+
+ mb = &pmb->mb;
+ ctx = pmb->context2;
+
+ /* Setup to dump VPD region */
+ memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
+ mb->mbxCommand = MBX_DUMP_MEMORY;
+ mb->un.varDmp.cv = 1;
+ mb->un.varDmp.type = DMP_NV_PARAMS;
+ mb->un.varDmp.entry_index = offset;
+ mb->un.varDmp.region_id = DMP_REGION_VPD;
+ mb->un.varDmp.word_cnt = (DMP_RSP_SIZE / sizeof (uint32_t));
+ mb->un.varDmp.co = 0;
+ mb->un.varDmp.resp_offset = 0;
+ pmb->context2 = ctx;
+ mb->mbxOwner = OWN_HOST;
+ return;
+}
+
+/**********************************************/
+/* lpfc_read_nv Issue a READ NVPARAM */
+/* mailbox command */
+/**********************************************/
+void
+lpfc_read_nv(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
+{
+ MAILBOX_t *mb;
+
+ mb = &pmb->mb;
+ memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
+ mb->mbxCommand = MBX_READ_NV;
+ mb->mbxOwner = OWN_HOST;
+ return;
+}
+
+/**********************************************/
+/* lpfc_read_la Issue a READ LA */
+/* mailbox command */
+/**********************************************/
+int
+lpfc_read_la(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb, struct lpfc_dmabuf *mp)
+{
+ MAILBOX_t *mb;
+ struct lpfc_sli *psli;
+
+ psli = &phba->sli;
+ mb = &pmb->mb;
+ memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
+
+ INIT_LIST_HEAD(&mp->list);
+ mb->mbxCommand = MBX_READ_LA64;
+ mb->un.varReadLA.un.lilpBde64.tus.f.bdeSize = 128;
+ mb->un.varReadLA.un.lilpBde64.addrHigh = putPaddrHigh(mp->phys);
+ mb->un.varReadLA.un.lilpBde64.addrLow = putPaddrLow(mp->phys);
+
+ /* Save address for later completion and set the owner to host so that
+ * the FW knows this mailbox is available for processing.
+ */
+ pmb->context1 = (uint8_t *) mp;
+ mb->mbxOwner = OWN_HOST;
+ return (0);
+}
+
+/**********************************************/
+/* lpfc_clear_la Issue a CLEAR LA */
+/* mailbox command */
+/**********************************************/
+void
+lpfc_clear_la(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
+{
+ MAILBOX_t *mb;
+
+ mb = &pmb->mb;
+ memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
+
+ mb->un.varClearLA.eventTag = phba->fc_eventTag;
+ mb->mbxCommand = MBX_CLEAR_LA;
+ mb->mbxOwner = OWN_HOST;
+ return;
+}
+
+/**************************************************/
+/* lpfc_config_link Issue a CONFIG LINK */
+/* mailbox command */
+/**************************************************/
+void
+lpfc_config_link(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
+{
+ MAILBOX_t *mb = &pmb->mb;
+ memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
+
+ /* NEW_FEATURE
+ * SLI-2, Coalescing Response Feature.
+ */
+ if (phba->cfg_cr_delay) {
+ mb->un.varCfgLnk.cr = 1;
+ mb->un.varCfgLnk.ci = 1;
+ mb->un.varCfgLnk.cr_delay = phba->cfg_cr_delay;
+ mb->un.varCfgLnk.cr_count = phba->cfg_cr_count;
+ }
+
+ mb->un.varCfgLnk.myId = phba->fc_myDID;
+ mb->un.varCfgLnk.edtov = phba->fc_edtov;
+ mb->un.varCfgLnk.arbtov = phba->fc_arbtov;
+ mb->un.varCfgLnk.ratov = phba->fc_ratov;
+ mb->un.varCfgLnk.rttov = phba->fc_rttov;
+ mb->un.varCfgLnk.altov = phba->fc_altov;
+ mb->un.varCfgLnk.crtov = phba->fc_crtov;
+ mb->un.varCfgLnk.citov = phba->fc_citov;
+
+ if (phba->cfg_ack0)
+ mb->un.varCfgLnk.ack0_enable = 1;
+
+ mb->mbxCommand = MBX_CONFIG_LINK;
+ mb->mbxOwner = OWN_HOST;
+ return;
+}
+
+/**********************************************/
+/* lpfc_init_link Issue an INIT LINK */
+/* mailbox command */
+/**********************************************/
+void
+lpfc_init_link(struct lpfc_hba * phba,
+ LPFC_MBOXQ_t * pmb, uint32_t topology, uint32_t linkspeed)
+{
+ lpfc_vpd_t *vpd;
+ struct lpfc_sli *psli;
+ MAILBOX_t *mb;
+
+ mb = &pmb->mb;
+ memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
+
+ psli = &phba->sli;
+ switch (topology) {
+ case FLAGS_TOPOLOGY_MODE_LOOP_PT:
+ mb->un.varInitLnk.link_flags = FLAGS_TOPOLOGY_MODE_LOOP;
+ mb->un.varInitLnk.link_flags |= FLAGS_TOPOLOGY_FAILOVER;
+ break;
+ case FLAGS_TOPOLOGY_MODE_PT_PT:
+ mb->un.varInitLnk.link_flags = FLAGS_TOPOLOGY_MODE_PT_PT;
+ break;
+ case FLAGS_TOPOLOGY_MODE_LOOP:
+ mb->un.varInitLnk.link_flags = FLAGS_TOPOLOGY_MODE_LOOP;
+ break;
+ case FLAGS_TOPOLOGY_MODE_PT_LOOP:
+ mb->un.varInitLnk.link_flags = FLAGS_TOPOLOGY_MODE_PT_PT;
+ mb->un.varInitLnk.link_flags |= FLAGS_TOPOLOGY_FAILOVER;
+ break;
+ }
+
+ /* NEW_FEATURE
+ * Setting up the link speed
+ */
+ vpd = &phba->vpd;
+	if (vpd->rev.feaLevelHigh >= 0x02) {
+		switch (linkspeed) {
+ case LINK_SPEED_1G:
+ case LINK_SPEED_2G:
+ case LINK_SPEED_4G:
+ mb->un.varInitLnk.link_flags |=
+ FLAGS_LINK_SPEED;
+ mb->un.varInitLnk.link_speed = linkspeed;
+ break;
+ case LINK_SPEED_AUTO:
+ default:
+ mb->un.varInitLnk.link_speed =
+ LINK_SPEED_AUTO;
+ break;
+ }
+	} else
+ mb->un.varInitLnk.link_speed = LINK_SPEED_AUTO;
+
+ mb->mbxCommand = (volatile uint8_t)MBX_INIT_LINK;
+ mb->mbxOwner = OWN_HOST;
+ mb->un.varInitLnk.fabric_AL_PA = phba->fc_pref_ALPA;
+ return;
+}
+
+/**********************************************/
+/* lpfc_read_sparam Issue a READ SPARAM */
+/* mailbox command */
+/**********************************************/
+int
+lpfc_read_sparam(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
+{
+ struct lpfc_dmabuf *mp;
+ MAILBOX_t *mb;
+ struct lpfc_sli *psli;
+
+ psli = &phba->sli;
+ mb = &pmb->mb;
+ memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
+
+ mb->mbxOwner = OWN_HOST;
+
+ /* Get a buffer to hold the HBAs Service Parameters */
+
+	if (((mp = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL)) == NULL) ||
+	    ((mp->virt = lpfc_mbuf_alloc(phba, 0, &(mp->phys))) == NULL)) {
+ if (mp)
+ kfree(mp);
+ mb->mbxCommand = MBX_READ_SPARM64;
+ /* READ_SPARAM: no buffers */
+ lpfc_printf_log(phba,
+ KERN_WARNING,
+ LOG_MBOX,
+ "%d:0301 READ_SPARAM: no buffers\n",
+ phba->brd_no);
+ return (1);
+ }
+ INIT_LIST_HEAD(&mp->list);
+ mb->mbxCommand = MBX_READ_SPARM64;
+ mb->un.varRdSparm.un.sp64.tus.f.bdeSize = sizeof (struct serv_parm);
+ mb->un.varRdSparm.un.sp64.addrHigh = putPaddrHigh(mp->phys);
+ mb->un.varRdSparm.un.sp64.addrLow = putPaddrLow(mp->phys);
+
+ /* save address for completion */
+ pmb->context1 = mp;
+
+ return (0);
+}
+
+/********************************************/
+/* lpfc_unreg_did Issue a UNREG_DID */
+/* mailbox command */
+/********************************************/
+void
+lpfc_unreg_did(struct lpfc_hba * phba, uint32_t did, LPFC_MBOXQ_t * pmb)
+{
+ MAILBOX_t *mb;
+
+ mb = &pmb->mb;
+ memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
+
+ mb->un.varUnregDID.did = did;
+
+ mb->mbxCommand = MBX_UNREG_D_ID;
+ mb->mbxOwner = OWN_HOST;
+ return;
+}
+
+/***********************************************/
+/* lpfc_set_slim     Issue a SET SLIM          */
+/*                   mailbox command to write  */
+/*                   slim                      */
+/***********************************************/
+void
+lpfc_set_slim(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb, uint32_t addr,
+ uint32_t value)
+{
+ MAILBOX_t *mb;
+
+ mb = &pmb->mb;
+ memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
+
+ /* addr = 0x090597 is AUTO ABTS disable for ELS commands */
+ /* addr = 0x052198 is DELAYED ABTS enable for ELS commands */
+
+ /*
+ * Always turn on DELAYED ABTS for ELS timeouts
+ */
+ if ((addr == 0x052198) && (value == 0))
+ value = 1;
+
+ mb->un.varWords[0] = addr;
+ mb->un.varWords[1] = value;
+
+ mb->mbxCommand = MBX_SET_SLIM;
+ mb->mbxOwner = OWN_HOST;
+ return;
+}
+
+/**********************************************/
+/* lpfc_read_config Issue a READ CONFIG */
+/* mailbox command */
+/**********************************************/
+void
+lpfc_read_config(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
+{
+ MAILBOX_t *mb;
+
+ mb = &pmb->mb;
+ memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
+
+ mb->mbxCommand = MBX_READ_CONFIG;
+ mb->mbxOwner = OWN_HOST;
+ return;
+}
+
+/********************************************/
+/* lpfc_reg_login Issue a REG_LOGIN */
+/* mailbox command */
+/********************************************/
+int
+lpfc_reg_login(struct lpfc_hba * phba,
+ uint32_t did, uint8_t * param, LPFC_MBOXQ_t * pmb, uint32_t flag)
+{
+ uint8_t *sparam;
+ struct lpfc_dmabuf *mp;
+ MAILBOX_t *mb;
+ struct lpfc_sli *psli;
+
+ psli = &phba->sli;
+ mb = &pmb->mb;
+ memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
+
+ mb->un.varRegLogin.rpi = 0;
+ mb->un.varRegLogin.did = did;
+ mb->un.varWords[30] = flag; /* Set flag to issue action on cmpl */
+
+ mb->mbxOwner = OWN_HOST;
+
+ /* Get a buffer to hold the NPort's Service Parameters */
+ if (((mp = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL)) == NULL) ||
+ ((mp->virt = lpfc_mbuf_alloc(phba, 0, &(mp->phys))) == NULL)) {
+ if (mp)
+ kfree(mp);
+
+ mb->mbxCommand = MBX_REG_LOGIN64;
+ /* REG_LOGIN: no buffers */
+ lpfc_printf_log(phba,
+ KERN_WARNING,
+ LOG_MBOX,
+ "%d:0302 REG_LOGIN: no buffers Data x%x x%x\n",
+ phba->brd_no,
+ (uint32_t) did, (uint32_t) flag);
+ return (1);
+ }
+ INIT_LIST_HEAD(&mp->list);
+ sparam = mp->virt;
+
+ /* Copy params into the new buffer */
+ memcpy(sparam, param, sizeof (struct serv_parm));
+
+ /* save address for completion */
+ pmb->context1 = mp;
+
+ mb->mbxCommand = MBX_REG_LOGIN64;
+ mb->un.varRegLogin.un.sp64.tus.f.bdeSize = sizeof (struct serv_parm);
+ mb->un.varRegLogin.un.sp64.addrHigh = putPaddrHigh(mp->phys);
+ mb->un.varRegLogin.un.sp64.addrLow = putPaddrLow(mp->phys);
+
+ return (0);
+}
+
+/**********************************************/
+/* lpfc_unreg_login Issue a UNREG_LOGIN */
+/* mailbox command */
+/**********************************************/
+void
+lpfc_unreg_login(struct lpfc_hba * phba, uint32_t rpi, LPFC_MBOXQ_t * pmb)
+{
+ MAILBOX_t *mb;
+
+ mb = &pmb->mb;
+ memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
+
+ mb->un.varUnregLogin.rpi = (uint16_t) rpi;
+ mb->un.varUnregLogin.rsvd1 = 0;
+
+ mb->mbxCommand = MBX_UNREG_LOGIN;
+ mb->mbxOwner = OWN_HOST;
+ return;
+}
+
+static void
+lpfc_config_pcb_setup(struct lpfc_hba * phba)
+{
+ struct lpfc_sli *psli = &phba->sli;
+ struct lpfc_sli_ring *pring;
+ PCB_t *pcbp = &phba->slim2p->pcb;
+ dma_addr_t pdma_addr;
+ uint32_t offset;
+ uint32_t iocbCnt;
+ int i;
+
+ psli->MBhostaddr = (uint32_t *)&phba->slim2p->mbx;
+ pcbp->maxRing = (psli->num_rings - 1);
+
+ iocbCnt = 0;
+ for (i = 0; i < psli->num_rings; i++) {
+ pring = &psli->ring[i];
+ /* A ring MUST have both cmd and rsp entries defined to be
+ valid */
+ if ((pring->numCiocb == 0) || (pring->numRiocb == 0)) {
+ pcbp->rdsc[i].cmdEntries = 0;
+ pcbp->rdsc[i].rspEntries = 0;
+ pcbp->rdsc[i].cmdAddrHigh = 0;
+ pcbp->rdsc[i].rspAddrHigh = 0;
+ pcbp->rdsc[i].cmdAddrLow = 0;
+ pcbp->rdsc[i].rspAddrLow = 0;
+ pring->cmdringaddr = NULL;
+ pring->rspringaddr = NULL;
+ continue;
+ }
+ /* Command ring setup for ring */
+ pring->cmdringaddr =
+ (void *)&phba->slim2p->IOCBs[iocbCnt];
+ pcbp->rdsc[i].cmdEntries = pring->numCiocb;
+
+ offset = (uint8_t *)&phba->slim2p->IOCBs[iocbCnt] -
+ (uint8_t *)phba->slim2p;
+ pdma_addr = phba->slim2p_mapping + offset;
+ pcbp->rdsc[i].cmdAddrHigh = putPaddrHigh(pdma_addr);
+ pcbp->rdsc[i].cmdAddrLow = putPaddrLow(pdma_addr);
+ iocbCnt += pring->numCiocb;
+
+ /* Response ring setup for ring */
+ pring->rspringaddr =
+ (void *)&phba->slim2p->IOCBs[iocbCnt];
+
+ pcbp->rdsc[i].rspEntries = pring->numRiocb;
+ offset = (uint8_t *)&phba->slim2p->IOCBs[iocbCnt] -
+ (uint8_t *)phba->slim2p;
+ pdma_addr = phba->slim2p_mapping + offset;
+ pcbp->rdsc[i].rspAddrHigh = putPaddrHigh(pdma_addr);
+ pcbp->rdsc[i].rspAddrLow = putPaddrLow(pdma_addr);
+ iocbCnt += pring->numRiocb;
+ }
+}
+
+void
+lpfc_read_rev(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
+{
+ MAILBOX_t *mb;
+
+ mb = &pmb->mb;
+ memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
+ mb->un.varRdRev.cv = 1;
+ mb->mbxCommand = MBX_READ_REV;
+ mb->mbxOwner = OWN_HOST;
+ return;
+}
+
+void
+lpfc_config_ring(struct lpfc_hba * phba, int ring, LPFC_MBOXQ_t * pmb)
+{
+ int i;
+ MAILBOX_t *mb = &pmb->mb;
+ struct lpfc_sli *psli;
+ struct lpfc_sli_ring *pring;
+
+ memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
+
+ mb->un.varCfgRing.ring = ring;
+ mb->un.varCfgRing.maxOrigXchg = 0;
+ mb->un.varCfgRing.maxRespXchg = 0;
+ mb->un.varCfgRing.recvNotify = 1;
+
+ psli = &phba->sli;
+ pring = &psli->ring[ring];
+ mb->un.varCfgRing.numMask = pring->num_mask;
+ mb->mbxCommand = MBX_CONFIG_RING;
+ mb->mbxOwner = OWN_HOST;
+
+ /* Is this ring configured for a specific profile */
+ if (pring->prt[0].profile) {
+ mb->un.varCfgRing.profile = pring->prt[0].profile;
+ return;
+ }
+
+ /* Otherwise we setup specific rctl / type masks for this ring */
+ for (i = 0; i < pring->num_mask; i++) {
+ mb->un.varCfgRing.rrRegs[i].rval = pring->prt[i].rctl;
+ if (mb->un.varCfgRing.rrRegs[i].rval != FC_ELS_REQ)
+ mb->un.varCfgRing.rrRegs[i].rmask = 0xff;
+ else
+ mb->un.varCfgRing.rrRegs[i].rmask = 0xfe;
+ mb->un.varCfgRing.rrRegs[i].tval = pring->prt[i].type;
+ mb->un.varCfgRing.rrRegs[i].tmask = 0xff;
+ }
+
+ return;
+}
+
+void
+lpfc_config_port(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
+{
+ MAILBOX_t *mb = &pmb->mb;
+ dma_addr_t pdma_addr;
+ uint32_t bar_low, bar_high;
+ size_t offset;
+ HGP hgp;
+ void __iomem *to_slim;
+
+ memset(pmb, 0, sizeof(LPFC_MBOXQ_t));
+ mb->mbxCommand = MBX_CONFIG_PORT;
+ mb->mbxOwner = OWN_HOST;
+
+ mb->un.varCfgPort.pcbLen = sizeof(PCB_t);
+
+ offset = (uint8_t *)&phba->slim2p->pcb - (uint8_t *)phba->slim2p;
+ pdma_addr = phba->slim2p_mapping + offset;
+ mb->un.varCfgPort.pcbLow = putPaddrLow(pdma_addr);
+ mb->un.varCfgPort.pcbHigh = putPaddrHigh(pdma_addr);
+
+ /* Now setup pcb */
+ phba->slim2p->pcb.type = TYPE_NATIVE_SLI2;
+ phba->slim2p->pcb.feature = FEATURE_INITIAL_SLI2;
+
+ /* Setup Mailbox pointers */
+ phba->slim2p->pcb.mailBoxSize = sizeof(MAILBOX_t);
+ offset = (uint8_t *)&phba->slim2p->mbx - (uint8_t *)phba->slim2p;
+ pdma_addr = phba->slim2p_mapping + offset;
+ phba->slim2p->pcb.mbAddrHigh = putPaddrHigh(pdma_addr);
+ phba->slim2p->pcb.mbAddrLow = putPaddrLow(pdma_addr);
+
+ /*
+ * Setup Host Group ring pointer.
+ *
+ * For efficiency reasons, the ring get/put pointers can be
+ * placed in adapter memory (SLIM) rather than in host memory.
+ * This allows firmware to avoid PCI reads/writes when updating
+ * and checking pointers.
+ *
+ * The firmware recognizes the use of SLIM memory by comparing
+ * the address of the get/put pointers structure with that of
+ * the SLIM BAR (BAR0).
+ *
+ * Caution: be sure to use the PCI config space value of BAR0/BAR1
+ * (the hardware's view of the base address), not the OS's
+ * value of pci_resource_start() as the OS value may be a cookie
+ * for ioremap/iomap.
+ */
+
+ pci_read_config_dword(phba->pcidev, PCI_BASE_ADDRESS_0, &bar_low);
+ pci_read_config_dword(phba->pcidev, PCI_BASE_ADDRESS_1, &bar_high);
+
+ /* mask off BAR0's flag bits 0 - 3 */
+ phba->slim2p->pcb.hgpAddrLow = (bar_low & PCI_BASE_ADDRESS_MEM_MASK) +
+ (SLIMOFF*sizeof(uint32_t));
+ if (bar_low & PCI_BASE_ADDRESS_MEM_TYPE_64)
+ phba->slim2p->pcb.hgpAddrHigh = bar_high;
+ else
+ phba->slim2p->pcb.hgpAddrHigh = 0;
+ /* write HGP data to SLIM at the required longword offset */
+ memset(&hgp, 0, sizeof(HGP));
+ to_slim = phba->MBslimaddr + (SLIMOFF*sizeof (uint32_t));
+ lpfc_memcpy_to_slim(to_slim, &hgp, sizeof (HGP));
+
+ /* Setup Port Group ring pointer */
+ offset = (uint8_t *)&phba->slim2p->mbx.us.s2.port -
+ (uint8_t *)phba->slim2p;
+ pdma_addr = phba->slim2p_mapping + offset;
+ phba->slim2p->pcb.pgpAddrHigh = putPaddrHigh(pdma_addr);
+ phba->slim2p->pcb.pgpAddrLow = putPaddrLow(pdma_addr);
+
+ /* Use helper routine to set up the rings in the pcb */
+ lpfc_config_pcb_setup(phba);
+
+ /* special handling for LC HBAs */
+ if (lpfc_is_LC_HBA(phba->pcidev->device)) {
+ uint32_t hbainit[5];
+
+ lpfc_hba_init(phba, hbainit);
+
+ memcpy(&mb->un.varCfgPort.hbainit, hbainit, 20);
+ }
+
+ /* Swap PCB if needed */
+ lpfc_sli_pcimem_bcopy(&phba->slim2p->pcb, &phba->slim2p->pcb,
+ sizeof (PCB_t));
+
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "%d:0405 Service Level Interface (SLI) 2 selected\n",
+ phba->brd_no);
+}
+
+void
+lpfc_mbox_put(struct lpfc_hba * phba, LPFC_MBOXQ_t * mbq)
+{
+ struct lpfc_sli *psli;
+
+ psli = &phba->sli;
+
+ list_add_tail(&mbq->list, &psli->mboxq);
+
+ psli->mboxq_cnt++;
+
+ return;
+}
+
+LPFC_MBOXQ_t *
+lpfc_mbox_get(struct lpfc_hba * phba)
+{
+ LPFC_MBOXQ_t *mbq = NULL;
+ struct lpfc_sli *psli = &phba->sli;
+
+ list_remove_head((&psli->mboxq), mbq, LPFC_MBOXQ_t,
+ list);
+ if (mbq) {
+ psli->mboxq_cnt--;
+ }
+
+ return mbq;
+}
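+
+/*
+ * Note: lpfc_mbox_put() and lpfc_mbox_get() only manipulate the
+ * driver's software mailbox queue; no hardware access is involved.
+ * The SLI layer is assumed to call them under the host lock and to
+ * pair them FIFO-style (sketch):
+ *
+ *	lpfc_mbox_put(phba, pmb);	-- queue a prepared command
+ *	...
+ *	pmb = lpfc_mbox_get(phba);	-- later, dequeue the oldest
+ */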
diff --git a/drivers/scsi/lpfc/lpfc_mem.c b/drivers/scsi/lpfc/lpfc_mem.c
new file mode 100644
index 00000000000..4397e116071
--- /dev/null
+++ b/drivers/scsi/lpfc/lpfc_mem.c
@@ -0,0 +1,179 @@
+/*******************************************************************
+ * This file is part of the Emulex Linux Device Driver for *
+ * Enterprise Fibre Channel Host Bus Adapters. *
+ * Refer to the README file included with this package for *
+ * driver version and adapter support. *
+ * Copyright (C) 2004 Emulex Corporation. *
+ * www.emulex.com *
+ * *
+ * This program is free software; you can redistribute it and/or *
+ * modify it under the terms of the GNU General Public License *
+ * as published by the Free Software Foundation; either version 2 *
+ * of the License, or (at your option) any later version. *
+ * *
+ * This program is distributed in the hope that it will be useful, *
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of *
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
+ * GNU General Public License for more details, a copy of which *
+ * can be found in the file COPYING included with this package. *
+ *******************************************************************/
+
+/*
+ * $Id: lpfc_mem.c 1.79 2005/04/13 14:25:50EDT sf_support Exp $
+ */
+
+#include <linux/mempool.h>
+#include <linux/pci.h>
+#include <linux/interrupt.h>
+
+#include "lpfc_hw.h"
+#include "lpfc_sli.h"
+#include "lpfc_disc.h"
+#include "lpfc_scsi.h"
+#include "lpfc.h"
+#include "lpfc_crtn.h"
+
+#define LPFC_MBUF_POOL_SIZE 64 /* max elements in MBUF safety pool */
+#define LPFC_MEM_POOL_SIZE 64 /* max elem in non-DMA safety pool */
+
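+/*
+ * mempool alloc/free callbacks: the element size is passed through the
+ * opaque 'data' pointer handed to mempool_create(), so one pair of
+ * helpers can back pools of different element sizes (see the
+ * mempool_create() calls in lpfc_mem_alloc() below).
+ */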
+static void *
+lpfc_pool_kmalloc(unsigned int gfp_flags, void *data)
+{
+ return kmalloc((unsigned long)data, gfp_flags);
+}
+
+static void
+lpfc_pool_kfree(void *obj, void *data)
+{
+ kfree(obj);
+}
+
+int
+lpfc_mem_alloc(struct lpfc_hba * phba)
+{
+ struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool;
+ int i = 0;
+
+ phba->lpfc_scsi_dma_buf_pool = pci_pool_create("lpfc_scsi_dma_buf_pool",
+ phba->pcidev, phba->cfg_sg_dma_buf_size, 8, 0);
+ if (!phba->lpfc_scsi_dma_buf_pool)
+ goto fail;
+
+ phba->lpfc_mbuf_pool = pci_pool_create("lpfc_mbuf_pool", phba->pcidev,
+ LPFC_BPL_SIZE, 8, 0);
+ if (!phba->lpfc_mbuf_pool)
+ goto fail_free_dma_buf_pool;
+
+ pool->elements = kmalloc(sizeof(struct lpfc_dmabuf) *
+ LPFC_MBUF_POOL_SIZE, GFP_KERNEL);
+ if (!pool->elements)
+ goto fail_free_mbuf_pool; /* i == 0, so the unwind loop is a no-op */
+ pool->max_count = 0;
+ pool->current_count = 0;
+ for (i = 0; i < LPFC_MBUF_POOL_SIZE; i++) {
+ pool->elements[i].virt = pci_pool_alloc(phba->lpfc_mbuf_pool,
+ GFP_KERNEL, &pool->elements[i].phys);
+ if (!pool->elements[i].virt)
+ goto fail_free_mbuf_pool;
+ pool->max_count++;
+ pool->current_count++;
+ }
+
+ phba->mbox_mem_pool = mempool_create(LPFC_MEM_POOL_SIZE,
+ lpfc_pool_kmalloc, lpfc_pool_kfree,
+ (void *)(unsigned long)sizeof(LPFC_MBOXQ_t));
+ if (!phba->mbox_mem_pool)
+ goto fail_free_mbuf_pool;
+
+ phba->nlp_mem_pool = mempool_create(LPFC_MEM_POOL_SIZE,
+ lpfc_pool_kmalloc, lpfc_pool_kfree,
+ (void *)(unsigned long)sizeof(struct lpfc_nodelist));
+ if (!phba->nlp_mem_pool)
+ goto fail_free_mbox_pool;
+
+ return 0;
+
+ fail_free_mbox_pool:
+ mempool_destroy(phba->mbox_mem_pool);
+ fail_free_mbuf_pool:
+ while (i--)
+ pci_pool_free(phba->lpfc_mbuf_pool, pool->elements[i].virt,
+ pool->elements[i].phys);
+ kfree(pool->elements);
+ pci_pool_destroy(phba->lpfc_mbuf_pool);
+ fail_free_dma_buf_pool:
+ pci_pool_destroy(phba->lpfc_scsi_dma_buf_pool);
+ fail:
+ return -ENOMEM;
+}
+
+void
+lpfc_mem_free(struct lpfc_hba * phba)
+{
+ struct lpfc_sli *psli = &phba->sli;
+ struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool;
+ LPFC_MBOXQ_t *mbox, *next_mbox;
+ struct lpfc_dmabuf *mp;
+ int i;
+
+ list_for_each_entry_safe(mbox, next_mbox, &psli->mboxq, list) {
+ mp = (struct lpfc_dmabuf *) (mbox->context1);
+ if (mp) {
+ lpfc_mbuf_free(phba, mp->virt, mp->phys);
+ kfree(mp);
+ }
+ list_del(&mbox->list);
+ mempool_free(mbox, phba->mbox_mem_pool);
+ }
+
+ psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
+ if (psli->mbox_active) {
+ mbox = psli->mbox_active;
+ mp = (struct lpfc_dmabuf *) (mbox->context1);
+ if (mp) {
+ lpfc_mbuf_free(phba, mp->virt, mp->phys);
+ kfree(mp);
+ }
+ mempool_free(mbox, phba->mbox_mem_pool);
+ psli->mbox_active = NULL;
+ }
+
+ for (i = 0; i < pool->current_count; i++)
+ pci_pool_free(phba->lpfc_mbuf_pool, pool->elements[i].virt,
+ pool->elements[i].phys);
+ kfree(pool->elements);
+ mempool_destroy(phba->nlp_mem_pool);
+ mempool_destroy(phba->mbox_mem_pool);
+
+ pci_pool_destroy(phba->lpfc_scsi_dma_buf_pool);
+ pci_pool_destroy(phba->lpfc_mbuf_pool);
+}
+
+void *
+lpfc_mbuf_alloc(struct lpfc_hba *phba, int mem_flags, dma_addr_t *handle)
+{
+ struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool;
+ void *ret;
+
+ ret = pci_pool_alloc(phba->lpfc_mbuf_pool, GFP_KERNEL, handle);
+
+ if (!ret && (mem_flags & MEM_PRI) && pool->current_count) {
+ pool->current_count--;
+ ret = pool->elements[pool->current_count].virt;
+ *handle = pool->elements[pool->current_count].phys;
+ }
+ return ret;
+}
+
+void
+lpfc_mbuf_free(struct lpfc_hba * phba, void *virt, dma_addr_t dma)
+{
+ struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool;
+
+ if (pool->current_count < pool->max_count) {
+ pool->elements[pool->current_count].virt = virt;
+ pool->elements[pool->current_count].phys = dma;
+ pool->current_count++;
+ } else {
+ pci_pool_free(phba->lpfc_mbuf_pool, virt, dma);
+ }
+ return;
+}
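+
+/*
+ * Safety pool usage sketch (illustrative only): callers on critical
+ * discovery paths pass MEM_PRI so that, if the DMA pool is exhausted,
+ * the request falls back to the preallocated safety pool:
+ *
+ *	mp->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &mp->phys);
+ *
+ * Buffers released via lpfc_mbuf_free() refill the safety pool first,
+ * up to its original max_count, before returning to the DMA pool.
+ */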
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
new file mode 100644
index 00000000000..e7470a4738c
--- /dev/null
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
@@ -0,0 +1,1842 @@
+/*******************************************************************
+ * This file is part of the Emulex Linux Device Driver for *
+ * Enterprise Fibre Channel Host Bus Adapters. *
+ * Refer to the README file included with this package for *
+ * driver version and adapter support. *
+ * Copyright (C) 2004 Emulex Corporation. *
+ * www.emulex.com *
+ * *
+ * This program is free software; you can redistribute it and/or *
+ * modify it under the terms of the GNU General Public License *
+ * as published by the Free Software Foundation; either version 2 *
+ * of the License, or (at your option) any later version. *
+ * *
+ * This program is distributed in the hope that it will be useful, *
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of *
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
+ * GNU General Public License for more details, a copy of which *
+ * can be found in the file COPYING included with this package. *
+ *******************************************************************/
+
+/*
+ * $Id: lpfc_nportdisc.c 1.179 2005/04/13 11:59:13EDT sf_support Exp $
+ */
+
+#include <linux/blkdev.h>
+#include <linux/pci.h>
+#include <linux/interrupt.h>
+
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_transport_fc.h>
+
+#include "lpfc_hw.h"
+#include "lpfc_sli.h"
+#include "lpfc_disc.h"
+#include "lpfc_scsi.h"
+#include "lpfc.h"
+#include "lpfc_logmsg.h"
+#include "lpfc_crtn.h"
+
+
+/* Called to verify a rcv'ed ADISC was intended for us. */
+static int
+lpfc_check_adisc(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp,
+ struct lpfc_name * nn, struct lpfc_name * pn)
+{
+ /* Verify that the ADISC rsp WWNN / WWPN matches our internal
+ * node table entry for that node.
+ */
+ if (memcmp(nn, &ndlp->nlp_nodename, sizeof (struct lpfc_name)) != 0)
+ return (0);
+
+ if (memcmp(pn, &ndlp->nlp_portname, sizeof (struct lpfc_name)) != 0)
+ return (0);
+
+ /* we match, return success */
+ return (1);
+}
+
+
+int
+lpfc_check_sparm(struct lpfc_hba * phba,
+ struct lpfc_nodelist * ndlp, struct serv_parm * sp,
+ uint32_t class)
+{
+ volatile struct serv_parm *hsp = &phba->fc_sparam;
+ /* First check for supported version */
+
+ /* Next check for class validity */
+ if (sp->cls1.classValid) {
+
+ if (sp->cls1.rcvDataSizeMsb > hsp->cls1.rcvDataSizeMsb)
+ sp->cls1.rcvDataSizeMsb = hsp->cls1.rcvDataSizeMsb;
+ if (sp->cls1.rcvDataSizeLsb > hsp->cls1.rcvDataSizeLsb)
+ sp->cls1.rcvDataSizeLsb = hsp->cls1.rcvDataSizeLsb;
+ } else if (class == CLASS1) {
+ return (0);
+ }
+
+ if (sp->cls2.classValid) {
+
+ if (sp->cls2.rcvDataSizeMsb > hsp->cls2.rcvDataSizeMsb)
+ sp->cls2.rcvDataSizeMsb = hsp->cls2.rcvDataSizeMsb;
+ if (sp->cls2.rcvDataSizeLsb > hsp->cls2.rcvDataSizeLsb)
+ sp->cls2.rcvDataSizeLsb = hsp->cls2.rcvDataSizeLsb;
+ } else if (class == CLASS2) {
+ return (0);
+ }
+
+ if (sp->cls3.classValid) {
+
+ if (sp->cls3.rcvDataSizeMsb > hsp->cls3.rcvDataSizeMsb)
+ sp->cls3.rcvDataSizeMsb = hsp->cls3.rcvDataSizeMsb;
+ if (sp->cls3.rcvDataSizeLsb > hsp->cls3.rcvDataSizeLsb)
+ sp->cls3.rcvDataSizeLsb = hsp->cls3.rcvDataSizeLsb;
+ } else if (class == CLASS3) {
+ return (0);
+ }
+
+ if (sp->cmn.bbRcvSizeMsb > hsp->cmn.bbRcvSizeMsb)
+ sp->cmn.bbRcvSizeMsb = hsp->cmn.bbRcvSizeMsb;
+ if (sp->cmn.bbRcvSizeLsb > hsp->cmn.bbRcvSizeLsb)
+ sp->cmn.bbRcvSizeLsb = hsp->cmn.bbRcvSizeLsb;
+
+ /* If check is good, copy wwpn wwnn into ndlp */
+ memcpy(&ndlp->nlp_nodename, &sp->nodeName, sizeof (struct lpfc_name));
+ memcpy(&ndlp->nlp_portname, &sp->portName, sizeof (struct lpfc_name));
+ return (1);
+}
+
+static void *
+lpfc_check_elscmpl_iocb(struct lpfc_hba * phba,
+ struct lpfc_iocbq *cmdiocb,
+ struct lpfc_iocbq *rspiocb)
+{
+ struct lpfc_dmabuf *pcmd, *prsp;
+ uint32_t *lp;
+ void *ptr = NULL;
+ IOCB_t *irsp;
+
+ irsp = &rspiocb->iocb;
+ pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
+
+ /* For lpfc_els_abort, context2 could be zero'ed to delay
+ * freeing associated memory till after ABTS completes.
+ */
+ if (pcmd) {
+ prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf,
+ list);
+ if (prsp) {
+ lp = (uint32_t *) prsp->virt;
+ ptr = (void *)((uint8_t *)lp + sizeof(uint32_t));
+ }
+ }
+ else {
+ /* Force ulpStatus error since we are returning NULL ptr */
+ if (!(irsp->ulpStatus)) {
+ irsp->ulpStatus = IOSTAT_LOCAL_REJECT;
+ irsp->un.ulpWord[4] = IOERR_SLI_ABORTED;
+ }
+ ptr = NULL;
+ }
+ return (ptr);
+}
+
+
+/*
+ * Free resources / clean up outstanding I/Os
+ * associated with a LPFC_NODELIST entry. This
+ * routine effectively results in a "software abort".
+ */
+int
+lpfc_els_abort(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp,
+ int send_abts)
+{
+ struct lpfc_sli *psli;
+ struct lpfc_sli_ring *pring;
+ struct lpfc_iocbq *iocb, *next_iocb;
+ IOCB_t *icmd;
+ int found = 0;
+
+ /* Abort outstanding I/O on NPort <nlp_DID> */
+ lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
+ "%d:0201 Abort outstanding I/O on NPort x%x "
+ "Data: x%x x%x x%x\n",
+ phba->brd_no, ndlp->nlp_DID, ndlp->nlp_flag,
+ ndlp->nlp_state, ndlp->nlp_rpi);
+
+ psli = &phba->sli;
+ pring = &psli->ring[LPFC_ELS_RING];
+
+ /* First check the txq */
+ do {
+ found = 0;
+ spin_lock_irq(phba->host->host_lock);
+ list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
+ /* Check to see if iocb matches the nport we are looking
+ for */
+ if ((lpfc_check_sli_ndlp(phba, pring, iocb, ndlp))) {
+ found = 1;
+ /* It matches, so dequeue and call compl
+ with an error */
+ list_del(&iocb->list);
+ pring->txq_cnt--;
+ if (iocb->iocb_cmpl) {
+ icmd = &iocb->iocb;
+ icmd->ulpStatus = IOSTAT_LOCAL_REJECT;
+ icmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
+ spin_unlock_irq(phba->host->host_lock);
+ (iocb->iocb_cmpl) (phba, iocb, iocb);
+ spin_lock_irq(phba->host->host_lock);
+ } else {
+ list_add_tail(&iocb->list,
+ &phba->lpfc_iocb_list);
+ }
+ break;
+ }
+ }
+ spin_unlock_irq(phba->host->host_lock);
+ } while (found);
+
+ /* Everything on txcmplq will be returned by firmware
+ * with a no rpi / linkdown / abort error. For ring 0,
+ * ELS discovery, we want to get rid of it right here.
+ */
+ /* Next check the txcmplq */
+ do {
+ found = 0;
+ spin_lock_irq(phba->host->host_lock);
+ list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq,
+ list) {
+ /* Check to see if iocb matches the nport we are looking
+ for */
+ if ((lpfc_check_sli_ndlp (phba, pring, iocb, ndlp))) {
+ found = 1;
+ /* It matches, so dequeue and call compl
+ with an error */
+ list_del(&iocb->list);
+ pring->txcmplq_cnt--;
+
+ icmd = &iocb->iocb;
+ /* If the driver is completing an ELS
+ * command early, flush it out of the firmware.
+ */
+ if (send_abts &&
+ (icmd->ulpCommand == CMD_ELS_REQUEST64_CR) &&
+ (icmd->un.elsreq64.bdl.ulpIoTag32)) {
+ lpfc_sli_issue_abort_iotag32(phba,
+ pring, iocb);
+ }
+ if (iocb->iocb_cmpl) {
+ icmd->ulpStatus = IOSTAT_LOCAL_REJECT;
+ icmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
+ spin_unlock_irq(phba->host->host_lock);
+ (iocb->iocb_cmpl) (phba, iocb, iocb);
+ spin_lock_irq(phba->host->host_lock);
+ } else {
+ list_add_tail(&iocb->list,
+ &phba->lpfc_iocb_list);
+ }
+ break;
+ }
+ }
+ spin_unlock_irq(phba->host->host_lock);
+ } while (found);
+
+ /* If we are delaying issuing an ELS command, cancel it */
+ if (ndlp->nlp_flag & NLP_DELAY_TMO) {
+ ndlp->nlp_flag &= ~NLP_DELAY_TMO;
+ del_timer_sync(&ndlp->nlp_delayfunc);
+ if (!list_empty(&ndlp->els_retry_evt.evt_listp))
+ list_del_init(&ndlp->els_retry_evt.evt_listp);
+ }
+ return (0);
+}
+
+static int
+lpfc_rcv_plogi(struct lpfc_hba * phba,
+ struct lpfc_nodelist * ndlp,
+ struct lpfc_iocbq *cmdiocb)
+{
+ struct lpfc_dmabuf *pcmd;
+ uint32_t *lp;
+ IOCB_t *icmd;
+ struct serv_parm *sp;
+ LPFC_MBOXQ_t *mbox;
+ struct ls_rjt stat;
+ int rc;
+
+ memset(&stat, 0, sizeof (struct ls_rjt));
+ if (phba->hba_state <= LPFC_FLOGI) {
+ /* Before responding to PLOGI, check for pt2pt mode.
+ * If we are pt2pt, with an outstanding FLOGI, abort
+ * the FLOGI and resend it first.
+ */
+ if (phba->fc_flag & FC_PT2PT) {
+ lpfc_els_abort_flogi(phba);
+ if (!(phba->fc_flag & FC_PT2PT_PLOGI)) {
+ /* If the other side is supposed to initiate
+ * the PLOGI anyway, just ACC it now and
+ * move on with discovery.
+ */
+ phba->fc_edtov = FF_DEF_EDTOV;
+ phba->fc_ratov = FF_DEF_RATOV;
+ /* Start discovery - this should just do
+ CLEAR_LA */
+ lpfc_disc_start(phba);
+ }
+ else {
+ lpfc_initial_flogi(phba);
+ }
+ }
+ else {
+ stat.un.b.lsRjtRsnCode = LSRJT_LOGICAL_BSY;
+ stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
+ lpfc_els_rsp_reject(phba, stat.un.lsRjtError, cmdiocb,
+ ndlp);
+ return 0;
+ }
+ }
+ pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
+ lp = (uint32_t *) pcmd->virt;
+ sp = (struct serv_parm *) ((uint8_t *) lp + sizeof (uint32_t));
+ if (lpfc_check_sparm(phba, ndlp, sp, CLASS3) == 0) {
+ /* Reject this request because of invalid parameters */
+ stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
+ stat.un.b.lsRjtRsnCodeExp = LSEXP_SPARM_OPTIONS;
+ lpfc_els_rsp_reject(phba, stat.un.lsRjtError, cmdiocb, ndlp);
+ return (0);
+ }
+ icmd = &cmdiocb->iocb;
+
+ /* PLOGI chkparm OK */
+ lpfc_printf_log(phba,
+ KERN_INFO,
+ LOG_ELS,
+ "%d:0114 PLOGI chkparm OK Data: x%x x%x x%x x%x\n",
+ phba->brd_no,
+ ndlp->nlp_DID, ndlp->nlp_state, ndlp->nlp_flag,
+ ndlp->nlp_rpi);
+
+ if ((phba->cfg_fcp_class == 2) &&
+ (sp->cls2.classValid)) {
+ ndlp->nlp_fcp_info |= CLASS2;
+ } else {
+ ndlp->nlp_fcp_info |= CLASS3;
+ }
+ ndlp->nlp_class_sup = 0;
+ if (sp->cls1.classValid)
+ ndlp->nlp_class_sup |= FC_COS_CLASS1;
+ if (sp->cls2.classValid)
+ ndlp->nlp_class_sup |= FC_COS_CLASS2;
+ if (sp->cls3.classValid)
+ ndlp->nlp_class_sup |= FC_COS_CLASS3;
+ if (sp->cls4.classValid)
+ ndlp->nlp_class_sup |= FC_COS_CLASS4;
+ ndlp->nlp_maxframe =
+ ((sp->cmn.bbRcvSizeMsb & 0x0F) << 8) | sp->cmn.bbRcvSizeLsb;
+
+ /* no need to reg_login if we are already in one of these states */
+ switch(ndlp->nlp_state) {
+ case NLP_STE_NPR_NODE:
+ if (!(ndlp->nlp_flag & NLP_NPR_ADISC))
+ break;
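+ /* NLP_NPR_ADISC is set - fall through and just ACC */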
+ case NLP_STE_REG_LOGIN_ISSUE:
+ case NLP_STE_PRLI_ISSUE:
+ case NLP_STE_UNMAPPED_NODE:
+ case NLP_STE_MAPPED_NODE:
+ lpfc_els_rsp_acc(phba, ELS_CMD_PLOGI, cmdiocb, ndlp, NULL, 0);
+ return (1);
+ }
+
+ if ((phba->fc_flag & FC_PT2PT)
+ && !(phba->fc_flag & FC_PT2PT_PLOGI)) {
+ /* rcv'ed PLOGI decides what our NPortId will be */
+ phba->fc_myDID = icmd->un.rcvels.parmRo;
+ mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (mbox == NULL)
+ goto out;
+ lpfc_config_link(phba, mbox);
+ mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+ rc = lpfc_sli_issue_mbox
+ (phba, mbox, (MBX_NOWAIT | MBX_STOP_IOCB));
+ if (rc == MBX_NOT_FINISHED) {
+ mempool_free(mbox, phba->mbox_mem_pool);
+ goto out;
+ }
+
+ lpfc_can_disctmo(phba);
+ }
+ mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (mbox == NULL)
+ goto out;
+
+ if (lpfc_reg_login(phba, icmd->un.rcvels.remoteID,
+ (uint8_t *) sp, mbox, 0)) {
+ mempool_free(mbox, phba->mbox_mem_pool);
+ goto out;
+ }
+
+ /* ACC PLOGI rsp command needs to execute first,
+ * queue this mbox command to be processed later.
+ */
+ mbox->mbox_cmpl = lpfc_mbx_cmpl_reg_login;
+ mbox->context2 = ndlp;
+ ndlp->nlp_flag |= NLP_ACC_REGLOGIN;
+
+ /* If there is an outstanding PLOGI issued, abort it before
+ * sending ACC rsp to the PLOGI received.
+ */
+ if (ndlp->nlp_state == NLP_STE_PLOGI_ISSUE) {
+ /* software abort outstanding PLOGI */
+ lpfc_els_abort(phba, ndlp, 1);
+ }
+ ndlp->nlp_flag |= NLP_RCV_PLOGI;
+ lpfc_els_rsp_acc(phba, ELS_CMD_PLOGI, cmdiocb, ndlp, mbox, 0);
+ return (1);
+
+out:
+ stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
+ stat.un.b.lsRjtRsnCodeExp = LSEXP_OUT_OF_RESOURCE;
+ lpfc_els_rsp_reject(phba, stat.un.lsRjtError, cmdiocb, ndlp);
+ return (0);
+}
+
+static int
+lpfc_rcv_padisc(struct lpfc_hba * phba,
+ struct lpfc_nodelist * ndlp,
+ struct lpfc_iocbq *cmdiocb)
+{
+ struct lpfc_dmabuf *pcmd;
+ struct serv_parm *sp;
+ struct lpfc_name *pnn, *ppn;
+ struct ls_rjt stat;
+ ADISC *ap;
+ IOCB_t *icmd;
+ uint32_t *lp;
+ uint32_t cmd;
+
+ pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
+ lp = (uint32_t *) pcmd->virt;
+
+ cmd = *lp++;
+ if (cmd == ELS_CMD_ADISC) {
+ ap = (ADISC *) lp;
+ pnn = (struct lpfc_name *) & ap->nodeName;
+ ppn = (struct lpfc_name *) & ap->portName;
+ } else {
+ sp = (struct serv_parm *) lp;
+ pnn = (struct lpfc_name *) & sp->nodeName;
+ ppn = (struct lpfc_name *) & sp->portName;
+ }
+
+ icmd = &cmdiocb->iocb;
+ if ((icmd->ulpStatus == 0) &&
+ (lpfc_check_adisc(phba, ndlp, pnn, ppn))) {
+ if (cmd == ELS_CMD_ADISC) {
+ lpfc_els_rsp_adisc_acc(phba, cmdiocb, ndlp);
+ }
+ else {
+ lpfc_els_rsp_acc(phba, ELS_CMD_PLOGI, cmdiocb, ndlp,
+ NULL, 0);
+ }
+ return (1);
+ }
+ /* Reject this request because of invalid parameters */
+ stat.un.b.lsRjtRsvd0 = 0;
+ stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
+ stat.un.b.lsRjtRsnCodeExp = LSEXP_SPARM_OPTIONS;
+ stat.un.b.vendorUnique = 0;
+ lpfc_els_rsp_reject(phba, stat.un.lsRjtError, cmdiocb, ndlp);
+
+ ndlp->nlp_last_elscmd = (unsigned long)ELS_CMD_PLOGI;
+ /* 1 sec timeout */
+ mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ);
+
+ spin_lock_irq(phba->host->host_lock);
+ ndlp->nlp_flag |= NLP_DELAY_TMO;
+ spin_unlock_irq(phba->host->host_lock);
+ ndlp->nlp_state = NLP_STE_NPR_NODE;
+ lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST);
+ return (0);
+}
+
+static int
+lpfc_rcv_logo(struct lpfc_hba * phba,
+ struct lpfc_nodelist * ndlp,
+ struct lpfc_iocbq *cmdiocb)
+{
+ /* Put ndlp on NPR list with 1 sec timeout for plogi, ACC logo */
+ /* Only call LOGO ACC for first LOGO, this avoids sending unnecessary
+ * PLOGIs during LOGO storms from a device.
+ */
+ ndlp->nlp_flag |= NLP_LOGO_ACC;
+ lpfc_els_rsp_acc(phba, ELS_CMD_ACC, cmdiocb, ndlp, NULL, 0);
+
+ if (!(ndlp->nlp_type & NLP_FABRIC)) {
+ /* Only try to re-login if this is NOT a Fabric Node */
+ ndlp->nlp_last_elscmd = (unsigned long)ELS_CMD_PLOGI;
+ mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1);
+ spin_lock_irq(phba->host->host_lock);
+ ndlp->nlp_flag |= NLP_DELAY_TMO;
+ spin_unlock_irq(phba->host->host_lock);
+ }
+
+ ndlp->nlp_state = NLP_STE_NPR_NODE;
+ lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST);
+
+ ndlp->nlp_flag &= ~NLP_NPR_ADISC;
+ /* The driver has to wait until the ACC completes before it continues
+ * processing the LOGO. The action will resume in
+ * lpfc_cmpl_els_logo_acc routine. Since part of processing includes an
+ * unreg_login, the driver waits so the ACC does not get aborted.
+ */
+ return (0);
+}
+
+static void
+lpfc_rcv_prli(struct lpfc_hba * phba,
+ struct lpfc_nodelist * ndlp,
+ struct lpfc_iocbq *cmdiocb)
+{
+ struct lpfc_dmabuf *pcmd;
+ uint32_t *lp;
+ PRLI *npr;
+ struct fc_rport *rport = ndlp->rport;
+ u32 roles;
+
+ pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
+ lp = (uint32_t *) pcmd->virt;
+ npr = (PRLI *) ((uint8_t *) lp + sizeof (uint32_t));
+
+ ndlp->nlp_type &= ~(NLP_FCP_TARGET | NLP_FCP_INITIATOR);
+ ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
+ if ((npr->acceptRspCode == PRLI_REQ_EXECUTED) &&
+ (npr->prliType == PRLI_FCP_TYPE)) {
+ if (npr->initiatorFunc)
+ ndlp->nlp_type |= NLP_FCP_INITIATOR;
+ if (npr->targetFunc)
+ ndlp->nlp_type |= NLP_FCP_TARGET;
+ if (npr->Retry)
+ ndlp->nlp_fcp_info |= NLP_FCP_2_DEVICE;
+ }
+ if (rport) {
+ /* We need to update the rport role values */
+ roles = FC_RPORT_ROLE_UNKNOWN;
+ if (ndlp->nlp_type & NLP_FCP_INITIATOR)
+ roles |= FC_RPORT_ROLE_FCP_INITIATOR;
+ if (ndlp->nlp_type & NLP_FCP_TARGET)
+ roles |= FC_RPORT_ROLE_FCP_TARGET;
+ fc_remote_port_rolechg(rport, roles);
+ }
+}
+
+static uint32_t
+lpfc_disc_set_adisc(struct lpfc_hba * phba,
+ struct lpfc_nodelist * ndlp)
+{
+ /* Check config parameter use-adisc or FCP-2 */
+ if ((phba->cfg_use_adisc == 0) &&
+ !(phba->fc_flag & FC_RSCN_MODE)) {
+ if (!(ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE))
+ return (0);
+ }
+ spin_lock_irq(phba->host->host_lock);
+ ndlp->nlp_flag |= NLP_NPR_ADISC;
+ spin_unlock_irq(phba->host->host_lock);
+ return (1);
+}
+
+static uint32_t
+lpfc_disc_noop(struct lpfc_hba * phba,
+ struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
+{
+ /* This routine does nothing, just return the current state */
+ return (ndlp->nlp_state);
+}
+
+static uint32_t
+lpfc_disc_illegal(struct lpfc_hba * phba,
+ struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
+{
+ lpfc_printf_log(phba,
+ KERN_ERR,
+ LOG_DISCOVERY,
+ "%d:0253 Illegal State Transition: node x%x event x%x, "
+ "state x%x Data: x%x x%x\n",
+ phba->brd_no,
+ ndlp->nlp_DID, evt, ndlp->nlp_state, ndlp->nlp_rpi,
+ ndlp->nlp_flag);
+ return (ndlp->nlp_state);
+}
+
+/* Start of Discovery State Machine routines */
+
+static uint32_t
+lpfc_rcv_plogi_unused_node(struct lpfc_hba * phba,
+ struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
+{
+ struct lpfc_iocbq *cmdiocb;
+
+ cmdiocb = (struct lpfc_iocbq *) arg;
+
+ if (lpfc_rcv_plogi(phba, ndlp, cmdiocb)) {
+ ndlp->nlp_state = NLP_STE_UNUSED_NODE;
+ lpfc_nlp_list(phba, ndlp, NLP_UNUSED_LIST);
+ return (ndlp->nlp_state);
+ }
+ lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
+ return (NLP_STE_FREED_NODE);
+}
+
+static uint32_t
+lpfc_rcv_els_unused_node(struct lpfc_hba * phba,
+ struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
+{
+ lpfc_issue_els_logo(phba, ndlp, 0);
+ lpfc_nlp_list(phba, ndlp, NLP_UNUSED_LIST);
+ return (ndlp->nlp_state);
+}
+
+static uint32_t
+lpfc_rcv_logo_unused_node(struct lpfc_hba * phba,
+ struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
+{
+ struct lpfc_iocbq *cmdiocb;
+
+ cmdiocb = (struct lpfc_iocbq *) arg;
+
+ spin_lock_irq(phba->host->host_lock);
+ ndlp->nlp_flag |= NLP_LOGO_ACC;
+ spin_unlock_irq(phba->host->host_lock);
+ lpfc_els_rsp_acc(phba, ELS_CMD_ACC, cmdiocb, ndlp, NULL, 0);
+ lpfc_nlp_list(phba, ndlp, NLP_UNUSED_LIST);
+
+ return (ndlp->nlp_state);
+}
+
+static uint32_t
+lpfc_cmpl_logo_unused_node(struct lpfc_hba * phba,
+ struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
+{
+ lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
+ return (NLP_STE_FREED_NODE);
+}
+
+static uint32_t
+lpfc_device_rm_unused_node(struct lpfc_hba * phba,
+ struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
+{
+ lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
+ return (NLP_STE_FREED_NODE);
+}
+
+static uint32_t
+lpfc_rcv_plogi_plogi_issue(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp,
+ void *arg, uint32_t evt)
+{
+ struct lpfc_iocbq *cmdiocb = arg;
+ struct lpfc_dmabuf *pcmd;
+ struct serv_parm *sp;
+ uint32_t *lp;
+ struct ls_rjt stat;
+ int port_cmp;
+
+ pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
+ lp = (uint32_t *) pcmd->virt;
+ sp = (struct serv_parm *) ((uint8_t *) lp + sizeof (uint32_t));
+
+ memset(&stat, 0, sizeof (struct ls_rjt));
+
+ /* For a PLOGI, we only accept if our portname is less
+ * than the remote portname.
+ */
+ phba->fc_stat.elsLogiCol++;
+ port_cmp = memcmp(&phba->fc_portname, &sp->portName,
+ sizeof (struct lpfc_name));
+
+ if (port_cmp >= 0) {
+ /* Reject this request because the remote node will accept
+ ours */
+ stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
+ stat.un.b.lsRjtRsnCodeExp = LSEXP_CMD_IN_PROGRESS;
+ lpfc_els_rsp_reject(phba, stat.un.lsRjtError, cmdiocb, ndlp);
+ }
+ else {
+ lpfc_rcv_plogi(phba, ndlp, cmdiocb);
+ } /* if our portname was less */
+
+ return (ndlp->nlp_state);
+}
+
+static uint32_t
+lpfc_rcv_els_plogi_issue(struct lpfc_hba * phba,
+ struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
+{
+ struct lpfc_iocbq *cmdiocb;
+
+ cmdiocb = (struct lpfc_iocbq *) arg;
+
+ /* software abort outstanding PLOGI */
+ lpfc_els_abort(phba, ndlp, 1);
+ mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1);
+ spin_lock_irq(phba->host->host_lock);
+ ndlp->nlp_flag |= NLP_DELAY_TMO;
+ spin_unlock_irq(phba->host->host_lock);
+
+ if (evt == NLP_EVT_RCV_LOGO) {
+ lpfc_els_rsp_acc(phba, ELS_CMD_ACC, cmdiocb, ndlp, NULL, 0);
+ }
+ else {
+ lpfc_issue_els_logo(phba, ndlp, 0);
+ }
+
+ /* Put ndlp on NPR list; set plogi timer for 1 sec */
+ ndlp->nlp_last_elscmd = (unsigned long)ELS_CMD_PLOGI;
+ ndlp->nlp_state = NLP_STE_NPR_NODE;
+ lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST);
+
+ return (ndlp->nlp_state);
+}
+
+static uint32_t
+lpfc_cmpl_plogi_plogi_issue(struct lpfc_hba * phba,
+ struct lpfc_nodelist * ndlp, void *arg,
+ uint32_t evt)
+{
+ struct lpfc_iocbq *cmdiocb, *rspiocb;
+ struct lpfc_dmabuf *pcmd, *prsp;
+ uint32_t *lp;
+ IOCB_t *irsp;
+ struct serv_parm *sp;
+ LPFC_MBOXQ_t *mbox;
+
+ cmdiocb = (struct lpfc_iocbq *) arg;
+ rspiocb = cmdiocb->context_un.rsp_iocb;
+
+ if (ndlp->nlp_flag & NLP_ACC_REGLOGIN) {
+ return (ndlp->nlp_state);
+ }
+
+ irsp = &rspiocb->iocb;
+
+ if (irsp->ulpStatus)
+ goto out;
+
+ pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
+
+ prsp = list_get_first(&pcmd->list,
+ struct lpfc_dmabuf,
+ list);
+ lp = (uint32_t *) prsp->virt;
+
+ sp = (struct serv_parm *) ((uint8_t *) lp + sizeof (uint32_t));
+ if (!lpfc_check_sparm(phba, ndlp, sp, CLASS3))
+ goto out;
+
+ /* PLOGI chkparm OK */
+ lpfc_printf_log(phba,
+ KERN_INFO,
+ LOG_ELS,
+ "%d:0121 PLOGI chkparm OK "
+ "Data: x%x x%x x%x x%x\n",
+ phba->brd_no,
+ ndlp->nlp_DID, ndlp->nlp_state,
+ ndlp->nlp_flag, ndlp->nlp_rpi);
+
+ if ((phba->cfg_fcp_class == 2) &&
+ (sp->cls2.classValid)) {
+ ndlp->nlp_fcp_info |= CLASS2;
+ } else {
+ ndlp->nlp_fcp_info |= CLASS3;
+ }
+ ndlp->nlp_class_sup = 0;
+ if (sp->cls1.classValid)
+ ndlp->nlp_class_sup |= FC_COS_CLASS1;
+ if (sp->cls2.classValid)
+ ndlp->nlp_class_sup |= FC_COS_CLASS2;
+ if (sp->cls3.classValid)
+ ndlp->nlp_class_sup |= FC_COS_CLASS3;
+ if (sp->cls4.classValid)
+ ndlp->nlp_class_sup |= FC_COS_CLASS4;
+ ndlp->nlp_maxframe =
+ ((sp->cmn.bbRcvSizeMsb & 0x0F) << 8) |
+ sp->cmn.bbRcvSizeLsb;
+
+ if (!(mbox = mempool_alloc(phba->mbox_mem_pool,
+ GFP_KERNEL)))
+ goto out;
+
+ lpfc_unreg_rpi(phba, ndlp);
+ if (lpfc_reg_login
+ (phba, irsp->un.elsreq64.remoteID,
+ (uint8_t *) sp, mbox, 0) == 0) {
+ /* set_slim mailbox command needs to
+ * execute first, queue this command to
+ * be processed later.
+ */
+ switch(ndlp->nlp_DID) {
+ case NameServer_DID:
+ mbox->mbox_cmpl =
+ lpfc_mbx_cmpl_ns_reg_login;
+ break;
+ case FDMI_DID:
+ mbox->mbox_cmpl =
+ lpfc_mbx_cmpl_fdmi_reg_login;
+ break;
+ default:
+ mbox->mbox_cmpl =
+ lpfc_mbx_cmpl_reg_login;
+ }
+ mbox->context2 = ndlp;
+ if (lpfc_sli_issue_mbox(phba, mbox,
+ (MBX_NOWAIT | MBX_STOP_IOCB))
+ != MBX_NOT_FINISHED) {
+ ndlp->nlp_state =
+ NLP_STE_REG_LOGIN_ISSUE;
+ lpfc_nlp_list(phba, ndlp,
+ NLP_REGLOGIN_LIST);
+ return (ndlp->nlp_state);
+ }
+ mempool_free(mbox, phba->mbox_mem_pool);
+ } else {
+ mempool_free(mbox, phba->mbox_mem_pool);
+ }
+
+ out:
+ /* Free this node since the driver cannot login or has the wrong
+ sparm */
+ lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
+ return (NLP_STE_FREED_NODE);
+}
+
+static uint32_t
+lpfc_device_rm_plogi_issue(struct lpfc_hba * phba,
+ struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
+{
+ /* software abort outstanding PLOGI */
+ lpfc_els_abort(phba, ndlp, 1);
+
+ lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
+ return (NLP_STE_FREED_NODE);
+}
+
+static uint32_t
+lpfc_device_recov_plogi_issue(struct lpfc_hba * phba,
+ struct lpfc_nodelist * ndlp, void *arg,
+ uint32_t evt)
+{
+ /* software abort outstanding PLOGI */
+ lpfc_els_abort(phba, ndlp, 1);
+
+ ndlp->nlp_state = NLP_STE_NPR_NODE;
+ lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST);
+ spin_lock_irq(phba->host->host_lock);
+ ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
+ spin_unlock_irq(phba->host->host_lock);
+
+ return (ndlp->nlp_state);
+}
+
+static uint32_t
+lpfc_rcv_plogi_adisc_issue(struct lpfc_hba * phba,
+ struct lpfc_nodelist * ndlp, void *arg,
+ uint32_t evt)
+{
+ struct lpfc_iocbq *cmdiocb;
+
+ /* software abort outstanding ADISC */
+ lpfc_els_abort(phba, ndlp, 1);
+
+ cmdiocb = (struct lpfc_iocbq *) arg;
+
+ if (lpfc_rcv_plogi(phba, ndlp, cmdiocb)) {
+ return (ndlp->nlp_state);
+ }
+ ndlp->nlp_state = NLP_STE_PLOGI_ISSUE;
+ lpfc_nlp_list(phba, ndlp, NLP_PLOGI_LIST);
+ lpfc_issue_els_plogi(phba, ndlp, 0);
+
+ return (ndlp->nlp_state);
+}
+
+static uint32_t
+lpfc_rcv_prli_adisc_issue(struct lpfc_hba * phba,
+ struct lpfc_nodelist * ndlp, void *arg,
+ uint32_t evt)
+{
+ struct lpfc_iocbq *cmdiocb;
+
+ cmdiocb = (struct lpfc_iocbq *) arg;
+
+ lpfc_els_rsp_prli_acc(phba, cmdiocb, ndlp);
+ return (ndlp->nlp_state);
+}
+
+static uint32_t
+lpfc_rcv_logo_adisc_issue(struct lpfc_hba * phba,
+ struct lpfc_nodelist * ndlp, void *arg,
+ uint32_t evt)
+{
+ struct lpfc_iocbq *cmdiocb;
+
+ cmdiocb = (struct lpfc_iocbq *) arg;
+
+ /* software abort outstanding ADISC */
+ lpfc_els_abort(phba, ndlp, 0);
+
+ lpfc_rcv_logo(phba, ndlp, cmdiocb);
+ return (ndlp->nlp_state);
+}
+
+static uint32_t
+lpfc_rcv_padisc_adisc_issue(struct lpfc_hba * phba,
+ struct lpfc_nodelist * ndlp, void *arg,
+ uint32_t evt)
+{
+ struct lpfc_iocbq *cmdiocb;
+
+ cmdiocb = (struct lpfc_iocbq *) arg;
+
+ lpfc_rcv_padisc(phba, ndlp, cmdiocb);
+ return (ndlp->nlp_state);
+}
+
+static uint32_t
+lpfc_rcv_prlo_adisc_issue(struct lpfc_hba * phba,
+ struct lpfc_nodelist * ndlp, void *arg,
+ uint32_t evt)
+{
+ struct lpfc_iocbq *cmdiocb;
+
+ cmdiocb = (struct lpfc_iocbq *) arg;
+
+ /* Treat like rcv logo */
+ lpfc_rcv_logo(phba, ndlp, cmdiocb);
+ return (ndlp->nlp_state);
+}
+
+static uint32_t
+lpfc_cmpl_adisc_adisc_issue(struct lpfc_hba * phba,
+ struct lpfc_nodelist * ndlp, void *arg,
+ uint32_t evt)
+{
+ struct lpfc_iocbq *cmdiocb, *rspiocb;
+ IOCB_t *irsp;
+ ADISC *ap;
+
+ cmdiocb = (struct lpfc_iocbq *) arg;
+ rspiocb = cmdiocb->context_un.rsp_iocb;
+
+ ap = (ADISC *)lpfc_check_elscmpl_iocb(phba, cmdiocb, rspiocb);
+ irsp = &rspiocb->iocb;
+
+ if ((irsp->ulpStatus) ||
+ (!lpfc_check_adisc(phba, ndlp, &ap->nodeName, &ap->portName))) {
+ ndlp->nlp_last_elscmd = (unsigned long)ELS_CMD_PLOGI;
+ /* 1 sec timeout */
+ mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ);
+ spin_lock_irq(phba->host->host_lock);
+ ndlp->nlp_flag |= NLP_DELAY_TMO;
+ spin_unlock_irq(phba->host->host_lock);
+
+ memset(&ndlp->nlp_nodename, 0, sizeof (struct lpfc_name));
+ memset(&ndlp->nlp_portname, 0, sizeof (struct lpfc_name));
+
+ ndlp->nlp_state = NLP_STE_NPR_NODE;
+ lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST);
+ lpfc_unreg_rpi(phba, ndlp);
+ return (ndlp->nlp_state);
+ }
+ ndlp->nlp_state = NLP_STE_MAPPED_NODE;
+ lpfc_nlp_list(phba, ndlp, NLP_MAPPED_LIST);
+ return (ndlp->nlp_state);
+}
+
+static uint32_t
+lpfc_device_rm_adisc_issue(struct lpfc_hba * phba,
+ struct lpfc_nodelist * ndlp, void *arg,
+ uint32_t evt)
+{
+ /* software abort outstanding ADISC */
+ lpfc_els_abort(phba, ndlp, 1);
+
+ lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
+ return (NLP_STE_FREED_NODE);
+}
+
+static uint32_t
+lpfc_device_recov_adisc_issue(struct lpfc_hba * phba,
+ struct lpfc_nodelist * ndlp, void *arg,
+ uint32_t evt)
+{
+ /* software abort outstanding ADISC */
+ lpfc_els_abort(phba, ndlp, 1);
+
+ ndlp->nlp_state = NLP_STE_NPR_NODE;
+ lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST);
+ spin_lock_irq(phba->host->host_lock);
+ ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
+ spin_unlock_irq(phba->host->host_lock);
+
+ lpfc_disc_set_adisc(phba, ndlp);
+ return (ndlp->nlp_state);
+}
+
+static uint32_t
+lpfc_rcv_plogi_reglogin_issue(struct lpfc_hba * phba,
+ struct lpfc_nodelist * ndlp, void *arg,
+ uint32_t evt)
+{
+ struct lpfc_iocbq *cmdiocb;
+
+ cmdiocb = (struct lpfc_iocbq *) arg;
+
+ lpfc_rcv_plogi(phba, ndlp, cmdiocb);
+ return (ndlp->nlp_state);
+}
+
+static uint32_t
+lpfc_rcv_prli_reglogin_issue(struct lpfc_hba * phba,
+ struct lpfc_nodelist * ndlp, void *arg,
+ uint32_t evt)
+{
+ struct lpfc_iocbq *cmdiocb;
+
+ cmdiocb = (struct lpfc_iocbq *) arg;
+
+ lpfc_els_rsp_prli_acc(phba, cmdiocb, ndlp);
+ return (ndlp->nlp_state);
+}
+
+static uint32_t
+lpfc_rcv_logo_reglogin_issue(struct lpfc_hba * phba,
+ struct lpfc_nodelist * ndlp, void *arg,
+ uint32_t evt)
+{
+ struct lpfc_iocbq *cmdiocb;
+
+ cmdiocb = (struct lpfc_iocbq *) arg;
+
+ lpfc_rcv_logo(phba, ndlp, cmdiocb);
+ return (ndlp->nlp_state);
+}
+
+static uint32_t
+lpfc_rcv_padisc_reglogin_issue(struct lpfc_hba * phba,
+ struct lpfc_nodelist * ndlp, void *arg,
+ uint32_t evt)
+{
+ struct lpfc_iocbq *cmdiocb;
+
+ cmdiocb = (struct lpfc_iocbq *) arg;
+
+ lpfc_rcv_padisc(phba, ndlp, cmdiocb);
+ return (ndlp->nlp_state);
+}
+
+static uint32_t
+lpfc_rcv_prlo_reglogin_issue(struct lpfc_hba * phba,
+ struct lpfc_nodelist * ndlp, void *arg,
+ uint32_t evt)
+{
+ struct lpfc_iocbq *cmdiocb;
+
+ cmdiocb = (struct lpfc_iocbq *) arg;
+ lpfc_els_rsp_acc(phba, ELS_CMD_ACC, cmdiocb, ndlp, NULL, 0);
+ return (ndlp->nlp_state);
+}
+
+static uint32_t
+lpfc_cmpl_reglogin_reglogin_issue(struct lpfc_hba * phba,
+ struct lpfc_nodelist * ndlp,
+ void *arg, uint32_t evt)
+{
+ LPFC_MBOXQ_t *pmb;
+ MAILBOX_t *mb;
+ uint32_t did;
+
+ pmb = (LPFC_MBOXQ_t *) arg;
+ mb = &pmb->mb;
+ did = mb->un.varWords[1];
+ if (mb->mbxStatus) {
+ /* RegLogin failed */
+ lpfc_printf_log(phba,
+ KERN_ERR,
+ LOG_DISCOVERY,
+ "%d:0246 RegLogin failed Data: x%x x%x x%x\n",
+ phba->brd_no,
+ did, mb->mbxStatus, phba->hba_state);
+
+ mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1);
+ spin_lock_irq(phba->host->host_lock);
+ ndlp->nlp_flag |= NLP_DELAY_TMO;
+ spin_unlock_irq(phba->host->host_lock);
+
+ lpfc_issue_els_logo(phba, ndlp, 0);
+ /* Put ndlp on NPR list; set plogi timer for 1 sec */
+ ndlp->nlp_last_elscmd = (unsigned long)ELS_CMD_PLOGI;
+ ndlp->nlp_state = NLP_STE_NPR_NODE;
+ lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST);
+ return (ndlp->nlp_state);
+ }
+
+ if (ndlp->nlp_rpi != 0)
+ lpfc_findnode_remove_rpi(phba, ndlp->nlp_rpi);
+
+ ndlp->nlp_rpi = mb->un.varWords[0];
+ lpfc_addnode_rpi(phba, ndlp, ndlp->nlp_rpi);
+
+ /* Only if we are not a fabric nport do we issue PRLI */
+ if (!(ndlp->nlp_type & NLP_FABRIC)) {
+ ndlp->nlp_state = NLP_STE_PRLI_ISSUE;
+ lpfc_nlp_list(phba, ndlp, NLP_PRLI_LIST);
+ lpfc_issue_els_prli(phba, ndlp, 0);
+ } else {
+ ndlp->nlp_state = NLP_STE_UNMAPPED_NODE;
+ lpfc_nlp_list(phba, ndlp, NLP_UNMAPPED_LIST);
+ }
+ return (ndlp->nlp_state);
+}
+
+static uint32_t
+lpfc_device_rm_reglogin_issue(struct lpfc_hba * phba,
+ struct lpfc_nodelist * ndlp, void *arg,
+ uint32_t evt)
+{
+ lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
+ return (NLP_STE_FREED_NODE);
+}
+
+static uint32_t
+lpfc_device_recov_reglogin_issue(struct lpfc_hba * phba,
+ struct lpfc_nodelist * ndlp, void *arg,
+ uint32_t evt)
+{
+ ndlp->nlp_state = NLP_STE_NPR_NODE;
+ lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST);
+ spin_lock_irq(phba->host->host_lock);
+ ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
+ spin_unlock_irq(phba->host->host_lock);
+ return (ndlp->nlp_state);
+}
+
+static uint32_t
+lpfc_rcv_plogi_prli_issue(struct lpfc_hba * phba,
+ struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
+{
+ struct lpfc_iocbq *cmdiocb;
+
+ cmdiocb = (struct lpfc_iocbq *) arg;
+
+ lpfc_rcv_plogi(phba, ndlp, cmdiocb);
+ return (ndlp->nlp_state);
+}
+
+static uint32_t
+lpfc_rcv_prli_prli_issue(struct lpfc_hba * phba,
+ struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
+{
+ struct lpfc_iocbq *cmdiocb;
+
+ cmdiocb = (struct lpfc_iocbq *) arg;
+
+ lpfc_els_rsp_prli_acc(phba, cmdiocb, ndlp);
+ return (ndlp->nlp_state);
+}
+
+static uint32_t
+lpfc_rcv_logo_prli_issue(struct lpfc_hba * phba,
+ struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
+{
+ struct lpfc_iocbq *cmdiocb;
+
+ cmdiocb = (struct lpfc_iocbq *) arg;
+
+ /* Software abort outstanding PRLI before sending acc */
+ lpfc_els_abort(phba, ndlp, 1);
+
+ lpfc_rcv_logo(phba, ndlp, cmdiocb);
+ return (ndlp->nlp_state);
+}
+
+static uint32_t
+lpfc_rcv_padisc_prli_issue(struct lpfc_hba * phba,
+ struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
+{
+ struct lpfc_iocbq *cmdiocb;
+
+ cmdiocb = (struct lpfc_iocbq *) arg;
+
+ lpfc_rcv_padisc(phba, ndlp, cmdiocb);
+ return (ndlp->nlp_state);
+}
+
+/* This routine is invoked when we rcv a PRLO request from an nport
+ * we are logged into. We should send back a PRLO rsp setting the
+ * appropriate bits.
+ * NEXT STATE = PRLI_ISSUE
+ */
+static uint32_t
+lpfc_rcv_prlo_prli_issue(struct lpfc_hba * phba,
+ struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
+{
+ struct lpfc_iocbq *cmdiocb;
+
+ cmdiocb = (struct lpfc_iocbq *) arg;
+ lpfc_els_rsp_acc(phba, ELS_CMD_ACC, cmdiocb, ndlp, NULL, 0);
+ return (ndlp->nlp_state);
+}
+
+static uint32_t
+lpfc_cmpl_prli_prli_issue(struct lpfc_hba * phba,
+ struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
+{
+ struct lpfc_iocbq *cmdiocb, *rspiocb;
+ IOCB_t *irsp;
+ PRLI *npr;
+
+ cmdiocb = (struct lpfc_iocbq *) arg;
+ rspiocb = cmdiocb->context_un.rsp_iocb;
+ npr = (PRLI *)lpfc_check_elscmpl_iocb(phba, cmdiocb, rspiocb);
+
+ irsp = &rspiocb->iocb;
+ if (irsp->ulpStatus) {
+ ndlp->nlp_state = NLP_STE_UNMAPPED_NODE;
+ lpfc_nlp_list(phba, ndlp, NLP_UNMAPPED_LIST);
+ return (ndlp->nlp_state);
+ }
+
+ /* Check out PRLI rsp */
+ ndlp->nlp_type &= ~(NLP_FCP_TARGET | NLP_FCP_INITIATOR);
+ ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
+ if ((npr->acceptRspCode == PRLI_REQ_EXECUTED) &&
+ (npr->prliType == PRLI_FCP_TYPE)) {
+ if (npr->initiatorFunc)
+ ndlp->nlp_type |= NLP_FCP_INITIATOR;
+ if (npr->targetFunc)
+ ndlp->nlp_type |= NLP_FCP_TARGET;
+ if (npr->Retry)
+ ndlp->nlp_fcp_info |= NLP_FCP_2_DEVICE;
+ }
+
+ ndlp->nlp_state = NLP_STE_MAPPED_NODE;
+ lpfc_nlp_list(phba, ndlp, NLP_MAPPED_LIST);
+ return (ndlp->nlp_state);
+}
+
+/*! lpfc_device_rm_prli_issue
+ *
+ * \pre
+ * \post
+ * \param phba
+ * \param ndlp
+ * \param arg
+ * \param evt
+ * \return uint32_t
+ *
+ * \b Description:
+ * This routine is invoked when we receive a request to remove an nport
+ * we are in the process of PRLIing. We should software-abort the
+ * outstanding PRLI, unreg the login, and send a logout. We will change
+ * the node state to UNUSED_NODE and put it on the plogi list so it can
+ * be freed when the LOGO completes.
+ *
+ */
+static uint32_t
+lpfc_device_rm_prli_issue(struct lpfc_hba * phba,
+ struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
+{
+ /* software abort outstanding PRLI */
+ lpfc_els_abort(phba, ndlp, 1);
+
+ lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
+ return (NLP_STE_FREED_NODE);
+}
+
+
+/*! lpfc_device_recov_prli_issue
+ *
+ * \pre
+ * \post
+ * \param phba
+ * \param ndlp
+ * \param arg
+ * \param evt
+ * \return uint32_t
+ *
+ * \b Description:
+ * The routine is invoked when the state of a device is unknown, such as
+ * during a link down. We should remove the nodelist entry from the
+ * unmapped list, issue an UNREG_LOGIN, do a software abort of the
+ * outstanding PRLI command, then free the node entry.
+ */
+static uint32_t
+lpfc_device_recov_prli_issue(struct lpfc_hba * phba,
+ struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
+{
+ /* software abort outstanding PRLI */
+ lpfc_els_abort(phba, ndlp, 1);
+
+ ndlp->nlp_state = NLP_STE_NPR_NODE;
+ lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST);
+ spin_lock_irq(phba->host->host_lock);
+ ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
+ spin_unlock_irq(phba->host->host_lock);
+ return (ndlp->nlp_state);
+}
+
+static uint32_t
+lpfc_rcv_plogi_unmap_node(struct lpfc_hba * phba,
+ struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
+{
+ struct lpfc_iocbq *cmdiocb;
+
+ cmdiocb = (struct lpfc_iocbq *) arg;
+
+ lpfc_rcv_plogi(phba, ndlp, cmdiocb);
+ return (ndlp->nlp_state);
+}
+
+static uint32_t
+lpfc_rcv_prli_unmap_node(struct lpfc_hba * phba,
+ struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
+{
+ struct lpfc_iocbq *cmdiocb;
+
+ cmdiocb = (struct lpfc_iocbq *) arg;
+
+ lpfc_rcv_prli(phba, ndlp, cmdiocb);
+ lpfc_els_rsp_prli_acc(phba, cmdiocb, ndlp);
+ return (ndlp->nlp_state);
+}
+
+static uint32_t
+lpfc_rcv_logo_unmap_node(struct lpfc_hba * phba,
+ struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
+{
+ struct lpfc_iocbq *cmdiocb;
+
+ cmdiocb = (struct lpfc_iocbq *) arg;
+
+ lpfc_rcv_logo(phba, ndlp, cmdiocb);
+ return (ndlp->nlp_state);
+}
+
+static uint32_t
+lpfc_rcv_padisc_unmap_node(struct lpfc_hba * phba,
+ struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
+{
+ struct lpfc_iocbq *cmdiocb;
+
+ cmdiocb = (struct lpfc_iocbq *) arg;
+
+ lpfc_rcv_padisc(phba, ndlp, cmdiocb);
+ return (ndlp->nlp_state);
+}
+
+static uint32_t
+lpfc_rcv_prlo_unmap_node(struct lpfc_hba * phba,
+ struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
+{
+ struct lpfc_iocbq *cmdiocb;
+
+ cmdiocb = (struct lpfc_iocbq *) arg;
+
+ /* Treat like rcv logo */
+ lpfc_rcv_logo(phba, ndlp, cmdiocb);
+ return (ndlp->nlp_state);
+}
+
+static uint32_t
+lpfc_device_recov_unmap_node(struct lpfc_hba * phba,
+ struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
+{
+ ndlp->nlp_state = NLP_STE_NPR_NODE;
+ lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST);
+ ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
+ lpfc_disc_set_adisc(phba, ndlp);
+
+ return (ndlp->nlp_state);
+}
+
+static uint32_t
+lpfc_rcv_plogi_mapped_node(struct lpfc_hba * phba,
+ struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
+{
+ struct lpfc_iocbq *cmdiocb;
+
+ cmdiocb = (struct lpfc_iocbq *) arg;
+
+ lpfc_rcv_plogi(phba, ndlp, cmdiocb);
+ return (ndlp->nlp_state);
+}
+
+static uint32_t
+lpfc_rcv_prli_mapped_node(struct lpfc_hba * phba,
+ struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
+{
+ struct lpfc_iocbq *cmdiocb;
+
+ cmdiocb = (struct lpfc_iocbq *) arg;
+
+ lpfc_els_rsp_prli_acc(phba, cmdiocb, ndlp);
+ return (ndlp->nlp_state);
+}
+
+static uint32_t
+lpfc_rcv_logo_mapped_node(struct lpfc_hba * phba,
+ struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
+{
+ struct lpfc_iocbq *cmdiocb;
+
+ cmdiocb = (struct lpfc_iocbq *) arg;
+
+ lpfc_rcv_logo(phba, ndlp, cmdiocb);
+ return (ndlp->nlp_state);
+}
+
+static uint32_t
+lpfc_rcv_padisc_mapped_node(struct lpfc_hba * phba,
+ struct lpfc_nodelist * ndlp, void *arg,
+ uint32_t evt)
+{
+ struct lpfc_iocbq *cmdiocb;
+
+ cmdiocb = (struct lpfc_iocbq *) arg;
+
+ lpfc_rcv_padisc(phba, ndlp, cmdiocb);
+ return (ndlp->nlp_state);
+}
+
+static uint32_t
+lpfc_rcv_prlo_mapped_node(struct lpfc_hba * phba,
+ struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
+{
+ struct lpfc_iocbq *cmdiocb;
+
+ cmdiocb = (struct lpfc_iocbq *) arg;
+
+ /* flush the target */
+ spin_lock_irq(phba->host->host_lock);
+ lpfc_sli_abort_iocb(phba, &phba->sli.ring[phba->sli.fcp_ring],
+ ndlp->nlp_sid, 0, 0, LPFC_CTX_TGT);
+ spin_unlock_irq(phba->host->host_lock);
+
+ /* Treat like rcv logo */
+ lpfc_rcv_logo(phba, ndlp, cmdiocb);
+ return (ndlp->nlp_state);
+}
+
+static uint32_t
+lpfc_device_recov_mapped_node(struct lpfc_hba * phba,
+ struct lpfc_nodelist * ndlp, void *arg,
+ uint32_t evt)
+{
+ ndlp->nlp_state = NLP_STE_NPR_NODE;
+ lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST);
+ spin_lock_irq(phba->host->host_lock);
+ ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
+ spin_unlock_irq(phba->host->host_lock);
+ lpfc_disc_set_adisc(phba, ndlp);
+ return (ndlp->nlp_state);
+}
+
+static uint32_t
+lpfc_rcv_plogi_npr_node(struct lpfc_hba * phba,
+ struct lpfc_nodelist * ndlp, void *arg,
+ uint32_t evt)
+{
+ struct lpfc_iocbq *cmdiocb;
+
+ cmdiocb = (struct lpfc_iocbq *) arg;
+
+ /* Ignore PLOGI if we have an outstanding LOGO */
+ if (ndlp->nlp_flag & NLP_LOGO_SND) {
+ return (ndlp->nlp_state);
+ }
+
+ if (lpfc_rcv_plogi(phba, ndlp, cmdiocb)) {
+ spin_lock_irq(phba->host->host_lock);
+ ndlp->nlp_flag &= ~(NLP_NPR_ADISC | NLP_NPR_2B_DISC);
+ spin_unlock_irq(phba->host->host_lock);
+ return (ndlp->nlp_state);
+ }
+
+ /* send PLOGI immediately, move to PLOGI issue state */
+ if (!(ndlp->nlp_flag & NLP_DELAY_TMO)) {
+ ndlp->nlp_state = NLP_STE_PLOGI_ISSUE;
+ lpfc_nlp_list(phba, ndlp, NLP_PLOGI_LIST);
+ lpfc_issue_els_plogi(phba, ndlp, 0);
+ }
+ return (ndlp->nlp_state);
+}
+
+static uint32_t
+lpfc_rcv_prli_npr_node(struct lpfc_hba * phba,
+ struct lpfc_nodelist * ndlp, void *arg,
+ uint32_t evt)
+{
+ struct lpfc_iocbq *cmdiocb;
+ struct ls_rjt stat;
+
+ cmdiocb = (struct lpfc_iocbq *) arg;
+
+ memset(&stat, 0, sizeof (struct ls_rjt));
+ stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
+ stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
+ lpfc_els_rsp_reject(phba, stat.un.lsRjtError, cmdiocb, ndlp);
+
+ if (!(ndlp->nlp_flag & NLP_DELAY_TMO)) {
+ if (ndlp->nlp_flag & NLP_NPR_ADISC) {
+ ndlp->nlp_state = NLP_STE_ADISC_ISSUE;
+ lpfc_nlp_list(phba, ndlp, NLP_ADISC_LIST);
+ lpfc_issue_els_adisc(phba, ndlp, 0);
+ } else {
+ ndlp->nlp_state = NLP_STE_PLOGI_ISSUE;
+ lpfc_nlp_list(phba, ndlp, NLP_PLOGI_LIST);
+ lpfc_issue_els_plogi(phba, ndlp, 0);
+ }
+ }
+ return (ndlp->nlp_state);
+}
+
+static uint32_t
+lpfc_rcv_logo_npr_node(struct lpfc_hba * phba,
+ struct lpfc_nodelist * ndlp, void *arg,
+ uint32_t evt)
+{
+ struct lpfc_iocbq *cmdiocb;
+
+ cmdiocb = (struct lpfc_iocbq *) arg;
+
+ lpfc_rcv_logo(phba, ndlp, cmdiocb);
+ return (ndlp->nlp_state);
+}
+
+static uint32_t
+lpfc_rcv_padisc_npr_node(struct lpfc_hba * phba,
+ struct lpfc_nodelist * ndlp, void *arg,
+ uint32_t evt)
+{
+ struct lpfc_iocbq *cmdiocb;
+
+ cmdiocb = (struct lpfc_iocbq *) arg;
+
+ lpfc_rcv_padisc(phba, ndlp, cmdiocb);
+
+ if (!(ndlp->nlp_flag & NLP_DELAY_TMO)) {
+ if (ndlp->nlp_flag & NLP_NPR_ADISC) {
+ ndlp->nlp_state = NLP_STE_ADISC_ISSUE;
+ lpfc_nlp_list(phba, ndlp, NLP_ADISC_LIST);
+ lpfc_issue_els_adisc(phba, ndlp, 0);
+ } else {
+ ndlp->nlp_state = NLP_STE_PLOGI_ISSUE;
+ lpfc_nlp_list(phba, ndlp, NLP_PLOGI_LIST);
+ lpfc_issue_els_plogi(phba, ndlp, 0);
+ }
+ }
+ return (ndlp->nlp_state);
+}
+
+static uint32_t
+lpfc_rcv_prlo_npr_node(struct lpfc_hba * phba,
+ struct lpfc_nodelist * ndlp, void *arg,
+ uint32_t evt)
+{
+ struct lpfc_iocbq *cmdiocb;
+
+ cmdiocb = (struct lpfc_iocbq *) arg;
+
+ lpfc_els_rsp_acc(phba, ELS_CMD_ACC, cmdiocb, ndlp, NULL, 0);
+
+ if (ndlp->nlp_flag & NLP_DELAY_TMO) {
+ if (ndlp->nlp_last_elscmd == (unsigned long)ELS_CMD_PLOGI) {
+ return (ndlp->nlp_state);
+ } else {
+ spin_lock_irq(phba->host->host_lock);
+ ndlp->nlp_flag &= ~NLP_DELAY_TMO;
+ spin_unlock_irq(phba->host->host_lock);
+ del_timer_sync(&ndlp->nlp_delayfunc);
+ if (!list_empty(&ndlp->els_retry_evt.evt_listp))
+ list_del_init(&ndlp->els_retry_evt.evt_listp);
+ }
+ }
+
+ ndlp->nlp_state = NLP_STE_PLOGI_ISSUE;
+ lpfc_nlp_list(phba, ndlp, NLP_PLOGI_LIST);
+ lpfc_issue_els_plogi(phba, ndlp, 0);
+ return (ndlp->nlp_state);
+}
+
+static uint32_t
+lpfc_cmpl_logo_npr_node(struct lpfc_hba * phba,
+ struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
+{
+ lpfc_unreg_rpi(phba, ndlp);
+ /* Just return the current state after unregistering the rpi */
+ return (ndlp->nlp_state);
+}
+
+static uint32_t
+lpfc_cmpl_reglogin_npr_node(struct lpfc_hba * phba,
+ struct lpfc_nodelist * ndlp, void *arg,
+ uint32_t evt)
+{
+ LPFC_MBOXQ_t *pmb;
+ MAILBOX_t *mb;
+
+ pmb = (LPFC_MBOXQ_t *) arg;
+ mb = &pmb->mb;
+
+ /* save rpi */
+ if (ndlp->nlp_rpi != 0)
+ lpfc_findnode_remove_rpi(phba, ndlp->nlp_rpi);
+
+ ndlp->nlp_rpi = mb->un.varWords[0];
+ lpfc_addnode_rpi(phba, ndlp, ndlp->nlp_rpi);
+
+ return (ndlp->nlp_state);
+}
+
+static uint32_t
+lpfc_device_rm_npr_node(struct lpfc_hba * phba,
+ struct lpfc_nodelist * ndlp, void *arg,
+ uint32_t evt)
+{
+ lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
+ return (NLP_STE_FREED_NODE);
+}
+
+static uint32_t
+lpfc_device_recov_npr_node(struct lpfc_hba * phba,
+ struct lpfc_nodelist * ndlp, void *arg,
+ uint32_t evt)
+{
+ spin_lock_irq(phba->host->host_lock);
+ ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
+ spin_unlock_irq(phba->host->host_lock);
+ return (ndlp->nlp_state);
+}
+
+
+/* This next section defines the NPort Discovery State Machine */
+
+/* There are 4 different double linked lists nodelist entries can reside on.
+ * The plogi list and adisc list are used when Link Up discovery or RSCN
+ * processing is needed. Each list holds the nodes that we will send PLOGI
+ * or ADISC on. These lists will keep track of what nodes will be affected
+ * by an RSCN, or a Link Up (typically, all nodes are affected on Link Up).
+ * The unmapped_list will contain all nodes that we have successfully logged
+ * into at the Fibre Channel level. The mapped_list will contain all nodes
+ * that are mapped FCP targets.
+ */
+/*
+ * The bind list is a list of undiscovered (potentially non-existent) nodes
+ * that we have saved binding information on. This information is used when
+ * nodes transition from the unmapped to the mapped list.
+ */
+/* For UNUSED_NODE state, the node has just been allocated.
+ * For PLOGI_ISSUE and REG_LOGIN_ISSUE, the node is on
+ * the PLOGI list. For REG_LOGIN_COMPL, the node is taken off the PLOGI list
+ * and put on the unmapped list. For ADISC processing, the node is taken off
+ * the ADISC list and placed on either the mapped or unmapped list (depending
+ * on its previous state). Once on the unmapped list, a PRLI is issued and the
+ * state changed to PRLI_ISSUE. When the PRLI completion occurs, the state is
+ * changed to UNMAPPED_NODE. If the completion indicates a mapped
+ * node, the node is taken off the unmapped list. The binding list is checked
+ * for a valid binding, or a binding is automatically assigned. If binding
+ * assignment is unsuccessful, the node is left on the unmapped list. If
+ * binding assignment is successful, the associated binding list entry (if
+ * any) is removed, and the node is placed on the mapped list.
+ */
+/*
+ * For a Link Down, all nodes on the ADISC, PLOGI, unmapped or mapped
+ * lists will receive a DEVICE_RECOVERY event. If the linkdown or nodev timers
+ * expire, all affected nodes will receive a DEVICE_RM event.
+ */
+/*
+ * For a Link Up or RSCN, all nodes will move from the mapped / unmapped lists
+ * to either the ADISC or PLOGI list. After a Nameserver query or ALPA loopmap
+ * check, additional nodes may be added or removed (via DEVICE_RM) to / from
+ * the PLOGI or ADISC lists. Once the PLOGI and ADISC lists are populated,
+ * we will first process the ADISC list. 32 entries are processed initially
+ * and ADISC is initiated for each one. Completions / Events for each node
+ * are funneled through the state machine. As each node finishes ADISC
+ * processing, it starts ADISC for any nodes waiting for ADISC processing.
+ * If no nodes are waiting, and the ADISC list count is identically 0, then
+ * we are done. For Link Up discovery, since all nodes on the PLOGI list are
+ * UNREG_LOGIN'ed, we can issue a CLEAR_LA and re-enable Link Events. Next we
+ * will process the PLOGI list. 32 entries are processed initially and PLOGI
+ * is initiated for each one. Completions / Events for each node are funneled
+ * through the state machine. As each node finishes PLOGI processing, it
+ * starts PLOGI for any nodes waiting for PLOGI processing. If no nodes are
+ * waiting, and the PLOGI list count is identically 0, then we are done. We
+ * have now completed discovery / RSCN
+ * handling. Upon completion, ALL nodes should be on either the mapped or
+ * unmapped lists.
+ */
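+
+/*
+ * Illustrative dispatch sketch (not driver code): the action table below is
+ * indexed by (current state * NLP_EVT_MAX_EVENT) + event, exactly as done in
+ * lpfc_disc_state_machine(). Assuming the NLP_STE_NPR_NODE and
+ * NLP_EVT_RCV_PLOGI enum values from lpfc_disc.h, a PLOGI received on a node
+ * in NPR state selects
+ *
+ *	lpfc_disc_action[(NLP_STE_NPR_NODE * NLP_EVT_MAX_EVENT) +
+ *			 NLP_EVT_RCV_PLOGI]
+ *
+ * which is lpfc_rcv_plogi_npr_node() in the table below.
+ */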
+
+static uint32_t (*lpfc_disc_action[NLP_STE_MAX_STATE * NLP_EVT_MAX_EVENT])
+ (struct lpfc_hba *, struct lpfc_nodelist *, void *, uint32_t) = {
+ /* Action routine Event Current State */
+ lpfc_rcv_plogi_unused_node, /* RCV_PLOGI UNUSED_NODE */
+ lpfc_rcv_els_unused_node, /* RCV_PRLI */
+ lpfc_rcv_logo_unused_node, /* RCV_LOGO */
+ lpfc_rcv_els_unused_node, /* RCV_ADISC */
+ lpfc_rcv_els_unused_node, /* RCV_PDISC */
+ lpfc_rcv_els_unused_node, /* RCV_PRLO */
+ lpfc_disc_illegal, /* CMPL_PLOGI */
+ lpfc_disc_illegal, /* CMPL_PRLI */
+ lpfc_cmpl_logo_unused_node, /* CMPL_LOGO */
+ lpfc_disc_illegal, /* CMPL_ADISC */
+ lpfc_disc_illegal, /* CMPL_REG_LOGIN */
+ lpfc_device_rm_unused_node, /* DEVICE_RM */
+ lpfc_disc_illegal, /* DEVICE_RECOVERY */
+
+ lpfc_rcv_plogi_plogi_issue, /* RCV_PLOGI PLOGI_ISSUE */
+ lpfc_rcv_els_plogi_issue, /* RCV_PRLI */
+ lpfc_rcv_els_plogi_issue, /* RCV_LOGO */
+ lpfc_rcv_els_plogi_issue, /* RCV_ADISC */
+ lpfc_rcv_els_plogi_issue, /* RCV_PDISC */
+ lpfc_rcv_els_plogi_issue, /* RCV_PRLO */
+ lpfc_cmpl_plogi_plogi_issue, /* CMPL_PLOGI */
+ lpfc_disc_illegal, /* CMPL_PRLI */
+ lpfc_disc_illegal, /* CMPL_LOGO */
+ lpfc_disc_illegal, /* CMPL_ADISC */
+ lpfc_disc_illegal, /* CMPL_REG_LOGIN */
+ lpfc_device_rm_plogi_issue, /* DEVICE_RM */
+ lpfc_device_recov_plogi_issue, /* DEVICE_RECOVERY */
+
+ lpfc_rcv_plogi_adisc_issue, /* RCV_PLOGI ADISC_ISSUE */
+ lpfc_rcv_prli_adisc_issue, /* RCV_PRLI */
+ lpfc_rcv_logo_adisc_issue, /* RCV_LOGO */
+ lpfc_rcv_padisc_adisc_issue, /* RCV_ADISC */
+ lpfc_rcv_padisc_adisc_issue, /* RCV_PDISC */
+ lpfc_rcv_prlo_adisc_issue, /* RCV_PRLO */
+ lpfc_disc_illegal, /* CMPL_PLOGI */
+ lpfc_disc_illegal, /* CMPL_PRLI */
+ lpfc_disc_illegal, /* CMPL_LOGO */
+ lpfc_cmpl_adisc_adisc_issue, /* CMPL_ADISC */
+ lpfc_disc_illegal, /* CMPL_REG_LOGIN */
+ lpfc_device_rm_adisc_issue, /* DEVICE_RM */
+ lpfc_device_recov_adisc_issue, /* DEVICE_RECOVERY */
+
+ lpfc_rcv_plogi_reglogin_issue, /* RCV_PLOGI REG_LOGIN_ISSUE */
+	lpfc_rcv_prli_reglogin_issue,	/* RCV_PRLI */
+ lpfc_rcv_logo_reglogin_issue, /* RCV_LOGO */
+ lpfc_rcv_padisc_reglogin_issue, /* RCV_ADISC */
+ lpfc_rcv_padisc_reglogin_issue, /* RCV_PDISC */
+ lpfc_rcv_prlo_reglogin_issue, /* RCV_PRLO */
+ lpfc_disc_illegal, /* CMPL_PLOGI */
+ lpfc_disc_illegal, /* CMPL_PRLI */
+ lpfc_disc_illegal, /* CMPL_LOGO */
+ lpfc_disc_illegal, /* CMPL_ADISC */
+ lpfc_cmpl_reglogin_reglogin_issue,/* CMPL_REG_LOGIN */
+ lpfc_device_rm_reglogin_issue, /* DEVICE_RM */
+ lpfc_device_recov_reglogin_issue,/* DEVICE_RECOVERY */
+
+ lpfc_rcv_plogi_prli_issue, /* RCV_PLOGI PRLI_ISSUE */
+ lpfc_rcv_prli_prli_issue, /* RCV_PRLI */
+ lpfc_rcv_logo_prli_issue, /* RCV_LOGO */
+ lpfc_rcv_padisc_prli_issue, /* RCV_ADISC */
+ lpfc_rcv_padisc_prli_issue, /* RCV_PDISC */
+ lpfc_rcv_prlo_prli_issue, /* RCV_PRLO */
+ lpfc_disc_illegal, /* CMPL_PLOGI */
+ lpfc_cmpl_prli_prli_issue, /* CMPL_PRLI */
+ lpfc_disc_illegal, /* CMPL_LOGO */
+ lpfc_disc_illegal, /* CMPL_ADISC */
+ lpfc_disc_illegal, /* CMPL_REG_LOGIN */
+ lpfc_device_rm_prli_issue, /* DEVICE_RM */
+ lpfc_device_recov_prli_issue, /* DEVICE_RECOVERY */
+
+ lpfc_rcv_plogi_unmap_node, /* RCV_PLOGI UNMAPPED_NODE */
+ lpfc_rcv_prli_unmap_node, /* RCV_PRLI */
+ lpfc_rcv_logo_unmap_node, /* RCV_LOGO */
+ lpfc_rcv_padisc_unmap_node, /* RCV_ADISC */
+ lpfc_rcv_padisc_unmap_node, /* RCV_PDISC */
+ lpfc_rcv_prlo_unmap_node, /* RCV_PRLO */
+ lpfc_disc_illegal, /* CMPL_PLOGI */
+ lpfc_disc_illegal, /* CMPL_PRLI */
+ lpfc_disc_illegal, /* CMPL_LOGO */
+ lpfc_disc_illegal, /* CMPL_ADISC */
+ lpfc_disc_illegal, /* CMPL_REG_LOGIN */
+ lpfc_disc_illegal, /* DEVICE_RM */
+ lpfc_device_recov_unmap_node, /* DEVICE_RECOVERY */
+
+ lpfc_rcv_plogi_mapped_node, /* RCV_PLOGI MAPPED_NODE */
+ lpfc_rcv_prli_mapped_node, /* RCV_PRLI */
+ lpfc_rcv_logo_mapped_node, /* RCV_LOGO */
+ lpfc_rcv_padisc_mapped_node, /* RCV_ADISC */
+ lpfc_rcv_padisc_mapped_node, /* RCV_PDISC */
+ lpfc_rcv_prlo_mapped_node, /* RCV_PRLO */
+ lpfc_disc_illegal, /* CMPL_PLOGI */
+ lpfc_disc_illegal, /* CMPL_PRLI */
+ lpfc_disc_illegal, /* CMPL_LOGO */
+ lpfc_disc_illegal, /* CMPL_ADISC */
+ lpfc_disc_illegal, /* CMPL_REG_LOGIN */
+ lpfc_disc_illegal, /* DEVICE_RM */
+ lpfc_device_recov_mapped_node, /* DEVICE_RECOVERY */
+
+ lpfc_rcv_plogi_npr_node, /* RCV_PLOGI NPR_NODE */
+ lpfc_rcv_prli_npr_node, /* RCV_PRLI */
+ lpfc_rcv_logo_npr_node, /* RCV_LOGO */
+ lpfc_rcv_padisc_npr_node, /* RCV_ADISC */
+ lpfc_rcv_padisc_npr_node, /* RCV_PDISC */
+ lpfc_rcv_prlo_npr_node, /* RCV_PRLO */
+ lpfc_disc_noop, /* CMPL_PLOGI */
+ lpfc_disc_noop, /* CMPL_PRLI */
+ lpfc_cmpl_logo_npr_node, /* CMPL_LOGO */
+ lpfc_disc_noop, /* CMPL_ADISC */
+ lpfc_cmpl_reglogin_npr_node, /* CMPL_REG_LOGIN */
+ lpfc_device_rm_npr_node, /* DEVICE_RM */
+ lpfc_device_recov_npr_node, /* DEVICE_RECOVERY */
+};
+
+int
+lpfc_disc_state_machine(struct lpfc_hba * phba,
+ struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
+{
+ uint32_t cur_state, rc;
+ uint32_t(*func) (struct lpfc_hba *, struct lpfc_nodelist *, void *,
+ uint32_t);
+
+ ndlp->nlp_disc_refcnt++;
+ cur_state = ndlp->nlp_state;
+
+ /* DSM in event <evt> on NPort <nlp_DID> in state <cur_state> */
+ lpfc_printf_log(phba,
+ KERN_INFO,
+ LOG_DISCOVERY,
+ "%d:0211 DSM in event x%x on NPort x%x in state %d "
+ "Data: x%x\n",
+ phba->brd_no,
+ evt, ndlp->nlp_DID, cur_state, ndlp->nlp_flag);
+
+ func = lpfc_disc_action[(cur_state * NLP_EVT_MAX_EVENT) + evt];
+ rc = (func) (phba, ndlp, arg, evt);
+
+ /* DSM out state <rc> on NPort <nlp_DID> */
+ lpfc_printf_log(phba,
+ KERN_INFO,
+ LOG_DISCOVERY,
+ "%d:0212 DSM out state %d on NPort x%x Data: x%x\n",
+ phba->brd_no,
+ rc, ndlp->nlp_DID, ndlp->nlp_flag);
+
+ ndlp->nlp_disc_refcnt--;
+
+ /* Check to see if ndlp removal is deferred */
+ if ((ndlp->nlp_disc_refcnt == 0)
+ && (ndlp->nlp_flag & NLP_DELAY_REMOVE)) {
+ spin_lock_irq(phba->host->host_lock);
+ ndlp->nlp_flag &= ~NLP_DELAY_REMOVE;
+ spin_unlock_irq(phba->host->host_lock);
+ lpfc_nlp_remove(phba, ndlp);
+ return (NLP_STE_FREED_NODE);
+ }
+ if (rc == NLP_STE_FREED_NODE)
+ return (NLP_STE_FREED_NODE);
+ ndlp->nlp_state = rc;
+ return (rc);
+}
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
new file mode 100644
index 00000000000..42fab03ad2b
--- /dev/null
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -0,0 +1,1246 @@
+/*******************************************************************
+ * This file is part of the Emulex Linux Device Driver for *
+ * Enterprise Fibre Channel Host Bus Adapters. *
+ * Refer to the README file included with this package for *
+ * driver version and adapter support. *
+ * Copyright (C) 2004 Emulex Corporation. *
+ * www.emulex.com *
+ * *
+ * This program is free software; you can redistribute it and/or *
+ * modify it under the terms of the GNU General Public License *
+ * as published by the Free Software Foundation; either version 2 *
+ * of the License, or (at your option) any later version. *
+ * *
+ * This program is distributed in the hope that it will be useful, *
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of *
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
+ * GNU General Public License for more details, a copy of which *
+ * can be found in the file COPYING included with this package. *
+ *******************************************************************/
+
+/*
+ * $Id: lpfc_scsi.c 1.37 2005/04/13 14:27:09EDT sf_support Exp $
+ */
+
+#include <linux/pci.h>
+#include <linux/interrupt.h>
+
+#include <scsi/scsi.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_tcq.h>
+#include <scsi/scsi_transport_fc.h>
+
+#include "lpfc_version.h"
+#include "lpfc_hw.h"
+#include "lpfc_sli.h"
+#include "lpfc_disc.h"
+#include "lpfc_scsi.h"
+#include "lpfc.h"
+#include "lpfc_logmsg.h"
+#include "lpfc_crtn.h"
+
+#define LPFC_RESET_WAIT 2
+#define LPFC_ABORT_WAIT 2
+
+static inline void lpfc_put_lun(struct fcp_cmnd *fcmd, unsigned int lun)
+{
+ fcmd->fcpLunLsl = 0;
+ fcmd->fcpLunMsl = swab16((uint16_t)lun);
+}
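+
+/*
+ * Worked example (illustration only): for lun 1, swab16((uint16_t)1) yields
+ * 0x0100, so fcpLunMsl holds 0x0100 and fcpLunLsl is zeroed; the byte swap
+ * places the lun id in the expected byte of the 8-byte FCP LUN field.
+ */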
+
+/*
+ * This routine allocates a scsi buffer, which contains all the necessary
+ * information needed to initiate a SCSI I/O. The non-DMAable buffer region
+ * contains information to build the IOCB. The DMAable region contains
+ * memory for the FCP CMND, FCP RSP, and the initial BPL. In addition to
+ * allocating memory, the FCP CMND and FCP RSP BDEs are set up in the BPL
+ * and the BPL BDE is setup in the IOCB.
+ */
+static struct lpfc_scsi_buf *
+lpfc_get_scsi_buf(struct lpfc_hba * phba)
+{
+ struct lpfc_scsi_buf *psb;
+ struct ulp_bde64 *bpl;
+ IOCB_t *iocb;
+ dma_addr_t pdma_phys;
+
+ psb = kmalloc(sizeof(struct lpfc_scsi_buf), GFP_KERNEL);
+ if (!psb)
+ return NULL;
+ memset(psb, 0, sizeof (struct lpfc_scsi_buf));
+ psb->scsi_hba = phba;
+
+ /*
+ * Get memory from the pci pool to map the virt space to pci bus space
+ * for an I/O. The DMA buffer includes space for the struct fcp_cmnd,
+ * struct fcp_rsp and the number of bde's necessary to support the
+ * sg_tablesize.
+ */
+ psb->data = pci_pool_alloc(phba->lpfc_scsi_dma_buf_pool, GFP_KERNEL,
+ &psb->dma_handle);
+ if (!psb->data) {
+ kfree(psb);
+ return NULL;
+ }
+
+ /* Initialize virtual ptrs to dma_buf region. */
+ memset(psb->data, 0, phba->cfg_sg_dma_buf_size);
+
+ psb->fcp_cmnd = psb->data;
+ psb->fcp_rsp = psb->data + sizeof(struct fcp_cmnd);
+ psb->fcp_bpl = psb->data + sizeof(struct fcp_cmnd) +
+ sizeof(struct fcp_rsp);
+
+ /* Initialize local short-hand pointers. */
+ bpl = psb->fcp_bpl;
+ pdma_phys = psb->dma_handle;
+
+ /*
+ * The first two bdes are the FCP_CMD and FCP_RSP. The balance are sg
+ * list bdes. Initialize the first two and leave the rest for
+ * queuecommand.
+ */
+ bpl->addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys));
+ bpl->addrLow = le32_to_cpu(putPaddrLow(pdma_phys));
+ bpl->tus.f.bdeSize = sizeof (struct fcp_cmnd);
+ bpl->tus.f.bdeFlags = BUFF_USE_CMND;
+ bpl->tus.w = le32_to_cpu(bpl->tus.w);
+ bpl++;
+
+ /* Setup the physical region for the FCP RSP */
+ pdma_phys += sizeof (struct fcp_cmnd);
+ bpl->addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys));
+ bpl->addrLow = le32_to_cpu(putPaddrLow(pdma_phys));
+ bpl->tus.f.bdeSize = sizeof (struct fcp_rsp);
+ bpl->tus.f.bdeFlags = (BUFF_USE_CMND | BUFF_USE_RCV);
+ bpl->tus.w = le32_to_cpu(bpl->tus.w);
+
+ /*
+ * Since the IOCB for the FCP I/O is built into this lpfc_scsi_buf,
+ * initialize it with all known data now.
+ */
+ pdma_phys += (sizeof (struct fcp_rsp));
+ iocb = &psb->cur_iocbq.iocb;
+ iocb->un.fcpi64.bdl.ulpIoTag32 = 0;
+ iocb->un.fcpi64.bdl.addrHigh = putPaddrHigh(pdma_phys);
+ iocb->un.fcpi64.bdl.addrLow = putPaddrLow(pdma_phys);
+ iocb->un.fcpi64.bdl.bdeSize = (2 * sizeof (struct ulp_bde64));
+ iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BDL;
+ iocb->ulpBdeCount = 1;
+ iocb->ulpClass = CLASS3;
+
+ return psb;
+}
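+
+/*
+ * Resulting DMA buffer layout (sketch, derived from the code above): the
+ * single pci_pool allocation holds, in order,
+ *
+ *	[ fcp_cmnd ][ fcp_rsp ][ bpl: cmnd bde, rsp bde, data bdes ... ]
+ *
+ * The first two bpl entries always describe fcp_cmnd and fcp_rsp;
+ * lpfc_scsi_prep_dma_buf() fills in the data bdes at queuecommand time.
+ */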
+
+static void
+lpfc_free_scsi_buf(struct lpfc_scsi_buf * psb)
+{
+ struct lpfc_hba *phba = psb->scsi_hba;
+
+ /*
+ * There are only two special cases to consider. (1) the scsi command
+ * requested scatter-gather usage or (2) the scsi command allocated
+ * a request buffer, but did not request use_sg. There is a third
+ * case, but it does not require resource deallocation.
+ */
+ if ((psb->seg_cnt > 0) && (psb->pCmd->use_sg)) {
+ dma_unmap_sg(&phba->pcidev->dev, psb->pCmd->request_buffer,
+ psb->seg_cnt, psb->pCmd->sc_data_direction);
+ } else {
+ if ((psb->nonsg_phys) && (psb->pCmd->request_bufflen)) {
+ dma_unmap_single(&phba->pcidev->dev, psb->nonsg_phys,
+ psb->pCmd->request_bufflen,
+ psb->pCmd->sc_data_direction);
+ }
+ }
+
+ list_add_tail(&psb->list, &phba->lpfc_scsi_buf_list);
+}
+
+static int
+lpfc_scsi_prep_dma_buf(struct lpfc_hba * phba, struct lpfc_scsi_buf * lpfc_cmd)
+{
+ struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
+ struct scatterlist *sgel = NULL;
+ struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
+ struct ulp_bde64 *bpl = lpfc_cmd->fcp_bpl;
+ IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
+ dma_addr_t physaddr;
+ uint32_t i, num_bde = 0;
+ int datadir = scsi_cmnd->sc_data_direction;
+ int dma_error;
+
+ /*
+ * There are three possibilities here - use scatter-gather segment, use
+ * the single mapping, or neither. Start the lpfc command prep by
+ * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first
+ * data bde entry.
+ */
+ bpl += 2;
+ if (scsi_cmnd->use_sg) {
+ /*
+		 * The driver stores the segment count returned from dma_map_sg
+		 * because this is a count of dma-mappings used to map the use_sg
+ * pages. They are not guaranteed to be the same for those
+ * architectures that implement an IOMMU.
+ */
+ sgel = (struct scatterlist *)scsi_cmnd->request_buffer;
+ lpfc_cmd->seg_cnt = dma_map_sg(&phba->pcidev->dev, sgel,
+ scsi_cmnd->use_sg, datadir);
+ if (lpfc_cmd->seg_cnt == 0)
+ return 1;
+
+ if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
+ printk(KERN_ERR "%s: Too many sg segments from "
+			       "dma_map_sg. Config %d, seg_cnt %d\n",
+ __FUNCTION__, phba->cfg_sg_seg_cnt,
+ lpfc_cmd->seg_cnt);
+ dma_unmap_sg(&phba->pcidev->dev, sgel,
+ lpfc_cmd->seg_cnt, datadir);
+ return 1;
+ }
+
+ /*
+ * The driver established a maximum scatter-gather segment count
+ * during probe that limits the number of sg elements in any
+ * single scsi command. Just run through the seg_cnt and format
+ * the bde's.
+ */
+ for (i = 0; i < lpfc_cmd->seg_cnt; i++) {
+ physaddr = sg_dma_address(sgel);
+ bpl->addrLow = le32_to_cpu(putPaddrLow(physaddr));
+ bpl->addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
+ bpl->tus.f.bdeSize = sg_dma_len(sgel);
+ if (datadir == DMA_TO_DEVICE)
+ bpl->tus.f.bdeFlags = 0;
+ else
+ bpl->tus.f.bdeFlags = BUFF_USE_RCV;
+ bpl->tus.w = le32_to_cpu(bpl->tus.w);
+ bpl++;
+ sgel++;
+ num_bde++;
+ }
+ } else if (scsi_cmnd->request_buffer && scsi_cmnd->request_bufflen) {
+ physaddr = dma_map_single(&phba->pcidev->dev,
+ scsi_cmnd->request_buffer,
+ scsi_cmnd->request_bufflen,
+ datadir);
+ dma_error = dma_mapping_error(physaddr);
+ if (dma_error) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
+ "%d:0718 Unable to dma_map_single "
+ "request_buffer: x%x\n",
+ phba->brd_no, dma_error);
+ return 1;
+ }
+
+ lpfc_cmd->nonsg_phys = physaddr;
+ bpl->addrLow = le32_to_cpu(putPaddrLow(physaddr));
+ bpl->addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
+ bpl->tus.f.bdeSize = scsi_cmnd->request_bufflen;
+ if (datadir == DMA_TO_DEVICE)
+ bpl->tus.f.bdeFlags = 0;
+ bpl->tus.w = le32_to_cpu(bpl->tus.w);
+ num_bde = 1;
+ bpl++;
+ }
+
+ /*
+ * Finish initializing those IOCB fields that are dependent on the
+ * scsi_cmnd request_buffer
+ */
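+	/*
+	 * Illustration: with two data bdes, bdeSize grows from
+	 * 2 * sizeof(struct ulp_bde64) (cmnd + rsp bdes, set when the
+	 * buffer was allocated) to 4 * sizeof(struct ulp_bde64).
+	 */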
+ iocb_cmd->un.fcpi64.bdl.bdeSize +=
+ (num_bde * sizeof (struct ulp_bde64));
+ iocb_cmd->ulpBdeCount = 1;
+ iocb_cmd->ulpLe = 1;
+ fcp_cmnd->fcpDl = be32_to_cpu(scsi_cmnd->request_bufflen);
+ return 0;
+}
+
+static void
+lpfc_handle_fcp_err(struct lpfc_scsi_buf *lpfc_cmd)
+{
+ struct scsi_cmnd *cmnd = lpfc_cmd->pCmd;
+ struct fcp_cmnd *fcpcmd = lpfc_cmd->fcp_cmnd;
+ struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp;
+ struct lpfc_hba *phba = lpfc_cmd->scsi_hba;
+ uint32_t fcpi_parm = lpfc_cmd->cur_iocbq.iocb.un.fcpi.fcpi_parm;
+ uint32_t resp_info = fcprsp->rspStatus2;
+ uint32_t scsi_status = fcprsp->rspStatus3;
+ uint32_t host_status = DID_OK;
+ uint32_t rsplen = 0;
+
+ /*
+ * If this is a task management command, there is no
+ * scsi packet associated with this lpfc_cmd. The driver
+ * consumes it.
+ */
+ if (fcpcmd->fcpCntl2) {
+ scsi_status = 0;
+ goto out;
+ }
+
+ lpfc_printf_log(phba, KERN_WARNING, LOG_FCP,
+ "%d:0730 FCP command failed: RSP "
+ "Data: x%x x%x x%x x%x x%x x%x\n",
+ phba->brd_no, resp_info, scsi_status,
+ be32_to_cpu(fcprsp->rspResId),
+ be32_to_cpu(fcprsp->rspSnsLen),
+ be32_to_cpu(fcprsp->rspRspLen),
+ fcprsp->rspInfo3);
+
+ if (resp_info & RSP_LEN_VALID) {
+ rsplen = be32_to_cpu(fcprsp->rspRspLen);
+ if ((rsplen != 0 && rsplen != 4 && rsplen != 8) ||
+ (fcprsp->rspInfo3 != RSP_NO_FAILURE)) {
+ host_status = DID_ERROR;
+ goto out;
+ }
+ }
+
+ if ((resp_info & SNS_LEN_VALID) && fcprsp->rspSnsLen) {
+ uint32_t snslen = be32_to_cpu(fcprsp->rspSnsLen);
+ if (snslen > SCSI_SENSE_BUFFERSIZE)
+ snslen = SCSI_SENSE_BUFFERSIZE;
+
+ memcpy(cmnd->sense_buffer, &fcprsp->rspInfo0 + rsplen, snslen);
+ }
+
+ cmnd->resid = 0;
+ if (resp_info & RESID_UNDER) {
+ cmnd->resid = be32_to_cpu(fcprsp->rspResId);
+
+ lpfc_printf_log(phba, KERN_INFO, LOG_FCP,
+ "%d:0716 FCP Read Underrun, expected %d, "
+ "residual %d Data: x%x x%x x%x\n", phba->brd_no,
+ be32_to_cpu(fcpcmd->fcpDl), cmnd->resid,
+ fcpi_parm, cmnd->cmnd[0], cmnd->underflow);
+
+ /*
+ * The cmnd->underflow is the minimum number of bytes that must
+		 * be transferred for this command. Provided a sense condition
+ * is not present, make sure the actual amount transferred is at
+ * least the underflow value or fail.
+ */
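+		/*
+		 * Hypothetical numbers for illustration: with
+		 * request_bufflen 4096, resid 2048 and underflow 4096,
+		 * (4096 - 2048) < 4096 and the command fails with
+		 * DID_ERROR even though the target returned GOOD status.
+		 */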
+ if (!(resp_info & SNS_LEN_VALID) &&
+ (scsi_status == SAM_STAT_GOOD) &&
+ (cmnd->request_bufflen - cmnd->resid) < cmnd->underflow) {
+ lpfc_printf_log(phba, KERN_INFO, LOG_FCP,
+ "%d:0717 FCP command x%x residual "
+ "underrun converted to error "
+ "Data: x%x x%x x%x\n", phba->brd_no,
+ cmnd->cmnd[0], cmnd->request_bufflen,
+ cmnd->resid, cmnd->underflow);
+
+ host_status = DID_ERROR;
+ }
+ } else if (resp_info & RESID_OVER) {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_FCP,
+ "%d:0720 FCP command x%x residual "
+ "overrun error. Data: x%x x%x \n",
+ phba->brd_no, cmnd->cmnd[0],
+ cmnd->request_bufflen, cmnd->resid);
+ host_status = DID_ERROR;
+
+ /*
+ * Check SLI validation that all the transfer was actually done
+ * (fcpi_parm should be zero). Apply check only to reads.
+ */
+ } else if ((scsi_status == SAM_STAT_GOOD) && fcpi_parm &&
+ (cmnd->sc_data_direction == DMA_FROM_DEVICE)) {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_FCP,
+ "%d:0734 FCP Read Check Error Data: "
+ "x%x x%x x%x x%x\n", phba->brd_no,
+ be32_to_cpu(fcpcmd->fcpDl),
+ be32_to_cpu(fcprsp->rspResId),
+ fcpi_parm, cmnd->cmnd[0]);
+ host_status = DID_ERROR;
+ cmnd->resid = cmnd->request_bufflen;
+ }
+
+ out:
+ cmnd->result = ScsiResult(host_status, scsi_status);
+}
+
+static void
+lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
+ struct lpfc_iocbq *pIocbOut)
+{
+ struct lpfc_scsi_buf *lpfc_cmd =
+ (struct lpfc_scsi_buf *) pIocbIn->context1;
+ struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
+ struct lpfc_nodelist *pnode = rdata->pnode;
+ struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
+ unsigned long iflag;
+
+ lpfc_cmd->result = pIocbOut->iocb.un.ulpWord[4];
+ lpfc_cmd->status = pIocbOut->iocb.ulpStatus;
+
+ if (lpfc_cmd->status) {
+ if (lpfc_cmd->status == IOSTAT_LOCAL_REJECT &&
+ (lpfc_cmd->result & IOERR_DRVR_MASK))
+ lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
+ else if (lpfc_cmd->status >= IOSTAT_CNT)
+ lpfc_cmd->status = IOSTAT_DEFAULT;
+
+ lpfc_printf_log(phba, KERN_WARNING, LOG_FCP,
+ "%d:0729 FCP cmd x%x failed <%d/%d> status: "
+ "x%x result: x%x Data: x%x x%x\n",
+ phba->brd_no, cmd->cmnd[0], cmd->device->id,
+ cmd->device->lun, lpfc_cmd->status,
+ lpfc_cmd->result, pIocbOut->iocb.ulpContext,
+ lpfc_cmd->cur_iocbq.iocb.ulpIoTag);
+
+ switch (lpfc_cmd->status) {
+ case IOSTAT_FCP_RSP_ERROR:
+ /* Call FCP RSP handler to determine result */
+ lpfc_handle_fcp_err(lpfc_cmd);
+ break;
+ case IOSTAT_NPORT_BSY:
+ case IOSTAT_FABRIC_BSY:
+ cmd->result = ScsiResult(DID_BUS_BUSY, 0);
+ break;
+ default:
+ cmd->result = ScsiResult(DID_ERROR, 0);
+ break;
+ }
+
+		if (pnode) {
+			if (pnode->nlp_state != NLP_STE_MAPPED_NODE)
+				cmd->result = ScsiResult(DID_BUS_BUSY,
+					SAM_STAT_BUSY);
+		} else {
+			cmd->result = ScsiResult(DID_NO_CONNECT, 0);
+		}
+ } else {
+ cmd->result = ScsiResult(DID_OK, 0);
+ }
+
+ if (cmd->result || lpfc_cmd->fcp_rsp->rspSnsLen) {
+ uint32_t *lp = (uint32_t *)cmd->sense_buffer;
+
+ lpfc_printf_log(phba, KERN_INFO, LOG_FCP,
+ "%d:0710 Iodone <%d/%d> cmd %p, error x%x "
+ "SNS x%x x%x Data: x%x x%x\n",
+ phba->brd_no, cmd->device->id,
+ cmd->device->lun, cmd, cmd->result,
+ *lp, *(lp + 3), cmd->retries, cmd->resid);
+ }
+
+ spin_lock_irqsave(phba->host->host_lock, iflag);
+ lpfc_free_scsi_buf(lpfc_cmd);
+ cmd->host_scribble = NULL;
+ spin_unlock_irqrestore(phba->host->host_lock, iflag);
+
+ cmd->scsi_done(cmd);
+}
+
+static void
+lpfc_scsi_prep_cmnd(struct lpfc_hba * phba, struct lpfc_scsi_buf * lpfc_cmd,
+ struct lpfc_nodelist *pnode)
+{
+ struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
+ struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
+ IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
+ struct lpfc_iocbq *piocbq = &(lpfc_cmd->cur_iocbq);
+ int datadir = scsi_cmnd->sc_data_direction;
+
+ lpfc_cmd->fcp_rsp->rspSnsLen = 0;
+
+ lpfc_put_lun(lpfc_cmd->fcp_cmnd, lpfc_cmd->pCmd->device->lun);
+
+ memcpy(&fcp_cmnd->fcpCdb[0], scsi_cmnd->cmnd, 16);
+
+ if (scsi_cmnd->device->tagged_supported) {
+ switch (scsi_cmnd->tag) {
+ case HEAD_OF_QUEUE_TAG:
+ fcp_cmnd->fcpCntl1 = HEAD_OF_Q;
+ break;
+ case ORDERED_QUEUE_TAG:
+ fcp_cmnd->fcpCntl1 = ORDERED_Q;
+ break;
+ default:
+ fcp_cmnd->fcpCntl1 = SIMPLE_Q;
+ break;
+ }
+ } else
+ fcp_cmnd->fcpCntl1 = 0;
+
+	/*
+	 * There are three possibilities here - use scatter-gather segment, use
+	 * the single mapping, or neither. In each case the IOCB command and
+	 * the FCP control fields are initialized to match the data direction.
+	 */
+ if (scsi_cmnd->use_sg) {
+ if (datadir == DMA_TO_DEVICE) {
+ iocb_cmd->ulpCommand = CMD_FCP_IWRITE64_CR;
+ iocb_cmd->un.fcpi.fcpi_parm = 0;
+ iocb_cmd->ulpPU = 0;
+ fcp_cmnd->fcpCntl3 = WRITE_DATA;
+ phba->fc4OutputRequests++;
+ } else {
+ iocb_cmd->ulpCommand = CMD_FCP_IREAD64_CR;
+ iocb_cmd->ulpPU = PARM_READ_CHECK;
+ iocb_cmd->un.fcpi.fcpi_parm =
+ scsi_cmnd->request_bufflen;
+ fcp_cmnd->fcpCntl3 = READ_DATA;
+ phba->fc4InputRequests++;
+ }
+ } else if (scsi_cmnd->request_buffer && scsi_cmnd->request_bufflen) {
+ if (datadir == DMA_TO_DEVICE) {
+ iocb_cmd->ulpCommand = CMD_FCP_IWRITE64_CR;
+ iocb_cmd->un.fcpi.fcpi_parm = 0;
+ iocb_cmd->ulpPU = 0;
+ fcp_cmnd->fcpCntl3 = WRITE_DATA;
+ phba->fc4OutputRequests++;
+ } else {
+ iocb_cmd->ulpCommand = CMD_FCP_IREAD64_CR;
+ iocb_cmd->ulpPU = PARM_READ_CHECK;
+ iocb_cmd->un.fcpi.fcpi_parm =
+ scsi_cmnd->request_bufflen;
+ fcp_cmnd->fcpCntl3 = READ_DATA;
+ phba->fc4InputRequests++;
+ }
+ } else {
+ iocb_cmd->ulpCommand = CMD_FCP_ICMND64_CR;
+ iocb_cmd->un.fcpi.fcpi_parm = 0;
+ iocb_cmd->ulpPU = 0;
+ fcp_cmnd->fcpCntl3 = 0;
+ phba->fc4ControlRequests++;
+ }
+
+ /*
+ * Finish initializing those IOCB fields that are independent
+ * of the scsi_cmnd request_buffer
+ */
+ piocbq->iocb.ulpContext = pnode->nlp_rpi;
+ if (pnode->nlp_fcp_info & NLP_FCP_2_DEVICE)
+ piocbq->iocb.ulpFCP2Rcvy = 1;
+
+ piocbq->iocb.ulpClass = (pnode->nlp_fcp_info & 0x0f);
+ piocbq->context1 = lpfc_cmd;
+ piocbq->iocb_cmpl = lpfc_scsi_cmd_iocb_cmpl;
+ piocbq->iocb.ulpTimeout = lpfc_cmd->timeout;
+}
+
+static int
+lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_hba *phba,
+ struct lpfc_scsi_buf *lpfc_cmd,
+ uint8_t task_mgmt_cmd)
+{
+ struct lpfc_sli *psli;
+ struct lpfc_iocbq *piocbq;
+ IOCB_t *piocb;
+ struct fcp_cmnd *fcp_cmnd;
+ struct scsi_device *scsi_dev = lpfc_cmd->pCmd->device;
+ struct lpfc_rport_data *rdata = scsi_dev->hostdata;
+ struct lpfc_nodelist *ndlp = rdata->pnode;
+
+	if (!ndlp || (ndlp->nlp_state != NLP_STE_MAPPED_NODE)) {
+ return 0;
+ }
+
+ psli = &phba->sli;
+ piocbq = &(lpfc_cmd->cur_iocbq);
+ piocb = &piocbq->iocb;
+
+ fcp_cmnd = lpfc_cmd->fcp_cmnd;
+ lpfc_put_lun(lpfc_cmd->fcp_cmnd, lpfc_cmd->pCmd->device->lun);
+ fcp_cmnd->fcpCntl2 = task_mgmt_cmd;
+
+ piocb->ulpCommand = CMD_FCP_ICMND64_CR;
+
+ piocb->ulpContext = ndlp->nlp_rpi;
+ if (ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) {
+ piocb->ulpFCP2Rcvy = 1;
+ }
+ piocb->ulpClass = (ndlp->nlp_fcp_info & 0x0f);
+
+ /* ulpTimeout is only one byte */
+ if (lpfc_cmd->timeout > 0xff) {
+ /*
+ * Do not timeout the command at the firmware level.
+ * The driver will provide the timeout mechanism.
+ */
+ piocb->ulpTimeout = 0;
+ } else {
+ piocb->ulpTimeout = lpfc_cmd->timeout;
+ }
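+	/*
+	 * Example: a hypothetical 300 second timeout exceeds the one-byte
+	 * ulpTimeout field (max 255), so ulpTimeout stays 0, the firmware
+	 * never times the command out, and the driver timer enforces it.
+	 */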
+
+ lpfc_cmd->rdata = rdata;
+
+ switch (task_mgmt_cmd) {
+ case FCP_LUN_RESET:
+ /* Issue LUN Reset to TGT <num> LUN <num> */
+ lpfc_printf_log(phba,
+ KERN_INFO,
+ LOG_FCP,
+ "%d:0703 Issue LUN Reset to TGT %d LUN %d "
+ "Data: x%x x%x\n",
+ phba->brd_no,
+ scsi_dev->id, scsi_dev->lun,
+ ndlp->nlp_rpi, ndlp->nlp_flag);
+
+ break;
+ case FCP_ABORT_TASK_SET:
+ /* Issue Abort Task Set to TGT <num> LUN <num> */
+ lpfc_printf_log(phba,
+ KERN_INFO,
+ LOG_FCP,
+ "%d:0701 Issue Abort Task Set to TGT %d LUN %d "
+ "Data: x%x x%x\n",
+ phba->brd_no,
+ scsi_dev->id, scsi_dev->lun,
+ ndlp->nlp_rpi, ndlp->nlp_flag);
+
+ break;
+ case FCP_TARGET_RESET:
+ /* Issue Target Reset to TGT <num> */
+ lpfc_printf_log(phba,
+ KERN_INFO,
+ LOG_FCP,
+ "%d:0702 Issue Target Reset to TGT %d "
+ "Data: x%x x%x\n",
+ phba->brd_no,
+ scsi_dev->id, ndlp->nlp_rpi,
+ ndlp->nlp_flag);
+ break;
+ }
+
+ return (1);
+}
+
+static int
+lpfc_scsi_tgt_reset(struct lpfc_scsi_buf * lpfc_cmd, struct lpfc_hba * phba)
+{
+ struct lpfc_iocbq *iocbq;
+ struct lpfc_iocbq *iocbqrsp = NULL;
+ struct list_head *lpfc_iocb_list = &phba->lpfc_iocb_list;
+ int ret;
+
+ ret = lpfc_scsi_prep_task_mgmt_cmd(phba, lpfc_cmd, FCP_TARGET_RESET);
+ if (!ret)
+ return FAILED;
+
+ lpfc_cmd->scsi_hba = phba;
+ iocbq = &lpfc_cmd->cur_iocbq;
+ list_remove_head(lpfc_iocb_list, iocbqrsp, struct lpfc_iocbq, list);
+ if (!iocbqrsp)
+ return FAILED;
+ memset(iocbqrsp, 0, sizeof (struct lpfc_iocbq));
+
+ iocbq->iocb_flag |= LPFC_IO_POLL;
+ ret = lpfc_sli_issue_iocb_wait_high_priority(phba,
+ &phba->sli.ring[phba->sli.fcp_ring],
+ iocbq, SLI_IOCB_HIGH_PRIORITY,
+ iocbqrsp,
+ lpfc_cmd->timeout);
+ if (ret != IOCB_SUCCESS) {
+ lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
+ ret = FAILED;
+ } else {
+ ret = SUCCESS;
+ lpfc_cmd->result = iocbqrsp->iocb.un.ulpWord[4];
+ lpfc_cmd->status = iocbqrsp->iocb.ulpStatus;
+ if (lpfc_cmd->status == IOSTAT_LOCAL_REJECT &&
+ (lpfc_cmd->result & IOERR_DRVR_MASK))
+ lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
+ }
+
+ /*
+ * All outstanding txcmplq I/Os should have been aborted by the target.
+	 * Unfortunately, some targets do not abide by this, forcing the driver
+ * to double check.
+ */
+ lpfc_sli_abort_iocb(phba, &phba->sli.ring[phba->sli.fcp_ring],
+ lpfc_cmd->pCmd->device->id,
+ lpfc_cmd->pCmd->device->lun, 0, LPFC_CTX_TGT);
+
+ /* Return response IOCB to free list. */
+ list_add_tail(&iocbqrsp->list, lpfc_iocb_list);
+ return ret;
+}
+
+static void
+lpfc_scsi_cmd_iocb_cleanup (struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
+ struct lpfc_iocbq *pIocbOut)
+{
+ unsigned long iflag;
+ struct lpfc_scsi_buf *lpfc_cmd =
+ (struct lpfc_scsi_buf *) pIocbIn->context1;
+
+ spin_lock_irqsave(phba->host->host_lock, iflag);
+ lpfc_free_scsi_buf(lpfc_cmd);
+ spin_unlock_irqrestore(phba->host->host_lock, iflag);
+}
+
+static void
+lpfc_scsi_cmd_iocb_cmpl_aborted(struct lpfc_hba *phba,
+ struct lpfc_iocbq *pIocbIn,
+ struct lpfc_iocbq *pIocbOut)
+{
+ struct scsi_cmnd *ml_cmd =
+ ((struct lpfc_scsi_buf *) pIocbIn->context1)->pCmd;
+
+ lpfc_scsi_cmd_iocb_cleanup (phba, pIocbIn, pIocbOut);
+ ml_cmd->host_scribble = NULL;
+}
+
+const char *
+lpfc_info(struct Scsi_Host *host)
+{
+ struct lpfc_hba *phba = (struct lpfc_hba *) host->hostdata[0];
+ int len;
+ static char lpfcinfobuf[384];
+
+	memset(lpfcinfobuf, 0, 384);
+	if (phba && phba->pcidev) {
+ strncpy(lpfcinfobuf, phba->ModelDesc, 256);
+ len = strlen(lpfcinfobuf);
+ snprintf(lpfcinfobuf + len,
+ 384-len,
+ " on PCI bus %02x device %02x irq %d",
+ phba->pcidev->bus->number,
+ phba->pcidev->devfn,
+ phba->pcidev->irq);
+ len = strlen(lpfcinfobuf);
+ if (phba->Port[0]) {
+ snprintf(lpfcinfobuf + len,
+ 384-len,
+ " port %s",
+ phba->Port);
+ }
+ }
+ return lpfcinfobuf;
+}
+
+static int
+lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
+{
+ struct lpfc_hba *phba =
+ (struct lpfc_hba *) cmnd->device->host->hostdata[0];
+ struct lpfc_sli *psli = &phba->sli;
+ struct lpfc_rport_data *rdata = cmnd->device->hostdata;
+ struct lpfc_nodelist *ndlp = rdata->pnode;
+ struct lpfc_scsi_buf *lpfc_cmd = NULL;
+ struct list_head *scsi_buf_list = &phba->lpfc_scsi_buf_list;
+ int err = 0;
+
+ /*
+ * The target pointer is guaranteed not to be NULL because the driver
+ * only clears the device->hostdata field in lpfc_slave_destroy. This
+ * approach guarantees no further IO calls on this target.
+ */
+ if (!ndlp) {
+ cmnd->result = ScsiResult(DID_NO_CONNECT, 0);
+ goto out_fail_command;
+ }
+
+ /*
+ * A Fibre Channel target is present and functioning only when the node
+ * state is MAPPED. Any other state is a failure.
+ */
+ if (ndlp->nlp_state != NLP_STE_MAPPED_NODE) {
+ if ((ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) ||
+ (ndlp->nlp_state == NLP_STE_UNUSED_NODE)) {
+ cmnd->result = ScsiResult(DID_NO_CONNECT, 0);
+ goto out_fail_command;
+ }
+ /*
+ * The device is most likely recovered and the driver
+ * needs a bit more time to finish. Ask the midlayer
+ * to retry.
+ */
+ goto out_host_busy;
+ }
+
+ list_remove_head(scsi_buf_list, lpfc_cmd, struct lpfc_scsi_buf, list);
+ if (lpfc_cmd == NULL) {
+ printk(KERN_WARNING "%s: No buffer available - list empty, "
+ "total count %d\n", __FUNCTION__, phba->total_scsi_bufs);
+ goto out_host_busy;
+ }
+
+ /*
+ * Store the midlayer's command structure for the completion phase
+ * and complete the command initialization.
+ */
+ lpfc_cmd->pCmd = cmnd;
+ lpfc_cmd->rdata = rdata;
+ lpfc_cmd->timeout = 0;
+ cmnd->host_scribble = (unsigned char *)lpfc_cmd;
+ cmnd->scsi_done = done;
+
+ err = lpfc_scsi_prep_dma_buf(phba, lpfc_cmd);
+ if (err)
+ goto out_host_busy_free_buf;
+
+ lpfc_scsi_prep_cmnd(phba, lpfc_cmd, ndlp);
+
+ err = lpfc_sli_issue_iocb(phba, &phba->sli.ring[psli->fcp_ring],
+ &lpfc_cmd->cur_iocbq, SLI_IOCB_RET_IOCB);
+ if (err)
+ goto out_host_busy_free_buf;
+ return 0;
+
+ out_host_busy_free_buf:
+ lpfc_free_scsi_buf(lpfc_cmd);
+ cmnd->host_scribble = NULL;
+ out_host_busy:
+ return SCSI_MLQUEUE_HOST_BUSY;
+
+ out_fail_command:
+ done(cmnd);
+ return 0;
+}
+
+static int
+lpfc_abort_handler(struct scsi_cmnd *cmnd)
+{
+ struct lpfc_hba *phba =
+ (struct lpfc_hba *)cmnd->device->host->hostdata[0];
+ struct lpfc_sli_ring *pring = &phba->sli.ring[phba->sli.fcp_ring];
+ struct lpfc_iocbq *iocb, *next_iocb;
+ struct lpfc_iocbq *abtsiocb = NULL;
+ struct lpfc_scsi_buf *lpfc_cmd;
+ struct list_head *lpfc_iocb_list = &phba->lpfc_iocb_list;
+ IOCB_t *cmd, *icmd;
+ unsigned long snum;
+ unsigned int id, lun;
+ unsigned int loop_count = 0;
+ int ret = IOCB_SUCCESS;
+
+ /*
+ * If the host_scribble data area is NULL, then the driver has already
+ * completed this command, but the midlayer did not see the completion
+ * before the eh fired. Just return SUCCESS.
+ */
+ lpfc_cmd = (struct lpfc_scsi_buf *)cmnd->host_scribble;
+ if (!lpfc_cmd)
+ return SUCCESS;
+
+ /* save these now since lpfc_cmd can be freed */
+ id = lpfc_cmd->pCmd->device->id;
+ lun = lpfc_cmd->pCmd->device->lun;
+ snum = lpfc_cmd->pCmd->serial_number;
+
+ list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
+ cmd = &iocb->iocb;
+ if (iocb->context1 != lpfc_cmd)
+ continue;
+
+ list_del_init(&iocb->list);
+ pring->txq_cnt--;
+ if (!iocb->iocb_cmpl) {
+ list_add_tail(&iocb->list, lpfc_iocb_list);
+ }
+ else {
+ cmd->ulpStatus = IOSTAT_LOCAL_REJECT;
+ cmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
+ lpfc_scsi_cmd_iocb_cmpl_aborted(phba, iocb, iocb);
+ }
+
+ goto out;
+ }
+
+ list_remove_head(lpfc_iocb_list, abtsiocb, struct lpfc_iocbq, list);
+ if (abtsiocb == NULL)
+ return FAILED;
+
+ memset(abtsiocb, 0, sizeof (struct lpfc_iocbq));
+
+ /*
+ * The scsi command was not in the txq. Check the txcmplq and if it is
+ * found, send an abort to the FW.
+ */
+ list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) {
+ if (iocb->context1 != lpfc_cmd)
+ continue;
+
+ iocb->iocb_cmpl = lpfc_scsi_cmd_iocb_cmpl_aborted;
+ cmd = &iocb->iocb;
+ icmd = &abtsiocb->iocb;
+ icmd->un.acxri.abortType = ABORT_TYPE_ABTS;
+ icmd->un.acxri.abortContextTag = cmd->ulpContext;
+ icmd->un.acxri.abortIoTag = cmd->ulpIoTag;
+
+ icmd->ulpLe = 1;
+ icmd->ulpClass = cmd->ulpClass;
+ if (phba->hba_state >= LPFC_LINK_UP)
+ icmd->ulpCommand = CMD_ABORT_XRI_CN;
+ else
+ icmd->ulpCommand = CMD_CLOSE_XRI_CN;
+
+ if (lpfc_sli_issue_iocb(phba, pring, abtsiocb, 0) ==
+ IOCB_ERROR) {
+ list_add_tail(&abtsiocb->list, lpfc_iocb_list);
+ ret = IOCB_ERROR;
+ break;
+ }
+
+ /* Wait for abort to complete */
+		while (cmnd->host_scribble) {
+ spin_unlock_irq(phba->host->host_lock);
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(LPFC_ABORT_WAIT*HZ);
+ spin_lock_irq(phba->host->host_lock);
+ if (++loop_count
+ > (2 * phba->cfg_nodev_tmo)/LPFC_ABORT_WAIT)
+ break;
+ }
+
+		if (cmnd->host_scribble) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
+ "%d:0748 abort handler timed "
+ "out waiting for abort to "
+ "complete. Data: "
+ "x%x x%x x%x x%lx\n",
+ phba->brd_no, ret, id, lun, snum);
+ cmnd->host_scribble = NULL;
+ iocb->iocb_cmpl = lpfc_scsi_cmd_iocb_cleanup;
+ ret = IOCB_ERROR;
+ }
+
+ break;
+ }
+
+ out:
+ lpfc_printf_log(phba, KERN_WARNING, LOG_FCP,
+ "%d:0749 SCSI layer issued abort device "
+ "Data: x%x x%x x%x x%lx\n",
+ phba->brd_no, ret, id, lun, snum);
+
+ return ret == IOCB_SUCCESS ? SUCCESS : FAILED;
+}
+
+static int
+lpfc_reset_lun_handler(struct scsi_cmnd *cmnd)
+{
+ struct Scsi_Host *shost = cmnd->device->host;
+ struct lpfc_hba *phba = (struct lpfc_hba *)shost->hostdata[0];
+ struct lpfc_sli *psli = &phba->sli;
+ struct lpfc_scsi_buf *lpfc_cmd = NULL;
+ struct list_head *scsi_buf_list = &phba->lpfc_scsi_buf_list;
+ struct list_head *lpfc_iocb_list = &phba->lpfc_iocb_list;
+ struct lpfc_iocbq *iocbq, *iocbqrsp = NULL;
+ struct lpfc_rport_data *rdata = cmnd->device->hostdata;
+ struct lpfc_nodelist *pnode = rdata->pnode;
+ int ret = FAILED;
+ int cnt, loopcnt;
+
+ /*
+ * If target is not in a MAPPED state, delay the reset until
+ * target is rediscovered or nodev timeout expires.
+ */
+	while (1) {
+		if (!pnode)
+			break;
+
+		if (pnode->nlp_state != NLP_STE_MAPPED_NODE) {
+			spin_unlock_irq(phba->host->host_lock);
+			set_current_state(TASK_UNINTERRUPTIBLE);
+			schedule_timeout(HZ / 2);
+			spin_lock_irq(phba->host->host_lock);
+		}
+		if (pnode && (pnode->nlp_state == NLP_STE_MAPPED_NODE))
+			break;
+	}
+
+ list_remove_head(scsi_buf_list, lpfc_cmd, struct lpfc_scsi_buf, list);
+ if (lpfc_cmd == NULL)
+ goto out;
+
+ lpfc_cmd->pCmd = cmnd;
+ lpfc_cmd->timeout = 60;
+ lpfc_cmd->scsi_hba = phba;
+
+ ret = lpfc_scsi_prep_task_mgmt_cmd(phba, lpfc_cmd, FCP_LUN_RESET);
+ if (!ret)
+ goto out_free_scsi_buf;
+
+ iocbq = &lpfc_cmd->cur_iocbq;
+
+ /* get a buffer for this IOCB command response */
+ list_remove_head(lpfc_iocb_list, iocbqrsp, struct lpfc_iocbq, list);
+ if (iocbqrsp == NULL)
+ goto out_free_scsi_buf;
+
+ memset(iocbqrsp, 0, sizeof (struct lpfc_iocbq));
+
+ iocbq->iocb_flag |= LPFC_IO_POLL;
+ iocbq->iocb_cmpl = lpfc_sli_wake_iocb_high_priority;
+
+ ret = lpfc_sli_issue_iocb_wait_high_priority(phba,
+ &phba->sli.ring[psli->fcp_ring],
+ iocbq, 0, iocbqrsp, 60);
+ if (ret == IOCB_SUCCESS)
+ ret = SUCCESS;
+
+ lpfc_cmd->result = iocbqrsp->iocb.un.ulpWord[4];
+ lpfc_cmd->status = iocbqrsp->iocb.ulpStatus;
+ if (lpfc_cmd->status == IOSTAT_LOCAL_REJECT)
+ if (lpfc_cmd->result & IOERR_DRVR_MASK)
+ lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
+
+ /*
+ * All outstanding txcmplq I/Os should have been aborted by the target.
+	 * Unfortunately, some targets do not abide by this, forcing the driver
+ * to double check.
+ */
+ lpfc_sli_abort_iocb(phba, &phba->sli.ring[phba->sli.fcp_ring],
+ cmnd->device->id, cmnd->device->lun, 0,
+ LPFC_CTX_LUN);
+
+ loopcnt = 0;
+	while ((cnt = lpfc_sli_sum_iocb(phba,
+ &phba->sli.ring[phba->sli.fcp_ring],
+ cmnd->device->id, cmnd->device->lun,
+ LPFC_CTX_LUN))) {
+ spin_unlock_irq(phba->host->host_lock);
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(LPFC_RESET_WAIT*HZ);
+ spin_lock_irq(phba->host->host_lock);
+
+ if (++loopcnt
+ > (2 * phba->cfg_nodev_tmo)/LPFC_RESET_WAIT)
+ break;
+ }
+
+ if (cnt) {
+ lpfc_printf_log(phba, KERN_INFO, LOG_FCP,
+ "%d:0719 LUN Reset I/O flush failure: cnt x%x\n",
+ phba->brd_no, cnt);
+ }
+
+ list_add_tail(&iocbqrsp->list, lpfc_iocb_list);
+
+out_free_scsi_buf:
+ lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
+ "%d:0713 SCSI layer issued LUN reset (%d, %d) "
+ "Data: x%x x%x x%x\n",
+ phba->brd_no, lpfc_cmd->pCmd->device->id,
+ lpfc_cmd->pCmd->device->lun, ret, lpfc_cmd->status,
+ lpfc_cmd->result);
+ lpfc_free_scsi_buf(lpfc_cmd);
+out:
+ return ret;
+}
+
+/*
+ * Note: midlayer calls this function with the host_lock held
+ */
+static int
+lpfc_reset_bus_handler(struct scsi_cmnd *cmnd)
+{
+ struct Scsi_Host *shost = cmnd->device->host;
+ struct lpfc_hba *phba = (struct lpfc_hba *)shost->hostdata[0];
+ struct lpfc_nodelist *ndlp = NULL;
+ int match;
+ int ret = FAILED, i, err_count = 0;
+ int cnt, loopcnt;
+ unsigned int midlayer_id = 0;
+ struct lpfc_scsi_buf * lpfc_cmd = NULL;
+ struct list_head *scsi_buf_list = &phba->lpfc_scsi_buf_list;
+
+ list_remove_head(scsi_buf_list, lpfc_cmd, struct lpfc_scsi_buf, list);
+ if (lpfc_cmd == NULL)
+ goto out;
+
+ /* The lpfc_cmd storage is reused. Set all loop invariants. */
+ lpfc_cmd->timeout = 60;
+ lpfc_cmd->pCmd = cmnd;
+ lpfc_cmd->scsi_hba = phba;
+
+ /*
+ * Since the driver manages a single bus device, reset all
+ * targets known to the driver. Should any target reset
+ * fail, this routine returns failure to the midlayer.
+ */
+ midlayer_id = cmnd->device->id;
+ for (i = 0; i < MAX_FCP_TARGET; i++) {
+ /* Search the mapped list for this target ID */
+ match = 0;
+ list_for_each_entry(ndlp, &phba->fc_nlpmap_list, nlp_listp) {
+ if ((i == ndlp->nlp_sid) && ndlp->rport) {
+ match = 1;
+ break;
+ }
+ }
+ if (!match)
+ continue;
+
+ lpfc_cmd->pCmd->device->id = i;
+ lpfc_cmd->pCmd->device->hostdata = ndlp->rport->dd_data;
+ ret = lpfc_scsi_tgt_reset(lpfc_cmd, phba);
+ if (ret != SUCCESS) {
+ lpfc_printf_log(phba, KERN_INFO, LOG_FCP,
+ "%d:0713 Bus Reset on target %d failed\n",
+ phba->brd_no, i);
+ err_count++;
+ }
+ }
+
+ cmnd->device->id = midlayer_id;
+ loopcnt = 0;
+	while ((cnt = lpfc_sli_sum_iocb(phba,
+ &phba->sli.ring[phba->sli.fcp_ring],
+ 0, 0, LPFC_CTX_HOST))) {
+ spin_unlock_irq(phba->host->host_lock);
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(LPFC_RESET_WAIT*HZ);
+ spin_lock_irq(phba->host->host_lock);
+
+ if (++loopcnt
+ > (2 * phba->cfg_nodev_tmo)/LPFC_RESET_WAIT)
+ break;
+ }
+
+ if (cnt) {
+ /* flush all outstanding commands on the host */
+ i = lpfc_sli_abort_iocb(phba,
+ &phba->sli.ring[phba->sli.fcp_ring], 0, 0, 0,
+ LPFC_CTX_HOST);
+
+ lpfc_printf_log(phba, KERN_INFO, LOG_FCP,
+ "%d:0715 Bus Reset I/O flush failure: cnt x%x left x%x\n",
+ phba->brd_no, cnt, i);
+ }
+
+ if (!err_count)
+ ret = SUCCESS;
+
+ lpfc_free_scsi_buf(lpfc_cmd);
+ lpfc_printf_log(phba,
+ KERN_ERR,
+ LOG_FCP,
+ "%d:0714 SCSI layer issued Bus Reset Data: x%x\n",
+ phba->brd_no, ret);
+out:
+ return ret;
+}
+
+static int
+lpfc_slave_alloc(struct scsi_device *sdev)
+{
+ struct lpfc_hba *phba = (struct lpfc_hba *)sdev->host->hostdata[0];
+ struct lpfc_nodelist *ndlp = NULL;
+ int match = 0;
+ struct lpfc_scsi_buf *scsi_buf = NULL;
+ uint32_t total = 0, i;
+ uint32_t num_to_alloc = 0;
+ unsigned long flags;
+ struct list_head *listp;
+ struct list_head *node_list[6];
+
+ /*
+ * Store the target pointer in the scsi_device hostdata pointer provided
+ * the driver has already discovered the target id.
+ */
+
+ /* Search the nlp lists other than unmap_list for this target ID */
+ node_list[0] = &phba->fc_npr_list;
+ node_list[1] = &phba->fc_nlpmap_list;
+ node_list[2] = &phba->fc_prli_list;
+ node_list[3] = &phba->fc_reglogin_list;
+ node_list[4] = &phba->fc_adisc_list;
+ node_list[5] = &phba->fc_plogi_list;
+
+ for (i = 0; i < 6 && !match; i++) {
+ listp = node_list[i];
+ if (list_empty(listp))
+ continue;
+ list_for_each_entry(ndlp, listp, nlp_listp) {
+ if ((sdev->id == ndlp->nlp_sid) && ndlp->rport) {
+ match = 1;
+ break;
+ }
+ }
+ }
+
+ if (!match)
+ return -ENXIO;
+
+ sdev->hostdata = ndlp->rport->dd_data;
+
+ /*
+ * Populate the cmds_per_lun count scsi_bufs into this host's globally
+ * available list of scsi buffers. Don't allocate more than the
+ * HBA limit conveyed to the midlayer via the host structure. Note
+ * that this list of scsi bufs exists for the lifetime of the driver.
+ */
+ total = phba->total_scsi_bufs;
+ num_to_alloc = LPFC_CMD_PER_LUN;
+ if (total >= phba->cfg_hba_queue_depth) {
+ printk(KERN_WARNING "%s, At config limitation of "
+ "%d allocated scsi_bufs\n", __FUNCTION__, total);
+ return 0;
+ } else if (total + num_to_alloc > phba->cfg_hba_queue_depth) {
+ num_to_alloc = phba->cfg_hba_queue_depth - total;
+ }
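+	/*
+	 * Illustration (hypothetical numbers): with cfg_hba_queue_depth 256,
+	 * total already 250 and LPFC_CMD_PER_LUN greater than 6, num_to_alloc
+	 * is clamped to 6 so the global pool never exceeds the config limit.
+	 */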
+
+ for (i = 0; i < num_to_alloc; i++) {
+ scsi_buf = lpfc_get_scsi_buf(phba);
+ if (!scsi_buf) {
+ printk(KERN_ERR "%s, failed to allocate "
+ "scsi_buf\n", __FUNCTION__);
+ break;
+ }
+
+ spin_lock_irqsave(phba->host->host_lock, flags);
+ phba->total_scsi_bufs++;
+ list_add_tail(&scsi_buf->list, &phba->lpfc_scsi_buf_list);
+ spin_unlock_irqrestore(phba->host->host_lock, flags);
+ }
+ return 0;
+}
+
+static int
+lpfc_slave_configure(struct scsi_device *sdev)
+{
+ struct lpfc_hba *phba = (struct lpfc_hba *) sdev->host->hostdata[0];
+ struct fc_rport *rport = starget_to_rport(sdev->sdev_target);
+
+ if (sdev->tagged_supported)
+ scsi_activate_tcq(sdev, phba->cfg_lun_queue_depth);
+ else
+ scsi_deactivate_tcq(sdev, phba->cfg_lun_queue_depth);
+
+ /*
+ * Initialize the fc transport attributes for the target
+ * containing this scsi device. Also note that the driver's
+ * target pointer is stored in the starget_data for the
+ * driver's sysfs entry point functions.
+ */
+ rport->dev_loss_tmo = phba->cfg_nodev_tmo + 5;
+
+ return 0;
+}
+
+static void
+lpfc_slave_destroy(struct scsi_device *sdev)
+{
+ sdev->hostdata = NULL;
+ return;
+}
+
+struct scsi_host_template lpfc_template = {
+ .module = THIS_MODULE,
+ .name = LPFC_DRIVER_NAME,
+ .info = lpfc_info,
+ .queuecommand = lpfc_queuecommand,
+ .eh_abort_handler = lpfc_abort_handler,
+ .eh_device_reset_handler= lpfc_reset_lun_handler,
+ .eh_bus_reset_handler = lpfc_reset_bus_handler,
+ .slave_alloc = lpfc_slave_alloc,
+ .slave_configure = lpfc_slave_configure,
+ .slave_destroy = lpfc_slave_destroy,
+ .this_id = -1,
+ .sg_tablesize = LPFC_SG_SEG_CNT,
+ .cmd_per_lun = LPFC_CMD_PER_LUN,
+ .use_clustering = ENABLE_CLUSTERING,
+ .shost_attrs = lpfc_host_attrs,
+};
diff --git a/drivers/scsi/lpfc/lpfc_scsi.h b/drivers/scsi/lpfc/lpfc_scsi.h
new file mode 100644
index 00000000000..4aafba47628
--- /dev/null
+++ b/drivers/scsi/lpfc/lpfc_scsi.h
@@ -0,0 +1,157 @@
+/*******************************************************************
+ * This file is part of the Emulex Linux Device Driver for *
+ * Enterprise Fibre Channel Host Bus Adapters. *
+ * Refer to the README file included with this package for *
+ * driver version and adapter support. *
+ * Copyright (C) 2004 Emulex Corporation. *
+ * www.emulex.com *
+ * *
+ * This program is free software; you can redistribute it and/or *
+ * modify it under the terms of the GNU General Public License *
+ * as published by the Free Software Foundation; either version 2 *
+ * of the License, or (at your option) any later version. *
+ * *
+ * This program is distributed in the hope that it will be useful, *
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of *
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
+ * GNU General Public License for more details, a copy of which *
+ * can be found in the file COPYING included with this package. *
+ *******************************************************************/
+
+/*
+ * $Id: lpfc_scsi.h 1.83 2005/04/07 08:47:43EDT sf_support Exp $
+ */
+
+struct lpfc_hba;
+
+#define list_remove_head(list, entry, type, member) \
+ if (!list_empty(list)) { \
+ entry = list_entry((list)->next, type, member); \
+ list_del_init(&entry->member); \
+ }
+
+#define list_get_first(list, type, member) \
+ (list_empty(list)) ? NULL : \
+ list_entry((list)->next, type, member)
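+
+/*
+ * Usage sketch (mirrors the call sites in lpfc_scsi.c): the caller must hold
+ * whatever lock protects the list, and the entry pointer must start out NULL
+ * so an empty list can be detected afterwards.
+ *
+ *	struct lpfc_scsi_buf *psb = NULL;
+ *
+ *	list_remove_head(&phba->lpfc_scsi_buf_list, psb,
+ *			 struct lpfc_scsi_buf, list);
+ *	if (!psb)
+ *		... list was empty ...
+ */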
+
+/* per-port data that is allocated in the FC transport for us */
+struct lpfc_rport_data {
+ struct lpfc_nodelist *pnode; /* Pointer to the node structure. */
+};
+
+struct fcp_rsp {
+ uint32_t rspRsvd1; /* FC Word 0, byte 0:3 */
+ uint32_t rspRsvd2; /* FC Word 1, byte 0:3 */
+
+ uint8_t rspStatus0; /* FCP_STATUS byte 0 (reserved) */
+ uint8_t rspStatus1; /* FCP_STATUS byte 1 (reserved) */
+ uint8_t rspStatus2; /* FCP_STATUS byte 2 field validity */
+#define RSP_LEN_VALID 0x01 /* bit 0 */
+#define SNS_LEN_VALID 0x02 /* bit 1 */
+#define RESID_OVER 0x04 /* bit 2 */
+#define RESID_UNDER 0x08 /* bit 3 */
+ uint8_t rspStatus3; /* FCP_STATUS byte 3 SCSI status byte */
+
+ uint32_t rspResId; /* Residual xfer if residual count field set in
+ fcpStatus2 */
+ /* Received in Big Endian format */
+ uint32_t rspSnsLen; /* Length of sense data in fcpSnsInfo */
+ /* Received in Big Endian format */
+ uint32_t rspRspLen; /* Length of FCP response data in fcpRspInfo */
+ /* Received in Big Endian format */
+
+ uint8_t rspInfo0; /* FCP_RSP_INFO byte 0 (reserved) */
+ uint8_t rspInfo1; /* FCP_RSP_INFO byte 1 (reserved) */
+ uint8_t rspInfo2; /* FCP_RSP_INFO byte 2 (reserved) */
+ uint8_t rspInfo3; /* FCP_RSP_INFO RSP_CODE byte 3 */
+
+#define RSP_NO_FAILURE 0x00
+#define RSP_DATA_BURST_ERR 0x01
+#define RSP_CMD_FIELD_ERR 0x02
+#define RSP_RO_MISMATCH_ERR 0x03
+#define RSP_TM_NOT_SUPPORTED 0x04 /* Task mgmt function not supported */
+#define RSP_TM_NOT_COMPLETED 0x05 /* Task mgmt function not performed */
+
+ uint32_t rspInfoRsvd; /* FCP_RSP_INFO bytes 4-7 (reserved) */
+
+ uint8_t rspSnsInfo[128];
+#define SNS_ILLEGAL_REQ 0x05 /* sense key is byte 3 ([2]) */
+#define SNSCOD_BADCMD 0x20 /* sense code is byte 13 ([12]) */
+};
+
+struct fcp_cmnd {
+ uint32_t fcpLunMsl; /* most significant lun word (32 bits) */
+ uint32_t fcpLunLsl; /* least significant lun word (32 bits) */
+ /* # of bits to shift lun id to end up in right
+ * payload word, little endian = 8, big = 16.
+ */
+#ifdef __BIG_ENDIAN
+#define FC_LUN_SHIFT 16
+#define FC_ADDR_MODE_SHIFT 24
+#else /* __LITTLE_ENDIAN */
+#define FC_LUN_SHIFT 8
+#define FC_ADDR_MODE_SHIFT 0
+#endif
+
+ uint8_t fcpCntl0; /* FCP_CNTL byte 0 (reserved) */
+ uint8_t fcpCntl1; /* FCP_CNTL byte 1 task codes */
+#define SIMPLE_Q 0x00
+#define HEAD_OF_Q 0x01
+#define ORDERED_Q 0x02
+#define ACA_Q 0x04
+#define UNTAGGED 0x05
+ uint8_t fcpCntl2; /* FCP_CTL byte 2 task management codes */
+#define FCP_ABORT_TASK_SET 0x02 /* Bit 1 */
+#define FCP_CLEAR_TASK_SET 0x04 /* bit 2 */
+#define FCP_BUS_RESET 0x08 /* bit 3 */
+#define FCP_LUN_RESET 0x10 /* bit 4 */
+#define FCP_TARGET_RESET 0x20 /* bit 5 */
+#define FCP_CLEAR_ACA 0x40 /* bit 6 */
+#define FCP_TERMINATE_TASK 0x80 /* bit 7 */
+ uint8_t fcpCntl3;
+#define WRITE_DATA 0x01 /* Bit 0 */
+#define READ_DATA 0x02 /* Bit 1 */
+
+ uint8_t fcpCdb[16]; /* SRB cdb field is copied here */
+ uint32_t fcpDl; /* Total transfer length */
+
+};
+
+struct lpfc_scsi_buf {
+ struct list_head list;
+ struct scsi_cmnd *pCmd;
+ struct lpfc_hba *scsi_hba;
+ struct lpfc_rport_data *rdata;
+
+ uint32_t timeout;
+
+ uint16_t status; /* From IOCB Word 7- ulpStatus */
+ uint32_t result; /* From IOCB Word 4. */
+
+ uint32_t seg_cnt; /* Number of scatter-gather segments returned by
+ * dma_map_sg. The driver needs this for calls
+ * to dma_unmap_sg. */
+ dma_addr_t nonsg_phys; /* Non scatter-gather physical address. */
+
+ /*
+	 * data and dma_handle are the kernel virtual and bus address of the
+	 * dma-able buffer containing the fcp_cmnd, fcp_rsp and a scatter
+ * gather bde list that supports the sg_tablesize value.
+ */
+ void *data;
+ dma_addr_t dma_handle;
+
+ struct fcp_cmnd *fcp_cmnd;
+ struct fcp_rsp *fcp_rsp;
+ struct ulp_bde64 *fcp_bpl;
+
+ /* cur_iocbq has phys of the dma-able buffer.
+ * Iotag is in here
+ */
+ struct lpfc_iocbq cur_iocbq;
+};
+
+#define LPFC_SCSI_DMA_EXT_SIZE 264
+#define LPFC_BPL_SIZE 1024
+
+#define MDAC_DIRECT_CMD 0x22
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
new file mode 100644
index 00000000000..8d14b28c80b
--- /dev/null
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -0,0 +1,2885 @@
+/*******************************************************************
+ * This file is part of the Emulex Linux Device Driver for *
+ * Enterprise Fibre Channel Host Bus Adapters. *
+ * Refer to the README file included with this package for *
+ * driver version and adapter support. *
+ * Copyright (C) 2004 Emulex Corporation. *
+ * www.emulex.com *
+ * *
+ * This program is free software; you can redistribute it and/or *
+ * modify it under the terms of the GNU General Public License *
+ * as published by the Free Software Foundation; either version 2 *
+ * of the License, or (at your option) any later version. *
+ * *
+ * This program is distributed in the hope that it will be useful, *
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of *
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
+ * GNU General Public License for more details, a copy of which *
+ * can be found in the file COPYING included with this package. *
+ *******************************************************************/
+
+/*
+ * $Id: lpfc_sli.c 1.232 2005/04/13 11:59:16EDT sf_support Exp $
+ */
+
+#include <linux/blkdev.h>
+#include <linux/pci.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_host.h>
+
+#include "lpfc_hw.h"
+#include "lpfc_sli.h"
+#include "lpfc_disc.h"
+#include "lpfc_scsi.h"
+#include "lpfc.h"
+#include "lpfc_crtn.h"
+#include "lpfc_logmsg.h"
+#include "lpfc_compat.h"
+
+/*
+ * Define macro to log: Mailbox command x%x cannot issue Data
+ * This allows multiple uses of lpfc_msgBlk0311
+ * w/o perturbing log msg utility.
+ */
+#define LOG_MBOX_CANNOT_ISSUE_DATA( phba, mb, psli, flag) \
+ lpfc_printf_log(phba, \
+ KERN_INFO, \
+ LOG_MBOX | LOG_SLI, \
+ "%d:0311 Mailbox command x%x cannot issue " \
+ "Data: x%x x%x x%x\n", \
+ phba->brd_no, \
+ mb->mbxCommand, \
+ phba->hba_state, \
+ psli->sli_flag, \
+ flag);
+
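+/*
+ * Usage sketch (hypothetical call site): the macro is invoked as a statement
+ * wherever a mailbox command has to be rejected, e.g.
+ *
+ *	LOG_MBOX_CANNOT_ISSUE_DATA(phba, mb, psli, flag)
+ *
+ * Note the macro body already supplies the trailing semicolon.
+ */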
+
+/* There are only four IOCB completion types. */
+typedef enum _lpfc_iocb_type {
+ LPFC_UNKNOWN_IOCB,
+ LPFC_UNSOL_IOCB,
+ LPFC_SOL_IOCB,
+ LPFC_ABORT_IOCB
+} lpfc_iocb_type;
+
+/*
+ * Translate the iocb command to an iocb command type used to decide the final
+ * disposition of each completed IOCB.
+ */
+static lpfc_iocb_type
+lpfc_sli_iocb_cmd_type(uint8_t iocb_cmnd)
+{
+ lpfc_iocb_type type = LPFC_UNKNOWN_IOCB;
+
+ if (iocb_cmnd > CMD_MAX_IOCB_CMD)
+		return LPFC_UNKNOWN_IOCB;
+
+ switch (iocb_cmnd) {
+ case CMD_XMIT_SEQUENCE_CR:
+ case CMD_XMIT_SEQUENCE_CX:
+ case CMD_XMIT_BCAST_CN:
+ case CMD_XMIT_BCAST_CX:
+ case CMD_ELS_REQUEST_CR:
+ case CMD_ELS_REQUEST_CX:
+ case CMD_CREATE_XRI_CR:
+ case CMD_CREATE_XRI_CX:
+ case CMD_GET_RPI_CN:
+ case CMD_XMIT_ELS_RSP_CX:
+ case CMD_GET_RPI_CR:
+ case CMD_FCP_IWRITE_CR:
+ case CMD_FCP_IWRITE_CX:
+ case CMD_FCP_IREAD_CR:
+ case CMD_FCP_IREAD_CX:
+ case CMD_FCP_ICMND_CR:
+ case CMD_FCP_ICMND_CX:
+ case CMD_ADAPTER_MSG:
+ case CMD_ADAPTER_DUMP:
+ case CMD_XMIT_SEQUENCE64_CR:
+ case CMD_XMIT_SEQUENCE64_CX:
+ case CMD_XMIT_BCAST64_CN:
+ case CMD_XMIT_BCAST64_CX:
+ case CMD_ELS_REQUEST64_CR:
+ case CMD_ELS_REQUEST64_CX:
+ case CMD_FCP_IWRITE64_CR:
+ case CMD_FCP_IWRITE64_CX:
+ case CMD_FCP_IREAD64_CR:
+ case CMD_FCP_IREAD64_CX:
+ case CMD_FCP_ICMND64_CR:
+ case CMD_FCP_ICMND64_CX:
+ case CMD_GEN_REQUEST64_CR:
+ case CMD_GEN_REQUEST64_CX:
+ case CMD_XMIT_ELS_RSP64_CX:
+ type = LPFC_SOL_IOCB;
+ break;
+ case CMD_ABORT_XRI_CN:
+ case CMD_ABORT_XRI_CX:
+ case CMD_CLOSE_XRI_CN:
+ case CMD_CLOSE_XRI_CX:
+ case CMD_XRI_ABORTED_CX:
+ case CMD_ABORT_MXRI64_CN:
+ type = LPFC_ABORT_IOCB;
+ break;
+ case CMD_RCV_SEQUENCE_CX:
+ case CMD_RCV_ELS_REQ_CX:
+ case CMD_RCV_SEQUENCE64_CX:
+ case CMD_RCV_ELS_REQ64_CX:
+ type = LPFC_UNSOL_IOCB;
+ break;
+ default:
+ type = LPFC_UNKNOWN_IOCB;
+ break;
+ }
+
+ return type;
+}
+
+static int
+lpfc_sli_ring_map(struct lpfc_hba * phba, LPFC_MBOXQ_t *pmb)
+{
+ struct lpfc_sli *psli = &phba->sli;
+ MAILBOX_t *pmbox = &pmb->mb;
+ int i, rc;
+
+ for (i = 0; i < psli->num_rings; i++) {
+ phba->hba_state = LPFC_INIT_MBX_CMDS;
+ lpfc_config_ring(phba, i, pmb);
+ rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
+ if (rc != MBX_SUCCESS) {
+ lpfc_printf_log(phba,
+ KERN_ERR,
+ LOG_INIT,
+ "%d:0446 Adapter failed to init, "
+ "mbxCmd x%x CFG_RING, mbxStatus x%x, "
+ "ring %d\n",
+ phba->brd_no,
+ pmbox->mbxCommand,
+ pmbox->mbxStatus,
+ i);
+ phba->hba_state = LPFC_HBA_ERROR;
+ return -ENXIO;
+ }
+ }
+ return 0;
+}
+
+static int
+lpfc_sli_ringtxcmpl_put(struct lpfc_hba * phba,
+ struct lpfc_sli_ring * pring, struct lpfc_iocbq * piocb)
+{
+ uint16_t iotag;
+
+ list_add_tail(&piocb->list, &pring->txcmplq);
+ pring->txcmplq_cnt++;
+ if (unlikely(pring->ringno == LPFC_ELS_RING))
+ mod_timer(&phba->els_tmofunc,
+ jiffies + HZ * (phba->fc_ratov << 1));
+
+ if (pring->fast_lookup) {
+ /* Setup fast lookup based on iotag for completion */
+ iotag = piocb->iocb.ulpIoTag;
+ if (iotag && (iotag < pring->fast_iotag))
+ *(pring->fast_lookup + iotag) = piocb;
+		else {
+			/* Cmd ring <ringno> put: iotag <iotag> greater than
+			   configured max <fast_iotag> wd0 <icmd> */
+ lpfc_printf_log(phba,
+ KERN_ERR,
+ LOG_SLI,
+ "%d:0316 Cmd ring %d put: iotag x%x "
+				"greater than configured max x%x "
+ "wd0 x%x\n",
+ phba->brd_no,
+ pring->ringno, iotag,
+ pring->fast_iotag,
+ *(((uint32_t *)(&piocb->iocb)) + 7));
+ }
+ }
+ return (0);
+}
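+
+/*
+ * Note on the fast-lookup table maintained above: for rings configured
+ * with fast_lookup, pring->fast_lookup[iotag] caches the outstanding
+ * iocb so the response path can resolve a completion in O(1):
+ *
+ *	cmd_iocb = *(pring->fast_lookup + irsp->ulpIoTag);
+ *
+ * (see lpfc_sli_txcmpl_ring_iotag_lookup() below; the slow path in
+ * lpfc_sli_txcmpl_ring_search_slow() walks the txcmplq instead).
+ */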
+
+static struct lpfc_iocbq *
+lpfc_sli_ringtx_get(struct lpfc_hba * phba, struct lpfc_sli_ring * pring)
+{
+ struct list_head *dlp;
+ struct lpfc_iocbq *cmd_iocb;
+
+ dlp = &pring->txq;
+ cmd_iocb = NULL;
+ list_remove_head((&pring->txq), cmd_iocb,
+ struct lpfc_iocbq,
+ list);
+	if (cmd_iocb) {
+		/* list_remove_head dequeued an IOCBQ from the txq;
+		 * adjust the count and return it.
+		 */
+		pring->txq_cnt--;
+	}
+ return (cmd_iocb);
+}
+
+static IOCB_t *
+lpfc_sli_next_iocb_slot (struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
+{
+ MAILBOX_t *mbox = (MAILBOX_t *)phba->sli.MBhostaddr;
+ PGP *pgp = (PGP *)&mbox->us.s2.port[pring->ringno];
+ uint32_t max_cmd_idx = pring->numCiocb;
+ IOCB_t *iocb = NULL;
+
+ if ((pring->next_cmdidx == pring->cmdidx) &&
+ (++pring->next_cmdidx >= max_cmd_idx))
+ pring->next_cmdidx = 0;
+
+ if (unlikely(pring->local_getidx == pring->next_cmdidx)) {
+
+ pring->local_getidx = le32_to_cpu(pgp->cmdGetInx);
+
+ if (unlikely(pring->local_getidx >= max_cmd_idx)) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "%d:0315 Ring %d issue: portCmdGet %d "
+				"is bigger than cmd ring %d\n",
+ phba->brd_no, pring->ringno,
+ pring->local_getidx, max_cmd_idx);
+
+ phba->hba_state = LPFC_HBA_ERROR;
+ /*
+ * All error attention handlers are posted to
+ * worker thread
+ */
+ phba->work_ha |= HA_ERATT;
+ phba->work_hs = HS_FFER3;
+ if (phba->work_wait)
+ wake_up(phba->work_wait);
+
+ return NULL;
+ }
+
+ if (pring->local_getidx == pring->next_cmdidx)
+ return NULL;
+ }
+
+ iocb = IOCB_ENTRY(pring->cmdringaddr, pring->cmdidx);
+
+ return iocb;
+}
+
+static uint32_t
+lpfc_sli_next_iotag(struct lpfc_hba * phba, struct lpfc_sli_ring * pring)
+{
+ uint32_t search_start;
+
+ if (pring->fast_lookup == NULL) {
+ pring->iotag_ctr++;
+ if (pring->iotag_ctr >= pring->iotag_max)
+ pring->iotag_ctr = 1;
+ return pring->iotag_ctr;
+ }
+
+ search_start = pring->iotag_ctr;
+
+ do {
+ pring->iotag_ctr++;
+ if (pring->iotag_ctr >= pring->fast_iotag)
+ pring->iotag_ctr = 1;
+
+ if (*(pring->fast_lookup + pring->iotag_ctr) == NULL)
+ return pring->iotag_ctr;
+
+ } while (pring->iotag_ctr != search_start);
+
+ /*
+ * Outstanding I/O count for ring <ringno> is at max <fast_iotag>
+ */
+ lpfc_printf_log(phba,
+ KERN_ERR,
+ LOG_SLI,
+ "%d:0318 Outstanding I/O count for ring %d is at max x%x\n",
+ phba->brd_no,
+ pring->ringno,
+ pring->fast_iotag);
+ return (0);
+}
+
+static void
+lpfc_sli_submit_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
+ IOCB_t *iocb, struct lpfc_iocbq *nextiocb)
+{
+ /*
+ * Allocate and set up an iotag
+ */
+ nextiocb->iocb.ulpIoTag =
+ lpfc_sli_next_iotag(phba, &phba->sli.ring[phba->sli.fcp_ring]);
+
+ /*
+ * Issue iocb command to adapter
+ */
+ lpfc_sli_pcimem_bcopy(&nextiocb->iocb, iocb, sizeof (IOCB_t));
+ wmb();
+ pring->stats.iocb_cmd++;
+
+ /*
+ * If there is no completion routine to call, we can release the
+ * IOCB buffer back right now. For IOCBs, like QUE_RING_BUF,
+ * that have no rsp ring completion, iocb_cmpl MUST be NULL.
+ */
+ if (nextiocb->iocb_cmpl)
+ lpfc_sli_ringtxcmpl_put(phba, pring, nextiocb);
+ else {
+ list_add_tail(&nextiocb->list, &phba->lpfc_iocb_list);
+ }
+
+ /*
+ * Let the HBA know what IOCB slot will be the next one the
+ * driver will put a command into.
+ */
+ pring->cmdidx = pring->next_cmdidx;
+ writeb(pring->cmdidx, phba->MBslimaddr
+ + (SLIMOFF + (pring->ringno * 2)) * 4);
+}
+
+static void
+lpfc_sli_update_full_ring(struct lpfc_hba * phba,
+ struct lpfc_sli_ring *pring)
+{
+ int ringno = pring->ringno;
+
+ pring->flag |= LPFC_CALL_RING_AVAILABLE;
+
+ wmb();
+
+ /*
+ * Set ring 'ringno' to SET R0CE_REQ in Chip Att register.
+ * The HBA will tell us when an IOCB entry is available.
+ */
+ writel((CA_R0ATT|CA_R0CE_REQ) << (ringno*4), phba->CAregaddr);
+ readl(phba->CAregaddr); /* flush */
+
+ pring->stats.iocb_cmd_full++;
+}
+
+static void
+lpfc_sli_update_ring(struct lpfc_hba * phba,
+ struct lpfc_sli_ring *pring)
+{
+ int ringno = pring->ringno;
+
+ /*
+ * Tell the HBA that there is work to do in this ring.
+ */
+ wmb();
+ writel(CA_R0ATT << (ringno * 4), phba->CAregaddr);
+ readl(phba->CAregaddr); /* flush */
+}
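+
+/*
+ * In both ring-update helpers above the attention bits are shifted by
+ * (ringno * 4); each ring appears to own a 4-bit field in the Chip
+ * Attention register. For ring 0 the doorbell reduces to:
+ *
+ *	writel(CA_R0ATT << (0 * 4), phba->CAregaddr);
+ *	readl(phba->CAregaddr);		the readl flushes the write
+ */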
+
+static void
+lpfc_sli_resume_iocb(struct lpfc_hba * phba, struct lpfc_sli_ring * pring)
+{
+ IOCB_t *iocb;
+ struct lpfc_iocbq *nextiocb;
+
+ /*
+ * Check to see if:
+ * (a) there is anything on the txq to send
+ * (b) link is up
+ * (c) link attention events can be processed (fcp ring only)
+ * (d) IOCB processing is not blocked by the outstanding mbox command.
+ */
+ if (pring->txq_cnt &&
+ (phba->hba_state > LPFC_LINK_DOWN) &&
+ (pring->ringno != phba->sli.fcp_ring ||
+ phba->sli.sli_flag & LPFC_PROCESS_LA) &&
+ !(pring->flag & LPFC_STOP_IOCB_MBX)) {
+
+ while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
+ (nextiocb = lpfc_sli_ringtx_get(phba, pring)))
+ lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb);
+
+ if (iocb)
+ lpfc_sli_update_ring(phba, pring);
+ else
+ lpfc_sli_update_full_ring(phba, pring);
+ }
+
+ return;
+}
+
+/* lpfc_sli_turn_on_ring is only called by lpfc_sli_handle_mb_event below */
+static void
+lpfc_sli_turn_on_ring(struct lpfc_hba * phba, int ringno)
+{
+	MAILBOX_t *mbox = (MAILBOX_t *) phba->sli.MBhostaddr;
+	PGP *pgp = (PGP *) &mbox->us.s2.port[ringno];
+
+ /* If the ring is active, flag it */
+ if (phba->sli.ring[ringno].cmdringaddr) {
+ if (phba->sli.ring[ringno].flag & LPFC_STOP_IOCB_MBX) {
+ phba->sli.ring[ringno].flag &= ~LPFC_STOP_IOCB_MBX;
+ /*
+ * Force update of the local copy of cmdGetInx
+ */
+ phba->sli.ring[ringno].local_getidx
+ = le32_to_cpu(pgp->cmdGetInx);
+ spin_lock_irq(phba->host->host_lock);
+ lpfc_sli_resume_iocb(phba, &phba->sli.ring[ringno]);
+ spin_unlock_irq(phba->host->host_lock);
+ }
+ }
+}
+
+static int
+lpfc_sli_chk_mbx_command(uint8_t mbxCommand)
+{
+ uint8_t ret;
+
+ switch (mbxCommand) {
+ case MBX_LOAD_SM:
+ case MBX_READ_NV:
+ case MBX_WRITE_NV:
+ case MBX_RUN_BIU_DIAG:
+ case MBX_INIT_LINK:
+ case MBX_DOWN_LINK:
+ case MBX_CONFIG_LINK:
+ case MBX_CONFIG_RING:
+ case MBX_RESET_RING:
+ case MBX_READ_CONFIG:
+ case MBX_READ_RCONFIG:
+ case MBX_READ_SPARM:
+ case MBX_READ_STATUS:
+ case MBX_READ_RPI:
+ case MBX_READ_XRI:
+ case MBX_READ_REV:
+ case MBX_READ_LNK_STAT:
+ case MBX_REG_LOGIN:
+ case MBX_UNREG_LOGIN:
+ case MBX_READ_LA:
+ case MBX_CLEAR_LA:
+ case MBX_DUMP_MEMORY:
+ case MBX_DUMP_CONTEXT:
+ case MBX_RUN_DIAGS:
+ case MBX_RESTART:
+ case MBX_UPDATE_CFG:
+ case MBX_DOWN_LOAD:
+ case MBX_DEL_LD_ENTRY:
+ case MBX_RUN_PROGRAM:
+ case MBX_SET_MASK:
+ case MBX_SET_SLIM:
+ case MBX_UNREG_D_ID:
+ case MBX_CONFIG_FARP:
+ case MBX_LOAD_AREA:
+ case MBX_RUN_BIU_DIAG64:
+ case MBX_CONFIG_PORT:
+ case MBX_READ_SPARM64:
+ case MBX_READ_RPI64:
+ case MBX_REG_LOGIN64:
+ case MBX_READ_LA64:
+ case MBX_FLASH_WR_ULA:
+ case MBX_SET_DEBUG:
+ case MBX_LOAD_EXP_ROM:
+ ret = mbxCommand;
+ break;
+ default:
+ ret = MBX_SHUTDOWN;
+ break;
+ }
+ return (ret);
+}
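+
+/*
+ * Any command not in the list above is translated to MBX_SHUTDOWN by
+ * lpfc_sli_chk_mbx_command(); lpfc_sli_handle_mb_event() below treats
+ * that as fatal:
+ *
+ *	if (lpfc_sli_chk_mbx_command(pmbox->mbxCommand) == MBX_SHUTDOWN) {
+ *		phba->hba_state = LPFC_HBA_ERROR;
+ *		...
+ *	}
+ */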
+static void
+lpfc_sli_wake_mbox_wait(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
+{
+ wait_queue_head_t *pdone_q;
+
+ /*
+	 * If pdone_q is NULL, the waiting thread gave up and
+ * continued running.
+ */
+ pdone_q = (wait_queue_head_t *) pmboxq->context1;
+ if (pdone_q)
+ wake_up_interruptible(pdone_q);
+ return;
+}
+
+void
+lpfc_sli_def_mbox_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
+{
+ struct lpfc_dmabuf *mp;
+ mp = (struct lpfc_dmabuf *) (pmb->context1);
+ if (mp) {
+ lpfc_mbuf_free(phba, mp->virt, mp->phys);
+ kfree(mp);
+ }
+ mempool_free( pmb, phba->mbox_mem_pool);
+ return;
+}
+
+int
+lpfc_sli_handle_mb_event(struct lpfc_hba * phba)
+{
+ MAILBOX_t *mbox;
+ MAILBOX_t *pmbox;
+ LPFC_MBOXQ_t *pmb;
+ struct lpfc_sli *psli;
+ int i, rc;
+ uint32_t process_next;
+
+ psli = &phba->sli;
+ /* We should only get here if we are in SLI2 mode */
+ if (!(phba->sli.sli_flag & LPFC_SLI2_ACTIVE)) {
+ return (1);
+ }
+
+ phba->sli.slistat.mbox_event++;
+
+	/* Get a Mailbox buffer to set up mailbox commands for callback */
+ if ((pmb = phba->sli.mbox_active)) {
+ pmbox = &pmb->mb;
+ mbox = (MAILBOX_t *) phba->sli.MBhostaddr;
+
+ /* First check out the status word */
+ lpfc_sli_pcimem_bcopy(mbox, pmbox, sizeof (uint32_t));
+
+ /* Sanity check to ensure the host owns the mailbox */
+ if (pmbox->mbxOwner != OWN_HOST) {
+			/* Let's retry for a while */
+ for (i = 0; i < 10240; i++) {
+ /* First copy command data */
+ lpfc_sli_pcimem_bcopy(mbox, pmbox,
+ sizeof (uint32_t));
+ if (pmbox->mbxOwner == OWN_HOST)
+ goto mbout;
+ }
+ /* Stray Mailbox Interrupt, mbxCommand <cmd> mbxStatus
+ <status> */
+ lpfc_printf_log(phba,
+ KERN_ERR,
+ LOG_MBOX | LOG_SLI,
+ "%d:0304 Stray Mailbox Interrupt "
+ "mbxCommand x%x mbxStatus x%x\n",
+ phba->brd_no,
+ pmbox->mbxCommand,
+ pmbox->mbxStatus);
+
+ spin_lock_irq(phba->host->host_lock);
+ phba->sli.sli_flag |= LPFC_SLI_MBOX_ACTIVE;
+ spin_unlock_irq(phba->host->host_lock);
+ return (1);
+ }
+
+ mbout:
+ del_timer_sync(&phba->sli.mbox_tmo);
+ phba->work_hba_events &= ~WORKER_MBOX_TMO;
+
+ /*
+		 * It is a fatal error if an unknown mailbox command completes.
+ */
+ if (lpfc_sli_chk_mbx_command(pmbox->mbxCommand) ==
+ MBX_SHUTDOWN) {
+
+			/* Unknown mailbox command completion */
+ lpfc_printf_log(phba,
+ KERN_ERR,
+ LOG_MBOX | LOG_SLI,
+ "%d:0323 Unknown Mailbox command %x Cmpl\n",
+ phba->brd_no,
+ pmbox->mbxCommand);
+ phba->hba_state = LPFC_HBA_ERROR;
+ phba->work_hs = HS_FFER3;
+ lpfc_handle_eratt(phba);
+ return (0);
+ }
+
+ phba->sli.mbox_active = NULL;
+ if (pmbox->mbxStatus) {
+ phba->sli.slistat.mbox_stat_err++;
+ if (pmbox->mbxStatus == MBXERR_NO_RESOURCES) {
+ /* Mbox cmd cmpl error - RETRYing */
+ lpfc_printf_log(phba,
+ KERN_INFO,
+ LOG_MBOX | LOG_SLI,
+ "%d:0305 Mbox cmd cmpl error - "
+ "RETRYing Data: x%x x%x x%x x%x\n",
+ phba->brd_no,
+ pmbox->mbxCommand,
+ pmbox->mbxStatus,
+ pmbox->un.varWords[0],
+ phba->hba_state);
+ pmbox->mbxStatus = 0;
+ pmbox->mbxOwner = OWN_HOST;
+ spin_lock_irq(phba->host->host_lock);
+ phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
+ spin_unlock_irq(phba->host->host_lock);
+ rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
+ if (rc == MBX_SUCCESS)
+ return (0);
+ }
+ }
+
+ /* Mailbox cmd <cmd> Cmpl <cmpl> */
+ lpfc_printf_log(phba,
+ KERN_INFO,
+ LOG_MBOX | LOG_SLI,
+ "%d:0307 Mailbox cmd x%x Cmpl x%p "
+ "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x\n",
+ phba->brd_no,
+ pmbox->mbxCommand,
+ pmb->mbox_cmpl,
+ *((uint32_t *) pmbox),
+ pmbox->un.varWords[0],
+ pmbox->un.varWords[1],
+ pmbox->un.varWords[2],
+ pmbox->un.varWords[3],
+ pmbox->un.varWords[4],
+ pmbox->un.varWords[5],
+ pmbox->un.varWords[6],
+ pmbox->un.varWords[7]);
+
+ if (pmb->mbox_cmpl) {
+ lpfc_sli_pcimem_bcopy(mbox, pmbox, MAILBOX_CMD_SIZE);
+ pmb->mbox_cmpl(phba,pmb);
+ }
+ }
+
+
+ do {
+ process_next = 0; /* by default don't loop */
+ spin_lock_irq(phba->host->host_lock);
+ phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
+
+ /* Process next mailbox command if there is one */
+ if ((pmb = lpfc_mbox_get(phba))) {
+ spin_unlock_irq(phba->host->host_lock);
+ rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
+ if (rc == MBX_NOT_FINISHED) {
+ pmb->mb.mbxStatus = MBX_NOT_FINISHED;
+ pmb->mbox_cmpl(phba,pmb);
+ process_next = 1;
+ continue; /* loop back */
+ }
+ } else {
+ spin_unlock_irq(phba->host->host_lock);
+ /* Turn on IOCB processing */
+ for (i = 0; i < phba->sli.num_rings; i++) {
+ lpfc_sli_turn_on_ring(phba, i);
+ }
+
+ /* Free any lpfc_dmabuf's waiting for mbox cmd cmpls */
+ while (!list_empty(&phba->freebufList)) {
+ struct lpfc_dmabuf *mp;
+
+ mp = NULL;
+ list_remove_head((&phba->freebufList),
+ mp,
+ struct lpfc_dmabuf,
+ list);
+ if (mp) {
+ lpfc_mbuf_free(phba, mp->virt,
+ mp->phys);
+ kfree(mp);
+ }
+ }
+ }
+
+ } while (process_next);
+
+ return (0);
+}
+static int
+lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
+ struct lpfc_iocbq *saveq)
+{
+ IOCB_t * irsp;
+ WORD5 * w5p;
+ uint32_t Rctl, Type;
+ uint32_t match, i;
+
+ match = 0;
+ irsp = &(saveq->iocb);
+ if ((irsp->ulpCommand == CMD_RCV_ELS_REQ64_CX)
+ || (irsp->ulpCommand == CMD_RCV_ELS_REQ_CX)) {
+ Rctl = FC_ELS_REQ;
+ Type = FC_ELS_DATA;
+ } else {
+		w5p = (WORD5 *) &(saveq->iocb.un.ulpWord[5]);
+ Rctl = w5p->hcsw.Rctl;
+ Type = w5p->hcsw.Type;
+
+ /* Firmware Workaround */
+ if ((Rctl == 0) && (pring->ringno == LPFC_ELS_RING) &&
+ (irsp->ulpCommand == CMD_RCV_SEQUENCE64_CX)) {
+ Rctl = FC_ELS_REQ;
+ Type = FC_ELS_DATA;
+ w5p->hcsw.Rctl = Rctl;
+ w5p->hcsw.Type = Type;
+ }
+ }
+	/* Unsolicited responses */
+ if (pring->prt[0].profile) {
+ (pring->prt[0].lpfc_sli_rcv_unsol_event) (phba, pring, saveq);
+ match = 1;
+	} else {
+		/* We must search, based on rctl / type, for the right
+		   routine */
+		for (i = 0; i < pring->num_mask; i++) {
+			if ((pring->prt[i].rctl == Rctl)
+			    && (pring->prt[i].type == Type)) {
+				(pring->prt[i].lpfc_sli_rcv_unsol_event)
+					(phba, pring, saveq);
+				match = 1;
+				break;
+			}
+		}
+	}
+ if (match == 0) {
+ /* Unexpected Rctl / Type received */
+ /* Ring <ringno> handler: unexpected
+ Rctl <Rctl> Type <Type> received */
+ lpfc_printf_log(phba,
+ KERN_WARNING,
+ LOG_SLI,
+ "%d:0313 Ring %d handler: unexpected Rctl x%x "
+				"Type x%x received\n",
+ phba->brd_no,
+ pring->ringno,
+ Rctl,
+ Type);
+ }
+ return(1);
+}
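+
+/*
+ * The prt[] masks consulted above are registered per ring by
+ * lpfc_sli_setup() below; for example, the ELS ring routes unsolicited
+ * ELS frames like so:
+ *
+ *	pring->prt[0].rctl = FC_ELS_REQ;
+ *	pring->prt[0].type = FC_ELS_DATA;
+ *	pring->prt[0].lpfc_sli_rcv_unsol_event = lpfc_els_unsol_event;
+ */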
+
+static struct lpfc_iocbq *
+lpfc_sli_txcmpl_ring_search_slow(struct lpfc_sli_ring * pring,
+ struct lpfc_iocbq * prspiocb)
+{
+ IOCB_t *icmd = NULL;
+ IOCB_t *irsp = NULL;
+ struct lpfc_iocbq *cmd_iocb;
+ struct lpfc_iocbq *iocb, *next_iocb;
+ uint16_t iotag;
+
+ irsp = &prspiocb->iocb;
+ iotag = irsp->ulpIoTag;
+ cmd_iocb = NULL;
+
+	/* Search through the txcmplq from the beginning */
+ list_for_each_entry_safe(iocb, next_iocb, &(pring->txcmplq), list) {
+ icmd = &iocb->iocb;
+ if (iotag == icmd->ulpIoTag) {
+ /* Found a match. */
+ cmd_iocb = iocb;
+ list_del(&iocb->list);
+ pring->txcmplq_cnt--;
+ break;
+ }
+ }
+
+ return (cmd_iocb);
+}
+
+static struct lpfc_iocbq *
+lpfc_sli_txcmpl_ring_iotag_lookup(struct lpfc_hba * phba,
+ struct lpfc_sli_ring * pring,
+ struct lpfc_iocbq * prspiocb)
+{
+ IOCB_t *irsp = NULL;
+ struct lpfc_iocbq *cmd_iocb = NULL;
+ uint16_t iotag;
+
+ if (unlikely(pring->fast_lookup == NULL))
+ return NULL;
+
+ /* Use fast lookup based on iotag for completion */
+ irsp = &prspiocb->iocb;
+ iotag = irsp->ulpIoTag;
+ if (iotag < pring->fast_iotag) {
+ cmd_iocb = *(pring->fast_lookup + iotag);
+ *(pring->fast_lookup + iotag) = NULL;
+ if (cmd_iocb) {
+ list_del(&cmd_iocb->list);
+ pring->txcmplq_cnt--;
+ return cmd_iocb;
+ } else {
+			/*
+			 * This is clearly an error. A ring that uses iotags
+			 * should never get an interrupt for a completion that
+			 * is not on the ring. Return NULL and log an error.
+			 */
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "%d:0327 Rsp ring %d error - command "
+ "completion for iotag x%x not found\n",
+ phba->brd_no, pring->ringno, iotag);
+ return NULL;
+ }
+ }
+
+ /*
+	 * Rsp ring <ringno> get: iotag <iotag> greater than
+ * configured max <fast_iotag> wd0 <irsp>. This is an
+ * error. Just return NULL.
+ */
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+			"%d:0317 Rsp ring %d get: iotag x%x greater than "
+ "configured max x%x wd0 x%x\n",
+ phba->brd_no, pring->ringno, iotag, pring->fast_iotag,
+ *(((uint32_t *) irsp) + 7));
+ return NULL;
+}
+
+static int
+lpfc_sli_process_sol_iocb(struct lpfc_hba * phba, struct lpfc_sli_ring * pring,
+ struct lpfc_iocbq *saveq)
+{
+ struct lpfc_iocbq * cmdiocbp;
+ int rc = 1;
+ unsigned long iflag;
+
+ /* Based on the iotag field, get the cmd IOCB from the txcmplq */
+ spin_lock_irqsave(phba->host->host_lock, iflag);
+ cmdiocbp = lpfc_sli_txcmpl_ring_search_slow(pring, saveq);
+ if (cmdiocbp) {
+ if (cmdiocbp->iocb_cmpl) {
+ /*
+ * Post all ELS completions to the worker thread.
+ * All other are passed to the completion callback.
+ */
+ if (pring->ringno == LPFC_ELS_RING) {
+ spin_unlock_irqrestore(phba->host->host_lock,
+ iflag);
+ (cmdiocbp->iocb_cmpl) (phba, cmdiocbp, saveq);
+ spin_lock_irqsave(phba->host->host_lock, iflag);
+ }
+ else {
+ if (cmdiocbp->iocb_flag & LPFC_IO_POLL)
+ rc = 0;
+
+ spin_unlock_irqrestore(phba->host->host_lock,
+ iflag);
+ (cmdiocbp->iocb_cmpl) (phba, cmdiocbp, saveq);
+ spin_lock_irqsave(phba->host->host_lock, iflag);
+ }
+ } else {
+ list_add_tail(&cmdiocbp->list, &phba->lpfc_iocb_list);
+ }
+ } else {
+ /*
+ * Unknown initiating command based on the response iotag.
+ * This could be the case on the ELS ring because of
+ * lpfc_els_abort().
+ */
+ if (pring->ringno != LPFC_ELS_RING) {
+ /*
+ * Ring <ringno> handler: unexpected completion IoTag
+ * <IoTag>
+ */
+ lpfc_printf_log(phba,
+ KERN_WARNING,
+ LOG_SLI,
+ "%d:0322 Ring %d handler: unexpected "
+ "completion IoTag x%x Data: x%x x%x x%x x%x\n",
+ phba->brd_no,
+ pring->ringno,
+ saveq->iocb.ulpIoTag,
+ saveq->iocb.ulpStatus,
+ saveq->iocb.un.ulpWord[4],
+ saveq->iocb.ulpCommand,
+ saveq->iocb.ulpContext);
+ }
+ }
+ spin_unlock_irqrestore(phba->host->host_lock, iflag);
+ return rc;
+}
+
+/*
+ * This routine presumes LPFC_FCP_RING handling and doesn't bother
+ * to check it explicitly.
+ */
+static int
+lpfc_sli_handle_fast_ring_event(struct lpfc_hba * phba,
+ struct lpfc_sli_ring * pring, uint32_t mask)
+{
+ IOCB_t *irsp = NULL;
+ struct lpfc_iocbq *cmdiocbq = NULL;
+ struct lpfc_iocbq rspiocbq;
+ PGP *pgp;
+ uint32_t status;
+ uint32_t portRspPut, portRspMax;
+ int rc = 1;
+ lpfc_iocb_type type;
+ unsigned long iflag;
+ uint32_t rsp_cmpl = 0;
+ void __iomem *to_slim;
+
+ spin_lock_irqsave(phba->host->host_lock, iflag);
+ pring->stats.iocb_event++;
+
+ /* The driver assumes SLI-2 mode */
+ pgp = (PGP *) &((MAILBOX_t *) phba->sli.MBhostaddr)
+ ->us.s2.port[pring->ringno];
+
+ /*
+ * The next available response entry should never exceed the maximum
+ * entries. If it does, treat it as an adapter hardware error.
+ */
+ portRspMax = pring->numRiocb;
+ portRspPut = le32_to_cpu(pgp->rspPutInx);
+ if (unlikely(portRspPut >= portRspMax)) {
+ /*
+		 * Ring <ringno> handler: portRspPut <portRspPut> is bigger than
+ * rsp ring <portRspMax>
+ */
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "%d:0312 Ring %d handler: portRspPut %d "
+				"is bigger than rsp ring %d\n",
+ phba->brd_no, pring->ringno, portRspPut,
+ portRspMax);
+
+ phba->hba_state = LPFC_HBA_ERROR;
+
+ /* All error attention handlers are posted to worker thread */
+ phba->work_ha |= HA_ERATT;
+ phba->work_hs = HS_FFER3;
+ if (phba->work_wait)
+ wake_up(phba->work_wait);
+
+ spin_unlock_irqrestore(phba->host->host_lock, iflag);
+ return 1;
+ }
+
+ rmb();
+ while (pring->rspidx != portRspPut) {
+ irsp = (IOCB_t *) IOCB_ENTRY(pring->rspringaddr, pring->rspidx);
+ type = lpfc_sli_iocb_cmd_type(irsp->ulpCommand & CMD_IOCB_MASK);
+ pring->stats.iocb_rsp++;
+ rsp_cmpl++;
+
+ if (unlikely(irsp->ulpStatus)) {
+ /* Rsp ring <ringno> error: IOCB */
+ lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
+ "%d:0326 Rsp Ring %d error: IOCB Data: "
+ "x%x x%x x%x x%x x%x x%x x%x x%x\n",
+ phba->brd_no, pring->ringno,
+ irsp->un.ulpWord[0], irsp->un.ulpWord[1],
+ irsp->un.ulpWord[2], irsp->un.ulpWord[3],
+ irsp->un.ulpWord[4], irsp->un.ulpWord[5],
+ *(((uint32_t *) irsp) + 6),
+ *(((uint32_t *) irsp) + 7));
+ }
+
+ switch (type) {
+ case LPFC_ABORT_IOCB:
+ case LPFC_SOL_IOCB:
+ /*
+ * Idle exchange closed via ABTS from port. No iocb
+ * resources need to be recovered.
+ */
+ if (unlikely(irsp->ulpCommand == CMD_XRI_ABORTED_CX)) {
+ printk(KERN_INFO "%s: IOCB cmd 0x%x processed. "
+ "Skipping completion\n", __FUNCTION__,
+ irsp->ulpCommand);
+ break;
+ }
+
+ rspiocbq.iocb.un.ulpWord[4] = irsp->un.ulpWord[4];
+ rspiocbq.iocb.ulpStatus = irsp->ulpStatus;
+ rspiocbq.iocb.ulpContext = irsp->ulpContext;
+ rspiocbq.iocb.ulpIoTag = irsp->ulpIoTag;
+ cmdiocbq = lpfc_sli_txcmpl_ring_iotag_lookup(phba,
+ pring,
+ &rspiocbq);
+ if ((cmdiocbq) && (cmdiocbq->iocb_cmpl)) {
+ spin_unlock_irqrestore(
+ phba->host->host_lock, iflag);
+ (cmdiocbq->iocb_cmpl)(phba, cmdiocbq,
+ &rspiocbq);
+ spin_lock_irqsave(phba->host->host_lock,
+ iflag);
+ }
+ break;
+ default:
+ if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
+ char adaptermsg[LPFC_MAX_ADPTMSG];
+ memset(adaptermsg, 0, LPFC_MAX_ADPTMSG);
+ memcpy(&adaptermsg[0], (uint8_t *) irsp,
+ MAX_MSG_DATA);
+ dev_warn(&((phba->pcidev)->dev), "lpfc%d: %s",
+ phba->brd_no, adaptermsg);
+ } else {
+ /* Unknown IOCB command */
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "%d:0321 Unknown IOCB command "
+ "Data: x%x, x%x x%x x%x x%x\n",
+ phba->brd_no, type, irsp->ulpCommand,
+ irsp->ulpStatus, irsp->ulpIoTag,
+ irsp->ulpContext);
+ }
+ break;
+ }
+
+ /*
+ * The response IOCB has been processed. Update the ring
+ * pointer in SLIM. If the port response put pointer has not
+ * been updated, sync the pgp->rspPutInx and fetch the new port
+ * response put pointer.
+ */
+ if (++pring->rspidx >= portRspMax)
+ pring->rspidx = 0;
+
+ to_slim = phba->MBslimaddr +
+ (SLIMOFF + (pring->ringno * 2) + 1) * 4;
+ writeb(pring->rspidx, to_slim);
+
+ if (pring->rspidx == portRspPut)
+ portRspPut = le32_to_cpu(pgp->rspPutInx);
+ }
+
+ if ((rsp_cmpl > 0) && (mask & HA_R0RE_REQ)) {
+ pring->stats.iocb_rsp_full++;
+ status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4));
+ writel(status, phba->CAregaddr);
+ readl(phba->CAregaddr);
+ }
+ if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) {
+ pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
+ pring->stats.iocb_cmd_empty++;
+
+ /* Force update of the local copy of cmdGetInx */
+ pring->local_getidx = le32_to_cpu(pgp->cmdGetInx);
+ lpfc_sli_resume_iocb(phba, pring);
+
+ if ((pring->lpfc_sli_cmd_available))
+ (pring->lpfc_sli_cmd_available) (phba, pring);
+
+ }
+
+ spin_unlock_irqrestore(phba->host->host_lock, iflag);
+ return rc;
+}
+
+
+int
+lpfc_sli_handle_slow_ring_event(struct lpfc_hba * phba,
+ struct lpfc_sli_ring * pring, uint32_t mask)
+{
+ IOCB_t *entry;
+ IOCB_t *irsp = NULL;
+ struct lpfc_iocbq *rspiocbp = NULL;
+ struct lpfc_iocbq *next_iocb;
+ struct lpfc_iocbq *cmdiocbp;
+ struct lpfc_iocbq *saveq;
+ struct list_head *lpfc_iocb_list = &phba->lpfc_iocb_list;
+ HGP *hgp;
+ PGP *pgp;
+ MAILBOX_t *mbox;
+ uint8_t iocb_cmd_type;
+ lpfc_iocb_type type;
+ uint32_t status, free_saveq;
+ uint32_t portRspPut, portRspMax;
+ int rc = 1;
+ unsigned long iflag;
+ void __iomem *to_slim;
+
+ spin_lock_irqsave(phba->host->host_lock, iflag);
+ pring->stats.iocb_event++;
+
+ /* The driver assumes SLI-2 mode */
+ mbox = (MAILBOX_t *) phba->sli.MBhostaddr;
+ pgp = (PGP *) & mbox->us.s2.port[pring->ringno];
+ hgp = (HGP *) & mbox->us.s2.host[pring->ringno];
+
+ /*
+ * The next available response entry should never exceed the maximum
+ * entries. If it does, treat it as an adapter hardware error.
+ */
+ portRspMax = pring->numRiocb;
+ portRspPut = le32_to_cpu(pgp->rspPutInx);
+ if (portRspPut >= portRspMax) {
+ /*
+		 * Ring <ringno> handler: portRspPut <portRspPut> is bigger than
+ * rsp ring <portRspMax>
+ */
+ lpfc_printf_log(phba,
+ KERN_ERR,
+ LOG_SLI,
+ "%d:0312 Ring %d handler: portRspPut %d "
+				"is bigger than rsp ring %d\n",
+ phba->brd_no,
+ pring->ringno, portRspPut, portRspMax);
+
+ phba->hba_state = LPFC_HBA_ERROR;
+ spin_unlock_irqrestore(phba->host->host_lock, iflag);
+
+ phba->work_hs = HS_FFER3;
+ lpfc_handle_eratt(phba);
+
+ return 1;
+ }
+
+ rmb();
+ lpfc_iocb_list = &phba->lpfc_iocb_list;
+ while (pring->rspidx != portRspPut) {
+ /*
+ * Build a completion list and call the appropriate handler.
+ * The process is to get the next available response iocb, get
+ * a free iocb from the list, copy the response data into the
+ * free iocb, insert to the continuation list, and update the
+ * next response index to slim. This process makes response
+		 * iocbs in the ring available to DMA as fast as possible but
+ * pays a penalty for a copy operation. Since the iocb is
+ * only 32 bytes, this penalty is considered small relative to
+ * the PCI reads for register values and a slim write. When
+		 * the ulpLe field is set, the entire command has been
+ * received.
+ */
+ entry = IOCB_ENTRY(pring->rspringaddr, pring->rspidx);
+ list_remove_head(lpfc_iocb_list, rspiocbp, struct lpfc_iocbq,
+ list);
+ if (rspiocbp == NULL) {
+ printk(KERN_ERR "%s: out of buffers! Failing "
+ "completion.\n", __FUNCTION__);
+ break;
+ }
+
+ lpfc_sli_pcimem_bcopy(entry, &rspiocbp->iocb, sizeof (IOCB_t));
+ irsp = &rspiocbp->iocb;
+
+ if (++pring->rspidx >= portRspMax)
+ pring->rspidx = 0;
+
+ to_slim = phba->MBslimaddr + (SLIMOFF + (pring->ringno * 2)
+ + 1) * 4;
+ writeb(pring->rspidx, to_slim);
+
+ if (list_empty(&(pring->iocb_continueq))) {
+ list_add(&rspiocbp->list, &(pring->iocb_continueq));
+ } else {
+ list_add_tail(&rspiocbp->list,
+ &(pring->iocb_continueq));
+ }
+
+ pring->iocb_continueq_cnt++;
+ if (irsp->ulpLe) {
+ /*
+ * By default, the driver expects to free all resources
+ * associated with this iocb completion.
+ */
+ free_saveq = 1;
+ saveq = list_get_first(&pring->iocb_continueq,
+ struct lpfc_iocbq, list);
+ irsp = &(saveq->iocb);
+ list_del_init(&pring->iocb_continueq);
+ pring->iocb_continueq_cnt = 0;
+
+ pring->stats.iocb_rsp++;
+
+ if (irsp->ulpStatus) {
+ /* Rsp ring <ringno> error: IOCB */
+ lpfc_printf_log(phba,
+ KERN_WARNING,
+ LOG_SLI,
+ "%d:0328 Rsp Ring %d error: IOCB Data: "
+ "x%x x%x x%x x%x x%x x%x x%x x%x\n",
+ phba->brd_no,
+ pring->ringno,
+ irsp->un.ulpWord[0],
+ irsp->un.ulpWord[1],
+ irsp->un.ulpWord[2],
+ irsp->un.ulpWord[3],
+ irsp->un.ulpWord[4],
+ irsp->un.ulpWord[5],
+ *(((uint32_t *) irsp) + 6),
+ *(((uint32_t *) irsp) + 7));
+ }
+
+ /*
+ * Fetch the IOCB command type and call the correct
+ * completion routine. Solicited and Unsolicited
+ * IOCBs on the ELS ring get freed back to the
+ * lpfc_iocb_list by the discovery kernel thread.
+ */
+ iocb_cmd_type = irsp->ulpCommand & CMD_IOCB_MASK;
+ type = lpfc_sli_iocb_cmd_type(iocb_cmd_type);
+ if (type == LPFC_SOL_IOCB) {
+ spin_unlock_irqrestore(phba->host->host_lock,
+ iflag);
+ rc = lpfc_sli_process_sol_iocb(phba, pring,
+ saveq);
+ spin_lock_irqsave(phba->host->host_lock, iflag);
+ } else if (type == LPFC_UNSOL_IOCB) {
+ spin_unlock_irqrestore(phba->host->host_lock,
+ iflag);
+ rc = lpfc_sli_process_unsol_iocb(phba, pring,
+ saveq);
+ spin_lock_irqsave(phba->host->host_lock, iflag);
+ } else if (type == LPFC_ABORT_IOCB) {
+ if ((irsp->ulpCommand != CMD_XRI_ABORTED_CX) &&
+ ((cmdiocbp =
+ lpfc_sli_txcmpl_ring_search_slow(pring,
+ saveq)))) {
+ /* Call the specified completion
+ routine */
+ if (cmdiocbp->iocb_cmpl) {
+ spin_unlock_irqrestore(
+ phba->host->host_lock,
+ iflag);
+ (cmdiocbp->iocb_cmpl) (phba,
+ cmdiocbp, saveq);
+ spin_lock_irqsave(
+ phba->host->host_lock,
+ iflag);
+ } else {
+ list_add_tail(&cmdiocbp->list,
+ lpfc_iocb_list);
+ }
+ }
+ } else if (type == LPFC_UNKNOWN_IOCB) {
+ if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
+
+ char adaptermsg[LPFC_MAX_ADPTMSG];
+
+ memset(adaptermsg, 0,
+ LPFC_MAX_ADPTMSG);
+ memcpy(&adaptermsg[0], (uint8_t *) irsp,
+ MAX_MSG_DATA);
+ dev_warn(&((phba->pcidev)->dev),
+ "lpfc%d: %s",
+ phba->brd_no, adaptermsg);
+ } else {
+ /* Unknown IOCB command */
+ lpfc_printf_log(phba,
+ KERN_ERR,
+ LOG_SLI,
+ "%d:0321 Unknown IOCB command "
+ "Data: x%x x%x x%x x%x\n",
+ phba->brd_no,
+ irsp->ulpCommand,
+ irsp->ulpStatus,
+ irsp->ulpIoTag,
+ irsp->ulpContext);
+ }
+ }
+
+ if (free_saveq) {
+ if (!list_empty(&saveq->list)) {
+ list_for_each_entry_safe(rspiocbp,
+ next_iocb,
+ &saveq->list,
+ list) {
+ list_add_tail(&rspiocbp->list,
+ lpfc_iocb_list);
+ }
+ }
+
+ list_add_tail(&saveq->list, lpfc_iocb_list);
+ }
+ }
+
+ /*
+ * If the port response put pointer has not been updated, sync
+		 * the pgp->rspPutInx in the MAILBOX_t and fetch the new port
+ * response put pointer.
+ */
+ if (pring->rspidx == portRspPut) {
+ portRspPut = le32_to_cpu(pgp->rspPutInx);
+ }
+ } /* while (pring->rspidx != portRspPut) */
+
+ if ((rspiocbp != 0) && (mask & HA_R0RE_REQ)) {
+ /* At least one response entry has been freed */
+ pring->stats.iocb_rsp_full++;
+ /* SET RxRE_RSP in Chip Att register */
+ status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4));
+ writel(status, phba->CAregaddr);
+ readl(phba->CAregaddr); /* flush */
+ }
+ if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) {
+ pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
+ pring->stats.iocb_cmd_empty++;
+
+ /* Force update of the local copy of cmdGetInx */
+ pring->local_getidx = le32_to_cpu(pgp->cmdGetInx);
+ lpfc_sli_resume_iocb(phba, pring);
+
+ if ((pring->lpfc_sli_cmd_available))
+ (pring->lpfc_sli_cmd_available) (phba, pring);
+
+ }
+
+ spin_unlock_irqrestore(phba->host->host_lock, iflag);
+ return rc;
+}
+
+int
+lpfc_sli_abort_iocb_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
+{
+ struct lpfc_iocbq *iocb, *next_iocb;
+ IOCB_t *icmd = NULL, *cmd = NULL;
+ int errcnt;
+ uint16_t iotag;
+
+ errcnt = 0;
+
+ /* Error everything on txq and txcmplq
+ * First do the txq.
+ */
+ spin_lock_irq(phba->host->host_lock);
+ list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
+ list_del_init(&iocb->list);
+ if (iocb->iocb_cmpl) {
+ icmd = &iocb->iocb;
+ icmd->ulpStatus = IOSTAT_LOCAL_REJECT;
+ icmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
+ spin_unlock_irq(phba->host->host_lock);
+ (iocb->iocb_cmpl) (phba, iocb, iocb);
+ spin_lock_irq(phba->host->host_lock);
+ } else {
+ list_add_tail(&iocb->list, &phba->lpfc_iocb_list);
+ }
+ }
+ pring->txq_cnt = 0;
+ INIT_LIST_HEAD(&(pring->txq));
+
+ /* Next issue ABTS for everything on the txcmplq */
+ list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) {
+ cmd = &iocb->iocb;
+
+		/*
+		 * Immediate abort of the IOCB: clear the fast_lookup entry,
+		 * if any, dequeue it and call the completion routine.
+		 */
+ iotag = cmd->ulpIoTag;
+ if (iotag && pring->fast_lookup &&
+ (iotag < pring->fast_iotag))
+ pring->fast_lookup[iotag] = NULL;
+
+ list_del_init(&iocb->list);
+ pring->txcmplq_cnt--;
+
+ if (iocb->iocb_cmpl) {
+ cmd->ulpStatus = IOSTAT_LOCAL_REJECT;
+ cmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
+ spin_unlock_irq(phba->host->host_lock);
+ (iocb->iocb_cmpl) (phba, iocb, iocb);
+ spin_lock_irq(phba->host->host_lock);
+ } else {
+ list_add_tail(&iocb->list, &phba->lpfc_iocb_list);
+ }
+ }
+
+ INIT_LIST_HEAD(&pring->txcmplq);
+ pring->txcmplq_cnt = 0;
+ spin_unlock_irq(phba->host->host_lock);
+
+ return errcnt;
+}
+
+/******************************************************************************
+* lpfc_sli_send_reset
+*
+* Note: After returning from this function, the HBA cannot be accessed for
+* 1 ms. Since we do not wish to delay in interrupt context, it is the
+* responsibility of the caller to perform the mdelay(1) and flush via readl().
+******************************************************************************/
+static int
+lpfc_sli_send_reset(struct lpfc_hba * phba, uint16_t skip_post)
+{
+ MAILBOX_t *swpmb;
+ volatile uint32_t word0;
+ void __iomem *to_slim;
+ unsigned long flags = 0;
+
+ spin_lock_irqsave(phba->host->host_lock, flags);
+
+ /* A board reset must use REAL SLIM. */
+ phba->sli.sli_flag &= ~LPFC_SLI2_ACTIVE;
+
+ word0 = 0;
+ swpmb = (MAILBOX_t *) & word0;
+ swpmb->mbxCommand = MBX_RESTART;
+ swpmb->mbxHc = 1;
+
+ to_slim = phba->MBslimaddr;
+ writel(*(uint32_t *) swpmb, to_slim);
+ readl(to_slim); /* flush */
+
+ /* Only skip post after fc_ffinit is completed */
+ if (skip_post) {
+ word0 = 1; /* This is really setting up word1 */
+ } else {
+ word0 = 0; /* This is really setting up word1 */
+ }
+ to_slim = phba->MBslimaddr + sizeof (uint32_t);
+ writel(*(uint32_t *) swpmb, to_slim);
+ readl(to_slim); /* flush */
+
+ /* Turn off parity checking and serr during the physical reset */
+ pci_read_config_word(phba->pcidev, PCI_COMMAND, &phba->pci_cfg_value);
+ pci_write_config_word(phba->pcidev, PCI_COMMAND,
+ (phba->pci_cfg_value &
+ ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR)));
+
+ writel(HC_INITFF, phba->HCregaddr);
+
+ phba->hba_state = LPFC_INIT_START;
+ spin_unlock_irqrestore(phba->host->host_lock, flags);
+
+ return 0;
+}
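+
+/*
+ * lpfc_sli_brdreset() below is the caller in this file: it performs the
+ * required 1 ms delay and the flush on lpfc_sli_send_reset()'s behalf:
+ *
+ *	lpfc_sli_send_reset(phba, skip_post);
+ *	mdelay(1);
+ *	...
+ *	readl(phba->HCregaddr);		flushes before toggling INITFF
+ */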
+
+static int
+lpfc_sli_brdreset(struct lpfc_hba * phba, uint16_t skip_post)
+{
+ struct lpfc_sli_ring *pring;
+ int i;
+ struct lpfc_dmabuf *mp, *next_mp;
+ unsigned long flags = 0;
+
+ lpfc_sli_send_reset(phba, skip_post);
+ mdelay(1);
+
+ spin_lock_irqsave(phba->host->host_lock, flags);
+	/* Risk the write-on-flush case, i.e. no delay after the readl */
+ readl(phba->HCregaddr); /* flush */
+ /* Now toggle INITFF bit set by lpfc_sli_send_reset */
+ writel(0, phba->HCregaddr);
+ readl(phba->HCregaddr); /* flush */
+
+ /* Restore PCI cmd register */
+ pci_write_config_word(phba->pcidev, PCI_COMMAND, phba->pci_cfg_value);
+
+ /* perform board reset */
+ phba->fc_eventTag = 0;
+ phba->fc_myDID = 0;
+ phba->fc_prevDID = Mask_DID;
+
+ /* Reset HBA */
+ lpfc_printf_log(phba,
+ KERN_INFO,
+ LOG_SLI,
+ "%d:0325 Reset HBA Data: x%x x%x x%x\n",
+ phba->brd_no,
+ phba->hba_state,
+ phba->sli.sli_flag,
+ skip_post);
+
+ /* Initialize relevant SLI info */
+ for (i = 0; i < phba->sli.num_rings; i++) {
+ pring = &phba->sli.ring[i];
+ pring->flag = 0;
+ pring->rspidx = 0;
+ pring->next_cmdidx = 0;
+ pring->local_getidx = 0;
+ pring->cmdidx = 0;
+ pring->missbufcnt = 0;
+ }
+ spin_unlock_irqrestore(phba->host->host_lock, flags);
+
+ if (skip_post) {
+ mdelay(100);
+ } else {
+ mdelay(2000);
+ }
+
+ spin_lock_irqsave(phba->host->host_lock, flags);
+ /* Cleanup preposted buffers on the ELS ring */
+ pring = &phba->sli.ring[LPFC_ELS_RING];
+ list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
+ list_del(&mp->list);
+ pring->postbufq_cnt--;
+ lpfc_mbuf_free(phba, mp->virt, mp->phys);
+ kfree(mp);
+ }
+ spin_unlock_irqrestore(phba->host->host_lock, flags);
+
+ for (i = 0; i < phba->sli.num_rings; i++)
+ lpfc_sli_abort_iocb_ring(phba, &phba->sli.ring[i]);
+
+ return 0;
+}
+
+static int
+lpfc_sli_chipset_init(struct lpfc_hba *phba)
+{
+ uint32_t status, i = 0;
+
+ /* Read the HBA Host Status Register */
+ status = readl(phba->HSregaddr);
+
+ /* Check status register to see what current state is */
+ i = 0;
+ while ((status & (HS_FFRDY | HS_MBRDY)) != (HS_FFRDY | HS_MBRDY)) {
+
+		/* Check every 10ms for 5 retries, then every 500ms for 5,
+		 * then every 2.5 sec for 5, then reset the board and keep
+		 * checking every 2.5 sec until the 20-try limit is reached
+		 * (matching the msleep() schedule below).
+		 */
+ if (i++ >= 20) {
+ /* Adapter failed to init, timeout, status reg
+ <status> */
+ lpfc_printf_log(phba,
+ KERN_ERR,
+ LOG_INIT,
+ "%d:0436 Adapter failed to init, "
+ "timeout, status reg x%x\n",
+ phba->brd_no,
+ status);
+ phba->hba_state = LPFC_HBA_ERROR;
+ return -ETIMEDOUT;
+ }
+
+ /* Check to see if any errors occurred during init */
+ if (status & HS_FFERM) {
+ /* ERROR: During chipset initialization */
+ /* Adapter failed to init, chipset, status reg
+ <status> */
+ lpfc_printf_log(phba,
+ KERN_ERR,
+ LOG_INIT,
+ "%d:0437 Adapter failed to init, "
+ "chipset, status reg x%x\n",
+ phba->brd_no,
+ status);
+ phba->hba_state = LPFC_HBA_ERROR;
+ return -EIO;
+ }
+
+ if (i <= 5) {
+ msleep(10);
+ } else if (i <= 10) {
+ msleep(500);
+ } else {
+ msleep(2500);
+ }
+
+ if (i == 15) {
+ lpfc_sli_brdreset(phba, 0);
+ }
+ /* Read the HBA Host Status Register */
+ status = readl(phba->HSregaddr);
+ }
+
+ /* Check to see if any errors occurred during init */
+ if (status & HS_FFERM) {
+ /* ERROR: During chipset initialization */
+ /* Adapter failed to init, chipset, status reg <status> */
+ lpfc_printf_log(phba,
+ KERN_ERR,
+ LOG_INIT,
+ "%d:0438 Adapter failed to init, chipset, "
+ "status reg x%x\n",
+ phba->brd_no,
+ status);
+ phba->hba_state = LPFC_HBA_ERROR;
+ return -EIO;
+ }
+
+ /* Clear all interrupt enable conditions */
+ writel(0, phba->HCregaddr);
+ readl(phba->HCregaddr); /* flush */
+
+ /* setup host attn register */
+ writel(0xffffffff, phba->HAregaddr);
+ readl(phba->HAregaddr); /* flush */
+ return 0;
+}
+
+int
+lpfc_sli_hba_setup(struct lpfc_hba * phba)
+{
+ LPFC_MBOXQ_t *pmb;
+ uint32_t resetcount = 0, rc = 0, done = 0;
+
+ pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!pmb) {
+ phba->hba_state = LPFC_HBA_ERROR;
+ return -ENOMEM;
+ }
+
+ while (resetcount < 2 && !done) {
+ phba->hba_state = 0;
+ lpfc_sli_brdreset(phba, 0);
+ msleep(2500);
+ rc = lpfc_sli_chipset_init(phba);
+ if (rc)
+ break;
+
+ resetcount++;
+
+		/* Call pre CONFIG_PORT mailbox command initialization. A
+		 * value of 0 means the call was successful. Any other
+		 * nonzero value is a failure, but if ERESTART is returned,
+		 * the driver may reset the HBA and try again.
+		 */
+ rc = lpfc_config_port_prep(phba);
+ if (rc == -ERESTART) {
+ phba->hba_state = 0;
+ continue;
+ } else if (rc) {
+ break;
+ }
+
+ phba->hba_state = LPFC_INIT_MBX_CMDS;
+ lpfc_config_port(phba, pmb);
+ rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
+ if (rc == MBX_SUCCESS)
+ done = 1;
+ else {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "%d:0442 Adapter failed to init, mbxCmd x%x "
+ "CONFIG_PORT, mbxStatus x%x Data: x%x\n",
+ phba->brd_no, pmb->mb.mbxCommand,
+ pmb->mb.mbxStatus, 0);
+ phba->sli.sli_flag &= ~LPFC_SLI2_ACTIVE;
+ }
+ }
+ if (!done)
+ goto lpfc_sli_hba_setup_error;
+
+ rc = lpfc_sli_ring_map(phba, pmb);
+
+ if (rc)
+ goto lpfc_sli_hba_setup_error;
+
+ phba->sli.sli_flag |= LPFC_PROCESS_LA;
+
+ rc = lpfc_config_port_post(phba);
+ if (rc)
+ goto lpfc_sli_hba_setup_error;
+
+ goto lpfc_sli_hba_setup_exit;
+lpfc_sli_hba_setup_error:
+ phba->hba_state = LPFC_HBA_ERROR;
+lpfc_sli_hba_setup_exit:
+ mempool_free(pmb, phba->mbox_mem_pool);
+ return rc;
+}
+
+static void
+lpfc_mbox_abort(struct lpfc_hba * phba)
+{
+ LPFC_MBOXQ_t *pmbox;
+ MAILBOX_t *mb;
+
+ if (phba->sli.mbox_active) {
+ del_timer_sync(&phba->sli.mbox_tmo);
+ phba->work_hba_events &= ~WORKER_MBOX_TMO;
+ pmbox = phba->sli.mbox_active;
+ mb = &pmbox->mb;
+ phba->sli.mbox_active = NULL;
+ if (pmbox->mbox_cmpl) {
+ mb->mbxStatus = MBX_NOT_FINISHED;
+ (pmbox->mbox_cmpl) (phba, pmbox);
+ }
+ phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
+ }
+
+ /* Abort all the non active mailbox commands. */
+ spin_lock_irq(phba->host->host_lock);
+ pmbox = lpfc_mbox_get(phba);
+ while (pmbox) {
+ mb = &pmbox->mb;
+ if (pmbox->mbox_cmpl) {
+ mb->mbxStatus = MBX_NOT_FINISHED;
+ spin_unlock_irq(phba->host->host_lock);
+ (pmbox->mbox_cmpl) (phba, pmbox);
+ spin_lock_irq(phba->host->host_lock);
+ }
+ pmbox = lpfc_mbox_get(phba);
+ }
+ spin_unlock_irq(phba->host->host_lock);
+ return;
+}
+
+/*! lpfc_mbox_timeout
+ *
+ * \pre
+ * \post
+ * \param ptr The struct lpfc_hba pointer for this adapter, passed by
+ *            the timer code as an unsigned long.
+ * \return
+ * void
+ *
+ * \b Description:
+ *
+ * This routine handles mailbox timeout events at timer interrupt context.
+ */
+void
+lpfc_mbox_timeout(unsigned long ptr)
+{
+ struct lpfc_hba *phba;
+ unsigned long iflag;
+
+ phba = (struct lpfc_hba *)ptr;
+ spin_lock_irqsave(phba->host->host_lock, iflag);
+ if (!(phba->work_hba_events & WORKER_MBOX_TMO)) {
+ phba->work_hba_events |= WORKER_MBOX_TMO;
+ if (phba->work_wait)
+ wake_up(phba->work_wait);
+ }
+ spin_unlock_irqrestore(phba->host->host_lock, iflag);
+}
+
+void
+lpfc_mbox_timeout_handler(struct lpfc_hba *phba)
+{
+ LPFC_MBOXQ_t *pmbox;
+ MAILBOX_t *mb;
+
+ spin_lock_irq(phba->host->host_lock);
+ if (!(phba->work_hba_events & WORKER_MBOX_TMO)) {
+ spin_unlock_irq(phba->host->host_lock);
+ return;
+ }
+
+ pmbox = phba->sli.mbox_active;
+ mb = &pmbox->mb;
+
+ /* Mbox cmd <mbxCommand> timeout */
+ lpfc_printf_log(phba,
+ KERN_ERR,
+ LOG_MBOX | LOG_SLI,
+ "%d:0310 Mailbox command x%x timeout Data: x%x x%x x%p\n",
+ phba->brd_no,
+ mb->mbxCommand,
+ phba->hba_state,
+ phba->sli.sli_flag,
+ phba->sli.mbox_active);
+
+ if (phba->sli.mbox_active == pmbox) {
+ phba->sli.mbox_active = NULL;
+ if (pmbox->mbox_cmpl) {
+ mb->mbxStatus = MBX_NOT_FINISHED;
+ spin_unlock_irq(phba->host->host_lock);
+ (pmbox->mbox_cmpl) (phba, pmbox);
+ spin_lock_irq(phba->host->host_lock);
+ }
+ phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
+ }
+
+ spin_unlock_irq(phba->host->host_lock);
+ lpfc_mbox_abort(phba);
+ return;
+}
+
+int
+lpfc_sli_issue_mbox(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmbox, uint32_t flag)
+{
+ MAILBOX_t *mbox;
+ MAILBOX_t *mb;
+ struct lpfc_sli *psli;
+ uint32_t status, evtctr;
+ uint32_t ha_copy;
+ int i;
+ unsigned long drvr_flag = 0;
+ volatile uint32_t word0, ldata;
+ void __iomem *to_slim;
+
+ psli = &phba->sli;
+
+ spin_lock_irqsave(phba->host->host_lock, drvr_flag);
+
+
+ mb = &pmbox->mb;
+ status = MBX_SUCCESS;
+
+ if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
+		/* Polling for an mbox command when another one is already active
+ * is not allowed in SLI. Also, the driver must have established
+ * SLI2 mode to queue and process multiple mbox commands.
+ */
+
+ if (flag & MBX_POLL) {
+ spin_unlock_irqrestore(phba->host->host_lock,
+ drvr_flag);
+
+ /* Mbox command <mbxCommand> cannot issue */
+			LOG_MBOX_CANNOT_ISSUE_DATA(phba, mb, psli, flag);
+ return (MBX_NOT_FINISHED);
+ }
+
+ if (!(psli->sli_flag & LPFC_SLI2_ACTIVE)) {
+ spin_unlock_irqrestore(phba->host->host_lock,
+ drvr_flag);
+ /* Mbox command <mbxCommand> cannot issue */
+			LOG_MBOX_CANNOT_ISSUE_DATA(phba, mb, psli, flag);
+ return (MBX_NOT_FINISHED);
+ }
+
+ /* Handle STOP IOCB processing flag. This is only meaningful
+ * if we are not polling for mbox completion.
+ */
+ if (flag & MBX_STOP_IOCB) {
+ flag &= ~MBX_STOP_IOCB;
+ /* Now flag each ring */
+ for (i = 0; i < psli->num_rings; i++) {
+ /* If the ring is active, flag it */
+ if (psli->ring[i].cmdringaddr) {
+ psli->ring[i].flag |=
+ LPFC_STOP_IOCB_MBX;
+ }
+ }
+ }
+
+ /* Another mailbox command is still being processed, queue this
+ * command to be processed later.
+ */
+ lpfc_mbox_put(phba, pmbox);
+
+ /* Mbox cmd issue - BUSY */
+ lpfc_printf_log(phba,
+ KERN_INFO,
+ LOG_MBOX | LOG_SLI,
+ "%d:0308 Mbox cmd issue - BUSY Data: x%x x%x x%x x%x\n",
+ phba->brd_no,
+ mb->mbxCommand,
+ phba->hba_state,
+ psli->sli_flag,
+ flag);
+
+ psli->slistat.mbox_busy++;
+ spin_unlock_irqrestore(phba->host->host_lock,
+ drvr_flag);
+
+ return (MBX_BUSY);
+ }
+
+ /* Handle STOP IOCB processing flag. This is only meaningful
+ * if we are not polling for mbox completion.
+ */
+ if (flag & MBX_STOP_IOCB) {
+ flag &= ~MBX_STOP_IOCB;
+ if (flag == MBX_NOWAIT) {
+ /* Now flag each ring */
+ for (i = 0; i < psli->num_rings; i++) {
+ /* If the ring is active, flag it */
+ if (psli->ring[i].cmdringaddr) {
+ psli->ring[i].flag |=
+ LPFC_STOP_IOCB_MBX;
+ }
+ }
+ }
+ }
+
+ psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
+
+ /* If we are not polling, we MUST be in SLI2 mode */
+ if (flag != MBX_POLL) {
+ if (!(psli->sli_flag & LPFC_SLI2_ACTIVE)) {
+ psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
+ spin_unlock_irqrestore(phba->host->host_lock,
+ drvr_flag);
+ /* Mbox command <mbxCommand> cannot issue */
+			LOG_MBOX_CANNOT_ISSUE_DATA(phba, mb, psli, flag);
+ return (MBX_NOT_FINISHED);
+ }
+ /* timeout active mbox command */
+ mod_timer(&psli->mbox_tmo, jiffies + HZ * LPFC_MBOX_TMO);
+ }
+
+ /* Mailbox cmd <cmd> issue */
+ lpfc_printf_log(phba,
+ KERN_INFO,
+ LOG_MBOX | LOG_SLI,
+ "%d:0309 Mailbox cmd x%x issue Data: x%x x%x x%x\n",
+ phba->brd_no,
+ mb->mbxCommand,
+ phba->hba_state,
+ psli->sli_flag,
+ flag);
+
+ psli->slistat.mbox_cmd++;
+ evtctr = psli->slistat.mbox_event;
+
+ /* next set own bit for the adapter and copy over command word */
+ mb->mbxOwner = OWN_CHIP;
+
+ if (psli->sli_flag & LPFC_SLI2_ACTIVE) {
+
+ /* First copy command data to host SLIM area */
+ mbox = (MAILBOX_t *) psli->MBhostaddr;
+ lpfc_sli_pcimem_bcopy(mb, mbox, MAILBOX_CMD_SIZE);
+ } else {
+ if (mb->mbxCommand == MBX_CONFIG_PORT) {
+ /* copy command data into host mbox for cmpl */
+ mbox = (MAILBOX_t *) psli->MBhostaddr;
+ lpfc_sli_pcimem_bcopy(mb, mbox, MAILBOX_CMD_SIZE);
+ }
+
+ /* First copy mbox command data to HBA SLIM, skip past first
+ word */
+ to_slim = phba->MBslimaddr + sizeof (uint32_t);
+ lpfc_memcpy_to_slim(to_slim, &mb->un.varWords[0],
+ MAILBOX_CMD_SIZE - sizeof (uint32_t));
+
+ /* Next copy over first word, with mbxOwner set */
+ ldata = *((volatile uint32_t *)mb);
+ to_slim = phba->MBslimaddr;
+ writel(ldata, to_slim);
+ readl(to_slim); /* flush */
+
+ if (mb->mbxCommand == MBX_CONFIG_PORT) {
+ /* switch over to host mailbox */
+ psli->sli_flag |= LPFC_SLI2_ACTIVE;
+ }
+ }
+
+ wmb();
+ /* interrupt board to doit right away */
+ writel(CA_MBATT, phba->CAregaddr);
+ readl(phba->CAregaddr); /* flush */
+
+ switch (flag) {
+ case MBX_NOWAIT:
+ /* Don't wait for it to finish, just return */
+ psli->mbox_active = pmbox;
+ break;
+
+ case MBX_POLL:
+ i = 0;
+ psli->mbox_active = NULL;
+ if (psli->sli_flag & LPFC_SLI2_ACTIVE) {
+ /* First read mbox status word */
+ mbox = (MAILBOX_t *) psli->MBhostaddr;
+ word0 = *((volatile uint32_t *)mbox);
+ word0 = le32_to_cpu(word0);
+ } else {
+ /* First read mbox status word */
+ word0 = readl(phba->MBslimaddr);
+ }
+
+ /* Read the HBA Host Attention Register */
+ ha_copy = readl(phba->HAregaddr);
+
+ /* Wait for command to complete */
+ while (((word0 & OWN_CHIP) == OWN_CHIP)
+ || !(ha_copy & HA_MBATT)) {
+ if (i++ >= 100) {
+ psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
+ spin_unlock_irqrestore(phba->host->host_lock,
+ drvr_flag);
+ return (MBX_NOT_FINISHED);
+ }
+
+			/* Check if we took an mbox interrupt while we
+			   were polling */
+ if (((word0 & OWN_CHIP) != OWN_CHIP)
+ && (evtctr != psli->slistat.mbox_event))
+ break;
+
+ spin_unlock_irqrestore(phba->host->host_lock,
+ drvr_flag);
+
+ /* Can be in interrupt context, do not sleep */
+ /* (or might be called with interrupts disabled) */
+ mdelay(i);
+
+ spin_lock_irqsave(phba->host->host_lock, drvr_flag);
+
+ if (psli->sli_flag & LPFC_SLI2_ACTIVE) {
+ /* First copy command data */
+ mbox = (MAILBOX_t *) psli->MBhostaddr;
+ word0 = *((volatile uint32_t *)mbox);
+ word0 = le32_to_cpu(word0);
+ if (mb->mbxCommand == MBX_CONFIG_PORT) {
+ MAILBOX_t *slimmb;
+ volatile uint32_t slimword0;
+ /* Check real SLIM for any errors */
+ slimword0 = readl(phba->MBslimaddr);
+ slimmb = (MAILBOX_t *) & slimword0;
+ if (((slimword0 & OWN_CHIP) != OWN_CHIP)
+ && slimmb->mbxStatus) {
+ psli->sli_flag &=
+ ~LPFC_SLI2_ACTIVE;
+ word0 = slimword0;
+ }
+ }
+ } else {
+ /* First copy command data */
+ word0 = readl(phba->MBslimaddr);
+ }
+ /* Read the HBA Host Attention Register */
+ ha_copy = readl(phba->HAregaddr);
+ }
+
+ if (psli->sli_flag & LPFC_SLI2_ACTIVE) {
+ /* First copy command data */
+ mbox = (MAILBOX_t *) psli->MBhostaddr;
+ /* copy results back to user */
+ lpfc_sli_pcimem_bcopy(mbox, mb, MAILBOX_CMD_SIZE);
+ } else {
+ /* First copy command data */
+ lpfc_memcpy_from_slim(mb, phba->MBslimaddr,
+ MAILBOX_CMD_SIZE);
+ if ((mb->mbxCommand == MBX_DUMP_MEMORY) &&
+ pmbox->context2) {
+ lpfc_memcpy_from_slim((void *)pmbox->context2,
+ phba->MBslimaddr + DMP_RSP_OFFSET,
+ mb->un.varDmp.word_cnt);
+ }
+ }
+
+ writel(HA_MBATT, phba->HAregaddr);
+ readl(phba->HAregaddr); /* flush */
+
+ psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
+ status = mb->mbxStatus;
+ }
+
+ spin_unlock_irqrestore(phba->host->host_lock, drvr_flag);
+ return (status);
+}
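+
+/*
+ * MBX_POLL usage sketch (mirrors lpfc_sli_ring_map() above):
+ *
+ *	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
+ *	if (rc != MBX_SUCCESS)
+ *		...treat as an initialization failure...
+ *
+ * The poll loop above waits with mdelay(i) for up to 100 passes, so a
+ * command that never completes costs roughly 1+2+...+99 ms (about five
+ * seconds) before MBX_NOT_FINISHED is returned.
+ */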
+
+static int
+lpfc_sli_ringtx_put(struct lpfc_hba * phba, struct lpfc_sli_ring * pring,
+ struct lpfc_iocbq * piocb)
+{
+ /* Insert the caller's iocb in the txq tail for later processing. */
+ list_add_tail(&piocb->list, &pring->txq);
+ pring->txq_cnt++;
+ return (0);
+}
+
+static struct lpfc_iocbq *
+lpfc_sli_next_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
+ struct lpfc_iocbq ** piocb)
+{
+ struct lpfc_iocbq * nextiocb;
+
+ nextiocb = lpfc_sli_ringtx_get(phba, pring);
+ if (!nextiocb) {
+ nextiocb = *piocb;
+ *piocb = NULL;
+ }
+
+ return nextiocb;
+}
+
+int
+lpfc_sli_issue_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
+ struct lpfc_iocbq *piocb, uint32_t flag)
+{
+ struct lpfc_iocbq *nextiocb;
+ IOCB_t *iocb;
+
+ /*
+ * We should never get an IOCB if we are in a < LINK_DOWN state
+ */
+ if (unlikely(phba->hba_state < LPFC_LINK_DOWN))
+ return IOCB_ERROR;
+
+ /*
+	 * Check to see if we are blocking IOCB processing because of an
+ * outstanding mbox command.
+ */
+ if (unlikely(pring->flag & LPFC_STOP_IOCB_MBX))
+ goto iocb_busy;
+
+ if (unlikely(phba->hba_state == LPFC_LINK_DOWN)) {
+ /*
+ * Only CREATE_XRI, CLOSE_XRI, ABORT_XRI, and QUE_RING_BUF
+ * can be issued if the link is not up.
+ */
+ switch (piocb->iocb.ulpCommand) {
+ case CMD_QUE_RING_BUF_CN:
+ case CMD_QUE_RING_BUF64_CN:
+ case CMD_CLOSE_XRI_CN:
+ case CMD_ABORT_XRI_CN:
+ /*
+ * For IOCBs, like QUE_RING_BUF, that have no rsp ring
+			 * completion, iocb_cmpl MUST be NULL.
+ */
+ if (piocb->iocb_cmpl)
+ piocb->iocb_cmpl = NULL;
+ /*FALLTHROUGH*/
+ case CMD_CREATE_XRI_CR:
+ break;
+ default:
+ goto iocb_busy;
+ }
+
+ /*
+ * For FCP commands, we must be in a state where we can process link
+ * attention events.
+ */
+ } else if (unlikely(pring->ringno == phba->sli.fcp_ring &&
+ !(phba->sli.sli_flag & LPFC_PROCESS_LA)))
+ goto iocb_busy;
+
+ /*
+ * Check to see if this is a high priority command.
+ * If so bypass tx queue processing.
+ */
+ if (unlikely((flag & SLI_IOCB_HIGH_PRIORITY) &&
+ (iocb = lpfc_sli_next_iocb_slot(phba, pring)))) {
+ lpfc_sli_submit_iocb(phba, pring, iocb, piocb);
+ piocb = NULL;
+ }
+
+ while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
+ (nextiocb = lpfc_sli_next_iocb(phba, pring, &piocb)))
+ lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb);
+
+ if (iocb)
+ lpfc_sli_update_ring(phba, pring);
+ else
+ lpfc_sli_update_full_ring(phba, pring);
+
+ if (!piocb)
+ return IOCB_SUCCESS;
+
+ goto out_busy;
+
+ iocb_busy:
+ pring->stats.iocb_cmd_delay++;
+
+ out_busy:
+
+ if (!(flag & SLI_IOCB_RET_IOCB)) {
+ lpfc_sli_ringtx_put(phba, pring, piocb);
+ return IOCB_SUCCESS;
+ }
+
+ return IOCB_BUSY;
+}
+
+int
+lpfc_sli_setup(struct lpfc_hba *phba)
+{
+ int i, totiocb = 0;
+ struct lpfc_sli *psli = &phba->sli;
+ struct lpfc_sli_ring *pring;
+
+ psli->num_rings = MAX_CONFIGURED_RINGS;
+ psli->sli_flag = 0;
+ psli->fcp_ring = LPFC_FCP_RING;
+ psli->next_ring = LPFC_FCP_NEXT_RING;
+ psli->ip_ring = LPFC_IP_RING;
+
+ for (i = 0; i < psli->num_rings; i++) {
+ pring = &psli->ring[i];
+ switch (i) {
+ case LPFC_FCP_RING: /* ring 0 - FCP */
+ /* numCiocb and numRiocb are used in config_port */
+ pring->numCiocb = SLI2_IOCB_CMD_R0_ENTRIES;
+ pring->numRiocb = SLI2_IOCB_RSP_R0_ENTRIES;
+ pring->numCiocb += SLI2_IOCB_CMD_R1XTRA_ENTRIES;
+ pring->numRiocb += SLI2_IOCB_RSP_R1XTRA_ENTRIES;
+ pring->numCiocb += SLI2_IOCB_CMD_R3XTRA_ENTRIES;
+ pring->numRiocb += SLI2_IOCB_RSP_R3XTRA_ENTRIES;
+ pring->iotag_ctr = 0;
+ pring->iotag_max =
+ (phba->cfg_hba_queue_depth * 2);
+ pring->fast_iotag = pring->iotag_max;
+ pring->num_mask = 0;
+ break;
+ case LPFC_IP_RING: /* ring 1 - IP */
+ /* numCiocb and numRiocb are used in config_port */
+ pring->numCiocb = SLI2_IOCB_CMD_R1_ENTRIES;
+ pring->numRiocb = SLI2_IOCB_RSP_R1_ENTRIES;
+ pring->num_mask = 0;
+ break;
+ case LPFC_ELS_RING: /* ring 2 - ELS / CT */
+ /* numCiocb and numRiocb are used in config_port */
+ pring->numCiocb = SLI2_IOCB_CMD_R2_ENTRIES;
+ pring->numRiocb = SLI2_IOCB_RSP_R2_ENTRIES;
+ pring->fast_iotag = 0;
+ pring->iotag_ctr = 0;
+ pring->iotag_max = 4096;
+ pring->num_mask = 4;
+ pring->prt[0].profile = 0; /* Mask 0 */
+ pring->prt[0].rctl = FC_ELS_REQ;
+ pring->prt[0].type = FC_ELS_DATA;
+ pring->prt[0].lpfc_sli_rcv_unsol_event =
+ lpfc_els_unsol_event;
+ pring->prt[1].profile = 0; /* Mask 1 */
+ pring->prt[1].rctl = FC_ELS_RSP;
+ pring->prt[1].type = FC_ELS_DATA;
+ pring->prt[1].lpfc_sli_rcv_unsol_event =
+ lpfc_els_unsol_event;
+ pring->prt[2].profile = 0; /* Mask 2 */
+ /* NameServer Inquiry */
+ pring->prt[2].rctl = FC_UNSOL_CTL;
+ /* NameServer */
+ pring->prt[2].type = FC_COMMON_TRANSPORT_ULP;
+ pring->prt[2].lpfc_sli_rcv_unsol_event =
+ lpfc_ct_unsol_event;
+ pring->prt[3].profile = 0; /* Mask 3 */
+ /* NameServer response */
+ pring->prt[3].rctl = FC_SOL_CTL;
+ /* NameServer */
+ pring->prt[3].type = FC_COMMON_TRANSPORT_ULP;
+ pring->prt[3].lpfc_sli_rcv_unsol_event =
+ lpfc_ct_unsol_event;
+ break;
+ }
+ totiocb += (pring->numCiocb + pring->numRiocb);
+ }
+ if (totiocb > MAX_SLI2_IOCB) {
+ /* Too many cmd / rsp ring entries in SLI2 SLIM */
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "%d:0462 Too many cmd / rsp ring entries in "
+ "SLI2 SLIM Data: x%x x%x\n",
+ phba->brd_no, totiocb, MAX_SLI2_IOCB);
+ }
+
+ return 0;
+}
+
+int
+lpfc_sli_queue_setup(struct lpfc_hba * phba)
+{
+ struct lpfc_sli *psli;
+ struct lpfc_sli_ring *pring;
+ int i, cnt;
+
+ psli = &phba->sli;
+ spin_lock_irq(phba->host->host_lock);
+ INIT_LIST_HEAD(&psli->mboxq);
+ /* Initialize list headers for txq and txcmplq as double linked lists */
+ for (i = 0; i < psli->num_rings; i++) {
+ pring = &psli->ring[i];
+ pring->ringno = i;
+ pring->next_cmdidx = 0;
+ pring->local_getidx = 0;
+ pring->cmdidx = 0;
+ INIT_LIST_HEAD(&pring->txq);
+ INIT_LIST_HEAD(&pring->txcmplq);
+ INIT_LIST_HEAD(&pring->iocb_continueq);
+ INIT_LIST_HEAD(&pring->postbufq);
+ cnt = pring->fast_iotag;
+ spin_unlock_irq(phba->host->host_lock);
+ if (cnt) {
+ pring->fast_lookup =
+ kmalloc(cnt * sizeof (struct lpfc_iocbq *),
+ GFP_KERNEL);
+			if (pring->fast_lookup == NULL) {
+ return (0);
+ }
+ memset((char *)pring->fast_lookup, 0,
+ cnt * sizeof (struct lpfc_iocbq *));
+ }
+ spin_lock_irq(phba->host->host_lock);
+ }
+ spin_unlock_irq(phba->host->host_lock);
+ return (1);
+}
+
+int
+lpfc_sli_hba_down(struct lpfc_hba * phba)
+{
+ struct lpfc_sli *psli;
+ struct lpfc_sli_ring *pring;
+ LPFC_MBOXQ_t *pmb;
+ struct lpfc_iocbq *iocb, *next_iocb;
+ IOCB_t *icmd = NULL;
+ int i;
+ unsigned long flags = 0;
+
+ psli = &phba->sli;
+ lpfc_hba_down_prep(phba);
+
+ spin_lock_irqsave(phba->host->host_lock, flags);
+
+ for (i = 0; i < psli->num_rings; i++) {
+ pring = &psli->ring[i];
+ pring->flag |= LPFC_DEFERRED_RING_EVENT;
+
+ /*
+ * Error everything on the txq since these iocbs have not been
+ * given to the FW yet.
+ */
+ pring->txq_cnt = 0;
+
+ list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
+ list_del_init(&iocb->list);
+ if (iocb->iocb_cmpl) {
+ icmd = &iocb->iocb;
+ icmd->ulpStatus = IOSTAT_LOCAL_REJECT;
+ icmd->un.ulpWord[4] = IOERR_SLI_DOWN;
+ spin_unlock_irqrestore(phba->host->host_lock,
+ flags);
+ (iocb->iocb_cmpl) (phba, iocb, iocb);
+ spin_lock_irqsave(phba->host->host_lock, flags);
+ } else {
+ list_add_tail(&iocb->list,
+ &phba->lpfc_iocb_list);
+ }
+ }
+
+ INIT_LIST_HEAD(&(pring->txq));
+
+ if (pring->fast_lookup) {
+ kfree(pring->fast_lookup);
+ pring->fast_lookup = NULL;
+ }
+
+ }
+
+ spin_unlock_irqrestore(phba->host->host_lock, flags);
+
+ /* Return any active mbox cmds */
+ del_timer_sync(&psli->mbox_tmo);
+ spin_lock_irqsave(phba->host->host_lock, flags);
+ phba->work_hba_events &= ~WORKER_MBOX_TMO;
+ if (psli->mbox_active) {
+ pmb = psli->mbox_active;
+ pmb->mb.mbxStatus = MBX_NOT_FINISHED;
+ if (pmb->mbox_cmpl) {
+ spin_unlock_irqrestore(phba->host->host_lock, flags);
+ pmb->mbox_cmpl(phba,pmb);
+ spin_lock_irqsave(phba->host->host_lock, flags);
+ }
+ }
+ psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
+ psli->mbox_active = NULL;
+
+ /* Return any pending mbox cmds */
+ while ((pmb = lpfc_mbox_get(phba)) != NULL) {
+ pmb->mb.mbxStatus = MBX_NOT_FINISHED;
+ if (pmb->mbox_cmpl) {
+ spin_unlock_irqrestore(phba->host->host_lock, flags);
+ pmb->mbox_cmpl(phba,pmb);
+ spin_lock_irqsave(phba->host->host_lock, flags);
+ }
+ }
+
+ INIT_LIST_HEAD(&psli->mboxq);
+
+ spin_unlock_irqrestore(phba->host->host_lock, flags);
+
+ /*
+ * Provided the hba is not in an error state, reset it. It is not
+ * capable of IO anymore.
+ */
+ if (phba->hba_state != LPFC_HBA_ERROR) {
+ phba->hba_state = LPFC_INIT_START;
+ lpfc_sli_brdreset(phba, 1);
+ }
+
+ return 1;
+}
+
+void
+lpfc_sli_pcimem_bcopy(void *srcp, void *destp, uint32_t cnt)
+{
+ uint32_t *src = srcp;
+ uint32_t *dest = destp;
+ uint32_t ldata;
+ int i;
+
+ for (i = 0; i < (int)cnt; i += sizeof (uint32_t)) {
+ ldata = *src;
+ ldata = le32_to_cpu(ldata);
+ *dest = ldata;
+ src++;
+ dest++;
+ }
+}
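+
+/*
+ * Example (as used throughout the mailbox paths above): copy a
+ * little-endian SLIM mailbox into host order,
+ *
+ *	lpfc_sli_pcimem_bcopy(mbox, pmbox, MAILBOX_CMD_SIZE);
+ *
+ * cnt is in bytes; the loop advances one 32-bit word per iteration,
+ * byte-swapping with le32_to_cpu() as it goes.
+ */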
+
+int
+lpfc_sli_ringpostbuf_put(struct lpfc_hba * phba, struct lpfc_sli_ring * pring,
+ struct lpfc_dmabuf * mp)
+{
+ /* Stick struct lpfc_dmabuf at end of postbufq so driver can look it up
+ later */
+ list_add_tail(&mp->list, &pring->postbufq);
+
+ pring->postbufq_cnt++;
+ return 0;
+}
+
+
+struct lpfc_dmabuf *
+lpfc_sli_ringpostbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
+ dma_addr_t phys)
+{
+ struct lpfc_dmabuf *mp, *next_mp;
+ struct list_head *slp = &pring->postbufq;
+
+	/* Search postbufq, from the beginning, looking for a match on phys */
+ list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
+ if (mp->phys == phys) {
+ list_del_init(&mp->list);
+ pring->postbufq_cnt--;
+ return mp;
+ }
+ }
+
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "%d:0410 Cannot find virtual addr for mapped buf on "
+ "ring %d Data x%llx x%p x%p x%x\n",
+ phba->brd_no, pring->ringno, (unsigned long long)phys,
+ slp->next, slp->prev, pring->postbufq_cnt);
+ return NULL;
+}
+
+static void
+lpfc_sli_abort_elsreq_cmpl(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
+ struct lpfc_iocbq * rspiocb)
+{
+ struct lpfc_dmabuf *buf_ptr, *buf_ptr1;
+ /* Free the resources associated with the ELS_REQUEST64 IOCB the driver
+ * just aborted.
+ * In this case, context2 = cmd, context2->next = rsp, context3 = bpl
+ */
+ if (cmdiocb->context2) {
+ buf_ptr1 = (struct lpfc_dmabuf *) cmdiocb->context2;
+
+ /* Free the response IOCB before completing the abort
+ command. */
+ buf_ptr = NULL;
+ list_remove_head((&buf_ptr1->list), buf_ptr,
+ struct lpfc_dmabuf, list);
+ if (buf_ptr) {
+ lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
+ kfree(buf_ptr);
+ }
+ lpfc_mbuf_free(phba, buf_ptr1->virt, buf_ptr1->phys);
+ kfree(buf_ptr1);
+ }
+
+ if (cmdiocb->context3) {
+ buf_ptr = (struct lpfc_dmabuf *) cmdiocb->context3;
+ lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
+ kfree(buf_ptr);
+ }
+
+ list_add_tail(&cmdiocb->list, &phba->lpfc_iocb_list);
+ return;
+}
+
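+/*
+ * Issue an ABTS (ABORT_MXRI64) for the given command iocb. Only
+ * ELS_REQUEST64 commands are handled here; their context2/context3
+ * buffers are handed to the abort iocb so the firmware can still
+ * reference them until the abort completes. Returns 1 if the abort
+ * was issued, 0 otherwise.
+ */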
+int
+lpfc_sli_issue_abort_iotag32(struct lpfc_hba * phba,
+ struct lpfc_sli_ring * pring,
+ struct lpfc_iocbq * cmdiocb)
+{
+ struct list_head *lpfc_iocb_list = &phba->lpfc_iocb_list;
+ struct lpfc_iocbq *abtsiocbp = NULL;
+ IOCB_t *icmd = NULL;
+ IOCB_t *iabt = NULL;
+
+ /* issue ABTS for this IOCB based on iotag */
+ list_remove_head(lpfc_iocb_list, abtsiocbp, struct lpfc_iocbq, list);
+ if (abtsiocbp == NULL)
+ return 0;
+ memset(abtsiocbp, 0, sizeof (struct lpfc_iocbq));
+
+ iabt = &abtsiocbp->iocb;
+ icmd = &cmdiocb->iocb;
+ switch (icmd->ulpCommand) {
+ case CMD_ELS_REQUEST64_CR:
+ /* Even though we abort the ELS command, the firmware may access
+ * the BPL or other resources before it processes our
+ * ABORT_MXRI64. Thus we must delay reusing the cmdiocb
+ * resources till the actual abort request completes.
+ */
+ abtsiocbp->context1 = (void *)((unsigned long)icmd->ulpCommand);
+ abtsiocbp->context2 = cmdiocb->context2;
+ abtsiocbp->context3 = cmdiocb->context3;
+ cmdiocb->context2 = NULL;
+ cmdiocb->context3 = NULL;
+ abtsiocbp->iocb_cmpl = lpfc_sli_abort_elsreq_cmpl;
+ break;
+ default:
+ list_add_tail(&abtsiocbp->list, lpfc_iocb_list);
+ return 0;
+ }
+
+ iabt->un.amxri.abortType = ABORT_TYPE_ABTS;
+ iabt->un.amxri.iotag32 = icmd->un.elsreq64.bdl.ulpIoTag32;
+
+ iabt->ulpLe = 1;
+ iabt->ulpClass = CLASS3;
+ iabt->ulpCommand = CMD_ABORT_MXRI64_CN;
+
+ if (lpfc_sli_issue_iocb(phba, pring, abtsiocbp, 0) == IOCB_ERROR) {
+ list_add_tail(&abtsiocbp->list, lpfc_iocb_list);
+ return 0;
+ }
+
+ return 1;
+}
+
+static int
+lpfc_sli_validate_iocb_cmd(struct lpfc_scsi_buf *lpfc_cmd, uint16_t tgt_id,
+ uint64_t lun_id, struct lpfc_iocbq *iocb,
+ uint32_t ctx, lpfc_ctx_cmd ctx_cmd)
+{
+ int rc = 1;
+
+ if (lpfc_cmd == NULL)
+ return rc;
+
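+	/* rc == 0 means this iocb matches the given context; rc == 1
+	 * means it does not.
+	 */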
+ switch (ctx_cmd) {
+ case LPFC_CTX_LUN:
+ if ((lpfc_cmd->pCmd->device->id == tgt_id) &&
+ (lpfc_cmd->pCmd->device->lun == lun_id))
+ rc = 0;
+ break;
+ case LPFC_CTX_TGT:
+ if (lpfc_cmd->pCmd->device->id == tgt_id)
+ rc = 0;
+ break;
+	case LPFC_CTX_CTX:
+		if (iocb->iocb.ulpContext == ctx)
+			rc = 0;
+		break;
+	case LPFC_CTX_HOST:
+		rc = 0;
+		break;
+ default:
+ printk(KERN_ERR "%s: Unknown context cmd type, value %d\n",
+		       __func__, ctx_cmd);
+ break;
+ }
+
+ return rc;
+}
+
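+/*
+ * Count the outstanding FCP iocbs on the ring's txcmplq that match the
+ * given target/LUN/host context.
+ */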
+int
+lpfc_sli_sum_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
+ uint16_t tgt_id, uint64_t lun_id, lpfc_ctx_cmd ctx_cmd)
+{
+ struct lpfc_iocbq *iocb, *next_iocb;
+ IOCB_t *cmd = NULL;
+ struct lpfc_scsi_buf *lpfc_cmd;
+ int sum = 0, ret_val = 0;
+
+	/* Check the txcmplq for outstanding commands that match */
+ list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) {
+ cmd = &iocb->iocb;
+
+ /* Must be a FCP command */
+ if ((cmd->ulpCommand != CMD_FCP_ICMND64_CR) &&
+ (cmd->ulpCommand != CMD_FCP_IWRITE64_CR) &&
+ (cmd->ulpCommand != CMD_FCP_IREAD64_CR)) {
+ continue;
+ }
+
+ /* context1 MUST be a struct lpfc_scsi_buf */
+ lpfc_cmd = (struct lpfc_scsi_buf *) (iocb->context1);
+ ret_val = lpfc_sli_validate_iocb_cmd(lpfc_cmd, tgt_id, lun_id,
+ NULL, 0, ctx_cmd);
+ if (ret_val != 0)
+ continue;
+ sum++;
+ }
+ return sum;
+}
+
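+/*
+ * Walk the ring's txcmplq and issue an ABTS (or a CLOSE_XRI when the
+ * link is down) for each outstanding FCP iocb that matches the given
+ * context. Returns the number of iocbs that could not be aborted.
+ */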
+int
+lpfc_sli_abort_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
+ uint16_t tgt_id, uint64_t lun_id, uint32_t ctx,
+ lpfc_ctx_cmd abort_cmd)
+{
+ struct lpfc_iocbq *iocb, *next_iocb;
+ struct lpfc_iocbq *abtsiocb = NULL;
+ struct list_head *lpfc_iocb_list = &phba->lpfc_iocb_list;
+ IOCB_t *cmd = NULL;
+ struct lpfc_scsi_buf *lpfc_cmd;
+ int errcnt = 0, ret_val = 0;
+
+ list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) {
+ cmd = &iocb->iocb;
+
+ /* Must be a FCP command */
+ if ((cmd->ulpCommand != CMD_FCP_ICMND64_CR) &&
+ (cmd->ulpCommand != CMD_FCP_IWRITE64_CR) &&
+ (cmd->ulpCommand != CMD_FCP_IREAD64_CR)) {
+ continue;
+ }
+
+ /* context1 MUST be a struct lpfc_scsi_buf */
+ lpfc_cmd = (struct lpfc_scsi_buf *) (iocb->context1);
+ ret_val = lpfc_sli_validate_iocb_cmd(lpfc_cmd, tgt_id, lun_id,
+ iocb, ctx, abort_cmd);
+ if (ret_val != 0)
+ continue;
+
+ /* issue ABTS for this IOCB based on iotag */
+ list_remove_head(lpfc_iocb_list, abtsiocb, struct lpfc_iocbq,
+ list);
+ if (abtsiocb == NULL) {
+ errcnt++;
+ continue;
+ }
+ memset(abtsiocb, 0, sizeof (struct lpfc_iocbq));
+
+ abtsiocb->iocb.un.acxri.abortType = ABORT_TYPE_ABTS;
+ abtsiocb->iocb.un.acxri.abortContextTag = cmd->ulpContext;
+ abtsiocb->iocb.un.acxri.abortIoTag = cmd->ulpIoTag;
+ abtsiocb->iocb.ulpLe = 1;
+ abtsiocb->iocb.ulpClass = cmd->ulpClass;
+
+ if (phba->hba_state >= LPFC_LINK_UP)
+ abtsiocb->iocb.ulpCommand = CMD_ABORT_XRI_CN;
+ else
+ abtsiocb->iocb.ulpCommand = CMD_CLOSE_XRI_CN;
+
+ ret_val = lpfc_sli_issue_iocb(phba, pring, abtsiocb, 0);
+ if (ret_val == IOCB_ERROR) {
+ list_add_tail(&abtsiocb->list, lpfc_iocb_list);
+ errcnt++;
+ continue;
+ }
+ }
+
+ return errcnt;
+}
+
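+/*
+ * Completion handler for high-priority iocbs: copy the response into
+ * the caller-supplied buffer (context2) and set LPFC_IO_HIPRI, which
+ * the polling loop in lpfc_sli_issue_iocb_wait_high_priority watches.
+ */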
+void
+lpfc_sli_wake_iocb_high_priority(struct lpfc_hba * phba,
+ struct lpfc_iocbq * queue1,
+ struct lpfc_iocbq * queue2)
+{
+ if (queue1->context2 && queue2)
+ memcpy(queue1->context2, queue2, sizeof (struct lpfc_iocbq));
+
+	/* The waiter polls iocb_flag for the LPFC_IO_HIPRI bit; setting
+	   it here is the signal to wake up */
+ queue1->iocb_flag |= LPFC_IO_HIPRI;
+ return;
+}
+
+int
+lpfc_sli_issue_iocb_wait_high_priority(struct lpfc_hba * phba,
+ struct lpfc_sli_ring * pring,
+ struct lpfc_iocbq * piocb,
+ uint32_t flag,
+ struct lpfc_iocbq * prspiocbq,
+ uint32_t timeout)
+{
+ int j, delay_time, retval = IOCB_ERROR;
+
+	/* The caller must leave the hipri_wait_queue field empty. */
+	if (piocb->context_un.hipri_wait_queue != NULL) {
+		return IOCB_ERROR;
+	}
+
+ /*
+ * If the caller has provided a response iocbq buffer, context2 must
+	 * be NULL or it is an error.
+ */
+ if (prspiocbq && piocb->context2) {
+ return IOCB_ERROR;
+ }
+
+ piocb->context2 = prspiocbq;
+
+ /* Setup callback routine and issue the command. */
+ piocb->iocb_cmpl = lpfc_sli_wake_iocb_high_priority;
+ retval = lpfc_sli_issue_iocb(phba, pring, piocb,
+ flag | SLI_IOCB_HIGH_PRIORITY);
+ if (retval != IOCB_SUCCESS) {
+ piocb->context2 = NULL;
+ return IOCB_ERROR;
+ }
+
+ /*
+ * This high-priority iocb was sent out-of-band. Poll for its
+ * completion rather than wait for a signal. Note that the host_lock
+ * is held by the midlayer and must be released here to allow the
+ * interrupt handlers to complete the IO and signal this routine via
+ * the iocb_flag.
+	 * Also, the total polling time is computed to be one second longer
+	 * than the SCSI command timeout to give the FW time to abort on
+	 * timeout rather than the driver just giving up.
+ * the midlayer does not specify a time for this command so the
+ * driver is free to enforce its own timeout.
+ */
+
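+	/* 64 polls of delay_time ms each, i.e. roughly (timeout + 1)
+	 * seconds in total.
+	 */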
+ delay_time = ((timeout + 1) * 1000) >> 6;
+ retval = IOCB_ERROR;
+ spin_unlock_irq(phba->host->host_lock);
+ for (j = 0; j < 64; j++) {
+ msleep(delay_time);
+ if (piocb->iocb_flag & LPFC_IO_HIPRI) {
+ piocb->iocb_flag &= ~LPFC_IO_HIPRI;
+ retval = IOCB_SUCCESS;
+ break;
+ }
+ }
+
+ spin_lock_irq(phba->host->host_lock);
+ piocb->context2 = NULL;
+ return retval;
+}
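+
+/*
+ * Issue a mailbox command and sleep until it completes or the timeout
+ * (in seconds) expires. Returns MBX_SUCCESS, MBX_TIMEOUT, or the
+ * failing status from lpfc_sli_issue_mbox.
+ */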
+int
+lpfc_sli_issue_mbox_wait(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq,
+ uint32_t timeout)
+{
+ DECLARE_WAIT_QUEUE_HEAD(done_q);
+ DECLARE_WAITQUEUE(wq_entry, current);
+ uint32_t timeleft = 0;
+ int retval;
+
+ /* The caller must leave context1 empty. */
+	if (pmboxq->context1 != NULL) {
+		return MBX_NOT_FINISHED;
+ }
+
+	/* setup wake call as mailbox completion callback */
+ pmboxq->mbox_cmpl = lpfc_sli_wake_mbox_wait;
+ /* setup context field to pass wait_queue pointer to wake function */
+ pmboxq->context1 = &done_q;
+
+	/* add ourselves to the wait queue before issuing the command,
+	   so the wake-up cannot be missed */
+ set_current_state(TASK_INTERRUPTIBLE);
+ add_wait_queue(&done_q, &wq_entry);
+
+ /* now issue the command */
+ retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
+
+ if (retval == MBX_BUSY || retval == MBX_SUCCESS) {
+ timeleft = schedule_timeout(timeout * HZ);
+ pmboxq->context1 = NULL;
+ /* if schedule_timeout returns 0, we timed out and were not
+ woken up */
+ if (timeleft == 0) {
+ retval = MBX_TIMEOUT;
+ } else {
+ retval = MBX_SUCCESS;
+ }
+ }
+
+ set_current_state(TASK_RUNNING);
+ remove_wait_queue(&done_q, &wq_entry);
+ return retval;
+}
+
+irqreturn_t
+lpfc_intr_handler(int irq, void *dev_id, struct pt_regs * regs)
+{
+ struct lpfc_hba *phba;
+ uint32_t ha_copy;
+ uint32_t work_ha_copy;
+ unsigned long status;
+ int i;
+ uint32_t control;
+
+ /*
+ * Get the driver's phba structure from the dev_id and
+ * assume the HBA is not interrupting.
+ */
+ phba = (struct lpfc_hba *) dev_id;
+
+ if (unlikely(!phba))
+ return IRQ_NONE;
+
+ phba->sli.slistat.sli_intr++;
+
+	/*
+	 * Check whether the HBA is actually interrupting. If it is not,
+	 * do not claim the interrupt.
+	 */
+
+ /* Ignore all interrupts during initialization. */
+ if (unlikely(phba->hba_state < LPFC_LINK_DOWN))
+ return IRQ_NONE;
+
+	/*
+	 * Read the host attention register to determine the interrupt
+	 * source. Clear all attention sources except Error Attention (to
+	 * preserve status) and Link Attention.
+	 */
+ spin_lock(phba->host->host_lock);
+ ha_copy = readl(phba->HAregaddr);
+ writel((ha_copy & ~(HA_LATT | HA_ERATT)), phba->HAregaddr);
+ readl(phba->HAregaddr); /* flush */
+ spin_unlock(phba->host->host_lock);
+
+ if (unlikely(!ha_copy))
+ return IRQ_NONE;
+
+ work_ha_copy = ha_copy & phba->work_ha_mask;
+
+ if (unlikely(work_ha_copy)) {
+ if (work_ha_copy & HA_LATT) {
+ if (phba->sli.sli_flag & LPFC_PROCESS_LA) {
+ /*
+ * Turn off Link Attention interrupts
+ * until CLEAR_LA done
+ */
+ spin_lock(phba->host->host_lock);
+ phba->sli.sli_flag &= ~LPFC_PROCESS_LA;
+ control = readl(phba->HCregaddr);
+ control &= ~HC_LAINT_ENA;
+ writel(control, phba->HCregaddr);
+ readl(phba->HCregaddr); /* flush */
+ spin_unlock(phba->host->host_lock);
+ }
+ else
+ work_ha_copy &= ~HA_LATT;
+ }
+
+ if (work_ha_copy & ~(HA_ERATT|HA_MBATT|HA_LATT)) {
+ for (i = 0; i < phba->sli.num_rings; i++) {
+ if (work_ha_copy & (HA_RXATT << (4*i))) {
+ /*
+ * Turn off Slow Rings interrupts
+ */
+ spin_lock(phba->host->host_lock);
+ control = readl(phba->HCregaddr);
+ control &= ~(HC_R0INT_ENA << i);
+ writel(control, phba->HCregaddr);
+ readl(phba->HCregaddr); /* flush */
+ spin_unlock(phba->host->host_lock);
+ }
+ }
+ }
+
+ if (work_ha_copy & HA_ERATT) {
+ phba->hba_state = LPFC_HBA_ERROR;
+ /*
+ * There was a link/board error. Read the
+ * status register to retrieve the error event
+ * and process it.
+ */
+ phba->sli.slistat.err_attn_event++;
+ /* Save status info */
+ phba->work_hs = readl(phba->HSregaddr);
+ phba->work_status[0] = readl(phba->MBslimaddr + 0xa8);
+ phba->work_status[1] = readl(phba->MBslimaddr + 0xac);
+
+ /* Clear Chip error bit */
+ writel(HA_ERATT, phba->HAregaddr);
+ readl(phba->HAregaddr); /* flush */
+
+			/*
+			 * Resetting the HBA is the only reliable way
+			 * to shut down interrupts when there is an
+			 * error attention.
+			 */
+ lpfc_sli_send_reset(phba, phba->hba_state);
+ }
+
+ spin_lock(phba->host->host_lock);
+ phba->work_ha |= work_ha_copy;
+ if (phba->work_wait)
+ wake_up(phba->work_wait);
+ spin_unlock(phba->host->host_lock);
+ }
+
+ ha_copy &= ~(phba->work_ha_mask);
+
+ /*
+ * Process all events on FCP ring. Take the optimized path for
+ * FCP IO. Any other IO is slow path and is handled by
+ * the worker thread.
+ */
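+	/* Each ring's attention bits occupy a 4-bit nibble of the HA
+	 * register: bits (4 * ring) through (4 * ring + 3).
+	 */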
+ status = (ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING)));
+ status >>= (4*LPFC_FCP_RING);
+ if (status & HA_RXATT)
+ lpfc_sli_handle_fast_ring_event(phba,
+ &phba->sli.ring[LPFC_FCP_RING],
+ status);
+ return IRQ_HANDLED;
+
+} /* lpfc_intr_handler */
diff --git a/drivers/scsi/lpfc/lpfc_sli.h b/drivers/scsi/lpfc/lpfc_sli.h
new file mode 100644
index 00000000000..abd9a8c84e9
--- /dev/null
+++ b/drivers/scsi/lpfc/lpfc_sli.h
@@ -0,0 +1,216 @@
+/*******************************************************************
+ * This file is part of the Emulex Linux Device Driver for *
+ * Enterprise Fibre Channel Host Bus Adapters. *
+ * Refer to the README file included with this package for *
+ * driver version and adapter support. *
+ * Copyright (C) 2004 Emulex Corporation. *
+ * www.emulex.com *
+ * *
+ * This program is free software; you can redistribute it and/or *
+ * modify it under the terms of the GNU General Public License *
+ * as published by the Free Software Foundation; either version 2 *
+ * of the License, or (at your option) any later version. *
+ * *
+ * This program is distributed in the hope that it will be useful, *
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of *
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
+ * GNU General Public License for more details, a copy of which *
+ * can be found in the file COPYING included with this package. *
+ *******************************************************************/
+
+/*
+ * $Id: lpfc_sli.h 1.42 2005/03/21 02:01:28EST sf_support Exp $
+ */
+
+/* forward declaration of struct lpfc_hba for the callback prototypes below */
+struct lpfc_hba;
+
+/* Context types handled by the SLI abort and sum routines. */
+typedef enum _lpfc_ctx_cmd {
+ LPFC_CTX_LUN,
+ LPFC_CTX_TGT,
+ LPFC_CTX_CTX,
+ LPFC_CTX_HOST
+} lpfc_ctx_cmd;
+
+/* This structure is used to handle IOCB requests / responses */
+struct lpfc_iocbq {
+ /* lpfc_iocbqs are used in double linked lists */
+ struct list_head list;
+ IOCB_t iocb; /* IOCB cmd */
+ uint8_t retry; /* retry counter for IOCB cmd - if needed */
+ uint8_t iocb_flag;
+#define LPFC_IO_POLL 1 /* Polling mode iocb */
+#define LPFC_IO_LIBDFC 2 /* libdfc iocb */
+#define LPFC_IO_WAIT 4
+#define LPFC_IO_HIPRI 8 /* High Priority Queue signal flag */
+
+ uint8_t abort_count;
+ uint8_t rsvd2;
+ uint32_t drvrTimeout; /* driver timeout in seconds */
+ void *context1; /* caller context information */
+ void *context2; /* caller context information */
+ void *context3; /* caller context information */
+ union {
+ wait_queue_head_t *hipri_wait_queue; /* High Priority Queue wait
+ queue */
+ struct lpfc_iocbq *rsp_iocb;
+ struct lpfcMboxq *mbox;
+ } context_un;
+
+ void (*iocb_cmpl) (struct lpfc_hba *, struct lpfc_iocbq *,
+ struct lpfc_iocbq *);
+
+};
+
+#define SLI_IOCB_RET_IOCB 1 /* Return IOCB if cmd ring full */
+#define SLI_IOCB_HIGH_PRIORITY 2 /* High priority command */
+
+#define IOCB_SUCCESS 0
+#define IOCB_BUSY 1
+#define IOCB_ERROR 2
+#define IOCB_TIMEDOUT 3
+
+typedef struct lpfcMboxq {
+ /* MBOXQs are used in single linked lists */
+ struct list_head list; /* ptr to next mailbox command */
+ MAILBOX_t mb; /* Mailbox cmd */
+ void *context1; /* caller context information */
+ void *context2; /* caller context information */
+
+ void (*mbox_cmpl) (struct lpfc_hba *, struct lpfcMboxq *);
+
+} LPFC_MBOXQ_t;
+
+#define MBX_POLL 1 /* poll mailbox till command done, then
+ return */
+#define MBX_NOWAIT 2 /* issue command then return immediately */
+#define MBX_STOP_IOCB 4 /* Stop iocb processing till mbox cmds
+ complete */
+
+#define LPFC_MAX_RING_MASK 4 /* max num of rctl/type masks allowed per
+ ring */
+#define LPFC_MAX_RING 4 /* max num of SLI rings used by driver */
+
+struct lpfc_sli_ring;
+
+struct lpfc_sli_ring_mask {
+ uint8_t profile; /* profile associated with ring */
+ uint8_t rctl; /* rctl / type pair configured for ring */
+ uint8_t type; /* rctl / type pair configured for ring */
+ uint8_t rsvd;
+ /* rcv'd unsol event */
+ void (*lpfc_sli_rcv_unsol_event) (struct lpfc_hba *,
+ struct lpfc_sli_ring *,
+ struct lpfc_iocbq *);
+};
+
+
+/* Structure used to hold SLI statistical counters and info */
+struct lpfc_sli_ring_stat {
+ uint64_t iocb_event; /* IOCB event counters */
+ uint64_t iocb_cmd; /* IOCB cmd issued */
+ uint64_t iocb_rsp; /* IOCB rsp received */
+ uint64_t iocb_cmd_delay; /* IOCB cmd ring delay */
+ uint64_t iocb_cmd_full; /* IOCB cmd ring full */
+ uint64_t iocb_cmd_empty; /* IOCB cmd ring is now empty */
+ uint64_t iocb_rsp_full; /* IOCB rsp ring full */
+};
+
+/* Structure used to hold SLI ring information */
+struct lpfc_sli_ring {
+ uint16_t flag; /* ring flags */
+#define LPFC_DEFERRED_RING_EVENT 0x001 /* Deferred processing a ring event */
+#define LPFC_CALL_RING_AVAILABLE 0x002 /* indicates cmd was full */
+#define LPFC_STOP_IOCB_MBX 0x010 /* Stop processing IOCB cmds mbox */
+#define LPFC_STOP_IOCB_EVENT 0x020 /* Stop processing IOCB cmds event */
+#define LPFC_STOP_IOCB_MASK 0x030 /* Stop processing IOCB cmds mask */
+ uint16_t abtsiotag; /* tracks next iotag to use for ABTS */
+
+ uint32_t local_getidx; /* last available cmd index (from cmdGetInx) */
+ uint32_t next_cmdidx; /* next_cmd index */
+ uint8_t rsvd;
+ uint8_t ringno; /* ring number */
+ uint8_t rspidx; /* current index in response ring */
+ uint8_t cmdidx; /* current index in command ring */
+ uint16_t numCiocb; /* number of command iocb's per ring */
+ uint16_t numRiocb; /* number of rsp iocb's per ring */
+
+ uint32_t fast_iotag; /* max fastlookup based iotag */
+ uint32_t iotag_ctr; /* keeps track of the next iotag to use */
+ uint32_t iotag_max; /* max iotag value to use */
+ struct lpfc_iocbq ** fast_lookup; /* array of IOCB ptrs indexed by
+ iotag */
+ struct list_head txq;
+ uint16_t txq_cnt; /* current length of queue */
+ uint16_t txq_max; /* max length */
+ struct list_head txcmplq;
+ uint16_t txcmplq_cnt; /* current length of queue */
+ uint16_t txcmplq_max; /* max length */
+ uint32_t *cmdringaddr; /* virtual address for cmd rings */
+ uint32_t *rspringaddr; /* virtual address for rsp rings */
+ uint32_t missbufcnt; /* keep track of buffers to post */
+ struct list_head postbufq;
+ uint16_t postbufq_cnt; /* current length of queue */
+ uint16_t postbufq_max; /* max length */
+ struct list_head iocb_continueq;
+ uint16_t iocb_continueq_cnt; /* current length of queue */
+ uint16_t iocb_continueq_max; /* max length */
+
+ struct lpfc_sli_ring_mask prt[LPFC_MAX_RING_MASK];
+ uint32_t num_mask; /* number of mask entries in prt array */
+
+ struct lpfc_sli_ring_stat stats; /* SLI statistical info */
+
+ /* cmd ring available */
+ void (*lpfc_sli_cmd_available) (struct lpfc_hba *,
+ struct lpfc_sli_ring *);
+};
+
+/* Structure used to hold SLI statistical counters and info */
+struct lpfc_sli_stat {
+ uint64_t mbox_stat_err; /* Mbox cmds completed status error */
+ uint64_t mbox_cmd; /* Mailbox commands issued */
+ uint64_t sli_intr; /* Count of Host Attention interrupts */
+ uint32_t err_attn_event; /* Error Attn event counters */
+ uint32_t link_event; /* Link event counters */
+ uint32_t mbox_event; /* Mailbox event counters */
+ uint32_t mbox_busy; /* Mailbox cmd busy */
+};
+
+/* Structure used to hold SLI information */
+struct lpfc_sli {
+ uint32_t num_rings;
+ uint32_t sli_flag;
+
+ /* Additional sli_flags */
+#define LPFC_SLI_MBOX_ACTIVE 0x100 /* HBA mailbox is currently active */
+#define LPFC_SLI2_ACTIVE 0x200 /* SLI2 overlay in firmware is active */
+#define LPFC_PROCESS_LA 0x400 /* Able to process link attention */
+
+ struct lpfc_sli_ring ring[LPFC_MAX_RING];
+ int fcp_ring; /* ring used for FCP initiator commands */
+ int next_ring;
+
+ int ip_ring; /* ring used for IP network drv cmds */
+
+ struct lpfc_sli_stat slistat; /* SLI statistical info */
+ struct list_head mboxq;
+ uint16_t mboxq_cnt; /* current length of queue */
+ uint16_t mboxq_max; /* max length */
+ LPFC_MBOXQ_t *mbox_active; /* active mboxq information */
+
+ struct timer_list mbox_tmo; /* Hold clk to timeout active mbox
+ cmd */
+
+ uint32_t *MBhostaddr; /* virtual address for mbox cmds */
+};
+
+/* Given a pointer to the start of the ring, and the slot number of
+ * the desired iocb entry, calculate a pointer to that entry.
+ * (assumes an iocb entry is 32 bytes, i.e. 8 words)
+ */
+#define IOCB_ENTRY(ring,slot) ((IOCB_t *)(((char *)(ring)) + ((slot) * 32)))
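+/* Typical use: IOCB_ENTRY(pring->cmdringaddr, pring->cmdidx) yields the
+ * next command slot; likewise rspringaddr/rspidx for responses.
+ */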
+
+#define LPFC_MBOX_TMO 30 /* Sec tmo for outstanding mbox
+ command */
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
new file mode 100644
index 00000000000..dfacd8d8209
--- /dev/null
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -0,0 +1,32 @@
+/*******************************************************************
+ * This file is part of the Emulex Linux Device Driver for *
+ * Enterprise Fibre Channel Host Bus Adapters. *
+ * Refer to the README file included with this package for *
+ * driver version and adapter support. *
+ * Copyright (C) 2004 Emulex Corporation. *
+ * www.emulex.com *
+ * *
+ * This program is free software; you can redistribute it and/or *
+ * modify it under the terms of the GNU General Public License *
+ * as published by the Free Software Foundation; either version 2 *
+ * of the License, or (at your option) any later version. *
+ * *
+ * This program is distributed in the hope that it will be useful, *
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of *
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
+ * GNU General Public License for more details, a copy of which *
+ * can be found in the file COPYING included with this package. *
+ *******************************************************************/
+
+/*
+ * $Id: lpfc_version.h 1.49 2005/04/13 15:07:19EDT sf_support Exp $
+ */
+
+#define LPFC_DRIVER_VERSION "8.0.28"
+
+#define LPFC_DRIVER_NAME "lpfc"
+
+#define LPFC_MODULE_DESC "Emulex LightPulse Fibre Channel SCSI driver " \
+ LPFC_DRIVER_VERSION
+
+#define DFC_API_VERSION "0.0.0"