path: root/drivers/scsi/lpfc
author    Linus Torvalds <torvalds@linux-foundation.org>  2009-09-14 17:53:36 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>  2009-09-14 17:53:36 -0700
commit    39695224bd84dc4be29abad93a0ec232a16fc519 (patch)
tree      2bfa5cb50788a4c8be9f2e9f4412e47a565f4508 /drivers/scsi/lpfc
parent    a9bbd210a44102cc50b30a5f3d111dbf5f2f9cd4 (diff)
parent    ea038f63ac52439e7816295fa6064fe95e6c1f51 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6
* git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6: (209 commits)
  [SCSI] fix oops during scsi scanning
  [SCSI] libsrp: fix memory leak in srp_ring_free()
  [SCSI] libiscsi, bnx2i: make bound ep check common
  [SCSI] libiscsi: add completion function for drivers that do not need pdu processing
  [SCSI] scsi_dh_rdac: changes for rdac debug logging
  [SCSI] scsi_dh_rdac: changes to collect the rdac debug information during the initialization
  [SCSI] scsi_dh_rdac: move the init code from rdac_activate to rdac_bus_attach
  [SCSI] sg: fix oops in the error path in sg_build_indirect()
  [SCSI] mptsas : Bump version to 3.04.12
  [SCSI] mptsas : FW event thread and scsi mid layer deadlock in SYNCHRONIZE CACHE command
  [SCSI] mptsas : Send DID_NO_CONNECT for pending IOs of removed device
  [SCSI] mptsas : PAE Kernel more than 4 GB kernel panic
  [SCSI] mptsas : NULL pointer on big endian systems causing Expander not to tear off
  [SCSI] mptsas : Sanity check for phyinfo is added
  [SCSI] scsi_dh_rdac: Add support for Sun StorageTek ST2500, ST2510 and ST2530
  [SCSI] pmcraid: PMC-Sierra MaxRAID driver to support 6Gb/s SAS RAID controller
  [SCSI] qla2xxx: Update version number to 8.03.01-k6.
  [SCSI] qla2xxx: Properly delete rports attached to a vport.
  [SCSI] qla2xxx: Correct various NPIV issues.
  [SCSI] qla2xxx: Correct qla2x00_eh_wait_on_command() to wait correctly.
  ...
Diffstat (limited to 'drivers/scsi/lpfc')
-rw-r--r--  drivers/scsi/lpfc/Makefile        |    2
-rw-r--r--  drivers/scsi/lpfc/lpfc.h          |   19
-rw-r--r--  drivers/scsi/lpfc/lpfc_attr.c     |   10
-rw-r--r--  drivers/scsi/lpfc/lpfc_bsg.c      |  904
-rw-r--r--  drivers/scsi/lpfc/lpfc_crtn.h     |   18
-rw-r--r--  drivers/scsi/lpfc/lpfc_ct.c       |    2
-rw-r--r--  drivers/scsi/lpfc/lpfc_els.c      |    2
-rw-r--r--  drivers/scsi/lpfc/lpfc_hbadisc.c  |  259
-rw-r--r--  drivers/scsi/lpfc/lpfc_hw.h       |    4
-rw-r--r--  drivers/scsi/lpfc/lpfc_hw4.h      |   74
-rw-r--r--  drivers/scsi/lpfc/lpfc_init.c     |  134
-rw-r--r--  drivers/scsi/lpfc/lpfc_mbox.c     |   93
-rw-r--r--  drivers/scsi/lpfc/lpfc_mem.c      |   41
-rw-r--r--  drivers/scsi/lpfc/lpfc_nl.h       |   20
-rw-r--r--  drivers/scsi/lpfc/lpfc_scsi.c     |    7
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli.c      |  263
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli4.h     |    5
-rw-r--r--  drivers/scsi/lpfc/lpfc_version.h  |    2
-rw-r--r--  drivers/scsi/lpfc/lpfc_vport.c    |   53
19 files changed, 1689 insertions, 223 deletions
diff --git a/drivers/scsi/lpfc/Makefile b/drivers/scsi/lpfc/Makefile
index 1c286707dd5..ad05d6edb8f 100644
--- a/drivers/scsi/lpfc/Makefile
+++ b/drivers/scsi/lpfc/Makefile
@@ -28,4 +28,4 @@ obj-$(CONFIG_SCSI_LPFC) := lpfc.o
lpfc-objs := lpfc_mem.o lpfc_sli.o lpfc_ct.o lpfc_els.o lpfc_hbadisc.o \
lpfc_init.o lpfc_mbox.o lpfc_nportdisc.o lpfc_scsi.o lpfc_attr.o \
- lpfc_vport.o lpfc_debugfs.o
+ lpfc_vport.o lpfc_debugfs.o lpfc_bsg.o
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index 1877d981183..aa10f795163 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -312,6 +312,7 @@ struct lpfc_vport {
#define FC_BYPASSED_MODE 0x20000 /* NPort is in bypassed mode */
#define FC_VPORT_NEEDS_REG_VPI 0x80000 /* Needs to have its vpi registered */
#define FC_RSCN_DEFERRED 0x100000 /* A deferred RSCN being processed */
+#define FC_VPORT_NEEDS_INIT_VPI 0x200000 /* Need to INIT_VPI before FDISC */
uint32_t ct_flags;
#define FC_CT_RFF_ID 0x1 /* RFF_ID accepted by switch */
@@ -440,6 +441,12 @@ enum intr_type_t {
MSIX,
};
+struct unsol_rcv_ct_ctx {
+ uint32_t ctxt_id;
+ uint32_t SID;
+ uint32_t oxid;
+};
+
struct lpfc_hba {
/* SCSI interface function jump table entries */
int (*lpfc_new_scsi_buf)
@@ -525,6 +532,8 @@ struct lpfc_hba {
#define FCP_XRI_ABORT_EVENT 0x20
#define ELS_XRI_ABORT_EVENT 0x40
#define ASYNC_EVENT 0x80
+#define LINK_DISABLED 0x100 /* Link disabled by user */
+#define FCF_DISC_INPROGRESS 0x200 /* FCF discovery in progress */
struct lpfc_dmabuf slim2p;
MAILBOX_t *mbox;
@@ -616,6 +625,8 @@ struct lpfc_hba {
uint32_t hbq_count; /* Count of configured HBQs */
struct hbq_s hbqs[LPFC_MAX_HBQS]; /* local copy of hbq indicies */
+ uint32_t fcp_qidx; /* next work queue to post work to */
+
unsigned long pci_bar0_map; /* Physical address for PCI BAR0 */
unsigned long pci_bar1_map; /* Physical address for PCI BAR1 */
unsigned long pci_bar2_map; /* Physical address for PCI BAR2 */
@@ -682,6 +693,7 @@ struct lpfc_hba {
struct pci_pool *lpfc_mbuf_pool;
struct pci_pool *lpfc_hrb_pool; /* header receive buffer pool */
struct pci_pool *lpfc_drb_pool; /* data receive buffer pool */
+ struct pci_pool *lpfc_hbq_pool; /* SLI3 hbq buffer pool */
struct lpfc_dma_pool lpfc_mbuf_safety_pool;
mempool_t *mbox_mem_pool;
@@ -763,11 +775,18 @@ struct lpfc_hba {
/* Maximum number of events that can be outstanding at any time*/
#define LPFC_MAX_EVT_COUNT 512
atomic_t fast_event_count;
+ uint32_t fcoe_eventtag;
+ uint32_t fcoe_eventtag_at_fcf_scan;
struct lpfc_fcf fcf;
uint8_t fc_map[3];
uint8_t valid_vlan;
uint16_t vlan_id;
struct list_head fcf_conn_rec_list;
+
+ struct mutex ct_event_mutex; /* synchronize access to ct_ev_waiters */
+ struct list_head ct_ev_waiters;
+ struct unsol_rcv_ct_ctx ct_ctx[64];
+ uint32_t ctx_idx;
};
static inline struct Scsi_Host *
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index fc07be5fbce..e1a30a16a9f 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -394,7 +394,12 @@ lpfc_link_state_show(struct device *dev, struct device_attribute *attr,
case LPFC_INIT_MBX_CMDS:
case LPFC_LINK_DOWN:
case LPFC_HBA_ERROR:
- len += snprintf(buf + len, PAGE_SIZE-len, "Link Down\n");
+ if (phba->hba_flag & LINK_DISABLED)
+ len += snprintf(buf + len, PAGE_SIZE-len,
+ "Link Down - User disabled\n");
+ else
+ len += snprintf(buf + len, PAGE_SIZE-len,
+ "Link Down\n");
break;
case LPFC_LINK_UP:
case LPFC_CLEAR_LA:
@@ -4127,6 +4132,9 @@ struct fc_function_template lpfc_transport_functions = {
.vport_disable = lpfc_vport_disable,
.set_vport_symbolic_name = lpfc_set_vport_symbolic_name,
+
+ .bsg_request = lpfc_bsg_request,
+ .bsg_timeout = lpfc_bsg_timeout,
};
struct fc_function_template lpfc_vport_transport_functions = {
diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c
new file mode 100644
index 00000000000..da6bf5aac9d
--- /dev/null
+++ b/drivers/scsi/lpfc/lpfc_bsg.c
@@ -0,0 +1,904 @@
+/*******************************************************************
+ * This file is part of the Emulex Linux Device Driver for *
+ * Fibre Channel Host Bus Adapters. *
+ * Copyright (C) 2009 Emulex. All rights reserved. *
+ * EMULEX and SLI are trademarks of Emulex. *
+ * www.emulex.com *
+ * *
+ * This program is free software; you can redistribute it and/or *
+ * modify it under the terms of version 2 of the GNU General *
+ * Public License as published by the Free Software Foundation. *
+ * This program is distributed in the hope that it will be useful. *
+ * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
+ * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
+ * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
+ * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
+ * TO BE LEGALLY INVALID. See the GNU General Public License for *
+ * more details, a copy of which can be found in the file COPYING *
+ * included with this package. *
+ *******************************************************************/
+
+#include <linux/interrupt.h>
+#include <linux/mempool.h>
+#include <linux/pci.h>
+
+#include <scsi/scsi.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_transport_fc.h>
+#include <scsi/scsi_bsg_fc.h>
+
+#include "lpfc_hw4.h"
+#include "lpfc_hw.h"
+#include "lpfc_sli.h"
+#include "lpfc_sli4.h"
+#include "lpfc_nl.h"
+#include "lpfc_disc.h"
+#include "lpfc_scsi.h"
+#include "lpfc.h"
+#include "lpfc_logmsg.h"
+#include "lpfc_crtn.h"
+#include "lpfc_vport.h"
+#include "lpfc_version.h"
+
+/**
+ * lpfc_bsg_rport_ct - send a CT command from a bsg request
+ * @job: fc_bsg_job to handle
+ */
+static int
+lpfc_bsg_rport_ct(struct fc_bsg_job *job)
+{
+ struct Scsi_Host *shost = job->shost;
+ struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
+ struct lpfc_hba *phba = vport->phba;
+ struct lpfc_rport_data *rdata = job->rport->dd_data;
+ struct lpfc_nodelist *ndlp = rdata->pnode;
+ struct ulp_bde64 *bpl = NULL;
+ uint32_t timeout;
+ struct lpfc_iocbq *cmdiocbq = NULL;
+ struct lpfc_iocbq *rspiocbq = NULL;
+ IOCB_t *cmd;
+ IOCB_t *rsp;
+ struct lpfc_dmabuf *bmp = NULL;
+ int request_nseg;
+ int reply_nseg;
+ struct scatterlist *sgel = NULL;
+ int numbde;
+ dma_addr_t busaddr;
+ int rc = 0;
+
+ /* in case no data is transferred */
+ job->reply->reply_payload_rcv_len = 0;
+
+ if (!lpfc_nlp_get(ndlp)) {
+ job->reply->result = -ENODEV;
+ return 0;
+ }
+
+ if (ndlp->nlp_flag & NLP_ELS_SND_MASK) {
+ rc = -ENODEV;
+ goto free_ndlp_exit;
+ }
+
+ spin_lock_irq(shost->host_lock);
+ cmdiocbq = lpfc_sli_get_iocbq(phba);
+ if (!cmdiocbq) {
+ rc = -ENOMEM;
+ spin_unlock_irq(shost->host_lock);
+ goto free_ndlp_exit;
+ }
+ cmd = &cmdiocbq->iocb;
+
+ rspiocbq = lpfc_sli_get_iocbq(phba);
+ if (!rspiocbq) {
+ rc = -ENOMEM;
+ goto free_cmdiocbq;
+ }
+ spin_unlock_irq(shost->host_lock);
+
+ rsp = &rspiocbq->iocb;
+
+ bmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
+ if (!bmp) {
+ rc = -ENOMEM;
+ spin_lock_irq(shost->host_lock);
+ goto free_rspiocbq;
+ }
+
+ spin_lock_irq(shost->host_lock);
+ bmp->virt = lpfc_mbuf_alloc(phba, 0, &bmp->phys);
+ if (!bmp->virt) {
+ rc = -ENOMEM;
+ goto free_bmp;
+ }
+ spin_unlock_irq(shost->host_lock);
+
+ INIT_LIST_HEAD(&bmp->list);
+ bpl = (struct ulp_bde64 *) bmp->virt;
+
+ request_nseg = pci_map_sg(phba->pcidev, job->request_payload.sg_list,
+ job->request_payload.sg_cnt, DMA_TO_DEVICE);
+ for_each_sg(job->request_payload.sg_list, sgel, request_nseg, numbde) {
+ busaddr = sg_dma_address(sgel);
+ bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
+ bpl->tus.f.bdeSize = sg_dma_len(sgel);
+ bpl->tus.w = cpu_to_le32(bpl->tus.w);
+ bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr));
+ bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr));
+ bpl++;
+ }
+
+ reply_nseg = pci_map_sg(phba->pcidev, job->reply_payload.sg_list,
+ job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
+ for_each_sg(job->reply_payload.sg_list, sgel, reply_nseg, numbde) {
+ busaddr = sg_dma_address(sgel);
+ bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
+ bpl->tus.f.bdeSize = sg_dma_len(sgel);
+ bpl->tus.w = cpu_to_le32(bpl->tus.w);
+ bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr));
+ bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr));
+ bpl++;
+ }
+
+ cmd->un.genreq64.bdl.ulpIoTag32 = 0;
+ cmd->un.genreq64.bdl.addrHigh = putPaddrHigh(bmp->phys);
+ cmd->un.genreq64.bdl.addrLow = putPaddrLow(bmp->phys);
+ cmd->un.genreq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
+ cmd->un.genreq64.bdl.bdeSize =
+ (request_nseg + reply_nseg) * sizeof(struct ulp_bde64);
+ cmd->ulpCommand = CMD_GEN_REQUEST64_CR;
+ cmd->un.genreq64.w5.hcsw.Fctl = (SI | LA);
+ cmd->un.genreq64.w5.hcsw.Dfctl = 0;
+ cmd->un.genreq64.w5.hcsw.Rctl = FC_UNSOL_CTL;
+ cmd->un.genreq64.w5.hcsw.Type = FC_COMMON_TRANSPORT_ULP;
+ cmd->ulpBdeCount = 1;
+ cmd->ulpLe = 1;
+ cmd->ulpClass = CLASS3;
+ cmd->ulpContext = ndlp->nlp_rpi;
+ cmd->ulpOwner = OWN_CHIP;
+ cmdiocbq->vport = phba->pport;
+ cmdiocbq->context1 = NULL;
+ cmdiocbq->context2 = NULL;
+ cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
+
+ timeout = phba->fc_ratov * 2;
+ job->dd_data = cmdiocbq;
+
+ rc = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, cmdiocbq, rspiocbq,
+ timeout + LPFC_DRVR_TIMEOUT);
+
+ if (rc != IOCB_TIMEDOUT) {
+ pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
+ job->request_payload.sg_cnt, DMA_TO_DEVICE);
+ pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list,
+ job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
+ }
+
+ if (rc == IOCB_TIMEDOUT) {
+ lpfc_sli_release_iocbq(phba, rspiocbq);
+ rc = -EACCES;
+ goto free_ndlp_exit;
+ }
+
+ if (rc != IOCB_SUCCESS) {
+ rc = -EACCES;
+ goto free_outdmp;
+ }
+
+ if (rsp->ulpStatus) {
+ if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
+ switch (rsp->un.ulpWord[4] & 0xff) {
+ case IOERR_SEQUENCE_TIMEOUT:
+ rc = -ETIMEDOUT;
+ break;
+ case IOERR_INVALID_RPI:
+ rc = -EFAULT;
+ break;
+ default:
+ rc = -EACCES;
+ break;
+ }
+ goto free_outdmp;
+ }
+ } else
+ job->reply->reply_payload_rcv_len =
+ rsp->un.genreq64.bdl.bdeSize;
+
+free_outdmp:
+ spin_lock_irq(shost->host_lock);
+ lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
+free_bmp:
+ kfree(bmp);
+free_rspiocbq:
+ lpfc_sli_release_iocbq(phba, rspiocbq);
+free_cmdiocbq:
+ lpfc_sli_release_iocbq(phba, cmdiocbq);
+ spin_unlock_irq(shost->host_lock);
+free_ndlp_exit:
+ lpfc_nlp_put(ndlp);
+
+ /* make error code available to userspace */
+ job->reply->result = rc;
+ /* complete the job back to userspace */
+ job->job_done(job);
+
+ return 0;
+}
+
+/**
+ * lpfc_bsg_rport_els - send an ELS command from a bsg request
+ * @job: fc_bsg_job to handle
+ */
+static int
+lpfc_bsg_rport_els(struct fc_bsg_job *job)
+{
+ struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
+ struct lpfc_hba *phba = vport->phba;
+ struct lpfc_rport_data *rdata = job->rport->dd_data;
+ struct lpfc_nodelist *ndlp = rdata->pnode;
+
+ uint32_t elscmd;
+ uint32_t cmdsize;
+ uint32_t rspsize;
+ struct lpfc_iocbq *rspiocbq;
+ struct lpfc_iocbq *cmdiocbq;
+ IOCB_t *rsp;
+ uint16_t rpi = 0;
+ struct lpfc_dmabuf *pcmd;
+ struct lpfc_dmabuf *prsp;
+ struct lpfc_dmabuf *pbuflist = NULL;
+ struct ulp_bde64 *bpl;
+ int iocb_status;
+ int request_nseg;
+ int reply_nseg;
+ struct scatterlist *sgel = NULL;
+ int numbde;
+ dma_addr_t busaddr;
+ int rc = 0;
+
+ /* in case no data is transferred */
+ job->reply->reply_payload_rcv_len = 0;
+
+ if (!lpfc_nlp_get(ndlp)) {
+ rc = -ENODEV;
+ goto out;
+ }
+
+ elscmd = job->request->rqst_data.r_els.els_code;
+ cmdsize = job->request_payload.payload_len;
+ rspsize = job->reply_payload.payload_len;
+ rspiocbq = lpfc_sli_get_iocbq(phba);
+ if (!rspiocbq) {
+ lpfc_nlp_put(ndlp);
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ rsp = &rspiocbq->iocb;
+ rpi = ndlp->nlp_rpi;
+
+ cmdiocbq = lpfc_prep_els_iocb(phba->pport, 1, cmdsize, 0, ndlp,
+ ndlp->nlp_DID, elscmd);
+
+ if (!cmdiocbq) {
+ lpfc_sli_release_iocbq(phba, rspiocbq);
+ return -EIO;
+ }
+
+ job->dd_data = cmdiocbq;
+ pcmd = (struct lpfc_dmabuf *) cmdiocbq->context2;
+ prsp = (struct lpfc_dmabuf *) pcmd->list.next;
+
+ lpfc_mbuf_free(phba, pcmd->virt, pcmd->phys);
+ kfree(pcmd);
+ lpfc_mbuf_free(phba, prsp->virt, prsp->phys);
+ kfree(prsp);
+ cmdiocbq->context2 = NULL;
+
+ pbuflist = (struct lpfc_dmabuf *) cmdiocbq->context3;
+ bpl = (struct ulp_bde64 *) pbuflist->virt;
+
+ request_nseg = pci_map_sg(phba->pcidev, job->request_payload.sg_list,
+ job->request_payload.sg_cnt, DMA_TO_DEVICE);
+
+ for_each_sg(job->request_payload.sg_list, sgel, request_nseg, numbde) {
+ busaddr = sg_dma_address(sgel);
+ bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
+ bpl->tus.f.bdeSize = sg_dma_len(sgel);
+ bpl->tus.w = cpu_to_le32(bpl->tus.w);
+ bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr));
+ bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr));
+ bpl++;
+ }
+
+ reply_nseg = pci_map_sg(phba->pcidev, job->reply_payload.sg_list,
+ job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
+ for_each_sg(job->reply_payload.sg_list, sgel, reply_nseg, numbde) {
+ busaddr = sg_dma_address(sgel);
+ bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
+ bpl->tus.f.bdeSize = sg_dma_len(sgel);
+ bpl->tus.w = cpu_to_le32(bpl->tus.w);
+ bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr));
+ bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr));
+ bpl++;
+ }
+
+ cmdiocbq->iocb.un.elsreq64.bdl.bdeSize =
+ (request_nseg + reply_nseg) * sizeof(struct ulp_bde64);
+ cmdiocbq->iocb.ulpContext = rpi;
+ cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
+ cmdiocbq->context1 = NULL;
+ cmdiocbq->context2 = NULL;
+
+ iocb_status = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, cmdiocbq,
+ rspiocbq, (phba->fc_ratov * 2)
+ + LPFC_DRVR_TIMEOUT);
+
+ /* release the new ndlp once the iocb completes */
+ lpfc_nlp_put(ndlp);
+ if (iocb_status != IOCB_TIMEDOUT) {
+ pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
+ job->request_payload.sg_cnt, DMA_TO_DEVICE);
+ pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list,
+ job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
+ }
+
+ if (iocb_status == IOCB_SUCCESS) {
+ if (rsp->ulpStatus == IOSTAT_SUCCESS) {
+ job->reply->reply_payload_rcv_len =
+ rsp->un.elsreq64.bdl.bdeSize;
+ rc = 0;
+ } else if (rsp->ulpStatus == IOSTAT_LS_RJT) {
+ struct fc_bsg_ctels_reply *els_reply;
+ /* LS_RJT data returned in word 4 */
+ uint8_t *rjt_data = (uint8_t *)&rsp->un.ulpWord[4];
+
+ els_reply = &job->reply->reply_data.ctels_reply;
+ job->reply->result = 0;
+ els_reply->status = FC_CTELS_STATUS_REJECT;
+ els_reply->rjt_data.action = rjt_data[0];
+ els_reply->rjt_data.reason_code = rjt_data[1];
+ els_reply->rjt_data.reason_explanation = rjt_data[2];
+ els_reply->rjt_data.vendor_unique = rjt_data[3];
+ } else
+ rc = -EIO;
+ } else
+ rc = -EIO;
+
+ if (iocb_status != IOCB_TIMEDOUT)
+ lpfc_els_free_iocb(phba, cmdiocbq);
+
+ lpfc_sli_release_iocbq(phba, rspiocbq);
+
+out:
+ /* make error code available to userspace */
+ job->reply->result = rc;
+ /* complete the job back to userspace */
+ job->job_done(job);
+
+ return 0;
+}
+
+struct lpfc_ct_event {
+ struct list_head node;
+ int ref;
+ wait_queue_head_t wq;
+
+ /* Event type and waiter identifiers */
+ uint32_t type_mask;
+ uint32_t req_id;
+ uint32_t reg_id;
+
+ /* next two flags are here for the auto-delete logic */
+ unsigned long wait_time_stamp;
+ int waiting;
+
+ /* seen and not seen events */
+ struct list_head events_to_get;
+ struct list_head events_to_see;
+};
+
+struct event_data {
+ struct list_head node;
+ uint32_t type;
+ uint32_t immed_dat;
+ void *data;
+ uint32_t len;
+};
+
+static struct lpfc_ct_event *
+lpfc_ct_event_new(int ev_reg_id, uint32_t ev_req_id)
+{
+ struct lpfc_ct_event *evt = kzalloc(sizeof(*evt), GFP_KERNEL);
+ if (!evt)
+ return NULL;
+
+ INIT_LIST_HEAD(&evt->events_to_get);
+ INIT_LIST_HEAD(&evt->events_to_see);
+ evt->req_id = ev_req_id;
+ evt->reg_id = ev_reg_id;
+ evt->wait_time_stamp = jiffies;
+ init_waitqueue_head(&evt->wq);
+
+ return evt;
+}
+
+static void
+lpfc_ct_event_free(struct lpfc_ct_event *evt)
+{
+ struct event_data *ed;
+
+ list_del(&evt->node);
+
+ while (!list_empty(&evt->events_to_get)) {
+ ed = list_entry(evt->events_to_get.next, typeof(*ed), node);
+ list_del(&ed->node);
+ kfree(ed->data);
+ kfree(ed);
+ }
+
+ while (!list_empty(&evt->events_to_see)) {
+ ed = list_entry(evt->events_to_see.next, typeof(*ed), node);
+ list_del(&ed->node);
+ kfree(ed->data);
+ kfree(ed);
+ }
+
+ kfree(evt);
+}
+
+static inline void
+lpfc_ct_event_ref(struct lpfc_ct_event *evt)
+{
+ evt->ref++;
+}
+
+static inline void
+lpfc_ct_event_unref(struct lpfc_ct_event *evt)
+{
+ if (--evt->ref < 0)
+ lpfc_ct_event_free(evt);
+}
+
+#define SLI_CT_ELX_LOOPBACK 0x10
+
+enum ELX_LOOPBACK_CMD {
+ ELX_LOOPBACK_XRI_SETUP,
+ ELX_LOOPBACK_DATA,
+};
+
+/**
+ * lpfc_bsg_ct_unsol_event - process an unsolicited CT command
+ * @phba: pointer to lpfc hba data structure
+ * @pring: pointer to the SLI ring on which the command was received
+ * @piocbq: pointer to the unsolicited receive iocb
+ *
+ * This function is called when an unsolicited CT command is received. It
+ * forwards the event to any processes registered to receive CT events.
+ */
+void
+lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
+ struct lpfc_iocbq *piocbq)
+{
+ uint32_t evt_req_id = 0;
+ uint32_t cmd;
+ uint32_t len;
+ struct lpfc_dmabuf *dmabuf = NULL;
+ struct lpfc_ct_event *evt;
+ struct event_data *evt_dat = NULL;
+ struct lpfc_iocbq *iocbq;
+ size_t offset = 0;
+ struct list_head head;
+ struct ulp_bde64 *bde;
+ dma_addr_t dma_addr;
+ int i;
+ struct lpfc_dmabuf *bdeBuf1 = piocbq->context2;
+ struct lpfc_dmabuf *bdeBuf2 = piocbq->context3;
+ struct lpfc_hbq_entry *hbqe;
+ struct lpfc_sli_ct_request *ct_req;
+
+ INIT_LIST_HEAD(&head);
+ list_add_tail(&head, &piocbq->list);
+
+ if (piocbq->iocb.ulpBdeCount == 0 ||
+ piocbq->iocb.un.cont64[0].tus.f.bdeSize == 0)
+ goto error_ct_unsol_exit;
+
+ if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)
+ dmabuf = bdeBuf1;
+ else {
+ dma_addr = getPaddr(piocbq->iocb.un.cont64[0].addrHigh,
+ piocbq->iocb.un.cont64[0].addrLow);
+ dmabuf = lpfc_sli_ringpostbuf_get(phba, pring, dma_addr);
+ }
+
+ ct_req = (struct lpfc_sli_ct_request *)dmabuf->virt;
+ evt_req_id = ct_req->FsType;
+ cmd = ct_req->CommandResponse.bits.CmdRsp;
+ len = ct_req->CommandResponse.bits.Size;
+ if (!(phba->sli3_options & LPFC_SLI3_HBQ_ENABLED))
+ lpfc_sli_ringpostbuf_put(phba, pring, dmabuf);
+
+ mutex_lock(&phba->ct_event_mutex);
+ list_for_each_entry(evt, &phba->ct_ev_waiters, node) {
+ if (evt->req_id != evt_req_id)
+ continue;
+
+ lpfc_ct_event_ref(evt);
+
+ evt_dat = kzalloc(sizeof(*evt_dat), GFP_KERNEL);
+ if (!evt_dat) {
+ lpfc_ct_event_unref(evt);
+ lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
+ "2614 Memory allocation failed for "
+ "CT event\n");
+ break;
+ }
+
+ mutex_unlock(&phba->ct_event_mutex);
+
+ if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
+ /* take accumulated byte count from the last iocbq */
+ iocbq = list_entry(head.prev, typeof(*iocbq), list);
+ evt_dat->len = iocbq->iocb.unsli3.rcvsli3.acc_len;
+ } else {
+ list_for_each_entry(iocbq, &head, list) {
+ for (i = 0; i < iocbq->iocb.ulpBdeCount; i++)
+ evt_dat->len +=
+ iocbq->iocb.un.cont64[i].tus.f.bdeSize;
+ }
+ }
+
+ evt_dat->data = kzalloc(evt_dat->len, GFP_KERNEL);
+ if (!evt_dat->data) {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
+ "2615 Memory allocation failed for "
+ "CT event data, size %d\n",
+ evt_dat->len);
+ kfree(evt_dat);
+ mutex_lock(&phba->ct_event_mutex);
+ lpfc_ct_event_unref(evt);
+ mutex_unlock(&phba->ct_event_mutex);
+ goto error_ct_unsol_exit;
+ }
+
+ list_for_each_entry(iocbq, &head, list) {
+ if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
+ bdeBuf1 = iocbq->context2;
+ bdeBuf2 = iocbq->context3;
+ }
+ for (i = 0; i < iocbq->iocb.ulpBdeCount; i++) {
+ int size = 0;
+ if (phba->sli3_options &
+ LPFC_SLI3_HBQ_ENABLED) {
+ if (i == 0) {
+ hbqe = (struct lpfc_hbq_entry *)
+ &iocbq->iocb.un.ulpWord[0];
+ size = hbqe->bde.tus.f.bdeSize;
+ dmabuf = bdeBuf1;
+ } else if (i == 1) {
+ hbqe = (struct lpfc_hbq_entry *)
+ &iocbq->iocb.unsli3.
+ sli3Words[4];
+ size = hbqe->bde.tus.f.bdeSize;
+ dmabuf = bdeBuf2;
+ }
+ if ((offset + size) > evt_dat->len)
+ size = evt_dat->len - offset;
+ } else {
+ size = iocbq->iocb.un.cont64[i].
+ tus.f.bdeSize;
+ bde = &iocbq->iocb.un.cont64[i];
+ dma_addr = getPaddr(bde->addrHigh,
+ bde->addrLow);
+ dmabuf = lpfc_sli_ringpostbuf_get(phba,
+ pring, dma_addr);
+ }
+ if (!dmabuf) {
+ lpfc_printf_log(phba, KERN_ERR,
+ LOG_LIBDFC, "2616 No dmabuf "
+ "found for iocbq 0x%p\n",
+ iocbq);
+ kfree(evt_dat->data);
+ kfree(evt_dat);
+ mutex_lock(&phba->ct_event_mutex);
+ lpfc_ct_event_unref(evt);
+ mutex_unlock(&phba->ct_event_mutex);
+ goto error_ct_unsol_exit;
+ }
+ memcpy((char *)(evt_dat->data) + offset,
+ dmabuf->virt, size);
+ offset += size;
+ if (evt_req_id != SLI_CT_ELX_LOOPBACK &&
+ !(phba->sli3_options &
+ LPFC_SLI3_HBQ_ENABLED)) {
+ lpfc_sli_ringpostbuf_put(phba, pring,
+ dmabuf);
+ } else {
+ switch (cmd) {
+ case ELX_LOOPBACK_XRI_SETUP:
+ if (!(phba->sli3_options &
+ LPFC_SLI3_HBQ_ENABLED))
+ lpfc_post_buffer(phba,
+ pring,
+ 1);
+ else
+ lpfc_in_buf_free(phba,
+ dmabuf);
+ break;
+ default:
+ if (!(phba->sli3_options &
+ LPFC_SLI3_HBQ_ENABLED))
+ lpfc_post_buffer(phba,
+ pring,
+ 1);
+ break;
+ }
+ }
+ }
+ }
+
+ mutex_lock(&phba->ct_event_mutex);
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ evt_dat->immed_dat = phba->ctx_idx;
+ phba->ctx_idx = (phba->ctx_idx + 1) % 64;
+ phba->ct_ctx[evt_dat->immed_dat].oxid =
+ piocbq->iocb.ulpContext;
+ phba->ct_ctx[evt_dat->immed_dat].SID =
+ piocbq->iocb.un.rcvels.remoteID;
+ } else
+ evt_dat->immed_dat = piocbq->iocb.ulpContext;
+
+ evt_dat->type = FC_REG_CT_EVENT;
+ list_add(&evt_dat->node, &evt->events_to_see);
+ wake_up_interruptible(&evt->wq);
+ lpfc_ct_event_unref(evt);
+ if (evt_req_id == SLI_CT_ELX_LOOPBACK)
+ break;
+ }
+ mutex_unlock(&phba->ct_event_mutex);
+
+error_ct_unsol_exit:
+ if (!list_empty(&head))
+ list_del(&head);
+
+ return;
+}
+
+/**
+ * lpfc_bsg_set_event - process a SET_EVENT bsg vendor command
+ * @job: SET_EVENT fc_bsg_job
+ */
+static int
+lpfc_bsg_set_event(struct fc_bsg_job *job)
+{
+ struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
+ struct lpfc_hba *phba = vport->phba;
+ struct set_ct_event *event_req;
+ struct lpfc_ct_event *evt;
+ int rc = 0;
+
+ if (job->request_len <
+ sizeof(struct fc_bsg_request) + sizeof(struct set_ct_event)) {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
+ "2612 Received SET_CT_EVENT below minimum "
+ "size\n");
+ return -EINVAL;
+ }
+
+ event_req = (struct set_ct_event *)
+ job->request->rqst_data.h_vendor.vendor_cmd;
+
+ mutex_lock(&phba->ct_event_mutex);
+ list_for_each_entry(evt, &phba->ct_ev_waiters, node) {
+ if (evt->reg_id == event_req->ev_reg_id) {
+ lpfc_ct_event_ref(evt);
+ evt->wait_time_stamp = jiffies;
+ break;
+ }
+ }
+ mutex_unlock(&phba->ct_event_mutex);
+
+ if (&evt->node == &phba->ct_ev_waiters) {
+ /* no event waiting struct yet - first call */
+ evt = lpfc_ct_event_new(event_req->ev_reg_id,
+ event_req->ev_req_id);
+ if (!evt) {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
+ "2617 Failed allocation of event "
+ "waiter\n");
+ return -ENOMEM;
+ }
+
+ mutex_lock(&phba->ct_event_mutex);
+ list_add(&evt->node, &phba->ct_ev_waiters);
+ lpfc_ct_event_ref(evt);
+ mutex_unlock(&phba->ct_event_mutex);
+ }
+
+ evt->waiting = 1;
+ if (wait_event_interruptible(evt->wq,
+ !list_empty(&evt->events_to_see))) {
+ mutex_lock(&phba->ct_event_mutex);
+ lpfc_ct_event_unref(evt); /* release ref */
+ lpfc_ct_event_unref(evt); /* delete */
+ mutex_unlock(&phba->ct_event_mutex);
+ rc = -EINTR;
+ goto set_event_out;
+ }
+
+ evt->wait_time_stamp = jiffies;
+ evt->waiting = 0;
+
+ mutex_lock(&phba->ct_event_mutex);
+ list_move(evt->events_to_see.prev, &evt->events_to_get);
+ lpfc_ct_event_unref(evt); /* release ref */
+ mutex_unlock(&phba->ct_event_mutex);
+
+set_event_out:
+ /* set_event carries no reply payload */
+ job->reply->reply_payload_rcv_len = 0;
+ /* make error code available to userspace */
+ job->reply->result = rc;
+ /* complete the job back to userspace */
+ job->job_done(job);
+
+ return 0;
+}
+
+/**
+ * lpfc_bsg_get_event - process a GET_EVENT bsg vendor command
+ * @job: GET_EVENT fc_bsg_job
+ */
+static int
+lpfc_bsg_get_event(struct fc_bsg_job *job)
+{
+ struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
+ struct lpfc_hba *phba = vport->phba;
+ struct get_ct_event *event_req;
+ struct get_ct_event_reply *event_reply;
+ struct lpfc_ct_event *evt;
+ struct event_data *evt_dat = NULL;
+ int rc = 0;
+
+ if (job->request_len <
+ sizeof(struct fc_bsg_request) + sizeof(struct get_ct_event)) {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
+ "2613 Received GET_CT_EVENT request below "
+ "minimum size\n");
+ return -EINVAL;
+ }
+
+ event_req = (struct get_ct_event *)
+ job->request->rqst_data.h_vendor.vendor_cmd;
+
+ event_reply = (struct get_ct_event_reply *)
+ job->reply->reply_data.vendor_reply.vendor_rsp;
+
+ mutex_lock(&phba->ct_event_mutex);
+ list_for_each_entry(evt, &phba->ct_ev_waiters, node) {
+ if (evt->reg_id == event_req->ev_reg_id) {
+ if (list_empty(&evt->events_to_get))
+ break;
+ lpfc_ct_event_ref(evt);
+ evt->wait_time_stamp = jiffies;
+ evt_dat = list_entry(evt->events_to_get.prev,
+ struct event_data, node);
+ list_del(&evt_dat->node);
+ break;
+ }
+ }
+ mutex_unlock(&phba->ct_event_mutex);
+
+ if (!evt_dat) {
+ job->reply->reply_payload_rcv_len = 0;
+ rc = -ENOENT;
+ goto error_get_event_exit;
+ }
+
+ if (evt_dat->len > job->reply_payload.payload_len) {
+ evt_dat->len = job->reply_payload.payload_len;
+ lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
+ "2618 Truncated event data at %d "
+ "bytes\n",
+ job->reply_payload.payload_len);
+ }
+
+ event_reply->immed_data = evt_dat->immed_dat;
+
+ if (evt_dat->len > 0)
+ job->reply->reply_payload_rcv_len =
+ sg_copy_from_buffer(job->reply_payload.sg_list,
+ job->reply_payload.sg_cnt,
+ evt_dat->data, evt_dat->len);
+ else
+ job->reply->reply_payload_rcv_len = 0;
+ rc = 0;
+
+ if (evt_dat)
+ kfree(evt_dat->data);
+ kfree(evt_dat);
+ mutex_lock(&phba->ct_event_mutex);
+ lpfc_ct_event_unref(evt);
+ mutex_unlock(&phba->ct_event_mutex);
+
+error_get_event_exit:
+ /* make error code available to userspace */
+ job->reply->result = rc;
+ /* complete the job back to userspace */
+ job->job_done(job);
+
+ return rc;
+}
+
+/**
+ * lpfc_bsg_hst_vendor - process a vendor-specific fc_bsg_job
+ * @job: fc_bsg_job to handle
+ */
+static int
+lpfc_bsg_hst_vendor(struct fc_bsg_job *job)
+{
+ int command = job->request->rqst_data.h_vendor.vendor_cmd[0];
+
+ switch (command) {
+ case LPFC_BSG_VENDOR_SET_CT_EVENT:
+ return lpfc_bsg_set_event(job);
+ break;
+
+ case LPFC_BSG_VENDOR_GET_CT_EVENT:
+ return lpfc_bsg_get_event(job);
+ break;
+
+ default:
+ return -EINVAL;
+ }
+}
+
+/**
+ * lpfc_bsg_request - handle a bsg request from the FC transport
+ * @job: fc_bsg_job to handle
+ */
+int
+lpfc_bsg_request(struct fc_bsg_job *job)
+{
+ uint32_t msgcode;
+ int rc = -EINVAL;
+
+ msgcode = job->request->msgcode;
+
+ switch (msgcode) {
+ case FC_BSG_HST_VENDOR:
+ rc = lpfc_bsg_hst_vendor(job);
+ break;
+ case FC_BSG_RPT_ELS:
+ rc = lpfc_bsg_rport_els(job);
+ break;
+ case FC_BSG_RPT_CT:
+ rc = lpfc_bsg_rport_ct(job);
+ break;
+ default:
+ break;
+ }
+
+ return rc;
+}
+
+/**
+ * lpfc_bsg_timeout - handle timeout of a bsg request from the FC transport
+ * @job: fc_bsg_job that has timed out
+ *
+ * This function just aborts the job's IOCB. The aborted IOCB will return to
+ * the waiting function which will handle passing the error back to userspace
+ */
+int
+lpfc_bsg_timeout(struct fc_bsg_job *job)
+{
+ struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
+ struct lpfc_hba *phba = vport->phba;
+ struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *)job->dd_data;
+ struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
+
+ if (cmdiocb)
+ lpfc_sli_issue_abort_iotag(phba, pring, cmdiocb);
+
+ return 0;
+}
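For context, the new lpfc_bsg_request()/lpfc_bsg_timeout() entry points are reached from
userspace through the FC transport's bsg (SG_IO v4) nodes. The sketch below shows how a
tool might issue the SET_CT_EVENT vendor command and block until a CT event arrives. It
is not part of this patch: struct set_ct_event, the LPFC_BSG_VENDOR_* values, and the
/dev/bsg node name come from lpfc_bsg.h and the FC transport rather than this diff, so
the layout and names used here are assumptions, not the driver's definitive ABI.

    /*
     * Userspace sketch only: register a waiter for unsolicited CT events
     * via an FC bsg node (e.g. /dev/bsg/fc_host0, opened by the caller).
     */
    #include <stdint.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/bsg.h>          /* struct sg_io_v4, BSG_PROTOCOL_* */
    #include <scsi/sg.h>            /* SG_IO */
    #include <scsi/scsi_bsg_fc.h>   /* struct fc_bsg_request, FC_BSG_HST_VENDOR */

    struct set_ct_event_assumed {   /* assumed mirror of lpfc's struct set_ct_event */
    	uint32_t command;       /* LPFC_BSG_VENDOR_SET_CT_EVENT (value in lpfc_bsg.h) */
    	uint32_t ev_req_id;     /* CT FsType to wait for, e.g. SLI_CT_ELX_LOOPBACK */
    	uint32_t ev_reg_id;     /* caller-chosen waiter id, reused by GET_CT_EVENT */
    };

    static int lpfc_wait_ct_event(int bsg_fd, uint32_t set_ct_event_cmd,
    			      uint32_t req_id, uint32_t reg_id)
    {
    	/* vendor payload follows the fc_bsg_request header in one buffer */
    	unsigned char rqst[sizeof(struct fc_bsg_request) +
    			   sizeof(struct set_ct_event_assumed)] = { 0 };
    	struct fc_bsg_request *bsg_req = (struct fc_bsg_request *)rqst;
    	struct set_ct_event_assumed *ev = (struct set_ct_event_assumed *)
    					  bsg_req->rqst_data.h_vendor.vendor_cmd;
    	struct fc_bsg_reply reply;
    	struct sg_io_v4 io;

    	bsg_req->msgcode = FC_BSG_HST_VENDOR;
    	ev->command   = set_ct_event_cmd;   /* value taken from lpfc_bsg.h */
    	ev->ev_req_id = req_id;
    	ev->ev_reg_id = reg_id;

    	memset(&io, 0, sizeof(io));
    	io.guard            = 'Q';
    	io.protocol         = BSG_PROTOCOL_SCSI;
    	io.subprotocol      = BSG_SUB_PROTOCOL_SCSI_TRANSPORT;
    	io.request          = (uintptr_t)rqst;
    	io.request_len      = sizeof(rqst);  /* >= fc_bsg_request + set_ct_event */
    	io.response         = (uintptr_t)&reply;
    	io.max_response_len = sizeof(reply);

    	/* lpfc_bsg_set_event() sleeps until a CT event matching req_id arrives */
    	return ioctl(bsg_fd, SG_IO, &io);
    }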
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index d2a922997c0..0830f37409a 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -21,9 +21,11 @@
typedef int (*node_filter)(struct lpfc_nodelist *, void *);
struct fc_rport;
-void lpfc_dump_mem(struct lpfc_hba *, LPFC_MBOXQ_t *, uint16_t);
+void lpfc_down_link(struct lpfc_hba *, LPFC_MBOXQ_t *);
+void lpfc_sli_read_link_ste(struct lpfc_hba *);
+void lpfc_dump_mem(struct lpfc_hba *, LPFC_MBOXQ_t *, uint16_t, uint16_t);
void lpfc_dump_wakeup_param(struct lpfc_hba *, LPFC_MBOXQ_t *);
-void lpfc_dump_static_vport(struct lpfc_hba *, LPFC_MBOXQ_t *, uint16_t);
+int lpfc_dump_static_vport(struct lpfc_hba *, LPFC_MBOXQ_t *, uint16_t);
int lpfc_dump_fcoe_param(struct lpfc_hba *, struct lpfcMboxq *);
void lpfc_read_nv(struct lpfc_hba *, LPFC_MBOXQ_t *);
void lpfc_config_async(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t);
@@ -135,6 +137,9 @@ int lpfc_els_disc_adisc(struct lpfc_vport *);
int lpfc_els_disc_plogi(struct lpfc_vport *);
void lpfc_els_timeout(unsigned long);
void lpfc_els_timeout_handler(struct lpfc_vport *);
+struct lpfc_iocbq *lpfc_prep_els_iocb(struct lpfc_vport *, uint8_t, uint16_t,
+ uint8_t, struct lpfc_nodelist *,
+ uint32_t, uint32_t);
void lpfc_hb_timeout_handler(struct lpfc_hba *);
void lpfc_ct_unsol_event(struct lpfc_hba *, struct lpfc_sli_ring *,
@@ -182,11 +187,12 @@ int lpfc_mbox_dev_check(struct lpfc_hba *);
int lpfc_mbox_tmo_val(struct lpfc_hba *, int);
void lpfc_init_vfi(struct lpfcMboxq *, struct lpfc_vport *);
void lpfc_reg_vfi(struct lpfcMboxq *, struct lpfc_vport *, dma_addr_t);
-void lpfc_init_vpi(struct lpfcMboxq *, uint16_t);
+void lpfc_init_vpi(struct lpfc_hba *, struct lpfcMboxq *, uint16_t);
void lpfc_unreg_vfi(struct lpfcMboxq *, uint16_t);
void lpfc_reg_fcfi(struct lpfc_hba *, struct lpfcMboxq *);
void lpfc_unreg_fcfi(struct lpfcMboxq *, uint16_t);
void lpfc_resume_rpi(struct lpfcMboxq *, struct lpfc_nodelist *);
+int lpfc_check_pending_fcoe_event(struct lpfc_hba *, uint8_t);
void lpfc_config_hbq(struct lpfc_hba *, uint32_t, struct lpfc_hbq_init *,
uint32_t , LPFC_MBOXQ_t *);
@@ -234,6 +240,7 @@ void lpfc_sli_def_mbox_cmpl(struct lpfc_hba *, LPFC_MBOXQ_t *);
int lpfc_sli_issue_iocb(struct lpfc_hba *, uint32_t,
struct lpfc_iocbq *, uint32_t);
void lpfc_sli_pcimem_bcopy(void *, void *, uint32_t);
+void lpfc_sli_bemem_bcopy(void *, void *, uint32_t);
void lpfc_sli_abort_iocb_ring(struct lpfc_hba *, struct lpfc_sli_ring *);
void lpfc_sli_flush_fcp_rings(struct lpfc_hba *);
int lpfc_sli_ringpostbuf_put(struct lpfc_hba *, struct lpfc_sli_ring *,
@@ -360,3 +367,8 @@ void lpfc_start_fdiscs(struct lpfc_hba *phba);
#define HBA_EVENT_LINK_UP 2
#define HBA_EVENT_LINK_DOWN 3
+/* functions to support SGIOv4/bsg interface */
+int lpfc_bsg_request(struct fc_bsg_job *);
+int lpfc_bsg_timeout(struct fc_bsg_job *);
+void lpfc_bsg_ct_unsol_event(struct lpfc_hba *, struct lpfc_sli_ring *,
+ struct lpfc_iocbq *);
diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c
index 0e532f072eb..9df7ed38e1b 100644
--- a/drivers/scsi/lpfc/lpfc_ct.c
+++ b/drivers/scsi/lpfc/lpfc_ct.c
@@ -97,6 +97,8 @@ lpfc_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
struct list_head head;
struct lpfc_dmabuf *bdeBuf;
+ lpfc_bsg_ct_unsol_event(phba, pring, piocbq);
+
if (unlikely(icmd->ulpStatus == IOSTAT_NEED_BUFFER)) {
lpfc_sli_hbqbuf_add_hbqs(phba, LPFC_ELS_HBQ);
} else if ((icmd->ulpStatus == IOSTAT_LOCAL_REJECT) &&
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index f72fdf23bf1..45337cd23fe 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -146,7 +146,7 @@ lpfc_els_chk_latt(struct lpfc_vport *vport)
* Pointer to the newly allocated/prepared els iocb data structure
* NULL - when els iocb data structure allocation/preparation failed
**/
-static struct lpfc_iocbq *
+struct lpfc_iocbq *
lpfc_prep_els_iocb(struct lpfc_vport *vport, uint8_t expectRsp,
uint16_t cmdSize, uint8_t retry,
struct lpfc_nodelist *ndlp, uint32_t did,
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index ed46b24a338..e6a47e25b21 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -61,6 +61,7 @@ static uint8_t lpfcAlpaArray[] = {
static void lpfc_disc_timeout_handler(struct lpfc_vport *);
static void lpfc_disc_flush_list(struct lpfc_vport *vport);
+static void lpfc_unregister_fcfi_cmpl(struct lpfc_hba *, LPFC_MBOXQ_t *);
void
lpfc_terminate_rport_io(struct fc_rport *rport)
@@ -1009,9 +1010,15 @@ lpfc_mbx_cmpl_reg_fcfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
spin_lock_irqsave(&phba->hbalock, flags);
phba->fcf.fcf_flag |= FCF_REGISTERED;
spin_unlock_irqrestore(&phba->hbalock, flags);
+ /* If there is a pending FCoE event, restart FCF table scan. */
+ if (lpfc_check_pending_fcoe_event(phba, 1)) {
+ mempool_free(mboxq, phba->mbox_mem_pool);
+ return;
+ }
if (vport->port_state != LPFC_FLOGI) {
spin_lock_irqsave(&phba->hbalock, flags);
phba->fcf.fcf_flag |= (FCF_DISCOVERED | FCF_IN_USE);
+ phba->hba_flag &= ~FCF_DISC_INPROGRESS;
spin_unlock_irqrestore(&phba->hbalock, flags);
lpfc_initial_flogi(vport);
}
@@ -1054,6 +1061,39 @@ lpfc_fab_name_match(uint8_t *fab_name, struct fcf_record *new_fcf_record)
}
/**
+ * lpfc_sw_name_match - Check if the fcf switch name matches.
+ * @sw_name: pointer to switch name.
+ * @new_fcf_record: pointer to fcf record.
+ *
+ * This routine compares the fcf record's switch name with the provided
+ * switch name. If the switch names are identical this function
+ * returns 1, else it returns 0.
+ **/
+static uint32_t
+lpfc_sw_name_match(uint8_t *sw_name, struct fcf_record *new_fcf_record)
+{
+ if ((sw_name[0] ==
+ bf_get(lpfc_fcf_record_switch_name_0, new_fcf_record)) &&
+ (sw_name[1] ==
+ bf_get(lpfc_fcf_record_switch_name_1, new_fcf_record)) &&
+ (sw_name[2] ==
+ bf_get(lpfc_fcf_record_switch_name_2, new_fcf_record)) &&
+ (sw_name[3] ==
+ bf_get(lpfc_fcf_record_switch_name_3, new_fcf_record)) &&
+ (sw_name[4] ==
+ bf_get(lpfc_fcf_record_switch_name_4, new_fcf_record)) &&
+ (sw_name[5] ==
+ bf_get(lpfc_fcf_record_switch_name_5, new_fcf_record)) &&
+ (sw_name[6] ==
+ bf_get(lpfc_fcf_record_switch_name_6, new_fcf_record)) &&
+ (sw_name[7] ==
+ bf_get(lpfc_fcf_record_switch_name_7, new_fcf_record)))
+ return 1;
+ else
+ return 0;
+}
+
+/**
* lpfc_mac_addr_match - Check if the fcf mac address match.
* @phba: pointer to lpfc hba data structure.
* @new_fcf_record: pointer to fcf record.
@@ -1123,6 +1163,22 @@ lpfc_copy_fcf_record(struct lpfc_hba *phba, struct fcf_record *new_fcf_record)
bf_get(lpfc_fcf_record_mac_5, new_fcf_record);
phba->fcf.fcf_indx = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record);
phba->fcf.priority = new_fcf_record->fip_priority;
+ phba->fcf.switch_name[0] =
+ bf_get(lpfc_fcf_record_switch_name_0, new_fcf_record);
+ phba->fcf.switch_name[1] =
+ bf_get(lpfc_fcf_record_switch_name_1, new_fcf_record);
+ phba->fcf.switch_name[2] =
+ bf_get(lpfc_fcf_record_switch_name_2, new_fcf_record);
+ phba->fcf.switch_name[3] =
+ bf_get(lpfc_fcf_record_switch_name_3, new_fcf_record);
+ phba->fcf.switch_name[4] =
+ bf_get(lpfc_fcf_record_switch_name_4, new_fcf_record);
+ phba->fcf.switch_name[5] =
+ bf_get(lpfc_fcf_record_switch_name_5, new_fcf_record);
+ phba->fcf.switch_name[6] =
+ bf_get(lpfc_fcf_record_switch_name_6, new_fcf_record);
+ phba->fcf.switch_name[7] =
+ bf_get(lpfc_fcf_record_switch_name_7, new_fcf_record);
}
/**
@@ -1150,6 +1206,7 @@ lpfc_register_fcf(struct lpfc_hba *phba)
/* The FCF is already registered, start discovery */
if (phba->fcf.fcf_flag & FCF_REGISTERED) {
phba->fcf.fcf_flag |= (FCF_DISCOVERED | FCF_IN_USE);
+ phba->hba_flag &= ~FCF_DISC_INPROGRESS;
spin_unlock_irqrestore(&phba->hbalock, flags);
if (phba->pport->port_state != LPFC_FLOGI)
lpfc_initial_flogi(phba->pport);
@@ -1239,9 +1296,12 @@ lpfc_match_fcf_conn_list(struct lpfc_hba *phba,
if ((conn_entry->conn_rec.flags & FCFCNCT_FBNM_VALID) &&
!lpfc_fab_name_match(conn_entry->conn_rec.fabric_name,
- new_fcf_record))
+ new_fcf_record))
+ continue;
+ if ((conn_entry->conn_rec.flags & FCFCNCT_SWNM_VALID) &&
+ !lpfc_sw_name_match(conn_entry->conn_rec.switch_name,
+ new_fcf_record))
continue;
-
if (conn_entry->conn_rec.flags & FCFCNCT_VLAN_VALID) {
/*
* If the vlan bit map does not have the bit set for the
@@ -1336,6 +1396,60 @@ lpfc_match_fcf_conn_list(struct lpfc_hba *phba,
}
/**
+ * lpfc_check_pending_fcoe_event - Check if there is pending fcoe event.
+ * @phba: pointer to lpfc hba data structure.
+ * @unreg_fcf: Unregister FCF if the FCF table needs to be re-scanned.
+ *
+ * This function checks if there is any fcoe event pending while the driver
+ * scans FCF entries. If there is any pending event, it will restart the
+ * FCF scanning and return 1, else return 0.
+ */
+int
+lpfc_check_pending_fcoe_event(struct lpfc_hba *phba, uint8_t unreg_fcf)
+{
+ LPFC_MBOXQ_t *mbox;
+ int rc;
+ /*
+ * If the link is up and no FCoE events occurred while in
+ * FCF discovery, there is no need to restart FCF discovery.
+ */
+ if ((phba->link_state >= LPFC_LINK_UP) &&
+ (phba->fcoe_eventtag == phba->fcoe_eventtag_at_fcf_scan))
+ return 0;
+
+ spin_lock_irq(&phba->hbalock);
+ phba->fcf.fcf_flag &= ~FCF_AVAILABLE;
+ spin_unlock_irq(&phba->hbalock);
+
+ if (phba->link_state >= LPFC_LINK_UP)
+ lpfc_sli4_read_fcf_record(phba, LPFC_FCOE_FCF_GET_FIRST);
+
+ if (unreg_fcf) {
+ spin_lock_irq(&phba->hbalock);
+ phba->fcf.fcf_flag &= ~FCF_REGISTERED;
+ spin_unlock_irq(&phba->hbalock);
+ mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!mbox) {
+ lpfc_printf_log(phba, KERN_ERR,
+ LOG_DISCOVERY|LOG_MBOX,
+ "2610 UNREG_FCFI mbox allocation failed\n");
+ return 1;
+ }
+ lpfc_unreg_fcfi(mbox, phba->fcf.fcfi);
+ mbox->vport = phba->pport;
+ mbox->mbox_cmpl = lpfc_unregister_fcfi_cmpl;
+ rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
+ if (rc == MBX_NOT_FINISHED) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
+ "2611 UNREG_FCFI issue mbox failed\n");
+ mempool_free(mbox, phba->mbox_mem_pool);
+ }
+ }
+
+ return 1;
+}
+
+/**
* lpfc_mbx_cmpl_read_fcf_record - Completion handler for read_fcf mbox.
* @phba: pointer to lpfc hba data structure.
* @mboxq: pointer to mailbox object.
@@ -1367,6 +1481,12 @@ lpfc_mbx_cmpl_read_fcf_record(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
unsigned long flags;
uint16_t vlan_id;
+ /* If there is a pending FCoE event, restart the FCF table scan */
+ if (lpfc_check_pending_fcoe_event(phba, 0)) {
+ lpfc_sli4_mbox_cmd_free(phba, mboxq);
+ return;
+ }
+
/* Get the first SGE entry from the non-embedded DMA memory. This
* routine only uses a single SGE.
*/
@@ -1424,7 +1544,9 @@ lpfc_mbx_cmpl_read_fcf_record(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
spin_lock_irqsave(&phba->hbalock, flags);
if (phba->fcf.fcf_flag & FCF_IN_USE) {
if (lpfc_fab_name_match(phba->fcf.fabric_name,
- new_fcf_record) &&
+ new_fcf_record) &&
+ lpfc_sw_name_match(phba->fcf.switch_name,
+ new_fcf_record) &&
lpfc_mac_addr_match(phba, new_fcf_record)) {
phba->fcf.fcf_flag |= FCF_AVAILABLE;
spin_unlock_irqrestore(&phba->hbalock, flags);
@@ -1464,9 +1586,9 @@ lpfc_mbx_cmpl_read_fcf_record(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
* If there is a record with lower priority value for
* the current FCF, use that record.
*/
- if (lpfc_fab_name_match(phba->fcf.fabric_name, new_fcf_record)
- && (new_fcf_record->fip_priority <
- phba->fcf.priority)) {
+ if (lpfc_fab_name_match(phba->fcf.fabric_name,
+ new_fcf_record) &&
+ (new_fcf_record->fip_priority < phba->fcf.priority)) {
/* Use this FCF record */
lpfc_copy_fcf_record(phba, new_fcf_record);
phba->fcf.addr_mode = addr_mode;
@@ -1512,6 +1634,39 @@ out:
}
/**
+ * lpfc_init_vpi_cmpl - Completion handler for init_vpi mbox command.
+ * @phba: pointer to lpfc hba data structure.
+ * @mboxq: pointer to mailbox data structure.
+ *
+ * This function handles completion of the init_vpi mailbox command.
+ */
+static void
+lpfc_init_vpi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
+{
+ struct lpfc_vport *vport = mboxq->vport;
+ if (mboxq->u.mb.mbxStatus) {
+ lpfc_printf_vlog(vport, KERN_ERR,
+ LOG_MBOX,
+ "2609 Init VPI mailbox failed 0x%x\n",
+ mboxq->u.mb.mbxStatus);
+ mempool_free(mboxq, phba->mbox_mem_pool);
+ lpfc_vport_set_state(vport, FC_VPORT_FAILED);
+ return;
+ }
+ vport->fc_flag &= ~FC_VPORT_NEEDS_INIT_VPI;
+
+ if (phba->link_flag & LS_NPIV_FAB_SUPPORTED)
+ lpfc_initial_fdisc(vport);
+ else {
+ lpfc_vport_set_state(vport, FC_VPORT_NO_FABRIC_SUPP);
+ lpfc_printf_vlog(vport, KERN_ERR,
+ LOG_ELS,
+ "2606 No NPIV Fabric support\n");
+ }
+ return;
+}
+
+/**
* lpfc_start_fdiscs - send fdiscs for each vports on this port.
* @phba: pointer to lpfc hba data structure.
*
@@ -1523,6 +1678,8 @@ lpfc_start_fdiscs(struct lpfc_hba *phba)
{
struct lpfc_vport **vports;
int i;
+ LPFC_MBOXQ_t *mboxq;
+ int rc;
vports = lpfc_create_vport_work_array(phba);
if (vports != NULL) {
@@ -1540,6 +1697,29 @@ lpfc_start_fdiscs(struct lpfc_hba *phba)
FC_VPORT_LINKDOWN);
continue;
}
+ if (vports[i]->fc_flag & FC_VPORT_NEEDS_INIT_VPI) {
+ mboxq = mempool_alloc(phba->mbox_mem_pool,
+ GFP_KERNEL);
+ if (!mboxq) {
+ lpfc_printf_vlog(vports[i], KERN_ERR,
+ LOG_MBOX, "2607 Failed to allocate "
+ "init_vpi mailbox\n");
+ continue;
+ }
+ lpfc_init_vpi(phba, mboxq, vports[i]->vpi);
+ mboxq->vport = vports[i];
+ mboxq->mbox_cmpl = lpfc_init_vpi_cmpl;
+ rc = lpfc_sli_issue_mbox(phba, mboxq,
+ MBX_NOWAIT);
+ if (rc == MBX_NOT_FINISHED) {
+ lpfc_printf_vlog(vports[i], KERN_ERR,
+ LOG_MBOX, "2608 Failed to issue "
+ "init_vpi mailbox\n");
+ mempool_free(mboxq,
+ phba->mbox_mem_pool);
+ }
+ continue;
+ }
if (phba->link_flag & LS_NPIV_FAB_SUPPORTED)
lpfc_initial_fdisc(vports[i]);
else {
@@ -1769,6 +1949,7 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, READ_LA_VAR *la)
goto out;
}
} else {
+ vport->port_state = LPFC_VPORT_UNKNOWN;
/*
* Add the driver's default FCF record at FCF index 0 now. This
* is phase 1 implementation that support FCF index 0 and driver
@@ -1804,6 +1985,12 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, READ_LA_VAR *la)
* The driver is expected to do FIP/FCF. Call the port
* and get the FCF Table.
*/
+ spin_lock_irq(&phba->hbalock);
+ if (phba->hba_flag & FCF_DISC_INPROGRESS) {
+ spin_unlock_irq(&phba->hbalock);
+ return;
+ }
+ spin_unlock_irq(&phba->hbalock);
rc = lpfc_sli4_read_fcf_record(phba,
LPFC_FCOE_FCF_GET_FIRST);
if (rc)
@@ -2113,13 +2300,15 @@ lpfc_create_static_vport(struct lpfc_hba *phba)
LPFC_MBOXQ_t *pmb = NULL;
MAILBOX_t *mb;
struct static_vport_info *vport_info;
- int rc, i;
+ int rc = 0, i;
struct fc_vport_identifiers vport_id;
struct fc_vport *new_fc_vport;
struct Scsi_Host *shost;
struct lpfc_vport *vport;
uint16_t offset = 0;
uint8_t *vport_buff;
+ struct lpfc_dmabuf *mp;
+ uint32_t byte_count = 0;
pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!pmb) {
@@ -2142,7 +2331,9 @@ lpfc_create_static_vport(struct lpfc_hba *phba)
vport_buff = (uint8_t *) vport_info;
do {
- lpfc_dump_static_vport(phba, pmb, offset);
+ if (lpfc_dump_static_vport(phba, pmb, offset))
+ goto out;
+
pmb->vport = phba->pport;
rc = lpfc_sli_issue_mbox_wait(phba, pmb, LPFC_MBOX_TMO);
@@ -2155,17 +2346,30 @@ lpfc_create_static_vport(struct lpfc_hba *phba)
goto out;
}
- if (mb->un.varDmp.word_cnt >
- sizeof(struct static_vport_info) - offset)
- mb->un.varDmp.word_cnt =
- sizeof(struct static_vport_info) - offset;
-
- lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
- vport_buff + offset,
- mb->un.varDmp.word_cnt);
- offset += mb->un.varDmp.word_cnt;
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ byte_count = pmb->u.mqe.un.mb_words[5];
+ mp = (struct lpfc_dmabuf *) pmb->context2;
+ if (byte_count > sizeof(struct static_vport_info) -
+ offset)
+ byte_count = sizeof(struct static_vport_info)
+ - offset;
+ memcpy(vport_buff + offset, mp->virt, byte_count);
+ offset += byte_count;
+ } else {
+ if (mb->un.varDmp.word_cnt >
+ sizeof(struct static_vport_info) - offset)
+ mb->un.varDmp.word_cnt =
+ sizeof(struct static_vport_info)
+ - offset;
+ byte_count = mb->un.varDmp.word_cnt;
+ lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
+ vport_buff + offset,
+ byte_count);
+
+ offset += byte_count;
+ }
- } while (mb->un.varDmp.word_cnt &&
+ } while (byte_count &&
offset < sizeof(struct static_vport_info));
@@ -2198,7 +2402,7 @@ lpfc_create_static_vport(struct lpfc_hba *phba)
if (!new_fc_vport) {
lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
"0546 lpfc_create_static_vport failed to"
- " create vport \n");
+ " create vport\n");
continue;
}
@@ -2207,16 +2411,15 @@ lpfc_create_static_vport(struct lpfc_hba *phba)
}
out:
- /*
- * If this is timed out command, setting NULL to context2 tell SLI
- * layer not to use this buffer.
- */
- spin_lock_irq(&phba->hbalock);
- pmb->context2 = NULL;
- spin_unlock_irq(&phba->hbalock);
kfree(vport_info);
- if (rc != MBX_TIMEOUT)
+ if (rc != MBX_TIMEOUT) {
+ if (pmb->context2) {
+ mp = (struct lpfc_dmabuf *) pmb->context2;
+ lpfc_mbuf_free(phba, mp->virt, mp->phys);
+ kfree(mp);
+ }
mempool_free(pmb, phba->mbox_mem_pool);
+ }
return;
}
@@ -4360,7 +4563,7 @@ lpfc_read_fcoe_param(struct lpfc_hba *phba,
fcoe_param_hdr = (struct lpfc_fip_param_hdr *)
buff;
fcoe_param = (struct lpfc_fcoe_params *)
- buff + sizeof(struct lpfc_fip_param_hdr);
+ (buff + sizeof(struct lpfc_fip_param_hdr));
if ((fcoe_param_hdr->parm_version != FIPP_VERSION) ||
(fcoe_param_hdr->length != FCOE_PARAM_LENGTH))
diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h
index 8a3a026667e..ccb26724dc5 100644
--- a/drivers/scsi/lpfc/lpfc_hw.h
+++ b/drivers/scsi/lpfc/lpfc_hw.h
@@ -2496,8 +2496,8 @@ typedef struct {
#define DMP_VPORT_REGION_SIZE 0x200
#define DMP_MBOX_OFFSET_WORD 0x5
-#define DMP_REGION_FCOEPARAM 0x17 /* fcoe param region */
-#define DMP_FCOEPARAM_RGN_SIZE 0x400
+#define DMP_REGION_23 0x17 /* fcoe param and port state region */
+#define DMP_RGN23_SIZE 0x400
#define WAKE_UP_PARMS_REGION_ID 4
#define WAKE_UP_PARMS_WORD_SIZE 15
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
index 2995d128f07..3689eee0453 100644
--- a/drivers/scsi/lpfc/lpfc_hw4.h
+++ b/drivers/scsi/lpfc/lpfc_hw4.h
@@ -52,6 +52,31 @@ struct dma_address {
uint32_t addr_hi;
};
+#define LPFC_SLIREV_CONF_WORD 0x58
+struct lpfc_sli_intf {
+ uint32_t word0;
+#define lpfc_sli_intf_iftype_MASK 0x00000007
+#define lpfc_sli_intf_iftype_SHIFT 0
+#define lpfc_sli_intf_iftype_WORD word0
+#define lpfc_sli_intf_rev_MASK 0x0000000f
+#define lpfc_sli_intf_rev_SHIFT 4
+#define lpfc_sli_intf_rev_WORD word0
+#define LPFC_SLIREV_CONF_SLI4 4
+#define lpfc_sli_intf_family_MASK 0x000000ff
+#define lpfc_sli_intf_family_SHIFT 8
+#define lpfc_sli_intf_family_WORD word0
+#define lpfc_sli_intf_feat1_MASK 0x000000ff
+#define lpfc_sli_intf_feat1_SHIFT 16
+#define lpfc_sli_intf_feat1_WORD word0
+#define lpfc_sli_intf_feat2_MASK 0x0000001f
+#define lpfc_sli_intf_feat2_SHIFT 24
+#define lpfc_sli_intf_feat2_WORD word0
+#define lpfc_sli_intf_valid_MASK 0x00000007
+#define lpfc_sli_intf_valid_SHIFT 29
+#define lpfc_sli_intf_valid_WORD word0
+#define LPFC_SLI_INTF_VALID 6
+};
+
#define LPFC_SLI4_BAR0 1
#define LPFC_SLI4_BAR1 2
#define LPFC_SLI4_BAR2 4
@@ -1181,6 +1206,32 @@ struct fcf_record {
#define lpfc_fcf_record_fcf_state_MASK 0x0000FFFF
#define lpfc_fcf_record_fcf_state_WORD word8
uint8_t vlan_bitmap[512];
+ uint32_t word137;
+#define lpfc_fcf_record_switch_name_0_SHIFT 0
+#define lpfc_fcf_record_switch_name_0_MASK 0x000000FF
+#define lpfc_fcf_record_switch_name_0_WORD word137
+#define lpfc_fcf_record_switch_name_1_SHIFT 8
+#define lpfc_fcf_record_switch_name_1_MASK 0x000000FF
+#define lpfc_fcf_record_switch_name_1_WORD word137
+#define lpfc_fcf_record_switch_name_2_SHIFT 16
+#define lpfc_fcf_record_switch_name_2_MASK 0x000000FF
+#define lpfc_fcf_record_switch_name_2_WORD word137
+#define lpfc_fcf_record_switch_name_3_SHIFT 24
+#define lpfc_fcf_record_switch_name_3_MASK 0x000000FF
+#define lpfc_fcf_record_switch_name_3_WORD word137
+ uint32_t word138;
+#define lpfc_fcf_record_switch_name_4_SHIFT 0
+#define lpfc_fcf_record_switch_name_4_MASK 0x000000FF
+#define lpfc_fcf_record_switch_name_4_WORD word138
+#define lpfc_fcf_record_switch_name_5_SHIFT 8
+#define lpfc_fcf_record_switch_name_5_MASK 0x000000FF
+#define lpfc_fcf_record_switch_name_5_WORD word138
+#define lpfc_fcf_record_switch_name_6_SHIFT 16
+#define lpfc_fcf_record_switch_name_6_MASK 0x000000FF
+#define lpfc_fcf_record_switch_name_6_WORD word138
+#define lpfc_fcf_record_switch_name_7_SHIFT 24
+#define lpfc_fcf_record_switch_name_7_MASK 0x000000FF
+#define lpfc_fcf_record_switch_name_7_WORD word138
};
struct lpfc_mbx_read_fcf_tbl {
@@ -1385,20 +1436,17 @@ struct lpfc_mbx_unreg_vfi {
struct lpfc_mbx_resume_rpi {
uint32_t word1;
-#define lpfc_resume_rpi_rpi_SHIFT 0
-#define lpfc_resume_rpi_rpi_MASK 0x0000FFFF
-#define lpfc_resume_rpi_rpi_WORD word1
+#define lpfc_resume_rpi_index_SHIFT 0
+#define lpfc_resume_rpi_index_MASK 0x0000FFFF
+#define lpfc_resume_rpi_index_WORD word1
+#define lpfc_resume_rpi_ii_SHIFT 30
+#define lpfc_resume_rpi_ii_MASK 0x00000003
+#define lpfc_resume_rpi_ii_WORD word1
+#define RESUME_INDEX_RPI 0
+#define RESUME_INDEX_VPI 1
+#define RESUME_INDEX_VFI 2
+#define RESUME_INDEX_FCFI 3
uint32_t event_tag;
- uint32_t word3_rsvd;
- uint32_t word4_rsvd;
- uint32_t word5_rsvd;
- uint32_t word6;
-#define lpfc_resume_rpi_vpi_SHIFT 0
-#define lpfc_resume_rpi_vpi_MASK 0x0000FFFF
-#define lpfc_resume_rpi_vpi_WORD word6
-#define lpfc_resume_rpi_vfi_SHIFT 16
-#define lpfc_resume_rpi_vfi_MASK 0x0000FFFF
-#define lpfc_resume_rpi_vfi_WORD word6
};
#define REG_FCF_INVALID_QID 0xFFFF
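The _SHIFT/_MASK/_WORD triplets added above (lpfc_sli_intf, the fcf_record switch_name
bytes, the reworked resume_rpi fields) follow lpfc's generic bit-field convention and are
consumed through the bf_get()/bf_set() helpers defined earlier in lpfc_hw4.h, which are
not shown in this hunk. A minimal sketch, assuming the usual shift-and-mask expansion of
bf_get():

    /* Sketch only: validate the SLI interface register contents using the
     * fields defined above.  bf_get(name, ptr) is assumed to expand to
     * (((ptr)->name##_WORD >> name##_SHIFT) & name##_MASK).
     */
    static inline int lpfc_sli_intf_reg_valid(struct lpfc_sli_intf *intf)
    {
    	/* equivalent to ((intf->word0 >> 29) & 0x7) == 6 */
    	return bf_get(lpfc_sli_intf_valid, intf) == LPFC_SLI_INTF_VALID;
    }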
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index fc67cc65c63..562d8cee874 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -211,7 +211,7 @@ lpfc_config_port_prep(struct lpfc_hba *phba)
goto out_free_mbox;
do {
- lpfc_dump_mem(phba, pmb, offset);
+ lpfc_dump_mem(phba, pmb, offset, DMP_REGION_VPD);
rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
if (rc != MBX_SUCCESS) {
@@ -425,6 +425,9 @@ lpfc_config_port_post(struct lpfc_hba *phba)
return -EIO;
}
+ /* Check if the port is disabled */
+ lpfc_sli_read_link_ste(phba);
+
/* Reset the DFT_HBA_Q_DEPTH to the max xri */
if (phba->cfg_hba_queue_depth > (mb->un.varRdConfig.max_xri+1))
phba->cfg_hba_queue_depth =
@@ -524,27 +527,46 @@ lpfc_config_port_post(struct lpfc_hba *phba)
/* Set up error attention (ERATT) polling timer */
mod_timer(&phba->eratt_poll, jiffies + HZ * LPFC_ERATT_POLL_INTERVAL);
- lpfc_init_link(phba, pmb, phba->cfg_topology, phba->cfg_link_speed);
- pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
- lpfc_set_loopback_flag(phba);
- rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
- if (rc != MBX_SUCCESS) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ if (phba->hba_flag & LINK_DISABLED) {
+ lpfc_printf_log(phba,
+ KERN_ERR, LOG_INIT,
+ "2598 Adapter Link is disabled.\n");
+ lpfc_down_link(phba, pmb);
+ pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+ rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
+ if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) {
+ lpfc_printf_log(phba,
+ KERN_ERR, LOG_INIT,
+ "2599 Adapter failed to issue DOWN_LINK"
+ " mbox command rc 0x%x\n", rc);
+
+ mempool_free(pmb, phba->mbox_mem_pool);
+ return -EIO;
+ }
+ } else {
+ lpfc_init_link(phba, pmb, phba->cfg_topology,
+ phba->cfg_link_speed);
+ pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+ lpfc_set_loopback_flag(phba);
+ rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
+ if (rc != MBX_SUCCESS) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"0454 Adapter failed to init, mbxCmd x%x "
"INIT_LINK, mbxStatus x%x\n",
mb->mbxCommand, mb->mbxStatus);
- /* Clear all interrupt enable conditions */
- writel(0, phba->HCregaddr);
- readl(phba->HCregaddr); /* flush */
- /* Clear all pending interrupts */
- writel(0xffffffff, phba->HAregaddr);
- readl(phba->HAregaddr); /* flush */
+ /* Clear all interrupt enable conditions */
+ writel(0, phba->HCregaddr);
+ readl(phba->HCregaddr); /* flush */
+ /* Clear all pending interrupts */
+ writel(0xffffffff, phba->HAregaddr);
+ readl(phba->HAregaddr); /* flush */
- phba->link_state = LPFC_HBA_ERROR;
- if (rc != MBX_BUSY)
- mempool_free(pmb, phba->mbox_mem_pool);
- return -EIO;
+ phba->link_state = LPFC_HBA_ERROR;
+ if (rc != MBX_BUSY)
+ mempool_free(pmb, phba->mbox_mem_pool);
+ return -EIO;
+ }
}
/* MBOX buffer will be freed in mbox compl */
pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
@@ -558,7 +580,7 @@ lpfc_config_port_post(struct lpfc_hba *phba)
KERN_ERR,
LOG_INIT,
"0456 Adapter failed to issue "
- "ASYNCEVT_ENABLE mbox status x%x \n.",
+ "ASYNCEVT_ENABLE mbox status x%x\n",
rc);
mempool_free(pmb, phba->mbox_mem_pool);
}
@@ -572,7 +594,7 @@ lpfc_config_port_post(struct lpfc_hba *phba)
if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0435 Adapter failed "
- "to get Option ROM version status x%x\n.", rc);
+ "to get Option ROM version status x%x\n", rc);
mempool_free(pmb, phba->mbox_mem_pool);
}
@@ -2133,6 +2155,8 @@ lpfc_online(struct lpfc_hba *phba)
vports[i]->fc_flag &= ~FC_OFFLINE_MODE;
if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ vports[i]->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
spin_unlock_irq(shost->host_lock);
}
lpfc_destroy_vport_work_array(phba, vports);
@@ -2807,6 +2831,7 @@ lpfc_sli4_async_link_evt(struct lpfc_hba *phba,
att_type = lpfc_sli4_parse_latt_type(phba, acqe_link);
if (att_type != AT_LINK_DOWN && att_type != AT_LINK_UP)
return;
+ phba->fcoe_eventtag = acqe_link->event_tag;
pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!pmb) {
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
@@ -2894,18 +2919,20 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba,
uint8_t event_type = bf_get(lpfc_acqe_fcoe_event_type, acqe_fcoe);
int rc;
+ phba->fcoe_eventtag = acqe_fcoe->event_tag;
switch (event_type) {
case LPFC_FCOE_EVENT_TYPE_NEW_FCF:
lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
- "2546 New FCF found index 0x%x tag 0x%x \n",
+ "2546 New FCF found index 0x%x tag 0x%x\n",
acqe_fcoe->fcf_index,
acqe_fcoe->event_tag);
/*
- * If the current FCF is in discovered state,
- * do nothing.
+ * If the current FCF is in discovered state, or
+ * FCF discovery is in progress, do nothing.
*/
spin_lock_irq(&phba->hbalock);
- if (phba->fcf.fcf_flag & FCF_DISCOVERED) {
+ if ((phba->fcf.fcf_flag & FCF_DISCOVERED) ||
+ (phba->hba_flag & FCF_DISC_INPROGRESS)) {
spin_unlock_irq(&phba->hbalock);
break;
}
@@ -2922,7 +2949,7 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba,
case LPFC_FCOE_EVENT_TYPE_FCF_TABLE_FULL:
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
- "2548 FCF Table full count 0x%x tag 0x%x \n",
+ "2548 FCF Table full count 0x%x tag 0x%x\n",
bf_get(lpfc_acqe_fcoe_fcf_count, acqe_fcoe),
acqe_fcoe->event_tag);
break;
@@ -2930,7 +2957,7 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba,
case LPFC_FCOE_EVENT_TYPE_FCF_DEAD:
lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
"2549 FCF disconnected fron network index 0x%x"
- " tag 0x%x \n", acqe_fcoe->fcf_index,
+ " tag 0x%x\n", acqe_fcoe->fcf_index,
acqe_fcoe->event_tag);
/* If the event is not for currently used fcf do nothing */
if (phba->fcf.fcf_indx != acqe_fcoe->fcf_index)
@@ -4130,8 +4157,7 @@ lpfc_hba_alloc(struct pci_dev *pdev)
/* Allocate memory for HBA structure */
phba = kzalloc(sizeof(struct lpfc_hba), GFP_KERNEL);
if (!phba) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "1417 Failed to allocate hba struct.\n");
+ dev_err(&pdev->dev, "failed to allocate hba struct\n");
return NULL;
}
@@ -4145,6 +4171,9 @@ lpfc_hba_alloc(struct pci_dev *pdev)
return NULL;
}
+ mutex_init(&phba->ct_event_mutex);
+ INIT_LIST_HEAD(&phba->ct_ev_waiters);
+
return phba;
}
@@ -4489,23 +4518,6 @@ lpfc_sli4_post_status_check(struct lpfc_hba *phba)
if (!phba->sli4_hba.STAregaddr)
return -ENODEV;
-	/* With unrecoverable error, log the error message and return error */
- onlnreg0 = readl(phba->sli4_hba.ONLINE0regaddr);
- onlnreg1 = readl(phba->sli4_hba.ONLINE1regaddr);
- if ((onlnreg0 != LPFC_ONLINE_NERR) || (onlnreg1 != LPFC_ONLINE_NERR)) {
- uerrlo_reg.word0 = readl(phba->sli4_hba.UERRLOregaddr);
- uerrhi_reg.word0 = readl(phba->sli4_hba.UERRHIregaddr);
- if (uerrlo_reg.word0 || uerrhi_reg.word0) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "1422 HBA Unrecoverable error: "
- "uerr_lo_reg=0x%x, uerr_hi_reg=0x%x, "
- "online0_reg=0x%x, online1_reg=0x%x\n",
- uerrlo_reg.word0, uerrhi_reg.word0,
- onlnreg0, onlnreg1);
- }
- return -ENODEV;
- }
-
/* Wait up to 30 seconds for the SLI Port POST done and ready */
for (i = 0; i < 3000; i++) {
sta_reg.word0 = readl(phba->sli4_hba.STAregaddr);
@@ -4545,6 +4557,23 @@ lpfc_sli4_post_status_check(struct lpfc_hba *phba)
bf_get(lpfc_scratchpad_featurelevel1, &scratchpad),
bf_get(lpfc_scratchpad_featurelevel2, &scratchpad));
+	/* With unrecoverable error, log the error message and return error */
+ onlnreg0 = readl(phba->sli4_hba.ONLINE0regaddr);
+ onlnreg1 = readl(phba->sli4_hba.ONLINE1regaddr);
+ if ((onlnreg0 != LPFC_ONLINE_NERR) || (onlnreg1 != LPFC_ONLINE_NERR)) {
+ uerrlo_reg.word0 = readl(phba->sli4_hba.UERRLOregaddr);
+ uerrhi_reg.word0 = readl(phba->sli4_hba.UERRHIregaddr);
+ if (uerrlo_reg.word0 || uerrhi_reg.word0) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "1422 HBA Unrecoverable error: "
+ "uerr_lo_reg=0x%x, uerr_hi_reg=0x%x, "
+ "online0_reg=0x%x, online1_reg=0x%x\n",
+ uerrlo_reg.word0, uerrhi_reg.word0,
+ onlnreg0, onlnreg1);
+ }
+ return -ENODEV;
+ }
+
return port_error;
}
@@ -7347,6 +7376,9 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
/* Perform post initialization setup */
lpfc_post_init_setup(phba);
+ /* Check if there are static vports to be created. */
+ lpfc_create_static_vport(phba);
+
return 0;
out_disable_intr:
@@ -7636,19 +7668,17 @@ static int __devinit
lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
{
int rc;
- uint16_t dev_id;
+ struct lpfc_sli_intf intf;
- if (pci_read_config_word(pdev, PCI_DEVICE_ID, &dev_id))
+ if (pci_read_config_dword(pdev, LPFC_SLIREV_CONF_WORD, &intf.word0))
return -ENODEV;
- switch (dev_id) {
- case PCI_DEVICE_ID_TIGERSHARK:
+ if ((bf_get(lpfc_sli_intf_valid, &intf) == LPFC_SLI_INTF_VALID) &&
+ (bf_get(lpfc_sli_intf_rev, &intf) == LPFC_SLIREV_CONF_SLI4))
rc = lpfc_pci_probe_one_s4(pdev, pid);
- break;
- default:
+ else
rc = lpfc_pci_probe_one_s3(pdev, pid);
- break;
- }
+
return rc;
}
diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c
index 3423571dd1b..1ab405902a1 100644
--- a/drivers/scsi/lpfc/lpfc_mbox.c
+++ b/drivers/scsi/lpfc/lpfc_mbox.c
@@ -52,48 +52,85 @@
 * This routine prepares the mailbox command for dumping the list of static
* vports to be created.
**/
-void
+int
lpfc_dump_static_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb,
uint16_t offset)
{
MAILBOX_t *mb;
- void *ctx;
+ struct lpfc_dmabuf *mp;
mb = &pmb->u.mb;
- ctx = pmb->context2;
/* Setup to dump vport info region */
memset(pmb, 0, sizeof(LPFC_MBOXQ_t));
mb->mbxCommand = MBX_DUMP_MEMORY;
- mb->un.varDmp.cv = 1;
mb->un.varDmp.type = DMP_NV_PARAMS;
mb->un.varDmp.entry_index = offset;
mb->un.varDmp.region_id = DMP_REGION_VPORT;
- mb->un.varDmp.word_cnt = DMP_RSP_SIZE/sizeof(uint32_t);
- mb->un.varDmp.co = 0;
- mb->un.varDmp.resp_offset = 0;
- pmb->context2 = ctx;
mb->mbxOwner = OWN_HOST;
- return;
+	/* For SLI3 HBAs, the data is embedded in the mailbox */
+ if (phba->sli_rev != LPFC_SLI_REV4) {
+ mb->un.varDmp.cv = 1;
+ mb->un.varDmp.word_cnt = DMP_RSP_SIZE/sizeof(uint32_t);
+ return 0;
+ }
+
+	/* For SLI4 HBAs the driver needs to allocate memory */
+ mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
+ if (mp)
+ mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
+
+ if (!mp || !mp->virt) {
+ kfree(mp);
+ lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
+ "2605 lpfc_dump_static_vport: memory"
+ " allocation failed\n");
+ return 1;
+ }
+ memset(mp->virt, 0, LPFC_BPL_SIZE);
+ INIT_LIST_HEAD(&mp->list);
+ /* save address for completion */
+ pmb->context2 = (uint8_t *) mp;
+ mb->un.varWords[3] = putPaddrLow(mp->phys);
+ mb->un.varWords[4] = putPaddrHigh(mp->phys);
+ mb->un.varDmp.sli4_length = sizeof(struct static_vport_info);
+
+ return 0;
+}
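For the SLI4 path the dumped bytes land in the DMA buffer whose address is saved in context2 above. The fragment below is a hypothetical completion-side sketch, not the driver's actual handler; the function name and the way vport_info is consumed are assumptions for illustration only.

static void example_static_vport_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *)pmb->context2;
	struct static_vport_info *vport_info;

	if (mp) {
		vport_info = (struct static_vport_info *)mp->virt;
		/* ... walk vport_info and create the configured vports ... */
		lpfc_mbuf_free(phba, mp->virt, mp->phys);
		kfree(mp);
		pmb->context2 = NULL;
	}
	mempool_free(pmb, phba->mbox_mem_pool);
}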
+
+/**
+ * lpfc_down_link - Bring down the HBA's link.
+ * @phba: pointer to lpfc hba data structure.
+ * @pmb: pointer to the driver internal queue element for mailbox command.
+ *
+ * This routine prepares a mailbox command to bring down the HBA link.
+ **/
+void
+lpfc_down_link(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
+{
+ MAILBOX_t *mb;
+ memset(pmb, 0, sizeof(LPFC_MBOXQ_t));
+ mb = &pmb->u.mb;
+ mb->mbxCommand = MBX_DOWN_LINK;
+ mb->mbxOwner = OWN_HOST;
}
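A minimal caller sketch for this helper, mirroring the LINK_DISABLED branch added to lpfc_config_port_post() earlier in this patch; error logging is trimmed and phba is assumed to be in scope.

	LPFC_MBOXQ_t *pmb;
	int rc;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (pmb) {
		lpfc_down_link(phba, pmb);
		pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
		/* same acceptance rule as above: anything but SUCCESS/BUSY fails */
		if (rc != MBX_SUCCESS && rc != MBX_BUSY)
			mempool_free(pmb, phba->mbox_mem_pool);
	}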
/**
- * lpfc_dump_mem - Prepare a mailbox command for retrieving HBA's VPD memory
+ * lpfc_dump_mem - Prepare a mailbox command for reading a region.
* @phba: pointer to lpfc hba data structure.
* @pmb: pointer to the driver internal queue element for mailbox command.
- * @offset: offset for dumping VPD memory mailbox command.
+ * @offset: offset into the region.
+ * @region_id: config region id.
*
* The dump mailbox command provides a method for the device driver to obtain
* various types of information from the HBA device.
*
- * This routine prepares the mailbox command for dumping HBA Vital Product
- * Data (VPD) memory. This mailbox command is to be used for retrieving a
- * portion (DMP_RSP_SIZE bytes) of a HBA's VPD from the HBA at an address
- * offset specified by the offset parameter.
+ * This routine prepares the mailbox command for dumping the HBA's config region.
**/
void
-lpfc_dump_mem(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb, uint16_t offset)
+lpfc_dump_mem(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb, uint16_t offset,
+ uint16_t region_id)
{
MAILBOX_t *mb;
void *ctx;
@@ -107,7 +144,7 @@ lpfc_dump_mem(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb, uint16_t offset)
mb->un.varDmp.cv = 1;
mb->un.varDmp.type = DMP_NV_PARAMS;
mb->un.varDmp.entry_index = offset;
- mb->un.varDmp.region_id = DMP_REGION_VPD;
+ mb->un.varDmp.region_id = region_id;
mb->un.varDmp.word_cnt = (DMP_RSP_SIZE / sizeof (uint32_t));
mb->un.varDmp.co = 0;
mb->un.varDmp.resp_offset = 0;
@@ -1789,6 +1826,7 @@ lpfc_reg_vfi(struct lpfcMboxq *mbox, struct lpfc_vport *vport, dma_addr_t phys)
/**
* lpfc_init_vpi - Initialize the INIT_VPI mailbox command
+ * @phba: pointer to the hba structure to init the VPI for.
* @mbox: pointer to lpfc mbox command to initialize.
* @vpi: VPI to be initialized.
*
@@ -1799,11 +1837,14 @@ lpfc_reg_vfi(struct lpfcMboxq *mbox, struct lpfc_vport *vport, dma_addr_t phys)
* successful virtual NPort login.
**/
void
-lpfc_init_vpi(struct lpfcMboxq *mbox, uint16_t vpi)
+lpfc_init_vpi(struct lpfc_hba *phba, struct lpfcMboxq *mbox, uint16_t vpi)
{
memset(mbox, 0, sizeof(*mbox));
bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_INIT_VPI);
- bf_set(lpfc_init_vpi_vpi, &mbox->u.mqe.un.init_vpi, vpi);
+ bf_set(lpfc_init_vpi_vpi, &mbox->u.mqe.un.init_vpi,
+ vpi + phba->vpi_base);
+ bf_set(lpfc_init_vpi_vfi, &mbox->u.mqe.un.init_vpi,
+ phba->pport->vfi + phba->vfi_base);
}
/**
@@ -1852,7 +1893,7 @@ lpfc_dump_fcoe_param(struct lpfc_hba *phba,
/* dump_fcoe_param failed to allocate memory */
lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
"2569 lpfc_dump_fcoe_param: memory"
- " allocation failed \n");
+ " allocation failed\n");
return 1;
}
@@ -1864,8 +1905,8 @@ lpfc_dump_fcoe_param(struct lpfc_hba *phba,
mb->mbxCommand = MBX_DUMP_MEMORY;
mb->un.varDmp.type = DMP_NV_PARAMS;
- mb->un.varDmp.region_id = DMP_REGION_FCOEPARAM;
- mb->un.varDmp.sli4_length = DMP_FCOEPARAM_RGN_SIZE;
+ mb->un.varDmp.region_id = DMP_REGION_23;
+ mb->un.varDmp.sli4_length = DMP_RGN23_SIZE;
mb->un.varWords[3] = putPaddrLow(mp->phys);
mb->un.varWords[4] = putPaddrHigh(mp->phys);
return 0;
@@ -1938,9 +1979,7 @@ lpfc_resume_rpi(struct lpfcMboxq *mbox, struct lpfc_nodelist *ndlp)
memset(mbox, 0, sizeof(*mbox));
resume_rpi = &mbox->u.mqe.un.resume_rpi;
bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_RESUME_RPI);
- bf_set(lpfc_resume_rpi_rpi, resume_rpi, ndlp->nlp_rpi);
- bf_set(lpfc_resume_rpi_vpi, resume_rpi,
- ndlp->vport->vpi + ndlp->vport->phba->vpi_base);
- bf_set(lpfc_resume_rpi_vfi, resume_rpi,
- ndlp->vport->vfi + ndlp->vport->phba->vfi_base);
+ bf_set(lpfc_resume_rpi_index, resume_rpi, ndlp->nlp_rpi);
+ bf_set(lpfc_resume_rpi_ii, resume_rpi, RESUME_INDEX_RPI);
+ resume_rpi->event_tag = ndlp->phba->fc_eventTag;
}
diff --git a/drivers/scsi/lpfc/lpfc_mem.c b/drivers/scsi/lpfc/lpfc_mem.c
index e198c917c13..a1b6db6016d 100644
--- a/drivers/scsi/lpfc/lpfc_mem.c
+++ b/drivers/scsi/lpfc/lpfc_mem.c
@@ -110,17 +110,28 @@ lpfc_mem_alloc(struct lpfc_hba *phba, int align)
sizeof(struct lpfc_nodelist));
if (!phba->nlp_mem_pool)
goto fail_free_mbox_pool;
- phba->lpfc_hrb_pool = pci_pool_create("lpfc_hrb_pool",
+
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ phba->lpfc_hrb_pool = pci_pool_create("lpfc_hrb_pool",
phba->pcidev,
LPFC_HDR_BUF_SIZE, align, 0);
- if (!phba->lpfc_hrb_pool)
- goto fail_free_nlp_mem_pool;
- phba->lpfc_drb_pool = pci_pool_create("lpfc_drb_pool",
+ if (!phba->lpfc_hrb_pool)
+ goto fail_free_nlp_mem_pool;
+
+ phba->lpfc_drb_pool = pci_pool_create("lpfc_drb_pool",
phba->pcidev,
LPFC_DATA_BUF_SIZE, align, 0);
- if (!phba->lpfc_drb_pool)
- goto fail_free_hbq_pool;
-
+ if (!phba->lpfc_drb_pool)
+ goto fail_free_hrb_pool;
+ phba->lpfc_hbq_pool = NULL;
+ } else {
+ phba->lpfc_hbq_pool = pci_pool_create("lpfc_hbq_pool",
+ phba->pcidev, LPFC_BPL_SIZE, align, 0);
+ if (!phba->lpfc_hbq_pool)
+ goto fail_free_nlp_mem_pool;
+ phba->lpfc_hrb_pool = NULL;
+ phba->lpfc_drb_pool = NULL;
+ }
/* vpi zero is reserved for the physical port so add 1 to max */
longs = ((phba->max_vpi + 1) + BITS_PER_LONG - 1) / BITS_PER_LONG;
phba->vpi_bmask = kzalloc(longs * sizeof(unsigned long), GFP_KERNEL);
@@ -132,7 +143,7 @@ lpfc_mem_alloc(struct lpfc_hba *phba, int align)
fail_free_dbq_pool:
pci_pool_destroy(phba->lpfc_drb_pool);
phba->lpfc_drb_pool = NULL;
- fail_free_hbq_pool:
+ fail_free_hrb_pool:
pci_pool_destroy(phba->lpfc_hrb_pool);
phba->lpfc_hrb_pool = NULL;
fail_free_nlp_mem_pool:
@@ -176,11 +187,17 @@ lpfc_mem_free(struct lpfc_hba *phba)
/* Free HBQ pools */
lpfc_sli_hbqbuf_free_all(phba);
- pci_pool_destroy(phba->lpfc_drb_pool);
+ if (phba->lpfc_drb_pool)
+ pci_pool_destroy(phba->lpfc_drb_pool);
phba->lpfc_drb_pool = NULL;
- pci_pool_destroy(phba->lpfc_hrb_pool);
+ if (phba->lpfc_hrb_pool)
+ pci_pool_destroy(phba->lpfc_hrb_pool);
phba->lpfc_hrb_pool = NULL;
+ if (phba->lpfc_hbq_pool)
+ pci_pool_destroy(phba->lpfc_hbq_pool);
+ phba->lpfc_hbq_pool = NULL;
+
/* Free NLP memory pool */
mempool_destroy(phba->nlp_mem_pool);
phba->nlp_mem_pool = NULL;
@@ -380,7 +397,7 @@ lpfc_els_hbq_alloc(struct lpfc_hba *phba)
if (!hbqbp)
return NULL;
- hbqbp->dbuf.virt = pci_pool_alloc(phba->lpfc_hrb_pool, GFP_KERNEL,
+ hbqbp->dbuf.virt = pci_pool_alloc(phba->lpfc_hbq_pool, GFP_KERNEL,
&hbqbp->dbuf.phys);
if (!hbqbp->dbuf.virt) {
kfree(hbqbp);
@@ -405,7 +422,7 @@ lpfc_els_hbq_alloc(struct lpfc_hba *phba)
void
lpfc_els_hbq_free(struct lpfc_hba *phba, struct hbq_dmabuf *hbqbp)
{
- pci_pool_free(phba->lpfc_hrb_pool, hbqbp->dbuf.virt, hbqbp->dbuf.phys);
+ pci_pool_free(phba->lpfc_hbq_pool, hbqbp->dbuf.virt, hbqbp->dbuf.phys);
kfree(hbqbp);
return;
}
diff --git a/drivers/scsi/lpfc/lpfc_nl.h b/drivers/scsi/lpfc/lpfc_nl.h
index 27d1a88a98f..d655ed3eebe 100644
--- a/drivers/scsi/lpfc/lpfc_nl.h
+++ b/drivers/scsi/lpfc/lpfc_nl.h
@@ -177,3 +177,23 @@ struct temp_event {
uint32_t data;
};
+/* bsg definitions */
+#define LPFC_BSG_VENDOR_SET_CT_EVENT 1
+#define LPFC_BSG_VENDOR_GET_CT_EVENT 2
+
+struct set_ct_event {
+ uint32_t command;
+ uint32_t ev_req_id;
+ uint32_t ev_reg_id;
+};
+
+struct get_ct_event {
+ uint32_t command;
+ uint32_t ev_reg_id;
+ uint32_t ev_req_id;
+};
+
+struct get_ct_event_reply {
+ uint32_t immed_data;
+ uint32_t type;
+};
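The patch adds these payload structures without a usage example. The sketch below shows, under assumed field semantics, how a bsg caller might pair an event registration with a later event fetch; the ids are arbitrary examples and the surrounding fc bsg request plumbing is not shown.

	struct set_ct_event set_ev = {
		.command   = LPFC_BSG_VENDOR_SET_CT_EVENT,
		.ev_req_id = 0x1234,	/* example request id (assumed meaning) */
		.ev_reg_id = 0x0001,	/* example registration id (assumed meaning) */
	};

	struct get_ct_event get_ev = {
		.command   = LPFC_BSG_VENDOR_GET_CT_EVENT,
		.ev_reg_id = set_ev.ev_reg_id,	/* fetch events for the same registration */
		.ev_req_id = set_ev.ev_req_id,
	};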
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index da59c4f0168..61d08970380 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -2142,7 +2142,7 @@ lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
} else if (resp_info & RESID_OVER) {
lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
"9028 FCP command x%x residual overrun error. "
- "Data: x%x x%x \n", cmnd->cmnd[0],
+ "Data: x%x x%x\n", cmnd->cmnd[0],
scsi_bufflen(cmnd), scsi_get_resid(cmnd));
host_status = DID_ERROR;
@@ -2843,7 +2843,7 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
dif_op_str[scsi_get_prot_op(cmnd)]);
lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
"9034 BLKGRD: CDB: %02x %02x %02x %02x %02x "
- "%02x %02x %02x %02x %02x \n",
+ "%02x %02x %02x %02x %02x\n",
cmnd->cmnd[0], cmnd->cmnd[1], cmnd->cmnd[2],
cmnd->cmnd[3], cmnd->cmnd[4], cmnd->cmnd[5],
cmnd->cmnd[6], cmnd->cmnd[7], cmnd->cmnd[8],
@@ -2871,7 +2871,7 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
dif_op_str[scsi_get_prot_op(cmnd)]);
lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
"9039 BLKGRD: CDB: %02x %02x %02x %02x %02x "
- "%02x %02x %02x %02x %02x \n",
+ "%02x %02x %02x %02x %02x\n",
cmnd->cmnd[0], cmnd->cmnd[1], cmnd->cmnd[2],
cmnd->cmnd[3], cmnd->cmnd[4], cmnd->cmnd[5],
cmnd->cmnd[6], cmnd->cmnd[7], cmnd->cmnd[8],
@@ -3584,6 +3584,7 @@ struct scsi_host_template lpfc_template = {
.use_clustering = ENABLE_CLUSTERING,
.shost_attrs = lpfc_hba_attrs,
.max_sectors = 0xFFFF,
+ .vendor_id = LPFC_NL_VENDOR_ID,
};
struct scsi_host_template lpfc_vport_template = {
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index acc43b061ba..43cbe336f1f 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -4139,7 +4139,7 @@ lpfc_sli4_read_fcoe_params(struct lpfc_hba *phba,
return -EIO;
}
data_length = mqe->un.mb_words[5];
- if (data_length > DMP_FCOEPARAM_RGN_SIZE) {
+ if (data_length > DMP_RGN23_SIZE) {
lpfc_mbuf_free(phba, mp->virt, mp->phys);
kfree(mp);
return -EIO;
@@ -4304,7 +4304,7 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
*/
if (lpfc_sli4_read_fcoe_params(phba, mboxq))
lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
- "2570 Failed to read FCoE parameters \n");
+ "2570 Failed to read FCoE parameters\n");
/* Issue READ_REV to collect vpd and FW information. */
vpd_size = PAGE_SIZE;
@@ -4522,12 +4522,8 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
lpfc_sli4_rb_setup(phba);
/* Start the ELS watchdog timer */
- /*
- * The driver for SLI4 is not yet ready to process timeouts
- * or interrupts. Once it is, the comment bars can be removed.
- */
- /* mod_timer(&vport->els_tmofunc,
- * jiffies + HZ * (phba->fc_ratov*2)); */
+ mod_timer(&vport->els_tmofunc,
+ jiffies + HZ * (phba->fc_ratov * 2));
/* Start heart beat timer */
mod_timer(&phba->hb_tmofunc,
@@ -4706,13 +4702,13 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
spin_lock_irqsave(&phba->hbalock, drvr_flag);
if (!pmbox) {
+ phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
/* processing mbox queue from intr_handler */
if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
return MBX_SUCCESS;
}
processing_queue = 1;
- phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
pmbox = lpfc_mbox_get(phba);
if (!pmbox) {
spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
@@ -5279,6 +5275,18 @@ lpfc_sli_issue_mbox_s4(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
unsigned long iflags;
int rc;
+ rc = lpfc_mbox_dev_check(phba);
+ if (unlikely(rc)) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
+ "(%d):2544 Mailbox command x%x (x%x) "
+ "cannot issue Data: x%x x%x\n",
+ mboxq->vport ? mboxq->vport->vpi : 0,
+ mboxq->u.mb.mbxCommand,
+ lpfc_sli4_mbox_opcode_get(phba, mboxq),
+ psli->sli_flag, flag);
+ goto out_not_finished;
+ }
+
/* Detect polling mode and jump to a handler */
if (!phba->sli4_hba.intr_enable) {
if (flag == MBX_POLL)
@@ -5338,17 +5346,6 @@ lpfc_sli_issue_mbox_s4(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
psli->sli_flag, flag);
goto out_not_finished;
}
- rc = lpfc_mbox_dev_check(phba);
- if (unlikely(rc)) {
- lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
- "(%d):2544 Mailbox command x%x (x%x) "
- "cannot issue Data: x%x x%x\n",
- mboxq->vport ? mboxq->vport->vpi : 0,
- mboxq->u.mb.mbxCommand,
- lpfc_sli4_mbox_opcode_get(phba, mboxq),
- psli->sli_flag, flag);
- goto out_not_finished;
- }
/* Put the mailbox command to the driver internal FIFO */
psli->slistat.mbox_busy++;
@@ -5817,19 +5814,21 @@ lpfc_sli4_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq,
/**
* lpfc_sli4_scmd_to_wqidx_distr - scsi command to SLI4 WQ index distribution
* @phba: Pointer to HBA context object.
- * @piocb: Pointer to command iocb.
*
* This routine performs a round robin SCSI command to SLI4 FCP WQ index
- * distribution.
+ * distribution. This is called by __lpfc_sli_issue_iocb_s4() with the hbalock
+ * held.
*
* Return: index into SLI4 fast-path FCP queue index.
**/
static uint32_t
-lpfc_sli4_scmd_to_wqidx_distr(struct lpfc_hba *phba, struct lpfc_iocbq *piocb)
+lpfc_sli4_scmd_to_wqidx_distr(struct lpfc_hba *phba)
{
- static uint32_t fcp_qidx;
+ ++phba->fcp_qidx;
+ if (phba->fcp_qidx >= phba->cfg_fcp_wq_count)
+ phba->fcp_qidx = 0;
- return fcp_qidx++ % phba->cfg_fcp_wq_count;
+ return phba->fcp_qidx;
}
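A short worked example of the new per-HBA round-robin selection, assuming cfg_fcp_wq_count is 4 and fcp_qidx starts at 0; the caller holds hbalock, which serializes the increment.

	uint32_t i;

	for (i = 0; i < 8; i++)
		/* successive calls return 1, 2, 3, 0, 1, 2, 3, 0 */
		lpfc_sli4_scmd_to_wqidx_distr(phba);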
/**
@@ -6156,7 +6155,7 @@ __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
return IOCB_ERROR;
if (piocb->iocb_flag & LPFC_IO_FCP) {
- fcp_wqidx = lpfc_sli4_scmd_to_wqidx_distr(phba, piocb);
+ fcp_wqidx = lpfc_sli4_scmd_to_wqidx_distr(phba);
if (lpfc_sli4_wq_put(phba->sli4_hba.fcp_wq[fcp_wqidx], &wqe))
return IOCB_ERROR;
} else {
@@ -6327,7 +6326,7 @@ lpfc_sli_async_event_handler(struct lpfc_hba * phba,
KERN_ERR,
LOG_SLI,
"0346 Ring %d handler: unexpected ASYNC_STATUS"
- " evt_code 0x%x \n"
+ " evt_code 0x%x\n"
"W0 0x%08x W1 0x%08x W2 0x%08x W3 0x%08x\n"
"W4 0x%08x W5 0x%08x W6 0x%08x W7 0x%08x\n"
"W8 0x%08x W9 0x%08x W10 0x%08x W11 0x%08x\n"
@@ -6790,6 +6789,33 @@ lpfc_sli_pcimem_bcopy(void *srcp, void *destp, uint32_t cnt)
/**
+ * lpfc_sli_bemem_bcopy - SLI memory copy function
+ * @srcp: Source memory pointer.
+ * @destp: Destination memory pointer.
+ * @cnt: Number of bytes to copy (must be a multiple of four).
+ *
+ * This function is used for copying data from a big endian data structure
+ * to local (host) endianness.
+ * This function can be called with or without lock.
+ **/
+void
+lpfc_sli_bemem_bcopy(void *srcp, void *destp, uint32_t cnt)
+{
+ uint32_t *src = srcp;
+ uint32_t *dest = destp;
+ uint32_t ldata;
+ int i;
+
+ for (i = 0; i < (int)cnt; i += sizeof(uint32_t)) {
+ ldata = *src;
+ ldata = be32_to_cpu(ldata);
+ *dest = ldata;
+ src++;
+ dest++;
+ }
+}
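A minimal illustration of the conversion this helper performs, assuming a caller-defined four-word buffer; note that cnt is a byte count, since the loop above advances by sizeof(uint32_t).

	uint32_t wire[4];	/* big-endian words, e.g. copied out of a DMA buffer */
	uint32_t host[4];	/* host-order result */

	lpfc_sli_bemem_bcopy(wire, host, sizeof(wire));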
+
+/**
* lpfc_sli_ringpostbuf_put - Function to add a buffer to postbufq
* @phba: Pointer to HBA context object.
* @pring: Pointer to driver SLI ring object.
@@ -7678,12 +7704,6 @@ lpfc_sli4_eratt_read(struct lpfc_hba *phba)
"online0_reg=0x%x, online1_reg=0x%x\n",
uerr_sta_lo, uerr_sta_hi,
onlnreg0, onlnreg1);
- /* TEMP: as the driver error recover logic is not
- * fully developed, we just log the error message
- * and the device error attention action is now
- * temporarily disabled.
- */
- return 0;
phba->work_status[0] = uerr_sta_lo;
phba->work_status[1] = uerr_sta_hi;
/* Set the driver HA work bitmap */
@@ -9499,8 +9519,7 @@ lpfc_eq_create(struct lpfc_hba *phba, struct lpfc_queue *eq, uint16_t imax)
eq->host_index = 0;
eq->hba_index = 0;
- if (rc != MBX_TIMEOUT)
- mempool_free(mbox, phba->mbox_mem_pool);
+ mempool_free(mbox, phba->mbox_mem_pool);
return status;
}
@@ -9604,10 +9623,9 @@ lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq,
cq->queue_id = bf_get(lpfc_mbx_cq_create_q_id, &cq_create->u.response);
cq->host_index = 0;
cq->hba_index = 0;
-out:
- if (rc != MBX_TIMEOUT)
- mempool_free(mbox, phba->mbox_mem_pool);
+out:
+ mempool_free(mbox, phba->mbox_mem_pool);
return status;
}
@@ -9712,8 +9730,7 @@ lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq,
/* link the mq onto the parent cq child list */
list_add_tail(&mq->list, &cq->child_list);
out:
- if (rc != MBX_TIMEOUT)
- mempool_free(mbox, phba->mbox_mem_pool);
+ mempool_free(mbox, phba->mbox_mem_pool);
return status;
}
@@ -9795,8 +9812,7 @@ lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
/* link the wq onto the parent cq child list */
list_add_tail(&wq->list, &cq->child_list);
out:
- if (rc != MBX_TIMEOUT)
- mempool_free(mbox, phba->mbox_mem_pool);
+ mempool_free(mbox, phba->mbox_mem_pool);
return status;
}
@@ -9970,8 +9986,7 @@ lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq,
list_add_tail(&drq->list, &cq->child_list);
out:
- if (rc != MBX_TIMEOUT)
- mempool_free(mbox, phba->mbox_mem_pool);
+ mempool_free(mbox, phba->mbox_mem_pool);
return status;
}
@@ -10026,8 +10041,7 @@ lpfc_eq_destroy(struct lpfc_hba *phba, struct lpfc_queue *eq)
/* Remove eq from any list */
list_del_init(&eq->list);
- if (rc != MBX_TIMEOUT)
- mempool_free(mbox, eq->phba->mbox_mem_pool);
+ mempool_free(mbox, eq->phba->mbox_mem_pool);
return status;
}
@@ -10080,8 +10094,7 @@ lpfc_cq_destroy(struct lpfc_hba *phba, struct lpfc_queue *cq)
}
/* Remove cq from any list */
list_del_init(&cq->list);
- if (rc != MBX_TIMEOUT)
- mempool_free(mbox, cq->phba->mbox_mem_pool);
+ mempool_free(mbox, cq->phba->mbox_mem_pool);
return status;
}
@@ -10134,8 +10147,7 @@ lpfc_mq_destroy(struct lpfc_hba *phba, struct lpfc_queue *mq)
}
/* Remove mq from any list */
list_del_init(&mq->list);
- if (rc != MBX_TIMEOUT)
- mempool_free(mbox, mq->phba->mbox_mem_pool);
+ mempool_free(mbox, mq->phba->mbox_mem_pool);
return status;
}
@@ -10187,8 +10199,7 @@ lpfc_wq_destroy(struct lpfc_hba *phba, struct lpfc_queue *wq)
}
/* Remove wq from any list */
list_del_init(&wq->list);
- if (rc != MBX_TIMEOUT)
- mempool_free(mbox, wq->phba->mbox_mem_pool);
+ mempool_free(mbox, wq->phba->mbox_mem_pool);
return status;
}
@@ -10258,8 +10269,7 @@ lpfc_rq_destroy(struct lpfc_hba *phba, struct lpfc_queue *hrq,
}
list_del_init(&hrq->list);
list_del_init(&drq->list);
- if (rc != MBX_TIMEOUT)
- mempool_free(mbox, hrq->phba->mbox_mem_pool);
+ mempool_free(mbox, hrq->phba->mbox_mem_pool);
return status;
}
@@ -10933,6 +10943,7 @@ lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
first_iocbq = lpfc_sli_get_iocbq(vport->phba);
if (first_iocbq) {
/* Initialize the first IOCB. */
+ first_iocbq->iocb.unsli3.rcvsli3.acc_len = 0;
first_iocbq->iocb.ulpStatus = IOSTAT_SUCCESS;
first_iocbq->iocb.ulpCommand = CMD_IOCB_RCV_SEQ64_CX;
first_iocbq->iocb.ulpContext = be16_to_cpu(fc_hdr->fh_ox_id);
@@ -10945,6 +10956,8 @@ lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
first_iocbq->iocb.un.cont64[0].tus.f.bdeSize =
LPFC_DATA_BUF_SIZE;
first_iocbq->iocb.un.rcvels.remoteID = sid;
+ first_iocbq->iocb.unsli3.rcvsli3.acc_len +=
+ bf_get(lpfc_rcqe_length, &seq_dmabuf->rcqe);
}
iocbq = first_iocbq;
/*
@@ -10961,6 +10974,8 @@ lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
iocbq->iocb.ulpBdeCount++;
iocbq->iocb.unsli3.rcvsli3.bde2.tus.f.bdeSize =
LPFC_DATA_BUF_SIZE;
+ first_iocbq->iocb.unsli3.rcvsli3.acc_len +=
+ bf_get(lpfc_rcqe_length, &seq_dmabuf->rcqe);
} else {
iocbq = lpfc_sli_get_iocbq(vport->phba);
if (!iocbq) {
@@ -10978,6 +10993,8 @@ lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
iocbq->iocb.ulpBdeCount = 1;
iocbq->iocb.un.cont64[0].tus.f.bdeSize =
LPFC_DATA_BUF_SIZE;
+ first_iocbq->iocb.unsli3.rcvsli3.acc_len +=
+ bf_get(lpfc_rcqe_length, &seq_dmabuf->rcqe);
iocbq->iocb.un.rcvels.remoteID = sid;
list_add_tail(&iocbq->list, &first_iocbq->list);
}
@@ -11324,7 +11341,7 @@ lpfc_sli4_init_vpi(struct lpfc_hba *phba, uint16_t vpi)
mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!mboxq)
return -ENOMEM;
- lpfc_init_vpi(mboxq, vpi);
+ lpfc_init_vpi(phba, mboxq, vpi);
mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_INIT_VPI);
rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
if (rc != MBX_TIMEOUT)
@@ -11519,6 +11536,7 @@ lpfc_sli4_read_fcf_record(struct lpfc_hba *phba, uint16_t fcf_index)
uint32_t alloc_len, req_len;
struct lpfc_mbx_read_fcf_tbl *read_fcf;
+ phba->fcoe_eventtag_at_fcf_scan = phba->fcoe_eventtag;
mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!mboxq) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
@@ -11570,7 +11588,140 @@ lpfc_sli4_read_fcf_record(struct lpfc_hba *phba, uint16_t fcf_index)
if (rc == MBX_NOT_FINISHED) {
lpfc_sli4_mbox_cmd_free(phba, mboxq);
error = -EIO;
- } else
+ } else {
+ spin_lock_irq(&phba->hbalock);
+ phba->hba_flag |= FCF_DISC_INPROGRESS;
+ spin_unlock_irq(&phba->hbalock);
error = 0;
+ }
return error;
}
+
+/**
+ * lpfc_sli_read_link_ste - Read region 23 to decide if link is disabled.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This function reads region 23 and parses the TLV for port status to
+ * decide if the user disabled the port. If the TLV indicates the
+ * port is disabled, the hba_flag is set accordingly.
+ **/
+void
+lpfc_sli_read_link_ste(struct lpfc_hba *phba)
+{
+ LPFC_MBOXQ_t *pmb = NULL;
+ MAILBOX_t *mb;
+ uint8_t *rgn23_data = NULL;
+ uint32_t offset = 0, data_size, sub_tlv_len, tlv_offset;
+ int rc;
+
+ pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!pmb) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+			"2600 lpfc_sli_read_link_ste failed to"
+ " allocate mailbox memory\n");
+ goto out;
+ }
+ mb = &pmb->u.mb;
+
+ /* Get adapter Region 23 data */
+ rgn23_data = kzalloc(DMP_RGN23_SIZE, GFP_KERNEL);
+ if (!rgn23_data)
+ goto out;
+
+ do {
+ lpfc_dump_mem(phba, pmb, offset, DMP_REGION_23);
+ rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
+
+ if (rc != MBX_SUCCESS) {
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "2601 lpfc_sli_read_link_ste failed to"
+ " read config region 23 rc 0x%x Status 0x%x\n",
+ rc, mb->mbxStatus);
+ mb->un.varDmp.word_cnt = 0;
+ }
+ /*
+		 * dump mem may return a zero word count when finished or we hit a
+		 * mailbox error; either way we are done.
+ */
+ if (mb->un.varDmp.word_cnt == 0)
+ break;
+ if (mb->un.varDmp.word_cnt > DMP_RGN23_SIZE - offset)
+ mb->un.varDmp.word_cnt = DMP_RGN23_SIZE - offset;
+
+ lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
+ rgn23_data + offset,
+ mb->un.varDmp.word_cnt);
+ offset += mb->un.varDmp.word_cnt;
+ } while (mb->un.varDmp.word_cnt && offset < DMP_RGN23_SIZE);
+
+ data_size = offset;
+ offset = 0;
+
+ if (!data_size)
+ goto out;
+
+ /* Check the region signature first */
+ if (memcmp(&rgn23_data[offset], LPFC_REGION23_SIGNATURE, 4)) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "2619 Config region 23 has bad signature\n");
+ goto out;
+ }
+ offset += 4;
+
+ /* Check the data structure version */
+ if (rgn23_data[offset] != LPFC_REGION23_VERSION) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "2620 Config region 23 has bad version\n");
+ goto out;
+ }
+ offset += 4;
+
+ /* Parse TLV entries in the region */
+ while (offset < data_size) {
+ if (rgn23_data[offset] == LPFC_REGION23_LAST_REC)
+ break;
+ /*
+		 * If the TLV is not a driver specific TLV or the driver id is
+		 * not the Linux driver id, skip the record.
+ */
+ if ((rgn23_data[offset] != DRIVER_SPECIFIC_TYPE) ||
+ (rgn23_data[offset + 2] != LINUX_DRIVER_ID) ||
+ (rgn23_data[offset + 3] != 0)) {
+ offset += rgn23_data[offset + 1] * 4 + 4;
+ continue;
+ }
+
+ /* Driver found a driver specific TLV in the config region */
+ sub_tlv_len = rgn23_data[offset + 1] * 4;
+ offset += 4;
+ tlv_offset = 0;
+
+ /*
+ * Search for configured port state sub-TLV.
+ */
+ while ((offset < data_size) &&
+ (tlv_offset < sub_tlv_len)) {
+ if (rgn23_data[offset] == LPFC_REGION23_LAST_REC) {
+ offset += 4;
+ tlv_offset += 4;
+ break;
+ }
+ if (rgn23_data[offset] != PORT_STE_TYPE) {
+				tlv_offset += rgn23_data[offset + 1] * 4 + 4;
+				offset += rgn23_data[offset + 1] * 4 + 4;
+ continue;
+ }
+
+ /* This HBA contains PORT_STE configured */
+ if (!rgn23_data[offset + 2])
+ phba->hba_flag |= LINK_DISABLED;
+
+ goto out;
+ }
+ }
+out:
+ if (pmb)
+ mempool_free(pmb, phba->mbox_mem_pool);
+ kfree(rgn23_data);
+ return;
+}
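To make the TLV walk concrete, here is a hypothetical minimal region 23 image that the parser above would treat as "user disabled the port". The byte layout is inferred from the offsets the function consumes, so treat it as an illustration rather than the documented region format.

	static const uint8_t rgn23_link_disabled_example[] = {
		'R', 'G', '2', '3',		/* LPFC_REGION23_SIGNATURE */
		LPFC_REGION23_VERSION, 0, 0, 0,	/* version word */
		DRIVER_SPECIFIC_TYPE,		/* driver specific TLV */
		2,				/* TLV length in words */
		LINUX_DRIVER_ID, 0,		/* Linux driver id, pad must be 0 */
		PORT_STE_TYPE,			/* configured port state sub-TLV */
		1,				/* sub-record length in words */
		0, 0,				/* state 0 => LINK_DISABLED is set */
		LPFC_REGION23_LAST_REC, 0, 0, 0,	/* terminator */
	};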
diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h
index 3b276b47d18..b5f4ba1a5c2 100644
--- a/drivers/scsi/lpfc/lpfc_sli4.h
+++ b/drivers/scsi/lpfc/lpfc_sli4.h
@@ -132,6 +132,7 @@ struct lpfc_sli4_link {
struct lpfc_fcf {
uint8_t fabric_name[8];
+ uint8_t switch_name[8];
uint8_t mac_addr[6];
uint16_t fcf_indx;
uint16_t fcfi;
@@ -150,6 +151,10 @@ struct lpfc_fcf {
#define LPFC_REGION23_SIGNATURE "RG23"
#define LPFC_REGION23_VERSION 1
#define LPFC_REGION23_LAST_REC 0xff
+#define DRIVER_SPECIFIC_TYPE 0xA2
+#define LINUX_DRIVER_ID 0x20
+#define PORT_STE_TYPE 0x1
+
struct lpfc_fip_param_hdr {
uint8_t type;
#define FCOE_PARAM_TYPE 0xA0
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index 41094e02304..9ae20af4bdb 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -18,7 +18,7 @@
* included with this package. *
*******************************************************************/
-#define LPFC_DRIVER_VERSION "8.3.3"
+#define LPFC_DRIVER_VERSION "8.3.4"
#define LPFC_DRIVER_NAME "lpfc"
#define LPFC_SP_DRIVER_HANDLER_NAME "lpfc:sp"
diff --git a/drivers/scsi/lpfc/lpfc_vport.c b/drivers/scsi/lpfc/lpfc_vport.c
index e0b49922193..606efa76754 100644
--- a/drivers/scsi/lpfc/lpfc_vport.c
+++ b/drivers/scsi/lpfc/lpfc_vport.c
@@ -313,22 +313,6 @@ lpfc_vport_create(struct fc_vport *fc_vport, bool disable)
goto error_out;
}
- /*
- * In SLI4, the vpi must be activated before it can be used
- * by the port.
- */
- if (phba->sli_rev == LPFC_SLI_REV4) {
- rc = lpfc_sli4_init_vpi(phba, vpi);
- if (rc) {
- lpfc_printf_log(phba, KERN_ERR, LOG_VPORT,
- "1838 Failed to INIT_VPI on vpi %d "
- "status %d\n", vpi, rc);
- rc = VPORT_NORESOURCES;
- lpfc_free_vpi(phba, vpi);
- goto error_out;
- }
- }
-
/* Assign an unused board number */
if ((instance = lpfc_get_instance()) < 0) {
lpfc_printf_log(phba, KERN_ERR, LOG_VPORT,
@@ -367,12 +351,8 @@ lpfc_vport_create(struct fc_vport *fc_vport, bool disable)
goto error_out;
}
- memcpy(vport->fc_portname.u.wwn, vport->fc_sparam.portName.u.wwn, 8);
- memcpy(vport->fc_nodename.u.wwn, vport->fc_sparam.nodeName.u.wwn, 8);
- if (fc_vport->node_name != 0)
- u64_to_wwn(fc_vport->node_name, vport->fc_nodename.u.wwn);
- if (fc_vport->port_name != 0)
- u64_to_wwn(fc_vport->port_name, vport->fc_portname.u.wwn);
+ u64_to_wwn(fc_vport->node_name, vport->fc_nodename.u.wwn);
+ u64_to_wwn(fc_vport->port_name, vport->fc_portname.u.wwn);
memcpy(&vport->fc_sparam.portName, vport->fc_portname.u.wwn, 8);
memcpy(&vport->fc_sparam.nodeName, vport->fc_nodename.u.wwn, 8);
@@ -404,7 +384,34 @@ lpfc_vport_create(struct fc_vport *fc_vport, bool disable)
*(struct lpfc_vport **)fc_vport->dd_data = vport;
vport->fc_vport = fc_vport;
+ /*
+ * In SLI4, the vpi must be activated before it can be used
+ * by the port.
+ */
+ if ((phba->sli_rev == LPFC_SLI_REV4) &&
+ (pport->vfi_state & LPFC_VFI_REGISTERED)) {
+ rc = lpfc_sli4_init_vpi(phba, vpi);
+ if (rc) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_VPORT,
+ "1838 Failed to INIT_VPI on vpi %d "
+ "status %d\n", vpi, rc);
+ rc = VPORT_NORESOURCES;
+ lpfc_free_vpi(phba, vpi);
+ goto error_out;
+ }
+ } else if (phba->sli_rev == LPFC_SLI_REV4) {
+ /*
+		 * Driver cannot INIT_VPI now. Set the flag to issue
+		 * init_vpi when reg_vfi completes.
+ */
+ vport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
+ lpfc_vport_set_state(vport, FC_VPORT_LINKDOWN);
+ rc = VPORT_OK;
+ goto out;
+ }
+
if ((phba->link_state < LPFC_LINK_UP) ||
+ (pport->port_state < LPFC_FABRIC_CFG_LINK) ||
(phba->fc_topology == TOPOLOGY_LOOP)) {
lpfc_vport_set_state(vport, FC_VPORT_LINKDOWN);
rc = VPORT_OK;
@@ -661,7 +668,7 @@ lpfc_vport_delete(struct fc_vport *fc_vport)
lpfc_printf_log(vport->phba, KERN_WARNING,
LOG_VPORT,
"1829 CT command failed to "
- "delete objects on fabric. \n");
+ "delete objects on fabric\n");
}
/* First look for the Fabric ndlp */
ndlp = lpfc_findnode_did(vport, Fabric_DID);