Diffstat (limited to 'drivers/scsi')
-rw-r--r-- drivers/scsi/Kconfig | 10
-rw-r--r-- drivers/scsi/aacraid/aachba.c | 2
-rw-r--r-- drivers/scsi/device_handler/scsi_dh_alua.c | 12
-rw-r--r-- drivers/scsi/device_handler/scsi_dh_emc.c | 9
-rw-r--r-- drivers/scsi/device_handler/scsi_dh_hp_sw.c | 2
-rw-r--r-- drivers/scsi/device_handler/scsi_dh_rdac.c | 8
-rw-r--r-- drivers/scsi/gdth.c | 60
-rw-r--r-- drivers/scsi/gdth.h | 2
-rw-r--r-- drivers/scsi/gdth_proc.c | 66
-rw-r--r-- drivers/scsi/gdth_proc.h | 3
-rw-r--r-- drivers/scsi/hosts.c | 2
-rw-r--r-- drivers/scsi/ibmvscsi/ibmvscsi.c | 2
-rw-r--r-- drivers/scsi/ide-scsi.c | 2
-rw-r--r-- drivers/scsi/ipr.c | 3
-rw-r--r-- drivers/scsi/ips.c | 2
-rw-r--r-- drivers/scsi/libiscsi.c | 19
-rw-r--r-- drivers/scsi/libsas/sas_ata.c | 10
-rw-r--r-- drivers/scsi/libsas/sas_internal.h | 2
-rw-r--r-- drivers/scsi/libsas/sas_scsi_host.c | 30
-rw-r--r-- drivers/scsi/megaraid/megaraid_sas.c | 6
-rw-r--r-- drivers/scsi/ncr53c8xx.c | 4
-rw-r--r-- drivers/scsi/qla1280.c | 4
-rw-r--r-- drivers/scsi/qla2xxx/qla_attr.c | 9
-rw-r--r-- drivers/scsi/qla2xxx/qla_def.h | 13
-rw-r--r-- drivers/scsi/qla2xxx/qla_fw.h | 71
-rw-r--r-- drivers/scsi/qla2xxx/qla_gbl.h | 4
-rw-r--r-- drivers/scsi/qla2xxx/qla_init.c | 14
-rw-r--r-- drivers/scsi/qla2xxx/qla_inline.h | 2
-rw-r--r-- drivers/scsi/qla2xxx/qla_iocb.c | 30
-rw-r--r-- drivers/scsi/qla2xxx/qla_isr.c | 18
-rw-r--r-- drivers/scsi/qla2xxx/qla_mbx.c | 6
-rw-r--r-- drivers/scsi/qla2xxx/qla_os.c | 11
-rw-r--r-- drivers/scsi/qla2xxx/qla_sup.c | 338
-rw-r--r-- drivers/scsi/qla2xxx/qla_version.h | 2
-rw-r--r-- drivers/scsi/qla4xxx/ql4_os.c | 4
-rw-r--r-- drivers/scsi/qlogicpti.c | 1
-rw-r--r-- drivers/scsi/scsi.c | 105
-rw-r--r-- drivers/scsi/scsi_error.c | 92
-rw-r--r-- drivers/scsi/scsi_lib.c | 59
-rw-r--r-- drivers/scsi/scsi_netlink.c | 523
-rw-r--r-- drivers/scsi/scsi_priv.h | 7
-rw-r--r-- drivers/scsi/scsi_proc.c | 8
-rw-r--r-- drivers/scsi/scsi_scan.c | 23
-rw-r--r-- drivers/scsi/scsi_sysfs.c | 8
-rw-r--r-- drivers/scsi/scsi_tgt_lib.c | 8
-rw-r--r-- drivers/scsi/scsi_transport_fc.c | 60
-rw-r--r-- drivers/scsi/scsi_transport_iscsi.c | 4
-rw-r--r-- drivers/scsi/sd.c | 122
-rw-r--r-- drivers/scsi/sg.c | 667
-rw-r--r-- drivers/scsi/sr.c | 7
-rw-r--r-- drivers/scsi/sym53c8xx_2/sym_glue.c | 4
-rw-r--r-- drivers/scsi/tmscsim.c | 4
52 files changed, 1456 insertions, 1028 deletions
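Note: the common thread in the hunks below is moving per-command timeout handling out of the SCSI midlayer and into the block layer: scsi_cmnd->timeout_per_command becomes cmd->request->timeout, scsi_req_abort_cmd() becomes blk_abort_request(), and the EH timeout hooks return enum blk_eh_timer_return instead of enum scsi_eh_timer_return. A minimal sketch (not part of this patch, names illustrative) of what a host-template timeout hook looks like after the conversion:

#include <linux/blkdev.h>
#include <linux/jiffies.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>

/* Called by the midlayer when the block-layer timer for @cmd fires. */
static enum blk_eh_timer_return example_eh_timed_out(struct scsi_cmnd *cmd)
{
        /* The per-command timeout now lives on the request, in jiffies. */
        if (cmd->request->timeout < 60 * HZ)
                return BLK_EH_RESET_TIMER;      /* re-arm the block-layer timer */

        return BLK_EH_NOT_HANDLED;              /* let SCSI EH take over */
}

static struct scsi_host_template example_template = {
        .name           = "example",
        .eh_timed_out   = example_eh_timed_out, /* as gdth and megaraid do below */
};
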
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index c7f06298bd3..d3b211af4e1 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -63,7 +63,7 @@ comment "SCSI support type (disk, tape, CD-ROM)"
config BLK_DEV_SD
tristate "SCSI disk support"
depends on SCSI
- select CRC_T10DIF
+ select CRC_T10DIF if BLK_DEV_INTEGRITY
---help---
If you want to use SCSI hard disks, Fibre Channel disks,
Serial ATA (SATA) or Parallel ATA (PATA) hard disks,
@@ -1325,14 +1325,6 @@ config SCSI_QLOGIC_FAS
To compile this driver as a module, choose M here: the
module will be called qlogicfas.
-config SCSI_QLOGIC_FC_FIRMWARE
- bool "Include loadable firmware in driver"
- depends on SCSI_QLOGIC_FC
- help
- Say Y to include ISP2X00 Fabric Initiator/Target Firmware, with
- expanded LUN addressing and FcTape (FCP-2) support, in the
- qlogicfc driver. This is required on some platforms.
-
config SCSI_QLOGIC_1280
tristate "Qlogic QLA 1240/1x80/1x160 SCSI support"
depends on PCI && SCSI
diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c
index aa4e77c2527..8abfd06b5a7 100644
--- a/drivers/scsi/aacraid/aachba.c
+++ b/drivers/scsi/aacraid/aachba.c
@@ -1139,7 +1139,7 @@ static struct aac_srb * aac_scsi_common(struct fib * fib, struct scsi_cmnd * cmd
srbcmd->id = cpu_to_le32(scmd_id(cmd));
srbcmd->lun = cpu_to_le32(cmd->device->lun);
srbcmd->flags = cpu_to_le32(flag);
- timeout = cmd->timeout_per_command/HZ;
+ timeout = cmd->request->timeout/HZ;
if (timeout == 0)
timeout = 1;
srbcmd->timeout = cpu_to_le32(timeout); // timeout in seconds
diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c
index 994da56fffe..708e475896b 100644
--- a/drivers/scsi/device_handler/scsi_dh_alua.c
+++ b/drivers/scsi/device_handler/scsi_dh_alua.c
@@ -425,7 +425,7 @@ static int alua_check_sense(struct scsi_device *sdev,
/*
* LUN Not Accessible - ALUA state transition
*/
- return NEEDS_RETRY;
+ return ADD_TO_MLQUEUE;
if (sense_hdr->asc == 0x04 && sense_hdr->ascq == 0x0b)
/*
* LUN Not Accessible -- Target port in standby state
@@ -447,18 +447,18 @@ static int alua_check_sense(struct scsi_device *sdev,
/*
* Power On, Reset, or Bus Device Reset, just retry.
*/
- return NEEDS_RETRY;
+ return ADD_TO_MLQUEUE;
if (sense_hdr->asc == 0x2a && sense_hdr->ascq == 0x06) {
/*
* ALUA state changed
*/
- return NEEDS_RETRY;
+ return ADD_TO_MLQUEUE;
}
if (sense_hdr->asc == 0x2a && sense_hdr->ascq == 0x07) {
/*
* Implicit ALUA state transition failed
*/
- return NEEDS_RETRY;
+ return ADD_TO_MLQUEUE;
}
break;
}
@@ -490,7 +490,7 @@ static int alua_stpg(struct scsi_device *sdev, int state,
if (!err)
return SCSI_DH_IO;
err = alua_check_sense(sdev, &sense_hdr);
- if (retry > 0 && err == NEEDS_RETRY) {
+ if (retry > 0 && err == ADD_TO_MLQUEUE) {
retry--;
goto retry;
}
@@ -535,7 +535,7 @@ static int alua_rtpg(struct scsi_device *sdev, struct alua_dh_data *h)
return SCSI_DH_IO;
err = alua_check_sense(sdev, &sense_hdr);
- if (err == NEEDS_RETRY)
+ if (err == ADD_TO_MLQUEUE)
goto retry;
sdev_printk(KERN_INFO, sdev,
"%s: rtpg sense code %02x/%02x/%02x\n",
diff --git a/drivers/scsi/device_handler/scsi_dh_emc.c b/drivers/scsi/device_handler/scsi_dh_emc.c
index b9d23e9e9a4..8f45570a8a0 100644
--- a/drivers/scsi/device_handler/scsi_dh_emc.c
+++ b/drivers/scsi/device_handler/scsi_dh_emc.c
@@ -84,7 +84,7 @@ struct clariion_dh_data {
/*
* I/O buffer for both MODE_SELECT and INQUIRY commands.
*/
- char buffer[CLARIION_BUFFER_SIZE];
+ unsigned char buffer[CLARIION_BUFFER_SIZE];
/*
* SCSI sense buffer for commands -- assumes serial issuance
* and completion sequence of all commands for same multipath.
@@ -176,7 +176,7 @@ static int parse_sp_info_reply(struct scsi_device *sdev,
err = SCSI_DH_DEV_TEMP_BUSY;
goto out;
}
- if (csdev->buffer[4] < 0 || csdev->buffer[4] > 2) {
+ if (csdev->buffer[4] > 2) {
/* Invalid buffer format */
sdev_printk(KERN_NOTICE, sdev,
"%s: invalid VPD page 0xC0 format\n",
@@ -278,7 +278,6 @@ static struct request *get_req(struct scsi_device *sdev, int cmd,
return NULL;
}
- memset(rq->cmd, 0, BLK_MAX_CDB);
rq->cmd_len = COMMAND_SIZE(cmd);
rq->cmd[0] = cmd;
@@ -439,7 +438,7 @@ static int clariion_check_sense(struct scsi_device *sdev,
* Unit Attention Code. This is the first IO
* to the new path, so just retry.
*/
- return NEEDS_RETRY;
+ return ADD_TO_MLQUEUE;
break;
}
@@ -514,7 +513,7 @@ retry:
return SCSI_DH_IO;
err = clariion_check_sense(sdev, &sshdr);
- if (retry > 0 && err == NEEDS_RETRY) {
+ if (retry > 0 && err == ADD_TO_MLQUEUE) {
retry--;
goto retry;
}
diff --git a/drivers/scsi/device_handler/scsi_dh_hp_sw.c b/drivers/scsi/device_handler/scsi_dh_hp_sw.c
index a6a4ef3ad51..5e93c88ad66 100644
--- a/drivers/scsi/device_handler/scsi_dh_hp_sw.c
+++ b/drivers/scsi/device_handler/scsi_dh_hp_sw.c
@@ -114,7 +114,6 @@ static int hp_sw_tur(struct scsi_device *sdev, struct hp_sw_dh_data *h)
req->cmd_type = REQ_TYPE_BLOCK_PC;
req->cmd_flags |= REQ_FAILFAST;
req->cmd_len = COMMAND_SIZE(TEST_UNIT_READY);
- memset(req->cmd, 0, MAX_COMMAND_SIZE);
req->cmd[0] = TEST_UNIT_READY;
req->timeout = HP_SW_TIMEOUT;
req->sense = h->sense;
@@ -207,7 +206,6 @@ static int hp_sw_start_stop(struct scsi_device *sdev, struct hp_sw_dh_data *h)
req->cmd_type = REQ_TYPE_BLOCK_PC;
req->cmd_flags |= REQ_FAILFAST;
req->cmd_len = COMMAND_SIZE(START_STOP);
- memset(req->cmd, 0, MAX_COMMAND_SIZE);
req->cmd[0] = START_STOP;
req->cmd[4] = 1; /* Start spin cycle */
req->timeout = HP_SW_TIMEOUT;
diff --git a/drivers/scsi/device_handler/scsi_dh_rdac.c b/drivers/scsi/device_handler/scsi_dh_rdac.c
index 2dee69da35c..50bf95f3b5c 100644
--- a/drivers/scsi/device_handler/scsi_dh_rdac.c
+++ b/drivers/scsi/device_handler/scsi_dh_rdac.c
@@ -225,8 +225,6 @@ static struct request *get_rdac_req(struct scsi_device *sdev,
return NULL;
}
- memset(rq->cmd, 0, BLK_MAX_CDB);
-
rq->cmd_type = REQ_TYPE_BLOCK_PC;
rq->cmd_flags |= REQ_FAILFAST | REQ_NOMERGE;
rq->retries = RDAC_RETRIES;
@@ -551,7 +549,7 @@ static int rdac_check_sense(struct scsi_device *sdev,
*
* Just retry and wait.
*/
- return NEEDS_RETRY;
+ return ADD_TO_MLQUEUE;
break;
case ILLEGAL_REQUEST:
if (sense_hdr->asc == 0x94 && sense_hdr->ascq == 0x01) {
@@ -568,7 +566,7 @@ static int rdac_check_sense(struct scsi_device *sdev,
/*
* Power On, Reset, or Bus Device Reset, just retry.
*/
- return NEEDS_RETRY;
+ return ADD_TO_MLQUEUE;
break;
}
/* success just means we do not care what scsi-ml does */
@@ -590,6 +588,8 @@ static const struct scsi_dh_devlist rdac_dev_list[] = {
{"STK", "OPENstorage D280"},
{"SUN", "CSM200_R"},
{"SUN", "LCSM100_F"},
+ {"DELL", "MD3000"},
+ {"DELL", "MD3000i"},
{NULL, NULL},
};
diff --git a/drivers/scsi/gdth.c b/drivers/scsi/gdth.c
index 822d5214692..c387c15a212 100644
--- a/drivers/scsi/gdth.c
+++ b/drivers/scsi/gdth.c
@@ -464,7 +464,6 @@ int __gdth_execute(struct scsi_device *sdev, gdth_cmd_str *gdtcmd, char *cmnd,
/* use request field to save the ptr. to completion struct. */
scp->request = (struct request *)&wait;
- scp->timeout_per_command = timeout*HZ;
scp->cmd_len = 12;
scp->cmnd = cmnd;
cmndinfo.priority = IOCTL_PRI;
@@ -1995,23 +1994,12 @@ static void gdth_putq(gdth_ha_str *ha, Scsi_Cmnd *scp, unchar priority)
register Scsi_Cmnd *pscp;
register Scsi_Cmnd *nscp;
ulong flags;
- unchar b, t;
TRACE(("gdth_putq() priority %d\n",priority));
spin_lock_irqsave(&ha->smp_lock, flags);
- if (!cmndinfo->internal_command) {
+ if (!cmndinfo->internal_command)
cmndinfo->priority = priority;
- b = scp->device->channel;
- t = scp->device->id;
- if (priority >= DEFAULT_PRI) {
- if ((b != ha->virt_bus && ha->raw[BUS_L2P(ha,b)].lock) ||
- (b==ha->virt_bus && t<MAX_HDRIVES && ha->hdr[t].lock)) {
- TRACE2(("gdth_putq(): locked IO ->update_timeout()\n"));
- cmndinfo->timeout = gdth_update_timeout(scp, 0);
- }
- }
- }
if (ha->req_first==NULL) {
ha->req_first = scp; /* queue was empty */
@@ -3899,6 +3887,39 @@ static const char *gdth_info(struct Scsi_Host *shp)
return ((const char *)ha->binfo.type_string);
}
+static enum blk_eh_timer_return gdth_timed_out(struct scsi_cmnd *scp)
+{
+ gdth_ha_str *ha = shost_priv(scp->device->host);
+ struct gdth_cmndinfo *cmndinfo = gdth_cmnd_priv(scp);
+ unchar b, t;
+ ulong flags;
+ enum blk_eh_timer_return retval = BLK_EH_NOT_HANDLED;
+
+ TRACE(("%s() cmd 0x%x\n", scp->cmnd[0], __func__));
+ b = scp->device->channel;
+ t = scp->device->id;
+
+ /*
+ * We don't really honor the command timeout, but we try to
+ * honor 6 times of the actual command timeout! So reset the
+ * timer if this is less than 6th timeout on this command!
+ */
+ if (++cmndinfo->timeout_count < 6)
+ retval = BLK_EH_RESET_TIMER;
+
+ /* Reset the timeout if it is locked IO */
+ spin_lock_irqsave(&ha->smp_lock, flags);
+ if ((b != ha->virt_bus && ha->raw[BUS_L2P(ha, b)].lock) ||
+ (b == ha->virt_bus && t < MAX_HDRIVES && ha->hdr[t].lock)) {
+ TRACE2(("%s(): locked IO, reset timeout\n", __func__));
+ retval = BLK_EH_RESET_TIMER;
+ }
+ spin_unlock_irqrestore(&ha->smp_lock, flags);
+
+ return retval;
+}
+
+
static int gdth_eh_bus_reset(Scsi_Cmnd *scp)
{
gdth_ha_str *ha = shost_priv(scp->device->host);
@@ -3992,7 +4013,7 @@ static int gdth_queuecommand(struct scsi_cmnd *scp,
BUG_ON(!cmndinfo);
scp->scsi_done = done;
- gdth_update_timeout(scp, scp->timeout_per_command * 6);
+ cmndinfo->timeout_count = 0;
cmndinfo->priority = DEFAULT_PRI;
return __gdth_queuecommand(ha, scp, cmndinfo);
@@ -4096,12 +4117,10 @@ static int ioc_lockdrv(void __user *arg)
ha->hdr[j].lock = 1;
spin_unlock_irqrestore(&ha->smp_lock, flags);
gdth_wait_completion(ha, ha->bus_cnt, j);
- gdth_stop_timeout(ha, ha->bus_cnt, j);
} else {
spin_lock_irqsave(&ha->smp_lock, flags);
ha->hdr[j].lock = 0;
spin_unlock_irqrestore(&ha->smp_lock, flags);
- gdth_start_timeout(ha, ha->bus_cnt, j);
gdth_next(ha);
}
}
@@ -4539,18 +4558,14 @@ static int gdth_ioctl(struct inode *inode, struct file *filep,
spin_lock_irqsave(&ha->smp_lock, flags);
ha->raw[i].lock = 1;
spin_unlock_irqrestore(&ha->smp_lock, flags);
- for (j = 0; j < ha->tid_cnt; ++j) {
+ for (j = 0; j < ha->tid_cnt; ++j)
gdth_wait_completion(ha, i, j);
- gdth_stop_timeout(ha, i, j);
- }
} else {
spin_lock_irqsave(&ha->smp_lock, flags);
ha->raw[i].lock = 0;
spin_unlock_irqrestore(&ha->smp_lock, flags);
- for (j = 0; j < ha->tid_cnt; ++j) {
- gdth_start_timeout(ha, i, j);
+ for (j = 0; j < ha->tid_cnt; ++j)
gdth_next(ha);
- }
}
}
break;
@@ -4644,6 +4659,7 @@ static struct scsi_host_template gdth_template = {
.slave_configure = gdth_slave_configure,
.bios_param = gdth_bios_param,
.proc_info = gdth_proc_info,
+ .eh_timed_out = gdth_timed_out,
.proc_name = "gdth",
.can_queue = GDTH_MAXCMDS,
.this_id = -1,
diff --git a/drivers/scsi/gdth.h b/drivers/scsi/gdth.h
index ca92476727c..1646444e9bd 100644
--- a/drivers/scsi/gdth.h
+++ b/drivers/scsi/gdth.h
@@ -916,7 +916,7 @@ typedef struct {
gdth_cmd_str *internal_cmd_str; /* crier for internal messages*/
dma_addr_t sense_paddr; /* sense dma-addr */
unchar priority;
- int timeout;
+ int timeout_count; /* # of timeout calls */
volatile int wait_for_completion;
ushort status;
ulong32 info;
diff --git a/drivers/scsi/gdth_proc.c b/drivers/scsi/gdth_proc.c
index ce0228e26ae..59349a316e1 100644
--- a/drivers/scsi/gdth_proc.c
+++ b/drivers/scsi/gdth_proc.c
@@ -748,69 +748,3 @@ static void gdth_wait_completion(gdth_ha_str *ha, int busnum, int id)
}
spin_unlock_irqrestore(&ha->smp_lock, flags);
}
-
-static void gdth_stop_timeout(gdth_ha_str *ha, int busnum, int id)
-{
- ulong flags;
- Scsi_Cmnd *scp;
- unchar b, t;
-
- spin_lock_irqsave(&ha->smp_lock, flags);
-
- for (scp = ha->req_first; scp; scp = (Scsi_Cmnd *)scp->SCp.ptr) {
- struct gdth_cmndinfo *cmndinfo = gdth_cmnd_priv(scp);
- if (!cmndinfo->internal_command) {
- b = scp->device->channel;
- t = scp->device->id;
- if (t == (unchar)id && b == (unchar)busnum) {
- TRACE2(("gdth_stop_timeout(): update_timeout()\n"));
- cmndinfo->timeout = gdth_update_timeout(scp, 0);
- }
- }
- }
- spin_unlock_irqrestore(&ha->smp_lock, flags);
-}
-
-static void gdth_start_timeout(gdth_ha_str *ha, int busnum, int id)
-{
- ulong flags;
- Scsi_Cmnd *scp;
- unchar b, t;
-
- spin_lock_irqsave(&ha->smp_lock, flags);
-
- for (scp = ha->req_first; scp; scp = (Scsi_Cmnd *)scp->SCp.ptr) {
- struct gdth_cmndinfo *cmndinfo = gdth_cmnd_priv(scp);
- if (!cmndinfo->internal_command) {
- b = scp->device->channel;
- t = scp->device->id;
- if (t == (unchar)id && b == (unchar)busnum) {
- TRACE2(("gdth_start_timeout(): update_timeout()\n"));
- gdth_update_timeout(scp, cmndinfo->timeout);
- }
- }
- }
- spin_unlock_irqrestore(&ha->smp_lock, flags);
-}
-
-static int gdth_update_timeout(Scsi_Cmnd *scp, int timeout)
-{
- int oldto;
-
- oldto = scp->timeout_per_command;
- scp->timeout_per_command = timeout;
-
- if (timeout == 0) {
- del_timer(&scp->eh_timeout);
- scp->eh_timeout.data = (unsigned long) NULL;
- scp->eh_timeout.expires = 0;
- } else {
- if (scp->eh_timeout.data != (unsigned long) NULL)
- del_timer(&scp->eh_timeout);
- scp->eh_timeout.data = (unsigned long) scp;
- scp->eh_timeout.expires = jiffies + timeout;
- add_timer(&scp->eh_timeout);
- }
-
- return oldto;
-}
diff --git a/drivers/scsi/gdth_proc.h b/drivers/scsi/gdth_proc.h
index 45e6fdacf36..9b900cc9ebe 100644
--- a/drivers/scsi/gdth_proc.h
+++ b/drivers/scsi/gdth_proc.h
@@ -20,9 +20,6 @@ static char *gdth_ioctl_alloc(gdth_ha_str *ha, int size, int scratch,
ulong64 *paddr);
static void gdth_ioctl_free(gdth_ha_str *ha, int size, char *buf, ulong64 paddr);
static void gdth_wait_completion(gdth_ha_str *ha, int busnum, int id);
-static void gdth_stop_timeout(gdth_ha_str *ha, int busnum, int id);
-static void gdth_start_timeout(gdth_ha_str *ha, int busnum, int id);
-static int gdth_update_timeout(Scsi_Cmnd *scp, int timeout);
#endif
diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
index fed0b02ebc1..3fdbb13e80a 100644
--- a/drivers/scsi/hosts.c
+++ b/drivers/scsi/hosts.c
@@ -464,7 +464,7 @@ static int __scsi_host_match(struct device *dev, void *data)
struct Scsi_Host *scsi_host_lookup(unsigned short hostnum)
{
struct device *cdev;
- struct Scsi_Host *shost = ERR_PTR(-ENXIO);
+ struct Scsi_Host *shost = NULL;
cdev = class_find_device(&shost_class, NULL, &hostnum,
__scsi_host_match);
diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c
index 7b1502c0ab6..87e09f35d3d 100644
--- a/drivers/scsi/ibmvscsi/ibmvscsi.c
+++ b/drivers/scsi/ibmvscsi/ibmvscsi.c
@@ -756,7 +756,7 @@ static int ibmvscsi_queuecommand(struct scsi_cmnd *cmnd,
init_event_struct(evt_struct,
handle_cmd_rsp,
VIOSRP_SRP_FORMAT,
- cmnd->timeout_per_command/HZ);
+ cmnd->request->timeout/HZ);
evt_struct->cmnd = cmnd;
evt_struct->cmnd_done = done;
diff --git a/drivers/scsi/ide-scsi.c b/drivers/scsi/ide-scsi.c
index 461331d3dc4..81c16cba541 100644
--- a/drivers/scsi/ide-scsi.c
+++ b/drivers/scsi/ide-scsi.c
@@ -612,7 +612,7 @@ static int idescsi_queue (struct scsi_cmnd *cmd,
pc->req_xfer = pc->buf_size = scsi_bufflen(cmd);
pc->scsi_cmd = cmd;
pc->done = done;
- pc->timeout = jiffies + cmd->timeout_per_command;
+ pc->timeout = jiffies + cmd->request->timeout;
if (test_bit(IDESCSI_LOG_CMD, &scsi->log)) {
printk ("ide-scsi: %s: que %lu, cmd = ", drive->name, cmd->serial_number);
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index e7a3a655442..d30eb7ba018 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -3670,7 +3670,8 @@ static int ipr_slave_configure(struct scsi_device *sdev)
sdev->no_uld_attach = 1;
}
if (ipr_is_vset_device(res)) {
- sdev->timeout = IPR_VSET_RW_TIMEOUT;
+ blk_queue_rq_timeout(sdev->request_queue,
+ IPR_VSET_RW_TIMEOUT);
blk_queue_max_sectors(sdev->request_queue, IPR_VSET_MAX_SECTORS);
}
if (ipr_is_vset_device(res) || ipr_is_scsi_disk(res))
diff --git a/drivers/scsi/ips.c b/drivers/scsi/ips.c
index bc9e6ddf41d..ef683f0d2b5 100644
--- a/drivers/scsi/ips.c
+++ b/drivers/scsi/ips.c
@@ -3818,7 +3818,7 @@ ips_send_cmd(ips_ha_t * ha, ips_scb_t * scb)
scb->cmd.dcdb.segment_4G = 0;
scb->cmd.dcdb.enhanced_sg = 0;
- TimeOut = scb->scsi_cmd->timeout_per_command;
+ TimeOut = scb->scsi_cmd->request->timeout;
if (ha->subsys->param[4] & 0x00100000) { /* If NEW Tape DCDB is Supported */
if (!scb->sg_len) {
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index 299e075a7b3..da7b67d30d9 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -1456,7 +1456,7 @@ static void fail_all_commands(struct iscsi_conn *conn, unsigned lun,
if (lun == task->sc->device->lun || lun == -1) {
debug_scsi("failing in progress sc %p itt 0x%x\n",
task->sc, task->itt);
- fail_command(conn, task, DID_BUS_BUSY << 16);
+ fail_command(conn, task, error << 16);
}
}
}
@@ -1476,12 +1476,12 @@ static void iscsi_start_tx(struct iscsi_conn *conn)
scsi_queue_work(conn->session->host, &conn->xmitwork);
}
-static enum scsi_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *scmd)
+static enum blk_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *scmd)
{
struct iscsi_cls_session *cls_session;
struct iscsi_session *session;
struct iscsi_conn *conn;
- enum scsi_eh_timer_return rc = EH_NOT_HANDLED;
+ enum blk_eh_timer_return rc = BLK_EH_NOT_HANDLED;
cls_session = starget_to_session(scsi_target(scmd->device));
session = cls_session->dd_data;
@@ -1494,14 +1494,14 @@ static enum scsi_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *scmd)
* We are probably in the middle of iscsi recovery so let
* that complete and handle the error.
*/
- rc = EH_RESET_TIMER;
+ rc = BLK_EH_RESET_TIMER;
goto done;
}
conn = session->leadconn;
if (!conn) {
/* In the middle of shuting down */
- rc = EH_RESET_TIMER;
+ rc = BLK_EH_RESET_TIMER;
goto done;
}
@@ -1513,20 +1513,21 @@ static enum scsi_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *scmd)
*/
if (time_before_eq(conn->last_recv + (conn->recv_timeout * HZ) +
(conn->ping_timeout * HZ), jiffies))
- rc = EH_RESET_TIMER;
+ rc = BLK_EH_RESET_TIMER;
/*
* if we are about to check the transport then give the command
* more time
*/
if (time_before_eq(conn->last_recv + (conn->recv_timeout * HZ),
jiffies))
- rc = EH_RESET_TIMER;
+ rc = BLK_EH_RESET_TIMER;
/* if in the middle of checking the transport then give us more time */
if (conn->ping_task)
- rc = EH_RESET_TIMER;
+ rc = BLK_EH_RESET_TIMER;
done:
spin_unlock(&session->lock);
- debug_scsi("return %s\n", rc == EH_RESET_TIMER ? "timer reset" : "nh");
+ debug_scsi("return %s\n", rc == BLK_EH_RESET_TIMER ?
+ "timer reset" : "nh");
return rc;
}
diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
index 48ee8c7f5bd..e1550117069 100644
--- a/drivers/scsi/libsas/sas_ata.c
+++ b/drivers/scsi/libsas/sas_ata.c
@@ -294,10 +294,10 @@ static void sas_ata_post_internal(struct ata_queued_cmd *qc)
}
}
-static int sas_ata_scr_write(struct ata_port *ap, unsigned int sc_reg_in,
+static int sas_ata_scr_write(struct ata_link *link, unsigned int sc_reg_in,
u32 val)
{
- struct domain_device *dev = ap->private_data;
+ struct domain_device *dev = link->ap->private_data;
SAS_DPRINTK("STUB %s\n", __func__);
switch (sc_reg_in) {
@@ -319,10 +319,10 @@ static int sas_ata_scr_write(struct ata_port *ap, unsigned int sc_reg_in,
return 0;
}
-static int sas_ata_scr_read(struct ata_port *ap, unsigned int sc_reg_in,
+static int sas_ata_scr_read(struct ata_link *link, unsigned int sc_reg_in,
u32 *val)
{
- struct domain_device *dev = ap->private_data;
+ struct domain_device *dev = link->ap->private_data;
SAS_DPRINTK("STUB %s\n", __func__);
switch (sc_reg_in) {
@@ -398,7 +398,7 @@ void sas_ata_task_abort(struct sas_task *task)
/* Bounce SCSI-initiated commands to the SCSI EH */
if (qc->scsicmd) {
- scsi_req_abort_cmd(qc->scsicmd);
+ blk_abort_request(qc->scsicmd->request);
scsi_schedule_eh(qc->scsicmd->device->host);
return;
}
diff --git a/drivers/scsi/libsas/sas_internal.h b/drivers/scsi/libsas/sas_internal.h
index b4f9368f116..0001374bd6b 100644
--- a/drivers/scsi/libsas/sas_internal.h
+++ b/drivers/scsi/libsas/sas_internal.h
@@ -55,7 +55,7 @@ void sas_unregister_phys(struct sas_ha_struct *sas_ha);
int sas_register_ports(struct sas_ha_struct *sas_ha);
void sas_unregister_ports(struct sas_ha_struct *sas_ha);
-enum scsi_eh_timer_return sas_scsi_timed_out(struct scsi_cmnd *);
+enum blk_eh_timer_return sas_scsi_timed_out(struct scsi_cmnd *);
int sas_init_queue(struct sas_ha_struct *sas_ha);
int sas_init_events(struct sas_ha_struct *sas_ha);
diff --git a/drivers/scsi/libsas/sas_scsi_host.c b/drivers/scsi/libsas/sas_scsi_host.c
index a8e3ef30907..744838780ad 100644
--- a/drivers/scsi/libsas/sas_scsi_host.c
+++ b/drivers/scsi/libsas/sas_scsi_host.c
@@ -673,43 +673,43 @@ out:
return;
}
-enum scsi_eh_timer_return sas_scsi_timed_out(struct scsi_cmnd *cmd)
+enum blk_eh_timer_return sas_scsi_timed_out(struct scsi_cmnd *cmd)
{
struct sas_task *task = TO_SAS_TASK(cmd);
unsigned long flags;
if (!task) {
- cmd->timeout_per_command /= 2;
+ cmd->request->timeout /= 2;
SAS_DPRINTK("command 0x%p, task 0x%p, gone: %s\n",
- cmd, task, (cmd->timeout_per_command ?
- "EH_RESET_TIMER" : "EH_NOT_HANDLED"));
- if (!cmd->timeout_per_command)
- return EH_NOT_HANDLED;
- return EH_RESET_TIMER;
+ cmd, task, (cmd->request->timeout ?
+ "BLK_EH_RESET_TIMER" : "BLK_EH_NOT_HANDLED"));
+ if (!cmd->request->timeout)
+ return BLK_EH_NOT_HANDLED;
+ return BLK_EH_RESET_TIMER;
}
spin_lock_irqsave(&task->task_state_lock, flags);
BUG_ON(task->task_state_flags & SAS_TASK_STATE_ABORTED);
if (task->task_state_flags & SAS_TASK_STATE_DONE) {
spin_unlock_irqrestore(&task->task_state_lock, flags);
- SAS_DPRINTK("command 0x%p, task 0x%p, timed out: EH_HANDLED\n",
- cmd, task);
- return EH_HANDLED;
+ SAS_DPRINTK("command 0x%p, task 0x%p, timed out: "
+ "BLK_EH_HANDLED\n", cmd, task);
+ return BLK_EH_HANDLED;
}
if (!(task->task_state_flags & SAS_TASK_AT_INITIATOR)) {
spin_unlock_irqrestore(&task->task_state_lock, flags);
SAS_DPRINTK("command 0x%p, task 0x%p, not at initiator: "
- "EH_RESET_TIMER\n",
+ "BLK_EH_RESET_TIMER\n",
cmd, task);
- return EH_RESET_TIMER;
+ return BLK_EH_RESET_TIMER;
}
task->task_state_flags |= SAS_TASK_STATE_ABORTED;
spin_unlock_irqrestore(&task->task_state_lock, flags);
- SAS_DPRINTK("command 0x%p, task 0x%p, timed out: EH_NOT_HANDLED\n",
+ SAS_DPRINTK("command 0x%p, task 0x%p, timed out: BLK_EH_NOT_HANDLED\n",
cmd, task);
- return EH_NOT_HANDLED;
+ return BLK_EH_NOT_HANDLED;
}
int sas_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
@@ -1039,7 +1039,7 @@ void sas_task_abort(struct sas_task *task)
return;
}
- scsi_req_abort_cmd(sc);
+ blk_abort_request(sc->request);
scsi_schedule_eh(sc->device->host);
}
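For reference, the libsas changes above reduce the abort path to the pattern sketched here: push the timed-out request through the block layer's abort path, then wake the SCSI error-handler thread. This is a sketch rather than code from the patch; the function name and the include choices are assumptions.

#include <linux/blkdev.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>

/* Hand a SCSI-initiated command over to the error handler. */
static void example_bounce_to_eh(struct scsi_cmnd *sc)
{
        blk_abort_request(sc->request);         /* replaces scsi_req_abort_cmd() */
        scsi_schedule_eh(sc->device->host);     /* wake the SCSI EH thread */
}
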
diff --git a/drivers/scsi/megaraid/megaraid_sas.c b/drivers/scsi/megaraid/megaraid_sas.c
index 97b763378e7..afe1de99876 100644
--- a/drivers/scsi/megaraid/megaraid_sas.c
+++ b/drivers/scsi/megaraid/megaraid_sas.c
@@ -1167,7 +1167,7 @@ static int megasas_generic_reset(struct scsi_cmnd *scmd)
* cmd has not been completed within the timeout period.
*/
static enum
-scsi_eh_timer_return megasas_reset_timer(struct scsi_cmnd *scmd)
+blk_eh_timer_return megasas_reset_timer(struct scsi_cmnd *scmd)
{
struct megasas_cmd *cmd = (struct megasas_cmd *)scmd->SCp.ptr;
struct megasas_instance *instance;
@@ -1175,7 +1175,7 @@ scsi_eh_timer_return megasas_reset_timer(struct scsi_cmnd *scmd)
if (time_after(jiffies, scmd->jiffies_at_alloc +
(MEGASAS_DEFAULT_CMD_TIMEOUT * 2) * HZ)) {
- return EH_NOT_HANDLED;
+ return BLK_EH_NOT_HANDLED;
}
instance = cmd->instance;
@@ -1189,7 +1189,7 @@ scsi_eh_timer_return megasas_reset_timer(struct scsi_cmnd *scmd)
spin_unlock_irqrestore(instance->host->host_lock, flags);
}
- return EH_RESET_TIMER;
+ return BLK_EH_RESET_TIMER;
}
/**
diff --git a/drivers/scsi/ncr53c8xx.c b/drivers/scsi/ncr53c8xx.c
index c57c94c0ffd..3b7240e4081 100644
--- a/drivers/scsi/ncr53c8xx.c
+++ b/drivers/scsi/ncr53c8xx.c
@@ -4170,8 +4170,8 @@ static int ncr_queue_command (struct ncb *np, struct scsi_cmnd *cmd)
**
**----------------------------------------------------
*/
- if (np->settle_time && cmd->timeout_per_command >= HZ) {
- u_long tlimit = jiffies + cmd->timeout_per_command - HZ;
+ if (np->settle_time && cmd->request->timeout >= HZ) {
+ u_long tlimit = jiffies + cmd->request->timeout - HZ;
if (time_after(np->settle_time, tlimit))
np->settle_time = tlimit;
}
diff --git a/drivers/scsi/qla1280.c b/drivers/scsi/qla1280.c
index 37f9ba0cd79..b6cd12b2e99 100644
--- a/drivers/scsi/qla1280.c
+++ b/drivers/scsi/qla1280.c
@@ -2845,7 +2845,7 @@ qla1280_64bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp)
memset(((char *)pkt + 8), 0, (REQUEST_ENTRY_SIZE - 8));
/* Set ISP command timeout. */
- pkt->timeout = cpu_to_le16(cmd->timeout_per_command/HZ);
+ pkt->timeout = cpu_to_le16(cmd->request->timeout/HZ);
/* Set device target ID and LUN */
pkt->lun = SCSI_LUN_32(cmd);
@@ -3114,7 +3114,7 @@ qla1280_32bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp)
memset(((char *)pkt + 8), 0, (REQUEST_ENTRY_SIZE - 8));
/* Set ISP command timeout. */
- pkt->timeout = cpu_to_le16(cmd->timeout_per_command/HZ);
+ pkt->timeout = cpu_to_le16(cmd->request->timeout/HZ);
/* Set device target ID and LUN */
pkt->lun = SCSI_LUN_32(cmd);
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index 45e7dcb4b34..0ddfe7106b3 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -292,10 +292,11 @@ qla2x00_sysfs_write_optrom_ctl(struct kobject *kobj,
valid = 0;
if (ha->optrom_size == OPTROM_SIZE_2300 && start == 0)
valid = 1;
- else if (start == (FA_BOOT_CODE_ADDR*4) ||
- start == (FA_RISC_CODE_ADDR*4))
+ else if (start == (ha->flt_region_boot * 4) ||
+ start == (ha->flt_region_fw * 4))
valid = 1;
- else if (IS_QLA25XX(ha) && start == (FA_VPD_NVRAM_ADDR*4))
+ else if (IS_QLA25XX(ha) &&
+ start == (ha->flt_region_vpd_nvram * 4))
valid = 1;
if (!valid) {
qla_printk(KERN_WARNING, ha,
@@ -1065,6 +1066,8 @@ qla2x00_get_fc_host_stats(struct Scsi_Host *shost)
pfc_host_stat->dumped_frames = stats->dumped_frames;
pfc_host_stat->nos_count = stats->nos_rcvd;
}
+ pfc_host_stat->fcp_input_megabytes = ha->qla_stats.input_bytes >> 20;
+ pfc_host_stat->fcp_output_megabytes = ha->qla_stats.output_bytes >> 20;
done_free:
dma_pool_free(ha->s_dma_pool, stats, stats_dma);
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index 94a720eabfd..83c81921677 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -25,7 +25,6 @@
#include <linux/firmware.h>
#include <linux/aer.h>
#include <linux/mutex.h>
-#include <linux/semaphore.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
@@ -2157,6 +2156,8 @@ struct qla_chip_state_84xx {
struct qla_statistics {
uint32_t total_isp_aborts;
+ uint64_t input_bytes;
+ uint64_t output_bytes;
};
/*
@@ -2238,6 +2239,7 @@ typedef struct scsi_qla_host {
#define FCPORT_UPDATE_NEEDED 27
#define VP_DPC_NEEDED 28 /* wake up for VP dpc handling */
#define UNLOADING 29
+#define NPIV_CONFIG_NEEDED 30
uint32_t device_flags;
#define DFLG_LOCAL_DEVICES BIT_0
@@ -2507,7 +2509,6 @@ typedef struct scsi_qla_host {
uint64_t fce_wr, fce_rd;
struct mutex fce_mutex;
- uint32_t hw_event_start;
uint32_t hw_event_ptr;
uint32_t hw_event_pause_errors;
@@ -2553,6 +2554,14 @@ typedef struct scsi_qla_host {
uint32_t fdt_unprotect_sec_cmd;
uint32_t fdt_protect_sec_cmd;
+ uint32_t flt_region_flt;
+ uint32_t flt_region_fdt;
+ uint32_t flt_region_boot;
+ uint32_t flt_region_fw;
+ uint32_t flt_region_vpd_nvram;
+ uint32_t flt_region_hw_event;
+ uint32_t flt_region_npiv_conf;
+
/* Needed for BEACON */
uint16_t beacon_blink_led;
uint8_t beacon_color_state;
diff --git a/drivers/scsi/qla2xxx/qla_fw.h b/drivers/scsi/qla2xxx/qla_fw.h
index cf194517400..d1d14202575 100644
--- a/drivers/scsi/qla2xxx/qla_fw.h
+++ b/drivers/scsi/qla2xxx/qla_fw.h
@@ -789,14 +789,23 @@ struct device_reg_24xx {
#define FA_RISC_CODE_ADDR 0x20000
#define FA_RISC_CODE_SEGMENTS 2
+#define FA_FLASH_DESCR_ADDR_24 0x11000
+#define FA_FLASH_LAYOUT_ADDR_24 0x11400
+#define FA_NPIV_CONF0_ADDR_24 0x16000
+#define FA_NPIV_CONF1_ADDR_24 0x17000
+
#define FA_FW_AREA_ADDR 0x40000
#define FA_VPD_NVRAM_ADDR 0x48000
#define FA_FEATURE_ADDR 0x4C000
#define FA_FLASH_DESCR_ADDR 0x50000
+#define FA_FLASH_LAYOUT_ADDR 0x50400
#define FA_HW_EVENT0_ADDR 0x54000
-#define FA_HW_EVENT1_ADDR 0x54200
+#define FA_HW_EVENT1_ADDR 0x54400
#define FA_HW_EVENT_SIZE 0x200
#define FA_HW_EVENT_ENTRY_SIZE 4
+#define FA_NPIV_CONF0_ADDR 0x5C000
+#define FA_NPIV_CONF1_ADDR 0x5D000
+
/*
* Flash Error Log Event Codes.
*/
@@ -806,10 +815,6 @@ struct device_reg_24xx {
#define HW_EVENT_NVRAM_CHKSUM_ERR 0xF023
#define HW_EVENT_FLASH_FW_ERR 0xF024
-#define FA_BOOT_LOG_ADDR 0x58000
-#define FA_FW_DUMP0_ADDR 0x60000
-#define FA_FW_DUMP1_ADDR 0x70000
-
uint32_t flash_data; /* Flash/NVRAM BIOS data. */
uint32_t ctrl_status; /* Control/Status. */
@@ -1203,6 +1208,62 @@ struct qla_fdt_layout {
uint8_t unused2[65];
};
+/* Flash Layout Table ********************************************************/
+
+struct qla_flt_location {
+ uint8_t sig[4];
+ uint32_t start_lo;
+ uint32_t start_hi;
+ uint16_t unused;
+ uint16_t checksum;
+};
+
+struct qla_flt_header {
+ uint16_t version;
+ uint16_t length;
+ uint16_t checksum;
+ uint16_t unused;
+};
+
+#define FLT_REG_FW 0x01
+#define FLT_REG_BOOT_CODE 0x07
+#define FLT_REG_VPD_0 0x14
+#define FLT_REG_NVRAM_0 0x15
+#define FLT_REG_VPD_1 0x16
+#define FLT_REG_NVRAM_1 0x17
+#define FLT_REG_FDT 0x1a
+#define FLT_REG_FLT 0x1c
+#define FLT_REG_HW_EVENT_0 0x1d
+#define FLT_REG_HW_EVENT_1 0x1f
+#define FLT_REG_NPIV_CONF_0 0x29
+#define FLT_REG_NPIV_CONF_1 0x2a
+
+struct qla_flt_region {
+ uint32_t code;
+ uint32_t size;
+ uint32_t start;
+ uint32_t end;
+};
+
+/* Flash NPIV Configuration Table ********************************************/
+
+struct qla_npiv_header {
+ uint8_t sig[2];
+ uint16_t version;
+ uint16_t entries;
+ uint16_t unused[4];
+ uint16_t checksum;
+};
+
+struct qla_npiv_entry {
+ uint16_t flags;
+ uint16_t vf_id;
+ uint16_t qos;
+ uint16_t unused1;
+ uint8_t port_name[WWN_SIZE];
+ uint8_t node_name[WWN_SIZE];
+};
+
/* 84XX Support **************************************************************/
#define MBA_ISP84XX_ALERT 0x800f /* Alert Notification. */
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index 0b156735e9a..753dbe6cce6 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -313,9 +313,11 @@ extern int qla24xx_get_flash_version(scsi_qla_host_t *, void *);
extern int qla2xxx_hw_event_log(scsi_qla_host_t *, uint16_t , uint16_t,
uint16_t, uint16_t);
-extern void qla2xxx_get_flash_info(scsi_qla_host_t *);
+extern int qla2xxx_get_flash_info(scsi_qla_host_t *);
extern int qla2xxx_get_vpd_field(scsi_qla_host_t *, char *, char *, size_t);
+extern void qla2xxx_flash_npiv_conf(scsi_qla_host_t *);
+
/*
* Global Function Prototypes in qla_dbg.c source file.
*/
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index ee89ddd64aa..a470f2d3270 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -83,6 +83,13 @@ qla2x00_initialize_adapter(scsi_qla_host_t *ha)
ha->isp_ops->reset_chip(ha);
+ rval = qla2xxx_get_flash_info(ha);
+ if (rval) {
+ DEBUG2(printk("scsi(%ld): Unable to validate FLASH data.\n",
+ ha->host_no));
+ return (rval);
+ }
+
ha->isp_ops->get_flash_version(ha, ha->request_ring);
qla_printk(KERN_INFO, ha, "Configure NVRAM parameters...\n");
@@ -109,7 +116,6 @@ qla2x00_initialize_adapter(scsi_qla_host_t *ha)
rval = qla2x00_setup_chip(ha);
if (rval)
return (rval);
- qla2xxx_get_flash_info(ha);
}
if (IS_QLA84XX(ha)) {
ha->cs84xx = qla84xx_get_chip(ha);
@@ -2016,7 +2022,7 @@ qla2x00_configure_loop(scsi_qla_host_t *ha)
DEBUG3(printk("%s: exiting normally\n", __func__));
}
- /* Restore state if a resync event occured during processing */
+ /* Restore state if a resync event occurred during processing */
if (test_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags)) {
if (test_bit(LOCAL_LOOP_UPDATE, &save_flags))
set_bit(LOCAL_LOOP_UPDATE, &ha->dpc_flags);
@@ -2561,7 +2567,7 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *ha, struct list_head *new_fcports)
rval = QLA_SUCCESS;
/* Try GID_PT to get device list, else GAN. */
- swl = kcalloc(MAX_FIBRE_DEVICES, sizeof(sw_info_t), GFP_ATOMIC);
+ swl = kcalloc(MAX_FIBRE_DEVICES, sizeof(sw_info_t), GFP_KERNEL);
if (!swl) {
/*EMPTY*/
DEBUG2(printk("scsi(%ld): GID_PT allocations failed, fallback "
@@ -3751,7 +3757,7 @@ qla24xx_load_risc_flash(scsi_qla_host_t *ha, uint32_t *srisc_addr)
rval = QLA_SUCCESS;
segments = FA_RISC_CODE_SEGMENTS;
- faddr = FA_RISC_CODE_ADDR;
+ faddr = ha->flt_region_fw;
dcode = (uint32_t *)ha->request_ring;
*srisc_addr = 0;
diff --git a/drivers/scsi/qla2xxx/qla_inline.h b/drivers/scsi/qla2xxx/qla_inline.h
index 92fafbdbbaa..e90afad120e 100644
--- a/drivers/scsi/qla2xxx/qla_inline.h
+++ b/drivers/scsi/qla2xxx/qla_inline.h
@@ -52,7 +52,7 @@ to_qla_parent(scsi_qla_host_t *ha)
* @ha: HA context
* @ha_locked: is function called with the hardware lock
*
- * Returns non-zero if a failure occured, else zero.
+ * Returns non-zero if a failure occurred, else zero.
*/
static inline int
qla2x00_issue_marker(scsi_qla_host_t *ha, int ha_locked)
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index d57669aa461..85bc0a48598 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -21,17 +21,22 @@ static void qla2x00_isp_cmd(scsi_qla_host_t *ha);
* Returns the proper CF_* direction based on CDB.
*/
static inline uint16_t
-qla2x00_get_cmd_direction(struct scsi_cmnd *cmd)
+qla2x00_get_cmd_direction(srb_t *sp)
{
uint16_t cflags;
cflags = 0;
/* Set transfer direction */
- if (cmd->sc_data_direction == DMA_TO_DEVICE)
+ if (sp->cmd->sc_data_direction == DMA_TO_DEVICE) {
cflags = CF_WRITE;
- else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
+ sp->fcport->ha->qla_stats.output_bytes +=
+ scsi_bufflen(sp->cmd);
+ } else if (sp->cmd->sc_data_direction == DMA_FROM_DEVICE) {
cflags = CF_READ;
+ sp->fcport->ha->qla_stats.input_bytes +=
+ scsi_bufflen(sp->cmd);
+ }
return (cflags);
}
@@ -169,7 +174,7 @@ void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt,
ha = sp->ha;
- cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(cmd));
+ cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));
/* Three DSDs are available in the Command Type 2 IOCB */
avail_dsds = 3;
@@ -228,7 +233,7 @@ void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
ha = sp->ha;
- cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(cmd));
+ cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));
/* Two DSDs are available in the Command Type 3 IOCB */
avail_dsds = 2;
@@ -262,7 +267,7 @@ void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
* qla2x00_start_scsi() - Send a SCSI command to the ISP
* @sp: command to send to the ISP
*
- * Returns non-zero if a failure occured, else zero.
+ * Returns non-zero if a failure occurred, else zero.
*/
int
qla2x00_start_scsi(srb_t *sp)
@@ -407,7 +412,7 @@ queuing_error:
*
* Can be called from both normal and interrupt context.
*
- * Returns non-zero if a failure occured, else zero.
+ * Returns non-zero if a failure occurred, else zero.
*/
int
__qla2x00_marker(scsi_qla_host_t *ha, uint16_t loop_id, uint16_t lun,
@@ -625,12 +630,17 @@ qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
ha = sp->ha;
/* Set transfer direction */
- if (cmd->sc_data_direction == DMA_TO_DEVICE)
+ if (cmd->sc_data_direction == DMA_TO_DEVICE) {
cmd_pkt->task_mgmt_flags =
__constant_cpu_to_le16(TMF_WRITE_DATA);
- else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
+ sp->fcport->ha->qla_stats.output_bytes +=
+ scsi_bufflen(sp->cmd);
+ } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
cmd_pkt->task_mgmt_flags =
__constant_cpu_to_le16(TMF_READ_DATA);
+ sp->fcport->ha->qla_stats.input_bytes +=
+ scsi_bufflen(sp->cmd);
+ }
/* One DSD is available in the Command Type 3 IOCB */
avail_dsds = 1;
@@ -666,7 +676,7 @@ qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
* qla24xx_start_scsi() - Send a SCSI command to the ISP
* @sp: command to send to the ISP
*
- * Returns non-zero if a failure occured, else zero.
+ * Returns non-zero if a failure occurred, else zero.
*/
int
qla24xx_start_scsi(srb_t *sp)
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index 45a3b93eed5..fc4bfa7f839 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -391,9 +391,9 @@ qla2x00_async_event(scsi_qla_host_t *ha, uint16_t *mb)
break;
case MBA_LIP_OCCURRED: /* Loop Initialization Procedure */
- DEBUG2(printk("scsi(%ld): LIP occured (%x).\n", ha->host_no,
+ DEBUG2(printk("scsi(%ld): LIP occurred (%x).\n", ha->host_no,
mb[1]));
- qla_printk(KERN_INFO, ha, "LIP occured (%x).\n", mb[1]);
+ qla_printk(KERN_INFO, ha, "LIP occurred (%x).\n", mb[1]);
if (atomic_read(&ha->loop_state) != LOOP_DOWN) {
atomic_set(&ha->loop_state, LOOP_DOWN);
@@ -460,7 +460,7 @@ qla2x00_async_event(scsi_qla_host_t *ha, uint16_t *mb)
DEBUG2(printk("scsi(%ld): Asynchronous LIP RESET (%x).\n",
ha->host_no, mb[1]));
qla_printk(KERN_INFO, ha,
- "LIP reset occured (%x).\n", mb[1]);
+ "LIP reset occurred (%x).\n", mb[1]);
if (atomic_read(&ha->loop_state) != LOOP_DOWN) {
atomic_set(&ha->loop_state, LOOP_DOWN);
@@ -543,7 +543,7 @@ qla2x00_async_event(scsi_qla_host_t *ha, uint16_t *mb)
case MBA_PORT_UPDATE: /* Port database update */
/*
- * If PORT UPDATE is global (recieved LIP_OCCURED/LIP_RESET
+ * If PORT UPDATE is global (received LIP_OCCURRED/LIP_RESET
* event etc. earlier indicating loop is down) then process
* it. Otherwise ignore it and Wait for RSCN to come in.
*/
@@ -589,7 +589,7 @@ qla2x00_async_event(scsi_qla_host_t *ha, uint16_t *mb)
"scsi(%ld): RSCN database changed -- %04x %04x %04x.\n",
ha->host_no, mb[1], mb[2], mb[3]));
- rscn_entry = (mb[1] << 16) | mb[2];
+ rscn_entry = ((mb[1] & 0xff) << 16) | mb[2];
host_pid = (ha->d_id.b.domain << 16) | (ha->d_id.b.area << 8) |
ha->d_id.b.al_pa;
if (rscn_entry == host_pid) {
@@ -600,6 +600,8 @@ qla2x00_async_event(scsi_qla_host_t *ha, uint16_t *mb)
break;
}
+ /* Ignore reserved bits from RSCN-payload. */
+ rscn_entry = ((mb[1] & 0x3ff) << 16) | mb[2];
rscn_queue_index = ha->rscn_in_ptr + 1;
if (rscn_queue_index == MAX_RSCN_COUNT)
rscn_queue_index = 0;
@@ -1060,8 +1062,9 @@ qla2x00_status_entry(scsi_qla_host_t *ha, void *pkt)
resid = resid_len;
/* Use F/W calculated residual length. */
if (IS_FWI2_CAPABLE(ha)) {
- if (scsi_status & SS_RESIDUAL_UNDER &&
- resid != fw_resid_len) {
+ if (!(scsi_status & SS_RESIDUAL_UNDER)) {
+ lscsi_status = 0;
+ } else if (resid != fw_resid_len) {
scsi_status &= ~SS_RESIDUAL_UNDER;
lscsi_status = 0;
}
@@ -1834,7 +1837,6 @@ clear_risc_ints:
WRT_REG_WORD(&reg->isp.hccr, HCCR_CLR_HOST_INT);
}
spin_unlock_irq(&ha->hardware_lock);
- ha->isp_ops->enable_intrs(ha);
fail:
return ret;
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index 813bc7784c0..36bc6851e23 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -233,7 +233,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *pvha, mbx_cmd_t *mcp)
DEBUG2_3_11(printk("%s(%ld): timeout schedule "
"isp_abort_needed.\n", __func__, ha->host_no));
qla_printk(KERN_WARNING, ha,
- "Mailbox command timeout occured. Scheduling ISP "
+ "Mailbox command timeout occurred. Scheduling ISP "
"abort.\n");
set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);
qla2xxx_wake_dpc(ha);
@@ -244,7 +244,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *pvha, mbx_cmd_t *mcp)
DEBUG2_3_11(printk("%s(%ld): timeout calling "
"abort_isp\n", __func__, ha->host_no));
qla_printk(KERN_WARNING, ha,
- "Mailbox command timeout occured. Issuing ISP "
+ "Mailbox command timeout occurred. Issuing ISP "
"abort.\n");
set_bit(ABORT_ISP_ACTIVE, &ha->dpc_flags);
@@ -1995,7 +1995,7 @@ qla2x00_get_fcal_position_map(scsi_qla_host_t *ha, char *pos_map)
char *pmap;
dma_addr_t pmap_dma;
- pmap = dma_pool_alloc(ha->s_dma_pool, GFP_ATOMIC, &pmap_dma);
+ pmap = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &pmap_dma);
if (pmap == NULL) {
DEBUG2_3_11(printk("%s(%ld): **** Mem Alloc Failed ****",
__func__, ha->host_no));
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 26afe44265c..3433441b956 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -1517,6 +1517,7 @@ qla2xxx_scan_start(struct Scsi_Host *shost)
set_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags);
set_bit(LOCAL_LOOP_UPDATE, &ha->dpc_flags);
set_bit(RSCN_UPDATE, &ha->dpc_flags);
+ set_bit(NPIV_CONFIG_NEEDED, &ha->dpc_flags);
}
static int
@@ -1663,8 +1664,6 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
ha->gid_list_info_size = 8;
ha->optrom_size = OPTROM_SIZE_25XX;
ha->isp_ops = &qla25xx_isp_ops;
- ha->hw_event_start = PCI_FUNC(pdev->devfn) ?
- FA_HW_EVENT1_ADDR: FA_HW_EVENT0_ADDR;
}
host->can_queue = ha->request_q_length + 128;
@@ -1740,6 +1739,8 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
if (ret)
goto probe_failed;
+ ha->isp_ops->enable_intrs(ha);
+
scsi_scan_host(host);
qla2x00_alloc_sysfs_attr(ha);
@@ -2431,6 +2432,12 @@ qla2x00_do_dpc(void *data)
ha->host_no));
}
+ if (test_bit(NPIV_CONFIG_NEEDED, &ha->dpc_flags) &&
+ atomic_read(&ha->loop_state) == LOOP_READY) {
+ clear_bit(NPIV_CONFIG_NEEDED, &ha->dpc_flags);
+ qla2xxx_flash_npiv_conf(ha);
+ }
+
if (!ha->interrupts_on)
ha->isp_ops->enable_intrs(ha);
diff --git a/drivers/scsi/qla2xxx/qla_sup.c b/drivers/scsi/qla2xxx/qla_sup.c
index 1bca7447493..90a13211717 100644
--- a/drivers/scsi/qla2xxx/qla_sup.c
+++ b/drivers/scsi/qla2xxx/qla_sup.c
@@ -543,23 +543,198 @@ qla24xx_get_flash_manufacturer(scsi_qla_host_t *ha, uint8_t *man_id,
}
}
-void
-qla2xxx_get_flash_info(scsi_qla_host_t *ha)
+static int
+qla2xxx_find_flt_start(scsi_qla_host_t *ha, uint32_t *start)
+{
+ const char *loc, *locations[] = { "DEF", "PCI" };
+ uint32_t pcihdr, pcids;
+ uint32_t *dcode;
+ uint8_t *buf, *bcode, last_image;
+ uint16_t cnt, chksum, *wptr;
+ struct qla_flt_location *fltl;
+
+ /*
+ * FLT-location structure resides after the last PCI region.
+ */
+
+ /* Begin with sane defaults. */
+ loc = locations[0];
+ *start = IS_QLA24XX_TYPE(ha) ? FA_FLASH_LAYOUT_ADDR_24:
+ FA_FLASH_LAYOUT_ADDR;
+
+ /* Begin with first PCI expansion ROM header. */
+ buf = (uint8_t *)ha->request_ring;
+ dcode = (uint32_t *)ha->request_ring;
+ pcihdr = 0;
+ last_image = 1;
+ do {
+ /* Verify PCI expansion ROM header. */
+ qla24xx_read_flash_data(ha, dcode, pcihdr >> 2, 0x20);
+ bcode = buf + (pcihdr % 4);
+ if (bcode[0x0] != 0x55 || bcode[0x1] != 0xaa)
+ goto end;
+
+ /* Locate PCI data structure. */
+ pcids = pcihdr + ((bcode[0x19] << 8) | bcode[0x18]);
+ qla24xx_read_flash_data(ha, dcode, pcids >> 2, 0x20);
+ bcode = buf + (pcihdr % 4);
+
+ /* Validate signature of PCI data structure. */
+ if (bcode[0x0] != 'P' || bcode[0x1] != 'C' ||
+ bcode[0x2] != 'I' || bcode[0x3] != 'R')
+ goto end;
+
+ last_image = bcode[0x15] & BIT_7;
+
+ /* Locate next PCI expansion ROM. */
+ pcihdr += ((bcode[0x11] << 8) | bcode[0x10]) * 512;
+ } while (!last_image);
+
+ /* Now verify FLT-location structure. */
+ fltl = (struct qla_flt_location *)ha->request_ring;
+ qla24xx_read_flash_data(ha, dcode, pcihdr >> 2,
+ sizeof(struct qla_flt_location) >> 2);
+ if (fltl->sig[0] != 'Q' || fltl->sig[1] != 'F' ||
+ fltl->sig[2] != 'L' || fltl->sig[3] != 'T')
+ goto end;
+
+ wptr = (uint16_t *)ha->request_ring;
+ cnt = sizeof(struct qla_flt_location) >> 1;
+ for (chksum = 0; cnt; cnt--)
+ chksum += le16_to_cpu(*wptr++);
+ if (chksum) {
+ qla_printk(KERN_ERR, ha,
+ "Inconsistent FLTL detected: checksum=0x%x.\n", chksum);
+ qla2x00_dump_buffer(buf, sizeof(struct qla_flt_location));
+ return QLA_FUNCTION_FAILED;
+ }
+
+ /* Good data. Use specified location. */
+ loc = locations[1];
+ *start = le16_to_cpu(fltl->start_hi) << 16 |
+ le16_to_cpu(fltl->start_lo);
+end:
+ DEBUG2(qla_printk(KERN_DEBUG, ha, "FLTL[%s] = 0x%x.\n", loc, *start));
+ return QLA_SUCCESS;
+}
+
+static void
+qla2xxx_get_flt_info(scsi_qla_host_t *ha, uint32_t flt_addr)
+{
+ const char *loc, *locations[] = { "DEF", "FLT" };
+ uint16_t *wptr;
+ uint16_t cnt, chksum;
+ uint32_t start;
+ struct qla_flt_header *flt;
+ struct qla_flt_region *region;
+
+ ha->flt_region_flt = flt_addr;
+ wptr = (uint16_t *)ha->request_ring;
+ flt = (struct qla_flt_header *)ha->request_ring;
+ region = (struct qla_flt_region *)&flt[1];
+ ha->isp_ops->read_optrom(ha, (uint8_t *)ha->request_ring,
+ flt_addr << 2, OPTROM_BURST_SIZE);
+ if (*wptr == __constant_cpu_to_le16(0xffff))
+ goto no_flash_data;
+ if (flt->version != __constant_cpu_to_le16(1)) {
+ DEBUG2(qla_printk(KERN_INFO, ha, "Unsupported FLT detected: "
+ "version=0x%x length=0x%x checksum=0x%x.\n",
+ le16_to_cpu(flt->version), le16_to_cpu(flt->length),
+ le16_to_cpu(flt->checksum)));
+ goto no_flash_data;
+ }
+
+ cnt = (sizeof(struct qla_flt_header) + le16_to_cpu(flt->length)) >> 1;
+ for (chksum = 0; cnt; cnt--)
+ chksum += le16_to_cpu(*wptr++);
+ if (chksum) {
+ DEBUG2(qla_printk(KERN_INFO, ha, "Inconsistent FLT detected: "
+ "version=0x%x length=0x%x checksum=0x%x.\n",
+ le16_to_cpu(flt->version), le16_to_cpu(flt->length),
+ chksum));
+ goto no_flash_data;
+ }
+
+ loc = locations[1];
+ cnt = le16_to_cpu(flt->length) / sizeof(struct qla_flt_region);
+ for ( ; cnt; cnt--, region++) {
+ /* Store addresses as DWORD offsets. */
+ start = le32_to_cpu(region->start) >> 2;
+
+ DEBUG3(qla_printk(KERN_DEBUG, ha, "FLT[%02x]: start=0x%x "
+ "end=0x%x size=0x%x.\n", le32_to_cpu(region->code), start,
+ le32_to_cpu(region->end) >> 2, le32_to_cpu(region->size)));
+
+ switch (le32_to_cpu(region->code)) {
+ case FLT_REG_FW:
+ ha->flt_region_fw = start;
+ break;
+ case FLT_REG_BOOT_CODE:
+ ha->flt_region_boot = start;
+ break;
+ case FLT_REG_VPD_0:
+ ha->flt_region_vpd_nvram = start;
+ break;
+ case FLT_REG_FDT:
+ ha->flt_region_fdt = start;
+ break;
+ case FLT_REG_HW_EVENT_0:
+ if (!PCI_FUNC(ha->pdev->devfn))
+ ha->flt_region_hw_event = start;
+ break;
+ case FLT_REG_HW_EVENT_1:
+ if (PCI_FUNC(ha->pdev->devfn))
+ ha->flt_region_hw_event = start;
+ break;
+ case FLT_REG_NPIV_CONF_0:
+ if (!PCI_FUNC(ha->pdev->devfn))
+ ha->flt_region_npiv_conf = start;
+ break;
+ case FLT_REG_NPIV_CONF_1:
+ if (PCI_FUNC(ha->pdev->devfn))
+ ha->flt_region_npiv_conf = start;
+ break;
+ }
+ }
+ goto done;
+
+no_flash_data:
+ /* Use hardcoded defaults. */
+ loc = locations[0];
+ ha->flt_region_fw = FA_RISC_CODE_ADDR;
+ ha->flt_region_boot = FA_BOOT_CODE_ADDR;
+ ha->flt_region_vpd_nvram = FA_VPD_NVRAM_ADDR;
+ ha->flt_region_fdt = IS_QLA24XX_TYPE(ha) ? FA_FLASH_DESCR_ADDR_24:
+ FA_FLASH_DESCR_ADDR;
+ ha->flt_region_hw_event = !PCI_FUNC(ha->pdev->devfn) ?
+ FA_HW_EVENT0_ADDR: FA_HW_EVENT1_ADDR;
+ ha->flt_region_npiv_conf = !PCI_FUNC(ha->pdev->devfn) ?
+ (IS_QLA24XX_TYPE(ha) ? FA_NPIV_CONF0_ADDR_24: FA_NPIV_CONF0_ADDR):
+ (IS_QLA24XX_TYPE(ha) ? FA_NPIV_CONF1_ADDR_24: FA_NPIV_CONF1_ADDR);
+done:
+ DEBUG2(qla_printk(KERN_DEBUG, ha, "FLT[%s]: boot=0x%x fw=0x%x "
+ "vpd_nvram=0x%x fdt=0x%x flt=0x%x hwe=0x%x npiv=0x%x.\n", loc,
+ ha->flt_region_boot, ha->flt_region_fw, ha->flt_region_vpd_nvram,
+ ha->flt_region_fdt, ha->flt_region_flt, ha->flt_region_hw_event,
+ ha->flt_region_npiv_conf));
+}
+
+static void
+qla2xxx_get_fdt_info(scsi_qla_host_t *ha)
{
#define FLASH_BLK_SIZE_32K 0x8000
#define FLASH_BLK_SIZE_64K 0x10000
+ const char *loc, *locations[] = { "MID", "FDT" };
uint16_t cnt, chksum;
uint16_t *wptr;
struct qla_fdt_layout *fdt;
uint8_t man_id, flash_id;
-
- if (!IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha))
- return;
+ uint16_t mid, fid;
wptr = (uint16_t *)ha->request_ring;
fdt = (struct qla_fdt_layout *)ha->request_ring;
ha->isp_ops->read_optrom(ha, (uint8_t *)ha->request_ring,
- FA_FLASH_DESCR_ADDR << 2, OPTROM_BURST_SIZE);
+ ha->flt_region_fdt << 2, OPTROM_BURST_SIZE);
if (*wptr == __constant_cpu_to_le16(0xffff))
goto no_flash_data;
if (fdt->sig[0] != 'Q' || fdt->sig[1] != 'L' || fdt->sig[2] != 'I' ||
@@ -577,7 +752,10 @@ qla2xxx_get_flash_info(scsi_qla_host_t *ha)
goto no_flash_data;
}
- ha->fdt_odd_index = le16_to_cpu(fdt->man_id) == 0x1f;
+ loc = locations[1];
+ mid = le16_to_cpu(fdt->man_id);
+ fid = le16_to_cpu(fdt->id);
+ ha->fdt_odd_index = mid == 0x1f;
ha->fdt_wrt_disable = fdt->wrt_disable_bits;
ha->fdt_erase_cmd = flash_conf_to_access_addr(0x0300 | fdt->erase_cmd);
ha->fdt_block_size = le32_to_cpu(fdt->block_size);
@@ -588,16 +766,12 @@ qla2xxx_get_flash_info(scsi_qla_host_t *ha)
flash_conf_to_access_addr(0x0300 | fdt->protect_sec_cmd):
flash_conf_to_access_addr(0x0336);
}
-
- DEBUG2(qla_printk(KERN_DEBUG, ha, "Flash[FDT]: (0x%x/0x%x) erase=0x%x "
- "pro=%x upro=%x idx=%d wrtd=0x%x blk=0x%x.\n",
- le16_to_cpu(fdt->man_id), le16_to_cpu(fdt->id), ha->fdt_erase_cmd,
- ha->fdt_protect_sec_cmd, ha->fdt_unprotect_sec_cmd,
- ha->fdt_odd_index, ha->fdt_wrt_disable, ha->fdt_block_size));
- return;
-
+ goto done;
no_flash_data:
+ loc = locations[0];
qla24xx_get_flash_manufacturer(ha, &man_id, &flash_id);
+ mid = man_id;
+ fid = flash_id;
ha->fdt_wrt_disable = 0x9c;
ha->fdt_erase_cmd = flash_conf_to_access_addr(0x03d8);
switch (man_id) {
@@ -625,14 +799,117 @@ no_flash_data:
ha->fdt_block_size = FLASH_BLK_SIZE_64K;
break;
}
-
- DEBUG2(qla_printk(KERN_DEBUG, ha, "Flash[MID]: (0x%x/0x%x) erase=0x%x "
- "pro=%x upro=%x idx=%d wrtd=0x%x blk=0x%x.\n", man_id, flash_id,
+done:
+ DEBUG2(qla_printk(KERN_DEBUG, ha, "FDT[%s]: (0x%x/0x%x) erase=0x%x "
+ "pro=%x upro=%x idx=%d wrtd=0x%x blk=0x%x.\n", loc, mid, fid,
ha->fdt_erase_cmd, ha->fdt_protect_sec_cmd,
ha->fdt_unprotect_sec_cmd, ha->fdt_odd_index, ha->fdt_wrt_disable,
ha->fdt_block_size));
}
+int
+qla2xxx_get_flash_info(scsi_qla_host_t *ha)
+{
+ int ret;
+ uint32_t flt_addr;
+
+ if (!IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha))
+ return QLA_SUCCESS;
+
+ ret = qla2xxx_find_flt_start(ha, &flt_addr);
+ if (ret != QLA_SUCCESS)
+ return ret;
+
+ qla2xxx_get_flt_info(ha, flt_addr);
+ qla2xxx_get_fdt_info(ha);
+
+ return QLA_SUCCESS;
+}
+
+void
+qla2xxx_flash_npiv_conf(scsi_qla_host_t *ha)
+{
+#define NPIV_CONFIG_SIZE (16*1024)
+ void *data;
+ uint16_t *wptr;
+ uint16_t cnt, chksum;
+ struct qla_npiv_header hdr;
+ struct qla_npiv_entry *entry;
+
+ if (!IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha))
+ return;
+
+ ha->isp_ops->read_optrom(ha, (uint8_t *)&hdr,
+ ha->flt_region_npiv_conf << 2, sizeof(struct qla_npiv_header));
+ if (hdr.version == __constant_cpu_to_le16(0xffff))
+ return;
+ if (hdr.version != __constant_cpu_to_le16(1)) {
+ DEBUG2(qla_printk(KERN_INFO, ha, "Unsupported NPIV-Config "
+ "detected: version=0x%x entries=0x%x checksum=0x%x.\n",
+ le16_to_cpu(hdr.version), le16_to_cpu(hdr.entries),
+ le16_to_cpu(hdr.checksum)));
+ return;
+ }
+
+ data = kmalloc(NPIV_CONFIG_SIZE, GFP_KERNEL);
+ if (!data) {
+ DEBUG2(qla_printk(KERN_INFO, ha, "NPIV-Config: Unable to "
+ "allocate memory.\n"));
+ return;
+ }
+
+ ha->isp_ops->read_optrom(ha, (uint8_t *)data,
+ ha->flt_region_npiv_conf << 2, NPIV_CONFIG_SIZE);
+
+ cnt = (sizeof(struct qla_npiv_header) + le16_to_cpu(hdr.entries) *
+ sizeof(struct qla_npiv_entry)) >> 1;
+ for (wptr = data, chksum = 0; cnt; cnt--)
+ chksum += le16_to_cpu(*wptr++);
+ if (chksum) {
+ DEBUG2(qla_printk(KERN_INFO, ha, "Inconsistent NPIV-Config "
+ "detected: version=0x%x entries=0x%x checksum=0x%x.\n",
+ le16_to_cpu(hdr.version), le16_to_cpu(hdr.entries),
+ chksum));
+ goto done;
+ }
+
+ entry = data + sizeof(struct qla_npiv_header);
+ cnt = le16_to_cpu(hdr.entries);
+ for ( ; cnt; cnt--, entry++) {
+ uint16_t flags;
+ struct fc_vport_identifiers vid;
+ struct fc_vport *vport;
+
+ flags = le16_to_cpu(entry->flags);
+ if (flags == 0xffff)
+ continue;
+ if ((flags & BIT_0) == 0)
+ continue;
+
+ memset(&vid, 0, sizeof(vid));
+ vid.roles = FC_PORT_ROLE_FCP_INITIATOR;
+ vid.vport_type = FC_PORTTYPE_NPIV;
+ vid.disable = false;
+ vid.port_name = wwn_to_u64(entry->port_name);
+ vid.node_name = wwn_to_u64(entry->node_name);
+
+ DEBUG2(qla_printk(KERN_DEBUG, ha, "NPIV[%02x]: wwpn=%llx "
+ "wwnn=%llx vf_id=0x%x qos=0x%x.\n", cnt,
+ (unsigned long long)vid.port_name,
+ (unsigned long long)vid.node_name,
+ le16_to_cpu(entry->vf_id), le16_to_cpu(entry->qos)));
+
+ vport = fc_vport_create(ha->host, 0, &vid);
+ if (!vport)
+ qla_printk(KERN_INFO, ha, "NPIV-Config: Failed to "
+ "create vport [%02x]: wwpn=%llx wwnn=%llx.\n", cnt,
+ (unsigned long long)vid.port_name,
+ (unsigned long long)vid.node_name);
+ }
+done:
+ kfree(data);
+}
+
static void
qla24xx_unprotect_flash(scsi_qla_host_t *ha)
{
@@ -920,7 +1197,8 @@ qla25xx_read_nvram_data(scsi_qla_host_t *ha, uint8_t *buf, uint32_t naddr,
dwptr = (uint32_t *)buf;
for (i = 0; i < bytes >> 2; i++, naddr++)
dwptr[i] = cpu_to_le32(qla24xx_read_flash_dword(ha,
- flash_data_to_access_addr(FA_VPD_NVRAM_ADDR | naddr)));
+ flash_data_to_access_addr(ha->flt_region_vpd_nvram |
+ naddr)));
return buf;
}
@@ -935,10 +1213,10 @@ qla25xx_write_nvram_data(scsi_qla_host_t *ha, uint8_t *buf, uint32_t naddr,
dbuf = vmalloc(RMW_BUFFER_SIZE);
if (!dbuf)
return QLA_MEMORY_ALLOC_FAILED;
- ha->isp_ops->read_optrom(ha, dbuf, FA_VPD_NVRAM_ADDR << 2,
+ ha->isp_ops->read_optrom(ha, dbuf, ha->flt_region_vpd_nvram << 2,
RMW_BUFFER_SIZE);
memcpy(dbuf + (naddr << 2), buf, bytes);
- ha->isp_ops->write_optrom(ha, dbuf, FA_VPD_NVRAM_ADDR << 2,
+ ha->isp_ops->write_optrom(ha, dbuf, ha->flt_region_vpd_nvram << 2,
RMW_BUFFER_SIZE);
vfree(dbuf);
@@ -2166,7 +2444,7 @@ qla2x00_get_flash_version(scsi_qla_host_t *ha, void *mbuf)
memset(dbyte, 0, 8);
dcode = (uint16_t *)dbyte;
- qla2x00_read_flash_data(ha, dbyte, FA_RISC_CODE_ADDR * 4 + 10,
+ qla2x00_read_flash_data(ha, dbyte, ha->flt_region_fw * 4 + 10,
8);
DEBUG3(printk("%s(%ld): dumping fw ver from flash:\n",
__func__, ha->host_no));
@@ -2177,7 +2455,7 @@ qla2x00_get_flash_version(scsi_qla_host_t *ha, void *mbuf)
(dcode[0] == 0 && dcode[1] == 0 && dcode[2] == 0 &&
dcode[3] == 0)) {
DEBUG2(printk("%s(): Unrecognized fw revision at "
- "%x.\n", __func__, FA_RISC_CODE_ADDR * 4));
+ "%x.\n", __func__, ha->flt_region_fw * 4));
} else {
/* values are in big endian */
ha->fw_revision[0] = dbyte[0] << 16 | dbyte[1];
@@ -2212,7 +2490,7 @@ qla24xx_get_flash_version(scsi_qla_host_t *ha, void *mbuf)
dcode = mbuf;
/* Begin with first PCI expansion ROM header. */
- pcihdr = 0;
+ pcihdr = ha->flt_region_boot;
last_image = 1;
do {
/* Verify PCI expansion ROM header. */
@@ -2282,7 +2560,7 @@ qla24xx_get_flash_version(scsi_qla_host_t *ha, void *mbuf)
memset(ha->fw_revision, 0, sizeof(ha->fw_revision));
dcode = mbuf;
- qla24xx_read_flash_data(ha, dcode, FA_RISC_CODE_ADDR + 4, 4);
+ qla24xx_read_flash_data(ha, dcode, ha->flt_region_fw + 4, 4);
for (i = 0; i < 4; i++)
dcode[i] = be32_to_cpu(dcode[i]);
@@ -2291,7 +2569,7 @@ qla24xx_get_flash_version(scsi_qla_host_t *ha, void *mbuf)
(dcode[0] == 0 && dcode[1] == 0 && dcode[2] == 0 &&
dcode[3] == 0)) {
DEBUG2(printk("%s(): Unrecognized fw version at %x.\n",
- __func__, FA_RISC_CODE_ADDR));
+ __func__, ha->flt_region_fw));
} else {
ha->fw_revision[0] = dcode[0];
ha->fw_revision[1] = dcode[1];
@@ -2355,7 +2633,7 @@ qla2xxx_hw_event_store(scsi_qla_host_t *ha, uint32_t *fdata)
/* Locate first empty entry. */
for (;;) {
if (ha->hw_event_ptr >=
- ha->hw_event_start + FA_HW_EVENT_SIZE) {
+ ha->flt_region_hw_event + FA_HW_EVENT_SIZE) {
DEBUG2(qla_printk(KERN_WARNING, ha,
"HW event -- Log Full!\n"));
return QLA_MEMORY_ALLOC_FAILED;
@@ -2391,7 +2669,7 @@ qla2xxx_hw_event_log(scsi_qla_host_t *ha, uint16_t code, uint16_t d1,
int rval;
uint32_t marker[2], fdata[4];
- if (ha->hw_event_start == 0)
+ if (ha->flt_region_hw_event == 0)
return QLA_FUNCTION_FAILED;
DEBUG2(qla_printk(KERN_WARNING, ha,
@@ -2406,7 +2684,7 @@ qla2xxx_hw_event_log(scsi_qla_host_t *ha, uint16_t code, uint16_t d1,
QLA_DRIVER_PATCH_VER, QLA_DRIVER_BETA_VER);
/* Locate marker. */
- ha->hw_event_ptr = ha->hw_event_start;
+ ha->hw_event_ptr = ha->flt_region_hw_event;
for (;;) {
qla24xx_read_flash_data(ha, fdata, ha->hw_event_ptr,
4);
@@ -2415,7 +2693,7 @@ qla2xxx_hw_event_log(scsi_qla_host_t *ha, uint16_t code, uint16_t d1,
break;
ha->hw_event_ptr += FA_HW_EVENT_ENTRY_SIZE;
if (ha->hw_event_ptr >=
- ha->hw_event_start + FA_HW_EVENT_SIZE) {
+ ha->flt_region_hw_event + FA_HW_EVENT_SIZE) {
DEBUG2(qla_printk(KERN_WARNING, ha,
"HW event -- Log Full!\n"));
return QLA_MEMORY_ALLOC_FAILED;
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h
index 4160e4caa7b..be5e299df52 100644
--- a/drivers/scsi/qla2xxx/qla_version.h
+++ b/drivers/scsi/qla2xxx/qla_version.h
@@ -7,7 +7,7 @@
/*
* Driver version
*/
-#define QLA2XXX_VERSION "8.02.01-k7"
+#define QLA2XXX_VERSION "8.02.01-k8"
#define QLA_DRIVER_MAJOR_VER 8
#define QLA_DRIVER_MINOR_VER 2
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
index 88bebb13bc5..de8279ad7d8 100644
--- a/drivers/scsi/qla4xxx/ql4_os.c
+++ b/drivers/scsi/qla4xxx/ql4_os.c
@@ -1542,7 +1542,7 @@ static int qla4xxx_eh_device_reset(struct scsi_cmnd *cmd)
DEBUG2(printk(KERN_INFO
"scsi%ld: DEVICE_RESET cmd=%p jiffies = 0x%lx, to=%x,"
"dpc_flags=%lx, status=%x allowed=%d\n", ha->host_no,
- cmd, jiffies, cmd->timeout_per_command / HZ,
+ cmd, jiffies, cmd->request->timeout / HZ,
ha->dpc_flags, cmd->result, cmd->allowed));
/* FIXME: wait for hba to go online */
@@ -1598,7 +1598,7 @@ static int qla4xxx_eh_target_reset(struct scsi_cmnd *cmd)
DEBUG2(printk(KERN_INFO
"scsi%ld: TARGET_DEVICE_RESET cmd=%p jiffies = 0x%lx, "
"to=%x,dpc_flags=%lx, status=%x allowed=%d\n",
- ha->host_no, cmd, jiffies, cmd->timeout_per_command / HZ,
+ ha->host_no, cmd, jiffies, cmd->request->timeout / HZ,
ha->dpc_flags, cmd->result, cmd->allowed));
stat = qla4xxx_reset_target(ha, ddb_entry);
diff --git a/drivers/scsi/qlogicpti.c b/drivers/scsi/qlogicpti.c
index 4a1cf6377f6..90535089672 100644
--- a/drivers/scsi/qlogicpti.c
+++ b/drivers/scsi/qlogicpti.c
@@ -914,6 +914,7 @@ static inline int load_cmd(struct scsi_cmnd *Cmnd, struct Command_Entry *cmd,
ds[i].d_count = sg_dma_len(s);
}
sg_count -= n;
+ sg = s;
}
} else {
cmd->dataseg[0].d_base = 0;
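
The one-line qlogicpti change above fixes continuation-entry mapping: the outer loop maps scatterlist entries in batches, and without advancing the cursor each batch restarted from the same entries. A rough sketch of the pattern with hypothetical helper names; only the "sg = s" cursor update mirrors the actual fix:

        struct scatterlist *sg = scsi_sglist(Cmnd), *s;
        int left = sg_count;

        while (left > 0) {
                int i, n = min(left, 7);        /* segments per continuation entry */

                for_each_sg(sg, s, n, i)
                        map_one_segment(s);     /* hypothetical helper */
                left -= n;
                sg = s;         /* s now points past the last mapped entry */
        }
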
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index ee6be596503..2ac3cb2b908 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -291,7 +291,6 @@ struct scsi_cmnd *scsi_get_command(struct scsi_device *dev, gfp_t gfp_mask)
unsigned long flags;
cmd->device = dev;
- init_timer(&cmd->eh_timeout);
INIT_LIST_HEAD(&cmd->list);
spin_lock_irqsave(&dev->list_lock, flags);
list_add_tail(&cmd->list, &dev->cmd_list);
@@ -652,26 +651,33 @@ int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
unsigned long timeout;
int rtn = 0;
+ /*
+ * We will use a queued command if possible, otherwise we will
+ * emulate the queuing and calling of completion function ourselves.
+ */
+ atomic_inc(&cmd->device->iorequest_cnt);
+
/* check if the device is still usable */
if (unlikely(cmd->device->sdev_state == SDEV_DEL)) {
/* in SDEV_DEL we error all commands. DID_NO_CONNECT
* returns an immediate error upwards, and signals
* that the device is no longer present */
cmd->result = DID_NO_CONNECT << 16;
- atomic_inc(&cmd->device->iorequest_cnt);
- __scsi_done(cmd);
+ scsi_done(cmd);
/* return 0 (because the command has been processed) */
goto out;
}
- /* Check to see if the scsi lld put this device into state SDEV_BLOCK. */
- if (unlikely(cmd->device->sdev_state == SDEV_BLOCK)) {
+ /* Check to see if the scsi lld made this device blocked. */
+ if (unlikely(scsi_device_blocked(cmd->device))) {
/*
- * in SDEV_BLOCK, the command is just put back on the device
- * queue. The suspend state has already blocked the queue so
- * future requests should not occur until the device
- * transitions out of the suspend state.
+ * in blocked state, the command is just put back on
+ * the device queue. The suspend state has already
+ * blocked the queue so future requests should not
+ * occur until the device transitions out of the
+ * suspend state.
*/
+
scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY);
SCSI_LOG_MLQUEUE(3, printk("queuecommand : device blocked \n"));
@@ -714,21 +720,9 @@ int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
host->resetting = 0;
}
- /*
- * AK: unlikely race here: for some reason the timer could
- * expire before the serial number is set up below.
- */
- scsi_add_timer(cmd, cmd->timeout_per_command, scsi_times_out);
-
scsi_log_send(cmd);
/*
- * We will use a queued command if possible, otherwise we will
- * emulate the queuing and calling of completion function ourselves.
- */
- atomic_inc(&cmd->device->iorequest_cnt);
-
- /*
* Before we queue this command, check if the command
* length exceeds what the host adapter can handle.
*/
@@ -744,6 +738,12 @@ int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
}
spin_lock_irqsave(host->host_lock, flags);
+ /*
+ * AK: unlikely race here: for some reason the timer could
+ * expire before the serial number is set up below.
+ *
+ * TODO: kill serial or move to blk layer
+ */
scsi_cmd_get_serial(host, cmd);
if (unlikely(host->shost_state == SHOST_DEL)) {
@@ -754,12 +754,8 @@ int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
}
spin_unlock_irqrestore(host->host_lock, flags);
if (rtn) {
- if (scsi_delete_timer(cmd)) {
- atomic_inc(&cmd->device->iodone_cnt);
- scsi_queue_insert(cmd,
- (rtn == SCSI_MLQUEUE_DEVICE_BUSY) ?
- rtn : SCSI_MLQUEUE_HOST_BUSY);
- }
+ scsi_queue_insert(cmd, (rtn == SCSI_MLQUEUE_DEVICE_BUSY) ?
+ rtn : SCSI_MLQUEUE_HOST_BUSY);
SCSI_LOG_MLQUEUE(3,
printk("queuecommand : request rejected\n"));
}
@@ -770,24 +766,6 @@ int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
}
/**
- * scsi_req_abort_cmd -- Request command recovery for the specified command
- * @cmd: pointer to the SCSI command of interest
- *
- * This function requests that SCSI Core start recovery for the
- * command by deleting the timer and adding the command to the eh
- * queue. It can be called by either LLDDs or SCSI Core. LLDDs who
- * implement their own error recovery MAY ignore the timeout event if
- * they generated scsi_req_abort_cmd.
- */
-void scsi_req_abort_cmd(struct scsi_cmnd *cmd)
-{
- if (!scsi_delete_timer(cmd))
- return;
- scsi_times_out(cmd);
-}
-EXPORT_SYMBOL(scsi_req_abort_cmd);
-
-/**
* scsi_done - Enqueue the finished SCSI command into the done queue.
* @cmd: The SCSI Command for which a low-level device driver (LLDD) gives
* ownership back to SCSI Core -- i.e. the LLDD has finished with it.
@@ -802,42 +780,7 @@ EXPORT_SYMBOL(scsi_req_abort_cmd);
*/
static void scsi_done(struct scsi_cmnd *cmd)
{
- /*
- * We don't have to worry about this one timing out anymore.
- * If we are unable to remove the timer, then the command
- * has already timed out. In which case, we have no choice but to
- * let the timeout function run, as we have no idea where in fact
- * that function could really be. It might be on another processor,
- * etc, etc.
- */
- if (!scsi_delete_timer(cmd))
- return;
- __scsi_done(cmd);
-}
-
-/* Private entry to scsi_done() to complete a command when the timer
- * isn't running --- used by scsi_times_out */
-void __scsi_done(struct scsi_cmnd *cmd)
-{
- struct request *rq = cmd->request;
-
- /*
- * Set the serial numbers back to zero
- */
- cmd->serial_number = 0;
-
- atomic_inc(&cmd->device->iodone_cnt);
- if (cmd->result)
- atomic_inc(&cmd->device->ioerr_cnt);
-
- BUG_ON(!rq);
-
- /*
- * The uptodate/nbytes values don't matter, as we allow partial
- * completes and thus will check this in the softirq callback
- */
- rq->completion_data = cmd;
- blk_complete_request(rq);
+ blk_complete_request(cmd->request);
}
/* Move this to a header if it becomes more generally useful */
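
With the hunks above, the per-command eh_timeout timer is gone from scsi.c: request timing is owned by the block layer, and scsi_done() collapses to blk_complete_request(). A condensed sketch of the resulting completion path (simplified, not the kernel's exact code):

/*
 * Sketch: an LLDD completes a command; SCSI core no longer arms or
 * deletes a per-command timer, so completion funnels straight into the
 * block layer.
 */
static void lldd_complete_sketch(struct scsi_cmnd *cmd)
{
        cmd->result = DID_OK << 16;
        cmd->scsi_done(cmd);            /* -> scsi_done() */
}

/* scsi_done(), after this patch, is just: */
static void scsi_done_sketch(struct scsi_cmnd *cmd)
{
        blk_complete_request(cmd->request);
}
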
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index 880051c89bd..fecefa05cb6 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -112,69 +112,8 @@ int scsi_eh_scmd_add(struct scsi_cmnd *scmd, int eh_flag)
}
/**
- * scsi_add_timer - Start timeout timer for a single scsi command.
- * @scmd: scsi command that is about to start running.
- * @timeout: amount of time to allow this command to run.
- * @complete: timeout function to call if timer isn't canceled.
- *
- * Notes:
- * This should be turned into an inline function. Each scsi command
- * has its own timer, and as it is added to the queue, we set up the
- * timer. When the command completes, we cancel the timer.
- */
-void scsi_add_timer(struct scsi_cmnd *scmd, int timeout,
- void (*complete)(struct scsi_cmnd *))
-{
-
- /*
- * If the clock was already running for this command, then
- * first delete the timer. The timer handling code gets rather
- * confused if we don't do this.
- */
- if (scmd->eh_timeout.function)
- del_timer(&scmd->eh_timeout);
-
- scmd->eh_timeout.data = (unsigned long)scmd;
- scmd->eh_timeout.expires = jiffies + timeout;
- scmd->eh_timeout.function = (void (*)(unsigned long)) complete;
-
- SCSI_LOG_ERROR_RECOVERY(5, printk("%s: scmd: %p, time:"
- " %d, (%p)\n", __func__,
- scmd, timeout, complete));
-
- add_timer(&scmd->eh_timeout);
-}
-
-/**
- * scsi_delete_timer - Delete/cancel timer for a given function.
- * @scmd: Cmd that we are canceling timer for
- *
- * Notes:
- * This should be turned into an inline function.
- *
- * Return value:
- * 1 if we were able to detach the timer. 0 if we blew it, and the
- * timer function has already started to run.
- */
-int scsi_delete_timer(struct scsi_cmnd *scmd)
-{
- int rtn;
-
- rtn = del_timer(&scmd->eh_timeout);
-
- SCSI_LOG_ERROR_RECOVERY(5, printk("%s: scmd: %p,"
- " rtn: %d\n", __func__,
- scmd, rtn));
-
- scmd->eh_timeout.data = (unsigned long)NULL;
- scmd->eh_timeout.function = NULL;
-
- return rtn;
-}
-
-/**
* scsi_times_out - Timeout function for normal scsi commands.
- * @scmd: Cmd that is timing out.
+ * @req: request that is timing out.
*
* Notes:
* We do not need to lock this. There is the potential for a race
@@ -182,9 +121,11 @@ int scsi_delete_timer(struct scsi_cmnd *scmd)
* normal completion function determines that the timer has already
* fired, then it mustn't do anything.
*/
-void scsi_times_out(struct scsi_cmnd *scmd)
+enum blk_eh_timer_return scsi_times_out(struct request *req)
{
- enum scsi_eh_timer_return (* eh_timed_out)(struct scsi_cmnd *);
+ struct scsi_cmnd *scmd = req->special;
+ enum blk_eh_timer_return (*eh_timed_out)(struct scsi_cmnd *);
+ enum blk_eh_timer_return rtn = BLK_EH_NOT_HANDLED;
scsi_log_completion(scmd, TIMEOUT_ERROR);
@@ -196,22 +137,20 @@ void scsi_times_out(struct scsi_cmnd *scmd)
eh_timed_out = NULL;
if (eh_timed_out)
- switch (eh_timed_out(scmd)) {
- case EH_HANDLED:
- __scsi_done(scmd);
- return;
- case EH_RESET_TIMER:
- scsi_add_timer(scmd, scmd->timeout_per_command,
- scsi_times_out);
- return;
- case EH_NOT_HANDLED:
+ rtn = eh_timed_out(scmd);
+ switch (rtn) {
+ case BLK_EH_NOT_HANDLED:
break;
+ default:
+ return rtn;
}
if (unlikely(!scsi_eh_scmd_add(scmd, SCSI_EH_CANCEL_CMD))) {
scmd->result |= DID_TIME_OUT << 16;
- __scsi_done(scmd);
+ return BLK_EH_HANDLED;
}
+
+ return BLK_EH_NOT_HANDLED;
}
/**
@@ -391,7 +330,7 @@ static int scsi_check_sense(struct scsi_cmnd *scmd)
case HARDWARE_ERROR:
if (scmd->device->retry_hwerror)
- return NEEDS_RETRY;
+ return ADD_TO_MLQUEUE;
else
return SUCCESS;
@@ -1793,7 +1732,6 @@ scsi_reset_provider(struct scsi_device *dev, int flag)
blk_rq_init(NULL, &req);
scmd->request = &req;
- memset(&scmd->eh_timeout, 0, sizeof(scmd->eh_timeout));
scmd->cmnd = req.cmd;
@@ -1804,8 +1742,6 @@ scsi_reset_provider(struct scsi_device *dev, int flag)
scmd->sc_data_direction = DMA_BIDIRECTIONAL;
- init_timer(&scmd->eh_timeout);
-
spin_lock_irqsave(shost->host_lock, flags);
shost->tmf_in_progress = 1;
spin_unlock_irqrestore(shost->host_lock, flags);
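
scsi_times_out() now takes the request and returns an enum blk_eh_timer_return, and transport/LLDD eh_timed_out callbacks adopt the same contract (fc_timed_out is converted the same way further down). A hedged sketch of a callback under the new contract; my_port_is_blocked() is a hypothetical helper:

/*
 * Sketch of an eh_timed_out callback under the new return contract:
 * BLK_EH_RESET_TIMER re-arms the block layer's request timer, while
 * BLK_EH_NOT_HANDLED lets scsi_times_out() add the command to the
 * error-handler queue.
 */
static enum blk_eh_timer_return my_eh_timed_out(struct scsi_cmnd *scmd)
{
        if (my_port_is_blocked(scmd->device))   /* hypothetical helper */
                return BLK_EH_RESET_TIMER;

        return BLK_EH_NOT_HANDLED;
}
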
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index ff5d56b3ee4..98ee55ced59 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -852,7 +852,7 @@ static void scsi_end_bidi_request(struct scsi_cmnd *cmd)
void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
{
int result = cmd->result;
- int this_count = scsi_bufflen(cmd);
+ int this_count;
struct request_queue *q = cmd->device->request_queue;
struct request *req = cmd->request;
int error = 0;
@@ -908,6 +908,7 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
*/
if (scsi_end_request(cmd, error, good_bytes, result == 0) == NULL)
return;
+ this_count = blk_rq_bytes(req);
/* good_bytes = 0, or (inclusive) there were leftovers and
* result = 0, so scsi_end_request couldn't retry.
@@ -1180,7 +1181,6 @@ int scsi_setup_blk_pc_cmnd(struct scsi_device *sdev, struct request *req)
cmd->transfersize = req->data_len;
cmd->allowed = req->retries;
- cmd->timeout_per_command = req->timeout;
return BLKPREP_OK;
}
EXPORT_SYMBOL(scsi_setup_blk_pc_cmnd);
@@ -1250,6 +1250,7 @@ int scsi_prep_state_check(struct scsi_device *sdev, struct request *req)
break;
case SDEV_QUIESCE:
case SDEV_BLOCK:
+ case SDEV_CREATED_BLOCK:
/*
* If the devices is blocked we defer normal commands.
*/
@@ -1415,17 +1416,26 @@ static void scsi_kill_request(struct request *req, struct request_queue *q)
spin_unlock(shost->host_lock);
spin_lock(sdev->request_queue->queue_lock);
- __scsi_done(cmd);
+ blk_complete_request(req);
}
static void scsi_softirq_done(struct request *rq)
{
- struct scsi_cmnd *cmd = rq->completion_data;
- unsigned long wait_for = (cmd->allowed + 1) * cmd->timeout_per_command;
+ struct scsi_cmnd *cmd = rq->special;
+ unsigned long wait_for = (cmd->allowed + 1) * rq->timeout;
int disposition;
INIT_LIST_HEAD(&cmd->eh_entry);
+ /*
+ * Set the serial numbers back to zero
+ */
+ cmd->serial_number = 0;
+
+ atomic_inc(&cmd->device->iodone_cnt);
+ if (cmd->result)
+ atomic_inc(&cmd->device->ioerr_cnt);
+
disposition = scsi_decide_disposition(cmd);
if (disposition != SUCCESS &&
time_before(cmd->jiffies_at_alloc + wait_for, jiffies)) {
@@ -1674,6 +1684,7 @@ struct request_queue *scsi_alloc_queue(struct scsi_device *sdev)
blk_queue_prep_rq(q, scsi_prep_fn);
blk_queue_softirq_done(q, scsi_softirq_done);
+ blk_queue_rq_timed_out(q, scsi_times_out);
return q;
}
@@ -2063,10 +2074,13 @@ scsi_device_set_state(struct scsi_device *sdev, enum scsi_device_state state)
switch (state) {
case SDEV_CREATED:
- /* There are no legal states that come back to
- * created. This is the manually initialised start
- * state */
- goto illegal;
+ switch (oldstate) {
+ case SDEV_CREATED_BLOCK:
+ break;
+ default:
+ goto illegal;
+ }
+ break;
case SDEV_RUNNING:
switch (oldstate) {
@@ -2104,8 +2118,17 @@ scsi_device_set_state(struct scsi_device *sdev, enum scsi_device_state state)
case SDEV_BLOCK:
switch (oldstate) {
- case SDEV_CREATED:
case SDEV_RUNNING:
+ case SDEV_CREATED_BLOCK:
+ break;
+ default:
+ goto illegal;
+ }
+ break;
+
+ case SDEV_CREATED_BLOCK:
+ switch (oldstate) {
+ case SDEV_CREATED:
break;
default:
goto illegal;
@@ -2393,8 +2416,12 @@ scsi_internal_device_block(struct scsi_device *sdev)
int err = 0;
err = scsi_device_set_state(sdev, SDEV_BLOCK);
- if (err)
- return err;
+ if (err) {
+ err = scsi_device_set_state(sdev, SDEV_CREATED_BLOCK);
+
+ if (err)
+ return err;
+ }
/*
* The device has transitioned to SDEV_BLOCK. Stop the
@@ -2437,8 +2464,12 @@ scsi_internal_device_unblock(struct scsi_device *sdev)
* and goose the device queue if successful.
*/
err = scsi_device_set_state(sdev, SDEV_RUNNING);
- if (err)
- return err;
+ if (err) {
+ err = scsi_device_set_state(sdev, SDEV_CREATED);
+
+ if (err)
+ return err;
+ }
spin_lock_irqsave(q->queue_lock, flags);
blk_start_queue(q);
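
The new SDEV_CREATED_BLOCK state replaces the old CREATED to BLOCK transition: a device blocked before probing finishes now parks in CREATED_BLOCK and can later move to BLOCK or fall back to CREATED on unblock. Summarizing just the transitions touched above (a sketch; other states unchanged):

/* Sketch of the legality rules added above for SDEV_CREATED_BLOCK. */
static int created_block_transition_legal(enum scsi_device_state from,
                                          enum scsi_device_state to)
{
        switch (to) {
        case SDEV_CREATED_BLOCK:        /* blocked before probing finished */
                return from == SDEV_CREATED;
        case SDEV_CREATED:              /* unblock falls back here */
                return from == SDEV_CREATED_BLOCK;
        case SDEV_BLOCK:
                return from == SDEV_RUNNING || from == SDEV_CREATED_BLOCK;
        default:
                return 0;               /* not covered by this sketch */
        }
}
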
diff --git a/drivers/scsi/scsi_netlink.c b/drivers/scsi/scsi_netlink.c
index ae7ed9a2266..b37e133de80 100644
--- a/drivers/scsi/scsi_netlink.c
+++ b/drivers/scsi/scsi_netlink.c
@@ -21,6 +21,7 @@
#include <linux/time.h>
#include <linux/jiffies.h>
#include <linux/security.h>
+#include <linux/delay.h>
#include <net/sock.h>
#include <net/netlink.h>
@@ -30,6 +31,39 @@
struct sock *scsi_nl_sock = NULL;
EXPORT_SYMBOL_GPL(scsi_nl_sock);
+static DEFINE_SPINLOCK(scsi_nl_lock);
+static struct list_head scsi_nl_drivers;
+
+static u32 scsi_nl_state;
+#define STATE_EHANDLER_BSY 0x00000001
+
+struct scsi_nl_transport {
+ int (*msg_handler)(struct sk_buff *);
+ void (*event_handler)(struct notifier_block *, unsigned long, void *);
+ unsigned int refcnt;
+ int flags;
+};
+
+/* flags values (bit flags) */
+#define HANDLER_DELETING 0x1
+
+static struct scsi_nl_transport transports[SCSI_NL_MAX_TRANSPORTS] =
+ { {NULL, }, };
+
+
+struct scsi_nl_drvr {
+ struct list_head next;
+ int (*dmsg_handler)(struct Scsi_Host *shost, void *payload,
+ u32 len, u32 pid);
+ void (*devt_handler)(struct notifier_block *nb,
+ unsigned long event, void *notify_ptr);
+ struct scsi_host_template *hostt;
+ u64 vendor_id;
+ unsigned int refcnt;
+ int flags;
+};
+
+
/**
* scsi_nl_rcv_msg - Receive message handler.
@@ -45,8 +79,9 @@ scsi_nl_rcv_msg(struct sk_buff *skb)
{
struct nlmsghdr *nlh;
struct scsi_nl_hdr *hdr;
- uint32_t rlen;
- int err;
+ unsigned long flags;
+ u32 rlen;
+ int err, tport;
while (skb->len >= NLMSG_SPACE(0)) {
err = 0;
@@ -65,7 +100,7 @@ scsi_nl_rcv_msg(struct sk_buff *skb)
if (nlh->nlmsg_type != SCSI_TRANSPORT_MSG) {
err = -EBADMSG;
- return;
+ goto next_msg;
}
hdr = NLMSG_DATA(nlh);
@@ -83,12 +118,27 @@ scsi_nl_rcv_msg(struct sk_buff *skb)
if (nlh->nlmsg_len < (sizeof(*nlh) + hdr->msglen)) {
printk(KERN_WARNING "%s: discarding partial message\n",
__func__);
- return;
+ goto next_msg;
}
/*
- * We currently don't support anyone sending us a message
+ * Deliver message to the appropriate transport
*/
+ spin_lock_irqsave(&scsi_nl_lock, flags);
+
+ tport = hdr->transport;
+ if ((tport < SCSI_NL_MAX_TRANSPORTS) &&
+ !(transports[tport].flags & HANDLER_DELETING) &&
+ (transports[tport].msg_handler)) {
+ transports[tport].refcnt++;
+ spin_unlock_irqrestore(&scsi_nl_lock, flags);
+ err = transports[tport].msg_handler(skb);
+ spin_lock_irqsave(&scsi_nl_lock, flags);
+ transports[tport].refcnt--;
+ } else
+ err = -ENOENT;
+
+ spin_unlock_irqrestore(&scsi_nl_lock, flags);
next_msg:
if ((err) || (nlh->nlmsg_flags & NLM_F_ACK))
@@ -110,14 +160,42 @@ static int
scsi_nl_rcv_event(struct notifier_block *this, unsigned long event, void *ptr)
{
struct netlink_notify *n = ptr;
+ struct scsi_nl_drvr *driver;
+ unsigned long flags;
+ int tport;
if (n->protocol != NETLINK_SCSITRANSPORT)
return NOTIFY_DONE;
+ spin_lock_irqsave(&scsi_nl_lock, flags);
+ scsi_nl_state |= STATE_EHANDLER_BSY;
+
/*
- * Currently, we are not tracking PID's, etc. There is nothing
- * to handle.
+ * Pass event on to any transports that may be listening
*/
+ for (tport = 0; tport < SCSI_NL_MAX_TRANSPORTS; tport++) {
+ if (!(transports[tport].flags & HANDLER_DELETING) &&
+ (transports[tport].event_handler)) {
+ spin_unlock_irqrestore(&scsi_nl_lock, flags);
+ transports[tport].event_handler(this, event, ptr);
+ spin_lock_irqsave(&scsi_nl_lock, flags);
+ }
+ }
+
+ /*
+ * Pass event on to any drivers that may be listening
+ */
+ list_for_each_entry(driver, &scsi_nl_drivers, next) {
+ if (!(driver->flags & HANDLER_DELETING) &&
+ (driver->devt_handler)) {
+ spin_unlock_irqrestore(&scsi_nl_lock, flags);
+ driver->devt_handler(this, event, ptr);
+ spin_lock_irqsave(&scsi_nl_lock, flags);
+ }
+ }
+
+ scsi_nl_state &= ~STATE_EHANDLER_BSY;
+ spin_unlock_irqrestore(&scsi_nl_lock, flags);
return NOTIFY_DONE;
}
@@ -128,7 +206,281 @@ static struct notifier_block scsi_netlink_notifier = {
/**
- * scsi_netlink_init - Called by SCSI subsystem to intialize the SCSI transport netlink interface
+ * GENERIC SCSI transport receive and event handlers
+ **/
+
+/**
+ * scsi_generic_msg_handler - receive message handler for GENERIC transport
+ * messages
+ *
+ * @skb: socket receive buffer
+ *
+ **/
+static int
+scsi_generic_msg_handler(struct sk_buff *skb)
+{
+ struct nlmsghdr *nlh = nlmsg_hdr(skb);
+ struct scsi_nl_hdr *snlh = NLMSG_DATA(nlh);
+ struct scsi_nl_drvr *driver;
+ struct Scsi_Host *shost;
+ unsigned long flags;
+ int err = 0, match, pid;
+
+ pid = NETLINK_CREDS(skb)->pid;
+
+ switch (snlh->msgtype) {
+ case SCSI_NL_SHOST_VENDOR:
+ {
+ struct scsi_nl_host_vendor_msg *msg = NLMSG_DATA(nlh);
+
+ /* Locate the driver that corresponds to the message */
+ spin_lock_irqsave(&scsi_nl_lock, flags);
+ match = 0;
+ list_for_each_entry(driver, &scsi_nl_drivers, next) {
+ if (driver->vendor_id == msg->vendor_id) {
+ match = 1;
+ break;
+ }
+ }
+
+ if ((!match) || (!driver->dmsg_handler)) {
+ spin_unlock_irqrestore(&scsi_nl_lock, flags);
+ err = -ESRCH;
+ goto rcv_exit;
+ }
+
+ if (driver->flags & HANDLER_DELETING) {
+ spin_unlock_irqrestore(&scsi_nl_lock, flags);
+ err = -ESHUTDOWN;
+ goto rcv_exit;
+ }
+
+ driver->refcnt++;
+ spin_unlock_irqrestore(&scsi_nl_lock, flags);
+
+
+ /* if successful, scsi_host_lookup takes a shost reference */
+ shost = scsi_host_lookup(msg->host_no);
+ if (!shost) {
+ err = -ENODEV;
+ goto driver_exit;
+ }
+
+ /* is this host owned by the vendor ? */
+ if (shost->hostt != driver->hostt) {
+ err = -EINVAL;
+ goto vendormsg_put;
+ }
+
+ /* pass message on to the driver */
+ err = driver->dmsg_handler(shost, (void *)&msg[1],
+ msg->vmsg_datalen, pid);
+
+vendormsg_put:
+ /* release reference by scsi_host_lookup */
+ scsi_host_put(shost);
+
+driver_exit:
+ /* release our own reference on the registration object */
+ spin_lock_irqsave(&scsi_nl_lock, flags);
+ driver->refcnt--;
+ spin_unlock_irqrestore(&scsi_nl_lock, flags);
+ break;
+ }
+
+ default:
+ err = -EBADR;
+ break;
+ }
+
+rcv_exit:
+ if (err)
+ printk(KERN_WARNING "%s: Msgtype %d failed - err %d\n",
+ __func__, snlh->msgtype, err);
+ return err;
+}
+
+
+/**
+ * scsi_nl_add_transport -
+ * Registers message and event handlers for a transport. Enables
+ * receipt of netlink messages and events to a transport.
+ *
+ * @tport: transport registering handlers
+ * @msg_handler: receive message handler callback
+ * @event_handler: receive event handler callback
+ **/
+int
+scsi_nl_add_transport(u8 tport,
+ int (*msg_handler)(struct sk_buff *),
+ void (*event_handler)(struct notifier_block *, unsigned long, void *))
+{
+ unsigned long flags;
+ int err = 0;
+
+ if (tport >= SCSI_NL_MAX_TRANSPORTS)
+ return -EINVAL;
+
+ spin_lock_irqsave(&scsi_nl_lock, flags);
+
+ if (scsi_nl_state & STATE_EHANDLER_BSY) {
+ spin_unlock_irqrestore(&scsi_nl_lock, flags);
+ msleep(1);
+ spin_lock_irqsave(&scsi_nl_lock, flags);
+ }
+
+ if (transports[tport].msg_handler || transports[tport].event_handler) {
+ err = -EALREADY;
+ goto register_out;
+ }
+
+ transports[tport].msg_handler = msg_handler;
+ transports[tport].event_handler = event_handler;
+ transports[tport].flags = 0;
+ transports[tport].refcnt = 0;
+
+register_out:
+ spin_unlock_irqrestore(&scsi_nl_lock, flags);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(scsi_nl_add_transport);
+
+
+/**
+ * scsi_nl_remove_transport -
+ * Disable transport reception of messages and events
+ *
+ * @tport: transport deregistering handlers
+ *
+ **/
+void
+scsi_nl_remove_transport(u8 tport)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&scsi_nl_lock, flags);
+ if (scsi_nl_state & STATE_EHANDLER_BSY) {
+ spin_unlock_irqrestore(&scsi_nl_lock, flags);
+ msleep(1);
+ spin_lock_irqsave(&scsi_nl_lock, flags);
+ }
+
+ if (tport < SCSI_NL_MAX_TRANSPORTS) {
+ transports[tport].flags |= HANDLER_DELETING;
+
+ while (transports[tport].refcnt != 0) {
+ spin_unlock_irqrestore(&scsi_nl_lock, flags);
+ schedule_timeout_uninterruptible(HZ/4);
+ spin_lock_irqsave(&scsi_nl_lock, flags);
+ }
+ transports[tport].msg_handler = NULL;
+ transports[tport].event_handler = NULL;
+ transports[tport].flags = 0;
+ }
+
+ spin_unlock_irqrestore(&scsi_nl_lock, flags);
+
+ return;
+}
+EXPORT_SYMBOL_GPL(scsi_nl_remove_transport);
+
+
+/**
+ * scsi_nl_add_driver -
+ * A driver is registering its interfaces for SCSI netlink messages
+ *
+ * @vendor_id: A unique identification value for the driver.
+ * @hostt: address of the driver's host template. Used
+ * to verify an shost is bound to the driver
+ * @nlmsg_handler: receive message handler callback
+ * @nlevt_handler: receive event handler callback
+ *
+ * Returns:
+ * 0 on Success
+ * error result otherwise
+ **/
+int
+scsi_nl_add_driver(u64 vendor_id, struct scsi_host_template *hostt,
+ int (*nlmsg_handler)(struct Scsi_Host *shost, void *payload,
+ u32 len, u32 pid),
+ void (*nlevt_handler)(struct notifier_block *nb,
+ unsigned long event, void *notify_ptr))
+{
+ struct scsi_nl_drvr *driver;
+ unsigned long flags;
+
+ driver = kzalloc(sizeof(*driver), GFP_KERNEL);
+ if (unlikely(!driver)) {
+ printk(KERN_ERR "%s: allocation failure\n", __func__);
+ return -ENOMEM;
+ }
+
+ driver->dmsg_handler = nlmsg_handler;
+ driver->devt_handler = nlevt_handler;
+ driver->hostt = hostt;
+ driver->vendor_id = vendor_id;
+
+ spin_lock_irqsave(&scsi_nl_lock, flags);
+ if (scsi_nl_state & STATE_EHANDLER_BSY) {
+ spin_unlock_irqrestore(&scsi_nl_lock, flags);
+ msleep(1);
+ spin_lock_irqsave(&scsi_nl_lock, flags);
+ }
+ list_add_tail(&driver->next, &scsi_nl_drivers);
+ spin_unlock_irqrestore(&scsi_nl_lock, flags);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(scsi_nl_add_driver);
+
+
+/**
+ * scsi_nl_remove_driver -
+ * A driver is unregistering its interfaces for SCSI netlink messages
+ *
+ * @vendor_id: The unique identification value for the driver.
+ **/
+void
+scsi_nl_remove_driver(u64 vendor_id)
+{
+ struct scsi_nl_drvr *driver;
+ unsigned long flags;
+
+ spin_lock_irqsave(&scsi_nl_lock, flags);
+ if (scsi_nl_state & STATE_EHANDLER_BSY) {
+ spin_unlock_irqrestore(&scsi_nl_lock, flags);
+ msleep(1);
+ spin_lock_irqsave(&scsi_nl_lock, flags);
+ }
+
+ list_for_each_entry(driver, &scsi_nl_drivers, next) {
+ if (driver->vendor_id == vendor_id) {
+ driver->flags |= HANDLER_DELETING;
+ while (driver->refcnt != 0) {
+ spin_unlock_irqrestore(&scsi_nl_lock, flags);
+ schedule_timeout_uninterruptible(HZ/4);
+ spin_lock_irqsave(&scsi_nl_lock, flags);
+ }
+ list_del(&driver->next);
+ kfree(driver);
+ spin_unlock_irqrestore(&scsi_nl_lock, flags);
+ return;
+ }
+ }
+
+ spin_unlock_irqrestore(&scsi_nl_lock, flags);
+
+ printk(KERN_ERR "%s: removal of driver failed - vendor_id 0x%llx\n",
+ __func__, (unsigned long long)vendor_id);
+ return;
+}
+EXPORT_SYMBOL_GPL(scsi_nl_remove_driver);
+
+
+/**
+ * scsi_netlink_init - Called by SCSI subsystem to initialize
+ * the SCSI transport netlink interface
*
**/
void
@@ -136,6 +488,8 @@ scsi_netlink_init(void)
{
int error;
+ INIT_LIST_HEAD(&scsi_nl_drivers);
+
error = netlink_register_notifier(&scsi_netlink_notifier);
if (error) {
printk(KERN_ERR "%s: register of event handler failed - %d\n",
@@ -150,8 +504,15 @@ scsi_netlink_init(void)
printk(KERN_ERR "%s: register of recieve handler failed\n",
__func__);
netlink_unregister_notifier(&scsi_netlink_notifier);
+ return;
}
+ /* Register the entry points for the generic SCSI transport */
+ error = scsi_nl_add_transport(SCSI_NL_TRANSPORT,
+ scsi_generic_msg_handler, NULL);
+ if (error)
+ printk(KERN_ERR "%s: register of GENERIC transport handler"
+ " failed - %d\n", __func__, error);
return;
}
@@ -163,6 +524,8 @@ scsi_netlink_init(void)
void
scsi_netlink_exit(void)
{
+ scsi_nl_remove_transport(SCSI_NL_TRANSPORT);
+
if (scsi_nl_sock) {
netlink_kernel_release(scsi_nl_sock);
netlink_unregister_notifier(&scsi_netlink_notifier);
@@ -172,3 +535,147 @@ scsi_netlink_exit(void)
}
+/*
+ * Exported Interfaces
+ */
+
+/**
+ * scsi_nl_send_transport_msg -
+ * Generic function to send a single message from a SCSI transport to
+ * a single process
+ *
+ * @pid: receiving pid
+ * @hdr: message payload
+ *
+ **/
+void
+scsi_nl_send_transport_msg(u32 pid, struct scsi_nl_hdr *hdr)
+{
+ struct sk_buff *skb;
+ struct nlmsghdr *nlh;
+ const char *fn;
+ char *datab;
+ u32 len, skblen;
+ int err;
+
+ if (!scsi_nl_sock) {
+ err = -ENOENT;
+ fn = "netlink socket";
+ goto msg_fail;
+ }
+
+ len = NLMSG_SPACE(hdr->msglen);
+ skblen = NLMSG_SPACE(len);
+
+ skb = alloc_skb(skblen, GFP_KERNEL);
+ if (!skb) {
+ err = -ENOBUFS;
+ fn = "alloc_skb";
+ goto msg_fail;
+ }
+
+ nlh = nlmsg_put(skb, pid, 0, SCSI_TRANSPORT_MSG, len - sizeof(*nlh), 0);
+ if (!nlh) {
+ err = -ENOBUFS;
+ fn = "nlmsg_put";
+ goto msg_fail_skb;
+ }
+ datab = NLMSG_DATA(nlh);
+ memcpy(datab, hdr, hdr->msglen);
+
+ err = nlmsg_unicast(scsi_nl_sock, skb, pid);
+ if (err < 0) {
+ fn = "nlmsg_unicast";
+ /* nlmsg_unicast already kfree_skb'd */
+ goto msg_fail;
+ }
+
+ return;
+
+msg_fail_skb:
+ kfree_skb(skb);
+msg_fail:
+ printk(KERN_WARNING
+ "%s: Dropped Message : pid %d Transport %d, msgtype x%x, "
+ "msglen %d: %s : err %d\n",
+ __func__, pid, hdr->transport, hdr->msgtype, hdr->msglen,
+ fn, err);
+ return;
+}
+EXPORT_SYMBOL_GPL(scsi_nl_send_transport_msg);
+
+
+/**
+ * scsi_nl_send_vendor_msg - called to send a shost vendor unique message
+ * to a specific process id.
+ *
+ * @pid: process id of the receiver
+ * @host_no: host # sending the message
+ * @vendor_id: unique identifier for the driver's vendor
+ * @data_len: amount, in bytes, of vendor unique payload data
+ * @data_buf: pointer to vendor unique data buffer
+ *
+ * Returns:
+ * 0 on successful return
+ * otherwise, failing error code
+ *
+ * Notes:
+ * This routine assumes no locks are held on entry.
+ */
+int
+scsi_nl_send_vendor_msg(u32 pid, unsigned short host_no, u64 vendor_id,
+ char *data_buf, u32 data_len)
+{
+ struct sk_buff *skb;
+ struct nlmsghdr *nlh;
+ struct scsi_nl_host_vendor_msg *msg;
+ u32 len, skblen;
+ int err;
+
+ if (!scsi_nl_sock) {
+ err = -ENOENT;
+ goto send_vendor_fail;
+ }
+
+ len = SCSI_NL_MSGALIGN(sizeof(*msg) + data_len);
+ skblen = NLMSG_SPACE(len);
+
+ skb = alloc_skb(skblen, GFP_KERNEL);
+ if (!skb) {
+ err = -ENOBUFS;
+ goto send_vendor_fail;
+ }
+
+ nlh = nlmsg_put(skb, 0, 0, SCSI_TRANSPORT_MSG,
+ skblen - sizeof(*nlh), 0);
+ if (!nlh) {
+ err = -ENOBUFS;
+ goto send_vendor_fail_skb;
+ }
+ msg = NLMSG_DATA(nlh);
+
+ INIT_SCSI_NL_HDR(&msg->snlh, SCSI_NL_TRANSPORT,
+ SCSI_NL_SHOST_VENDOR, len);
+ msg->vendor_id = vendor_id;
+ msg->host_no = host_no;
+ msg->vmsg_datalen = data_len; /* bytes */
+ memcpy(&msg[1], data_buf, data_len);
+
+ err = nlmsg_unicast(scsi_nl_sock, skb, pid);
+ if (err)
+ /* nlmsg_unicast already kfree_skb'd */
+ goto send_vendor_fail;
+
+ return 0;
+
+send_vendor_fail_skb:
+ kfree_skb(skb);
+send_vendor_fail:
+ printk(KERN_WARNING
+ "%s: Dropped SCSI Msg : host %d vendor_unique - err %d\n",
+ __func__, host_no, err);
+ return err;
+}
+EXPORT_SYMBOL(scsi_nl_send_vendor_msg);
+
+
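
The driver-facing half of this interface is scsi_nl_add_driver()/scsi_nl_remove_driver(): vendor-unique messages whose vendor_id matches a registration are routed to the registered handler, and scsi_nl_send_vendor_msg() sends one back to the originating pid. A hedged sketch of a registration, with hypothetical names (MY_VENDOR_ID, my_template, my_msg_handler):

#define MY_VENDOR_ID    0x0000000000abcd01ULL   /* hypothetical vendor id */

static int my_msg_handler(struct Scsi_Host *shost, void *payload,
                          u32 len, u32 pid)
{
        /* Decode the vendor payload; a reply could be sent with
         * scsi_nl_send_vendor_msg(pid, shost->host_no, MY_VENDOR_ID,
         *                         reply_buf, reply_len). */
        return 0;
}

static int __init my_lld_init(void)
{
        /* my_template is the driver's scsi_host_template */
        return scsi_nl_add_driver(MY_VENDOR_ID, &my_template,
                                  my_msg_handler, NULL);
}

static void __exit my_lld_exit(void)
{
        scsi_nl_remove_driver(MY_VENDOR_ID);
}
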
diff --git a/drivers/scsi/scsi_priv.h b/drivers/scsi/scsi_priv.h
index 79f0f751120..6cddd5dd323 100644
--- a/drivers/scsi/scsi_priv.h
+++ b/drivers/scsi/scsi_priv.h
@@ -4,6 +4,7 @@
#include <linux/device.h>
struct request_queue;
+struct request;
struct scsi_cmnd;
struct scsi_device;
struct scsi_host_template;
@@ -27,7 +28,6 @@ extern void scsi_exit_hosts(void);
extern int scsi_dispatch_cmd(struct scsi_cmnd *cmd);
extern int scsi_setup_command_freelist(struct Scsi_Host *shost);
extern void scsi_destroy_command_freelist(struct Scsi_Host *shost);
-extern void __scsi_done(struct scsi_cmnd *cmd);
#ifdef CONFIG_SCSI_LOGGING
void scsi_log_send(struct scsi_cmnd *cmd);
void scsi_log_completion(struct scsi_cmnd *cmd, int disposition);
@@ -49,10 +49,7 @@ extern int __init scsi_init_devinfo(void);
extern void scsi_exit_devinfo(void);
/* scsi_error.c */
-extern void scsi_add_timer(struct scsi_cmnd *, int,
- void (*)(struct scsi_cmnd *));
-extern int scsi_delete_timer(struct scsi_cmnd *);
-extern void scsi_times_out(struct scsi_cmnd *cmd);
+extern enum blk_eh_timer_return scsi_times_out(struct request *req);
extern int scsi_error_handler(void *host);
extern int scsi_decide_disposition(struct scsi_cmnd *cmd);
extern void scsi_eh_wakeup(struct Scsi_Host *shost);
diff --git a/drivers/scsi/scsi_proc.c b/drivers/scsi/scsi_proc.c
index c6a904a45bf..82f7b2dd08a 100644
--- a/drivers/scsi/scsi_proc.c
+++ b/drivers/scsi/scsi_proc.c
@@ -259,8 +259,8 @@ static int scsi_add_single_device(uint host, uint channel, uint id, uint lun)
int error = -ENXIO;
shost = scsi_host_lookup(host);
- if (IS_ERR(shost))
- return PTR_ERR(shost);
+ if (!shost)
+ return error;
if (shost->transportt->user_scan)
error = shost->transportt->user_scan(shost, channel, id, lun);
@@ -287,8 +287,8 @@ static int scsi_remove_single_device(uint host, uint channel, uint id, uint lun)
int error = -ENXIO;
shost = scsi_host_lookup(host);
- if (IS_ERR(shost))
- return PTR_ERR(shost);
+ if (!shost)
+ return error;
sdev = scsi_device_lookup(shost, channel, id, lun);
if (sdev) {
scsi_remove_device(sdev);
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index 84b4879cff1..334862e26a1 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -730,6 +730,8 @@ static int scsi_probe_lun(struct scsi_device *sdev, unsigned char *inq_result,
static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result,
int *bflags, int async)
{
+ int ret;
+
/*
* XXX do not save the inquiry, since it can change underneath us,
* save just vendor/model/rev.
@@ -885,7 +887,17 @@ static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result,
/* set the device running here so that slave configure
* may do I/O */
- scsi_device_set_state(sdev, SDEV_RUNNING);
+ ret = scsi_device_set_state(sdev, SDEV_RUNNING);
+ if (ret) {
+ ret = scsi_device_set_state(sdev, SDEV_BLOCK);
+
+ if (ret) {
+ sdev_printk(KERN_ERR, sdev,
+ "in wrong state %s to complete scan\n",
+ scsi_device_state_name(sdev->sdev_state));
+ return SCSI_SCAN_NO_RESPONSE;
+ }
+ }
if (*bflags & BLIST_MS_192_BYTES_FOR_3F)
sdev->use_192_bytes_for_3f = 1;
@@ -899,7 +911,7 @@ static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result,
transport_configure_device(&sdev->sdev_gendev);
if (sdev->host->hostt->slave_configure) {
- int ret = sdev->host->hostt->slave_configure(sdev);
+ ret = sdev->host->hostt->slave_configure(sdev);
if (ret) {
/*
* if LLDD reports slave not present, don't clutter
@@ -994,7 +1006,7 @@ static int scsi_probe_and_add_lun(struct scsi_target *starget,
*/
sdev = scsi_device_lookup_by_target(starget, lun);
if (sdev) {
- if (rescan || sdev->sdev_state != SDEV_CREATED) {
+ if (rescan || !scsi_device_created(sdev)) {
SCSI_LOG_SCAN_BUS(3, printk(KERN_INFO
"scsi scan: device exists on %s\n",
sdev->sdev_gendev.bus_id));
@@ -1080,7 +1092,8 @@ static int scsi_probe_and_add_lun(struct scsi_target *starget,
* PDT=1Fh none (no FDD connected to the requested logical unit)
*/
if (((result[0] >> 5) == 1 || starget->pdt_1f_for_no_lun) &&
- (result[0] & 0x1f) == 0x1f) {
+ (result[0] & 0x1f) == 0x1f &&
+ !scsi_is_wlun(lun)) {
SCSI_LOG_SCAN_BUS(3, printk(KERN_INFO
"scsi scan: peripheral device type"
" of 31, no device added\n"));
@@ -1466,7 +1479,7 @@ static int scsi_report_lun_scan(struct scsi_target *starget, int bflags,
kfree(lun_data);
out:
scsi_device_put(sdev);
- if (sdev->sdev_state == SDEV_CREATED)
+ if (scsi_device_created(sdev))
/*
* the sdev we used didn't appear in the report luns scan
*/
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index ab3c71869be..93c28f30bbd 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -34,6 +34,7 @@ static const struct {
{ SDEV_QUIESCE, "quiesce" },
{ SDEV_OFFLINE, "offline" },
{ SDEV_BLOCK, "blocked" },
+ { SDEV_CREATED_BLOCK, "created-blocked" },
};
const char *scsi_device_state_name(enum scsi_device_state state)
@@ -560,12 +561,15 @@ sdev_rd_attr (vendor, "%.8s\n");
sdev_rd_attr (model, "%.16s\n");
sdev_rd_attr (rev, "%.4s\n");
+/*
+ * TODO: can we make these symlinks to the block layer ones?
+ */
static ssize_t
sdev_show_timeout (struct device *dev, struct device_attribute *attr, char *buf)
{
struct scsi_device *sdev;
sdev = to_scsi_device(dev);
- return snprintf (buf, 20, "%d\n", sdev->timeout / HZ);
+ return snprintf(buf, 20, "%d\n", sdev->request_queue->rq_timeout / HZ);
}
static ssize_t
@@ -576,7 +580,7 @@ sdev_store_timeout (struct device *dev, struct device_attribute *attr,
int timeout;
sdev = to_scsi_device(dev);
sscanf (buf, "%d\n", &timeout);
- sdev->timeout = timeout * HZ;
+ blk_queue_rq_timeout(sdev->request_queue, timeout * HZ);
return count;
}
static DEVICE_ATTR(timeout, S_IRUGO | S_IWUSR, sdev_show_timeout, sdev_store_timeout);
diff --git a/drivers/scsi/scsi_tgt_lib.c b/drivers/scsi/scsi_tgt_lib.c
index 257e097c39a..48ba413f7f6 100644
--- a/drivers/scsi/scsi_tgt_lib.c
+++ b/drivers/scsi/scsi_tgt_lib.c
@@ -362,7 +362,7 @@ static int scsi_map_user_pages(struct scsi_tgt_cmd *tcmd, struct scsi_cmnd *cmd,
int err;
dprintk("%lx %u\n", uaddr, len);
- err = blk_rq_map_user(q, rq, (void *)uaddr, len);
+ err = blk_rq_map_user(q, rq, NULL, (void *)uaddr, len, GFP_KERNEL);
if (err) {
/*
* TODO: need to fixup sg_tablesize, max_segment_size,
@@ -460,7 +460,7 @@ int scsi_tgt_kspace_exec(int host_no, u64 itn_id, int result, u64 tag,
/* TODO: replace with a O(1) alg */
shost = scsi_host_lookup(host_no);
- if (IS_ERR(shost)) {
+ if (!shost) {
printk(KERN_ERR "Could not find host no %d\n", host_no);
return -EINVAL;
}
@@ -550,7 +550,7 @@ int scsi_tgt_kspace_tsk_mgmt(int host_no, u64 itn_id, u64 mid, int result)
dprintk("%d %d %llx\n", host_no, result, (unsigned long long) mid);
shost = scsi_host_lookup(host_no);
- if (IS_ERR(shost)) {
+ if (!shost) {
printk(KERN_ERR "Could not find host no %d\n", host_no);
return err;
}
@@ -603,7 +603,7 @@ int scsi_tgt_kspace_it_nexus_rsp(int host_no, u64 itn_id, int result)
dprintk("%d %d%llx\n", host_no, result, (unsigned long long)itn_id);
shost = scsi_host_lookup(host_no);
- if (IS_ERR(shost)) {
+ if (!shost) {
printk(KERN_ERR "Could not find host no %d\n", host_no);
return err;
}
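
blk_rq_map_user() grew a struct rq_map_data pointer and a gfp mask in this series; passing NULL for the map data keeps the previous behaviour of letting the block layer manage the pages itself. A sketch of the updated call pattern (hypothetical wrapper name):

/* Sketch: mapping a user buffer into a request with the new signature. */
static int map_user_buf_sketch(struct request_queue *q, struct request *rq,
                               void __user *ubuf, unsigned int len)
{
        return blk_rq_map_user(q, rq, NULL, ubuf, len, GFP_KERNEL);
}
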
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
index 56823fd1fb8..d5f7653bb94 100644
--- a/drivers/scsi/scsi_transport_fc.c
+++ b/drivers/scsi/scsi_transport_fc.c
@@ -40,31 +40,7 @@
static int fc_queue_work(struct Scsi_Host *, struct work_struct *);
static void fc_vport_sched_delete(struct work_struct *work);
-
-/*
- * This is a temporary carrier for creating a vport. It will eventually
- * be replaced by a real message definition for sgio or netlink.
- *
- * fc_vport_identifiers: This set of data contains all elements
- * to uniquely identify and instantiate a FC virtual port.
- *
- * Notes:
- * symbolic_name: The driver is to append the symbolic_name string data
- * to the symbolic_node_name data that it generates by default.
- * the resulting combination should then be registered with the switch.
- * It is expected that things like Xen may stuff a VM title into
- * this field.
- */
-struct fc_vport_identifiers {
- u64 node_name;
- u64 port_name;
- u32 roles;
- bool disable;
- enum fc_port_type vport_type; /* only FC_PORTTYPE_NPIV allowed */
- char symbolic_name[FC_VPORT_SYMBOLIC_NAMELEN];
-};
-
-static int fc_vport_create(struct Scsi_Host *shost, int channel,
+static int fc_vport_setup(struct Scsi_Host *shost, int channel,
struct device *pdev, struct fc_vport_identifiers *ids,
struct fc_vport **vport);
@@ -1760,7 +1736,7 @@ store_fc_host_vport_create(struct device *dev, struct device_attribute *attr,
vid.disable = false; /* always enabled */
/* we only allow support on Channel 0 !!! */
- stat = fc_vport_create(shost, 0, &shost->shost_gendev, &vid, &vport);
+ stat = fc_vport_setup(shost, 0, &shost->shost_gendev, &vid, &vport);
return stat ? stat : count;
}
static FC_DEVICE_ATTR(host, vport_create, S_IWUSR, NULL,
@@ -1950,15 +1926,15 @@ static int fc_vport_match(struct attribute_container *cont,
* Notes:
* This routine assumes no locks are held on entry.
*/
-static enum scsi_eh_timer_return
+static enum blk_eh_timer_return
fc_timed_out(struct scsi_cmnd *scmd)
{
struct fc_rport *rport = starget_to_rport(scsi_target(scmd->device));
if (rport->port_state == FC_PORTSTATE_BLOCKED)
- return EH_RESET_TIMER;
+ return BLK_EH_RESET_TIMER;
- return EH_NOT_HANDLED;
+ return BLK_EH_NOT_HANDLED;
}
/*
@@ -3103,7 +3079,7 @@ fc_scsi_scan_rport(struct work_struct *work)
/**
- * fc_vport_create - allocates and creates a FC virtual port.
+ * fc_vport_setup - allocates and creates a FC virtual port.
* @shost: scsi host the virtual port is connected to.
* @channel: Channel on shost port connected to.
* @pdev: parent device for vport
@@ -3118,7 +3094,7 @@ fc_scsi_scan_rport(struct work_struct *work)
* This routine assumes no locks are held on entry.
*/
static int
-fc_vport_create(struct Scsi_Host *shost, int channel, struct device *pdev,
+fc_vport_setup(struct Scsi_Host *shost, int channel, struct device *pdev,
struct fc_vport_identifiers *ids, struct fc_vport **ret_vport)
{
struct fc_host_attrs *fc_host = shost_to_fc_host(shost);
@@ -3231,6 +3207,28 @@ delete_vport:
return error;
}
+/**
+ * fc_vport_create - Admin App or LLDD requests creation of a vport
+ * @shost: scsi host the virtual port is connected to.
+ * @channel: channel on shost port connected to.
+ * @ids: The world wide names, FC4 port roles, etc for
+ * the virtual port.
+ *
+ * Notes:
+ * This routine assumes no locks are held on entry.
+ */
+struct fc_vport *
+fc_vport_create(struct Scsi_Host *shost, int channel,
+ struct fc_vport_identifiers *ids)
+{
+ int stat;
+ struct fc_vport *vport;
+
+ stat = fc_vport_setup(shost, channel, &shost->shost_gendev,
+ ids, &vport);
+ return stat ? NULL : vport;
+}
+EXPORT_SYMBOL(fc_vport_create);
/**
* fc_vport_terminate - Admin App or LLDD requests termination of a vport
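
fc_vport_create() is now the exported entry point an LLDD uses to instantiate an NPIV port, as the qla2xxx NPIV-config hunk earlier in this patch does. A minimal sketch of a call (wrapper name and arguments are placeholders):

/* Sketch: creating an NPIV initiator vport on channel 0; returns NULL
 * on failure, matching fc_vport_create() above. */
static struct fc_vport *make_npiv_port_sketch(struct Scsi_Host *shost,
                                              u64 wwpn, u64 wwnn)
{
        struct fc_vport_identifiers vid;

        memset(&vid, 0, sizeof(vid));
        vid.roles = FC_PORT_ROLE_FCP_INITIATOR;
        vid.vport_type = FC_PORTTYPE_NPIV;
        vid.disable = false;
        vid.port_name = wwpn;
        vid.node_name = wwnn;

        return fc_vport_create(shost, 0, &vid);
}
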
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
index 043c3921164..0ce5f7cdfe2 100644
--- a/drivers/scsi/scsi_transport_iscsi.c
+++ b/drivers/scsi/scsi_transport_iscsi.c
@@ -1361,7 +1361,7 @@ iscsi_tgt_dscvr(struct iscsi_transport *transport,
return -EINVAL;
shost = scsi_host_lookup(ev->u.tgt_dscvr.host_no);
- if (IS_ERR(shost)) {
+ if (!shost) {
printk(KERN_ERR "target discovery could not find host no %u\n",
ev->u.tgt_dscvr.host_no);
return -ENODEV;
@@ -1387,7 +1387,7 @@ iscsi_set_host_param(struct iscsi_transport *transport,
return -ENOSYS;
shost = scsi_host_lookup(ev->u.set_host_param.host_no);
- if (IS_ERR(shost)) {
+ if (!shost) {
printk(KERN_ERR "set_host_param could not find host no %u\n",
ev->u.set_host_param.host_no);
return -ENODEV;
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index e5e7d785645..a7b53be6336 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -47,6 +47,7 @@
#include <linux/blkpg.h>
#include <linux/delay.h>
#include <linux/mutex.h>
+#include <linux/string_helpers.h>
#include <asm/uaccess.h>
#include <scsi/scsi.h>
@@ -86,6 +87,12 @@ MODULE_ALIAS_SCSI_DEVICE(TYPE_DISK);
MODULE_ALIAS_SCSI_DEVICE(TYPE_MOD);
MODULE_ALIAS_SCSI_DEVICE(TYPE_RBC);
+#if !defined(CONFIG_DEBUG_BLOCK_EXT_DEVT)
+#define SD_MINORS 16
+#else
+#define SD_MINORS 0
+#endif
+
static int sd_revalidate_disk(struct gendisk *);
static int sd_probe(struct device *);
static int sd_remove(struct device *);
@@ -159,7 +166,7 @@ sd_store_cache_type(struct device *dev, struct device_attribute *attr,
sd_print_sense_hdr(sdkp, &sshdr);
return -EINVAL;
}
- sd_revalidate_disk(sdkp->disk);
+ revalidate_disk(sdkp->disk);
return count;
}
@@ -377,7 +384,6 @@ static int sd_prep_fn(struct request_queue *q, struct request *rq)
sector_t block = rq->sector;
sector_t threshold;
unsigned int this_count = rq->nr_sectors;
- unsigned int timeout = sdp->timeout;
int ret;
if (rq->cmd_type == REQ_TYPE_BLOCK_PC) {
@@ -578,7 +584,6 @@ static int sd_prep_fn(struct request_queue *q, struct request *rq)
SCpnt->transfersize = sdp->sector_size;
SCpnt->underflow = this_count << 9;
SCpnt->allowed = SD_MAX_RETRIES;
- SCpnt->timeout_per_command = timeout;
/*
* This indicates that the command is ready from our end to be
@@ -910,7 +915,7 @@ static void sd_rescan(struct device *dev)
struct scsi_disk *sdkp = scsi_disk_get_from_dev(dev);
if (sdkp) {
- sd_revalidate_disk(sdkp->disk);
+ revalidate_disk(sdkp->disk);
scsi_disk_put(sdkp);
}
}
@@ -1429,27 +1434,21 @@ got_data:
*/
sector_size = 512;
}
+ blk_queue_hardsect_size(sdp->request_queue, sector_size);
+
{
- /*
- * The msdos fs needs to know the hardware sector size
- * So I have created this table. See ll_rw_blk.c
- * Jacques Gelinas (Jacques@solucorp.qc.ca)
- */
- int hard_sector = sector_size;
- sector_t sz = (sdkp->capacity/2) * (hard_sector/256);
- struct request_queue *queue = sdp->request_queue;
- sector_t mb = sz;
+ char cap_str_2[10], cap_str_10[10];
+ u64 sz = sdkp->capacity << ffz(~sector_size);
- blk_queue_hardsect_size(queue, hard_sector);
- /* avoid 64-bit division on 32-bit platforms */
- sector_div(sz, 625);
- mb -= sz - 974;
- sector_div(mb, 1950);
+ string_get_size(sz, STRING_UNITS_2, cap_str_2,
+ sizeof(cap_str_2));
+ string_get_size(sz, STRING_UNITS_10, cap_str_10,
+ sizeof(cap_str_10));
sd_printk(KERN_NOTICE, sdkp,
- "%llu %d-byte hardware sectors (%llu MB)\n",
+ "%llu %d-byte hardware sectors: (%s/%s)\n",
(unsigned long long)sdkp->capacity,
- hard_sector, (unsigned long long)mb);
+ sector_size, cap_str_10, cap_str_2);
}
/* Rescale capacity to 512-byte units */
@@ -1764,6 +1763,52 @@ static int sd_revalidate_disk(struct gendisk *disk)
}
/**
+ * sd_format_disk_name - format disk name
+ * @prefix: name prefix - ie. "sd" for SCSI disks
+ * @index: index of the disk to format name for
+ * @buf: output buffer
+ * @buflen: length of the output buffer
+ *
+ * SCSI disk names start at sda. The 26th device is sdz and the
+ * 27th is sdaa. The last two-lettered name is sdzz, which is
+ * followed by sdaaa.
+ * This is basically base-26 counting with one extra 'nil' entry
+ * at the beginning from the second digit on, and can be
+ * determined using a method similar to base-26 conversion with
+ * the index shifted by -1 after each digit is computed.
+ *
+ * CONTEXT:
+ * Don't care.
+ *
+ * RETURNS:
+ * 0 on success, -errno on failure.
+ */
+static int sd_format_disk_name(char *prefix, int index, char *buf, int buflen)
+{
+ const int base = 'z' - 'a' + 1;
+ char *begin = buf + strlen(prefix);
+ char *end = buf + buflen;
+ char *p;
+ int unit;
+
+ p = end - 1;
+ *p = '\0';
+ unit = base;
+ do {
+ if (p == begin)
+ return -EINVAL;
+ *--p = 'a' + (index % unit);
+ index = (index / unit) - 1;
+ } while (index >= 0);
+
+ memmove(begin, p, end - p);
+ memcpy(buf, prefix, strlen(prefix));
+
+ return 0;
+}
+
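
To make the naming scheme concrete, here is a small standalone sketch of the same algorithm (outside the kernel, using libc only) together with the names it produces for a few indices:

#include <stdio.h>
#include <string.h>

/* Same algorithm as sd_format_disk_name() above, in plain C. */
static int format_disk_name(const char *prefix, int index,
                            char *buf, int buflen)
{
        const int base = 'z' - 'a' + 1;
        char *begin = buf + strlen(prefix);
        char *end = buf + buflen;
        char *p = end - 1;

        *p = '\0';
        do {
                if (p == begin)
                        return -1;
                *--p = 'a' + (index % base);
                index = (index / base) - 1;
        } while (index >= 0);

        memmove(begin, p, end - p);
        memcpy(buf, prefix, strlen(prefix));
        return 0;
}

int main(void)
{
        /* Prints: 0 -> sda, 25 -> sdz, 26 -> sdaa, 701 -> sdzz, 702 -> sdaaa */
        int idx[] = { 0, 25, 26, 701, 702 };
        char name[16];
        unsigned int i;

        for (i = 0; i < sizeof(idx) / sizeof(idx[0]); i++)
                if (format_disk_name("sd", idx[i], name, sizeof(name)) == 0)
                        printf("%d -> %s\n", idx[i], name);
        return 0;
}
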
+/**
* sd_probe - called during driver initialization and whenever a
* new scsi device is attached to the system. It is called once
* for each scsi device (not just disks) present.
@@ -1801,7 +1846,7 @@ static int sd_probe(struct device *dev)
if (!sdkp)
goto out;
- gd = alloc_disk(16);
+ gd = alloc_disk(SD_MINORS);
if (!gd)
goto out_free;
@@ -1815,8 +1860,8 @@ static int sd_probe(struct device *dev)
if (error)
goto out_put;
- error = -EBUSY;
- if (index >= SD_MAX_DISKS)
+ error = sd_format_disk_name("sd", index, gd->disk_name, DISK_NAME_LEN);
+ if (error)
goto out_free_index;
sdkp->device = sdp;
@@ -1826,11 +1871,12 @@ static int sd_probe(struct device *dev)
sdkp->openers = 0;
sdkp->previous_state = 1;
- if (!sdp->timeout) {
+ if (!sdp->request_queue->rq_timeout) {
if (sdp->type != TYPE_MOD)
- sdp->timeout = SD_TIMEOUT;
+ blk_queue_rq_timeout(sdp->request_queue, SD_TIMEOUT);
else
- sdp->timeout = SD_MOD_TIMEOUT;
+ blk_queue_rq_timeout(sdp->request_queue,
+ SD_MOD_TIMEOUT);
}
device_initialize(&sdkp->dev);
@@ -1843,24 +1889,12 @@ static int sd_probe(struct device *dev)
get_device(&sdp->sdev_gendev);
- gd->major = sd_major((index & 0xf0) >> 4);
- gd->first_minor = ((index & 0xf) << 4) | (index & 0xfff00);
- gd->minors = 16;
- gd->fops = &sd_fops;
-
- if (index < 26) {
- sprintf(gd->disk_name, "sd%c", 'a' + index % 26);
- } else if (index < (26 + 1) * 26) {
- sprintf(gd->disk_name, "sd%c%c",
- 'a' + index / 26 - 1,'a' + index % 26);
- } else {
- const unsigned int m1 = (index / 26 - 1) / 26 - 1;
- const unsigned int m2 = (index / 26 - 1) % 26;
- const unsigned int m3 = index % 26;
- sprintf(gd->disk_name, "sd%c%c%c",
- 'a' + m1, 'a' + m2, 'a' + m3);
+ if (index < SD_MAX_DISKS) {
+ gd->major = sd_major((index & 0xf0) >> 4);
+ gd->first_minor = ((index & 0xf) << 4) | (index & 0xfff00);
+ gd->minors = SD_MINORS;
}
-
+ gd->fops = &sd_fops;
gd->private_data = &sdkp->driver;
gd->queue = sdkp->device->request_queue;
@@ -1869,7 +1903,7 @@ static int sd_probe(struct device *dev)
blk_queue_prep_rq(sdp->request_queue, sd_prep_fn);
gd->driverfs_dev = &sdp->sdev_gendev;
- gd->flags = GENHD_FL_DRIVERFS;
+ gd->flags = GENHD_FL_EXT_DEVT | GENHD_FL_DRIVERFS;
if (sdp->removable)
gd->flags |= GENHD_FL_REMOVABLE;
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index 661f9f21650..ba9b9bbd4e7 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -47,7 +47,6 @@ static int sg_version_num = 30534; /* 2 digits for each component */
#include <linux/seq_file.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
-#include <linux/scatterlist.h>
#include <linux/blktrace_api.h>
#include <linux/smp_lock.h>
@@ -69,7 +68,6 @@ static void sg_proc_cleanup(void);
#endif
#define SG_ALLOW_DIO_DEF 0
-#define SG_ALLOW_DIO_CODE /* compile out by commenting this define */
#define SG_MAX_DEVS 32768
@@ -118,8 +116,8 @@ typedef struct sg_scatter_hold { /* holding area for scsi scatter gather info */
unsigned short k_use_sg; /* Count of kernel scatter-gather pieces */
unsigned sglist_len; /* size of malloc'd scatter-gather list ++ */
unsigned bufflen; /* Size of (aggregate) data buffer */
- unsigned b_malloc_len; /* actual len malloc'ed in buffer */
- struct scatterlist *buffer;/* scatter list */
+ struct page **pages;
+ int page_order;
char dio_in_use; /* 0->indirect IO (or mmap), 1->dio */
unsigned char cmd_opcode; /* first byte of command */
} Sg_scatter_hold;
@@ -137,6 +135,8 @@ typedef struct sg_request { /* SG_MAX_QUEUE requests outstanding per file */
char orphan; /* 1 -> drop on sight, 0 -> normal */
char sg_io_owned; /* 1 -> packet belongs to SG_IO */
volatile char done; /* 0->before bh, 1->before read, 2->read */
+ struct request *rq;
+ struct bio *bio;
} Sg_request;
typedef struct sg_fd { /* holds the state of a file descriptor */
@@ -175,8 +175,8 @@ typedef struct sg_device { /* holds the state of each scsi generic device */
static int sg_fasync(int fd, struct file *filp, int mode);
/* tasklet or soft irq callback */
-static void sg_cmd_done(void *data, char *sense, int result, int resid);
-static int sg_start_req(Sg_request * srp);
+static void sg_rq_end_io(struct request *rq, int uptodate);
+static int sg_start_req(Sg_request *srp, unsigned char *cmd);
static void sg_finish_rem_req(Sg_request * srp);
static int sg_build_indirect(Sg_scatter_hold * schp, Sg_fd * sfp, int buff_size);
static int sg_build_sgat(Sg_scatter_hold * schp, const Sg_fd * sfp,
@@ -188,17 +188,11 @@ static ssize_t sg_new_write(Sg_fd *sfp, struct file *file,
int read_only, Sg_request **o_srp);
static int sg_common_write(Sg_fd * sfp, Sg_request * srp,
unsigned char *cmnd, int timeout, int blocking);
-static int sg_u_iovec(sg_io_hdr_t * hp, int sg_num, int ind,
- int wr_xf, int *countp, unsigned char __user **up);
-static int sg_write_xfer(Sg_request * srp);
-static int sg_read_xfer(Sg_request * srp);
static int sg_read_oxfer(Sg_request * srp, char __user *outp, int num_read_xfer);
static void sg_remove_scat(Sg_scatter_hold * schp);
static void sg_build_reserve(Sg_fd * sfp, int req_size);
static void sg_link_reserve(Sg_fd * sfp, Sg_request * srp, int size);
static void sg_unlink_reserve(Sg_fd * sfp, Sg_request * srp);
-static struct page *sg_page_malloc(int rqSz, int lowDma, int *retSzp);
-static void sg_page_free(struct page *page, int size);
static Sg_fd *sg_add_sfp(Sg_device * sdp, int dev);
static int sg_remove_sfp(Sg_device * sdp, Sg_fd * sfp);
static void __sg_remove_sfp(Sg_device * sdp, Sg_fd * sfp);
@@ -206,7 +200,6 @@ static Sg_request *sg_get_rq_mark(Sg_fd * sfp, int pack_id);
static Sg_request *sg_add_request(Sg_fd * sfp);
static int sg_remove_request(Sg_fd * sfp, Sg_request * srp);
static int sg_res_in_use(Sg_fd * sfp);
-static int sg_build_direct(Sg_request * srp, Sg_fd * sfp, int dxfer_len);
static Sg_device *sg_get_dev(int dev);
#ifdef CONFIG_SCSI_PROC_FS
static int sg_last_dev(void);
@@ -529,8 +522,7 @@ sg_new_read(Sg_fd * sfp, char __user *buf, size_t count, Sg_request * srp)
err = -EFAULT;
goto err_out;
}
- err = sg_read_xfer(srp);
- err_out:
+err_out:
sg_finish_rem_req(srp);
return (0 == err) ? count : err;
}
@@ -612,7 +604,10 @@ sg_write(struct file *filp, const char __user *buf, size_t count, loff_t * ppos)
else
hp->dxfer_direction = (mxsize > 0) ? SG_DXFER_FROM_DEV : SG_DXFER_NONE;
hp->dxfer_len = mxsize;
- hp->dxferp = (char __user *)buf + cmd_size;
+ if (hp->dxfer_direction == SG_DXFER_TO_DEV)
+ hp->dxferp = (char __user *)buf + cmd_size;
+ else
+ hp->dxferp = NULL;
hp->sbp = NULL;
hp->timeout = old_hdr.reply_len; /* structure abuse ... */
hp->flags = input_size; /* structure abuse ... */
@@ -732,16 +727,12 @@ sg_common_write(Sg_fd * sfp, Sg_request * srp,
SCSI_LOG_TIMEOUT(4, printk("sg_common_write: scsi opcode=0x%02x, cmd_size=%d\n",
(int) cmnd[0], (int) hp->cmd_len));
- if ((k = sg_start_req(srp))) {
+ k = sg_start_req(srp, cmnd);
+ if (k) {
SCSI_LOG_TIMEOUT(1, printk("sg_common_write: start_req err=%d\n", k));
sg_finish_rem_req(srp);
return k; /* probably out of space --> ENOMEM */
}
- if ((k = sg_write_xfer(srp))) {
- SCSI_LOG_TIMEOUT(1, printk("sg_common_write: write_xfer, bad address\n"));
- sg_finish_rem_req(srp);
- return k;
- }
if (sdp->detached) {
sg_finish_rem_req(srp);
return -ENODEV;
@@ -763,20 +754,11 @@ sg_common_write(Sg_fd * sfp, Sg_request * srp,
break;
}
hp->duration = jiffies_to_msecs(jiffies);
-/* Now send everything of to mid-level. The next time we hear about this
- packet is when sg_cmd_done() is called (i.e. a callback). */
- if (scsi_execute_async(sdp->device, cmnd, hp->cmd_len, data_dir, srp->data.buffer,
- hp->dxfer_len, srp->data.k_use_sg, timeout,
- SG_DEFAULT_RETRIES, srp, sg_cmd_done,
- GFP_ATOMIC)) {
- SCSI_LOG_TIMEOUT(1, printk("sg_common_write: scsi_execute_async failed\n"));
- /*
- * most likely out of mem, but could also be a bad map
- */
- sg_finish_rem_req(srp);
- return -ENOMEM;
- } else
- return 0;
+
+ srp->rq->timeout = timeout;
+ blk_execute_rq_nowait(sdp->device->request_queue, sdp->disk,
+ srp->rq, 1, sg_rq_end_io);
+ return 0;
}
static int
@@ -1192,8 +1174,7 @@ sg_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
Sg_fd *sfp;
unsigned long offset, len, sa;
Sg_scatter_hold *rsv_schp;
- struct scatterlist *sg;
- int k;
+ int k, length;
if ((NULL == vma) || (!(sfp = (Sg_fd *) vma->vm_private_data)))
return VM_FAULT_SIGBUS;
@@ -1203,15 +1184,14 @@ sg_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
return VM_FAULT_SIGBUS;
SCSI_LOG_TIMEOUT(3, printk("sg_vma_fault: offset=%lu, scatg=%d\n",
offset, rsv_schp->k_use_sg));
- sg = rsv_schp->buffer;
sa = vma->vm_start;
- for (k = 0; (k < rsv_schp->k_use_sg) && (sa < vma->vm_end);
- ++k, sg = sg_next(sg)) {
+ length = 1 << (PAGE_SHIFT + rsv_schp->page_order);
+ for (k = 0; k < rsv_schp->k_use_sg && sa < vma->vm_end; k++) {
len = vma->vm_end - sa;
- len = (len < sg->length) ? len : sg->length;
+ len = (len < length) ? len : length;
if (offset < len) {
- struct page *page;
- page = virt_to_page(page_address(sg_page(sg)) + offset);
+ struct page *page = nth_page(rsv_schp->pages[k],
+ offset >> PAGE_SHIFT);
get_page(page); /* increment page count */
vmf->page = page;
return 0; /* success */
@@ -1233,8 +1213,7 @@ sg_mmap(struct file *filp, struct vm_area_struct *vma)
Sg_fd *sfp;
unsigned long req_sz, len, sa;
Sg_scatter_hold *rsv_schp;
- int k;
- struct scatterlist *sg;
+ int k, length;
if ((!filp) || (!vma) || (!(sfp = (Sg_fd *) filp->private_data)))
return -ENXIO;
@@ -1248,11 +1227,10 @@ sg_mmap(struct file *filp, struct vm_area_struct *vma)
return -ENOMEM; /* cannot map more than reserved buffer */
sa = vma->vm_start;
- sg = rsv_schp->buffer;
- for (k = 0; (k < rsv_schp->k_use_sg) && (sa < vma->vm_end);
- ++k, sg = sg_next(sg)) {
+ length = 1 << (PAGE_SHIFT + rsv_schp->page_order);
+ for (k = 0; k < rsv_schp->k_use_sg && sa < vma->vm_end; k++) {
len = vma->vm_end - sa;
- len = (len < sg->length) ? len : sg->length;
+ len = (len < length) ? len : length;
sa += len;
}
@@ -1263,16 +1241,19 @@ sg_mmap(struct file *filp, struct vm_area_struct *vma)
return 0;
}
-/* This function is a "bottom half" handler that is called by the
- * mid level when a command is completed (or has failed). */
-static void
-sg_cmd_done(void *data, char *sense, int result, int resid)
+/*
+ * This function is a "bottom half" handler that is called by the mid
+ * level when a command is completed (or has failed).
+ */
+static void sg_rq_end_io(struct request *rq, int uptodate)
{
- Sg_request *srp = data;
+ struct sg_request *srp = rq->end_io_data;
Sg_device *sdp = NULL;
Sg_fd *sfp;
unsigned long iflags;
unsigned int ms;
+ char *sense;
+ int result, resid;
if (NULL == srp) {
printk(KERN_ERR "sg_cmd_done: NULL request\n");
@@ -1286,6 +1267,9 @@ sg_cmd_done(void *data, char *sense, int result, int resid)
return;
}
+ sense = rq->sense;
+ result = rq->errors;
+ resid = rq->data_len;
SCSI_LOG_TIMEOUT(4, printk("sg_cmd_done: %s, pack_id=%d, res=0x%x\n",
sdp->disk->disk_name, srp->header.pack_id, result));
@@ -1296,7 +1280,6 @@ sg_cmd_done(void *data, char *sense, int result, int resid)
if (0 != result) {
struct scsi_sense_hdr sshdr;
- memcpy(srp->sense_b, sense, sizeof (srp->sense_b));
srp->header.status = 0xff & result;
srp->header.masked_status = status_byte(result);
srp->header.msg_status = msg_byte(result);
@@ -1634,37 +1617,79 @@ exit_sg(void)
idr_destroy(&sg_index_idr);
}
-static int
-sg_start_req(Sg_request * srp)
+static int sg_start_req(Sg_request *srp, unsigned char *cmd)
{
int res;
+ struct request *rq;
Sg_fd *sfp = srp->parentfp;
sg_io_hdr_t *hp = &srp->header;
int dxfer_len = (int) hp->dxfer_len;
int dxfer_dir = hp->dxfer_direction;
+ unsigned int iov_count = hp->iovec_count;
Sg_scatter_hold *req_schp = &srp->data;
Sg_scatter_hold *rsv_schp = &sfp->reserve;
+ struct request_queue *q = sfp->parentdp->device->request_queue;
+ struct rq_map_data *md, map_data;
+ int rw = hp->dxfer_direction == SG_DXFER_TO_DEV ? WRITE : READ;
+
+ SCSI_LOG_TIMEOUT(4, printk(KERN_INFO "sg_start_req: dxfer_len=%d\n",
+ dxfer_len));
+
+ rq = blk_get_request(q, rw, GFP_ATOMIC);
+ if (!rq)
+ return -ENOMEM;
+
+ memcpy(rq->cmd, cmd, hp->cmd_len);
+
+ rq->cmd_len = hp->cmd_len;
+ rq->cmd_type = REQ_TYPE_BLOCK_PC;
+
+ srp->rq = rq;
+ rq->end_io_data = srp;
+ rq->sense = srp->sense_b;
+ rq->retries = SG_DEFAULT_RETRIES;
- SCSI_LOG_TIMEOUT(4, printk("sg_start_req: dxfer_len=%d\n", dxfer_len));
if ((dxfer_len <= 0) || (dxfer_dir == SG_DXFER_NONE))
return 0;
- if (sg_allow_dio && (hp->flags & SG_FLAG_DIRECT_IO) &&
- (dxfer_dir != SG_DXFER_UNKNOWN) && (0 == hp->iovec_count) &&
- (!sfp->parentdp->device->host->unchecked_isa_dma)) {
- res = sg_build_direct(srp, sfp, dxfer_len);
- if (res <= 0) /* -ve -> error, 0 -> done, 1 -> try indirect */
- return res;
- }
- if ((!sg_res_in_use(sfp)) && (dxfer_len <= rsv_schp->bufflen))
- sg_link_reserve(sfp, srp, dxfer_len);
- else {
- res = sg_build_indirect(req_schp, sfp, dxfer_len);
- if (res) {
- sg_remove_scat(req_schp);
- return res;
+
+ if (sg_allow_dio && hp->flags & SG_FLAG_DIRECT_IO &&
+ dxfer_dir != SG_DXFER_UNKNOWN && !iov_count &&
+ !sfp->parentdp->device->host->unchecked_isa_dma &&
+ blk_rq_aligned(q, hp->dxferp, dxfer_len))
+ md = NULL;
+ else
+ md = &map_data;
+
+ if (md) {
+ if (!sg_res_in_use(sfp) && dxfer_len <= rsv_schp->bufflen)
+ sg_link_reserve(sfp, srp, dxfer_len);
+ else {
+ res = sg_build_indirect(req_schp, sfp, dxfer_len);
+ if (res)
+ return res;
}
+
+ md->pages = req_schp->pages;
+ md->page_order = req_schp->page_order;
+ md->nr_entries = req_schp->k_use_sg;
}
- return 0;
+
+ if (iov_count)
+ res = blk_rq_map_user_iov(q, rq, md, hp->dxferp, iov_count,
+ hp->dxfer_len, GFP_ATOMIC);
+ else
+ res = blk_rq_map_user(q, rq, md, hp->dxferp,
+ hp->dxfer_len, GFP_ATOMIC);
+
+ if (!res) {
+ srp->bio = rq->bio;
+
+ if (!md) {
+ req_schp->dio_in_use = 1;
+ hp->info |= SG_INFO_DIRECT_IO;
+ }
+ }
+ return res;
}
static void
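When the command moves data, the rewritten sg_start_req() above chooses between two mappings: if direct IO is allowed and the user buffer passes blk_rq_aligned(), the buffer goes straight to blk_rq_map_user() with a NULL rq_map_data (zero copy); otherwise a struct rq_map_data points the block layer at the driver's own page array so the data is copied through it. A hedged sketch of that split, using the same 2.6.28-era calls; map_user_data() and its parameters are stand-ins for the patch's local variables.

#include <linux/blkdev.h>

/* hypothetical helper mirroring the md / !md decision in sg_start_req() */
static int map_user_data(struct request_queue *q, struct request *rq,
			 void __user *ubuf, unsigned int len,
			 struct page **pages, int page_order, int nr_entries)
{
	struct rq_map_data map_data, *md = NULL;

	if (!blk_rq_aligned(q, ubuf, len)) {
		/* unsuitable for direct IO: bounce through our own pages */
		map_data.pages = pages;			/* one entry per segment */
		map_data.page_order = page_order;	/* each entry spans 1 << order pages */
		map_data.nr_entries = nr_entries;
		md = &map_data;
	}

	/* md == NULL pins the user pages and wires them into the request */
	return blk_rq_map_user(q, rq, md, ubuf, len, GFP_ATOMIC);
}

On success the caller must remember rq->bio so blk_rq_unmap_user() can undo the mapping later, which is what srp->bio and the new code in sg_finish_rem_req() are for.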
@@ -1678,186 +1703,37 @@ sg_finish_rem_req(Sg_request * srp)
sg_unlink_reserve(sfp, srp);
else
sg_remove_scat(req_schp);
+
+ if (srp->rq) {
+ if (srp->bio)
+ blk_rq_unmap_user(srp->bio);
+
+ blk_put_request(srp->rq);
+ }
+
sg_remove_request(sfp, srp);
}
static int
sg_build_sgat(Sg_scatter_hold * schp, const Sg_fd * sfp, int tablesize)
{
- int sg_bufflen = tablesize * sizeof(struct scatterlist);
+ int sg_bufflen = tablesize * sizeof(struct page *);
gfp_t gfp_flags = GFP_ATOMIC | __GFP_NOWARN;
- /*
- * TODO: test without low_dma, we should not need it since
- * the block layer will bounce the buffer for us
- *
- * XXX(hch): we shouldn't need GFP_DMA for the actual S/G list.
- */
- if (sfp->low_dma)
- gfp_flags |= GFP_DMA;
- schp->buffer = kzalloc(sg_bufflen, gfp_flags);
- if (!schp->buffer)
+ schp->pages = kzalloc(sg_bufflen, gfp_flags);
+ if (!schp->pages)
return -ENOMEM;
- sg_init_table(schp->buffer, tablesize);
schp->sglist_len = sg_bufflen;
return tablesize; /* number of scat_gath elements allocated */
}
-#ifdef SG_ALLOW_DIO_CODE
-/* vvvvvvvv following code borrowed from st driver's direct IO vvvvvvvvv */
- /* TODO: hopefully we can use the generic block layer code */
-
-/* Pin down user pages and put them into a scatter gather list. Returns <= 0 if
- - mapping of all pages not successful
- (i.e., either completely successful or fails)
-*/
-static int
-st_map_user_pages(struct scatterlist *sgl, const unsigned int max_pages,
- unsigned long uaddr, size_t count, int rw)
-{
- unsigned long end = (uaddr + count + PAGE_SIZE - 1) >> PAGE_SHIFT;
- unsigned long start = uaddr >> PAGE_SHIFT;
- const int nr_pages = end - start;
- int res, i, j;
- struct page **pages;
-
- /* User attempted Overflow! */
- if ((uaddr + count) < uaddr)
- return -EINVAL;
-
- /* Too big */
- if (nr_pages > max_pages)
- return -ENOMEM;
-
- /* Hmm? */
- if (count == 0)
- return 0;
-
- if ((pages = kmalloc(max_pages * sizeof(*pages), GFP_ATOMIC)) == NULL)
- return -ENOMEM;
-
- /* Try to fault in all of the necessary pages */
- down_read(&current->mm->mmap_sem);
- /* rw==READ means read from drive, write into memory area */
- res = get_user_pages(
- current,
- current->mm,
- uaddr,
- nr_pages,
- rw == READ,
- 0, /* don't force */
- pages,
- NULL);
- up_read(&current->mm->mmap_sem);
-
- /* Errors and no page mapped should return here */
- if (res < nr_pages)
- goto out_unmap;
-
- for (i=0; i < nr_pages; i++) {
- /* FIXME: flush superflous for rw==READ,
- * probably wrong function for rw==WRITE
- */
- flush_dcache_page(pages[i]);
- /* ?? Is locking needed? I don't think so */
- /* if (!trylock_page(pages[i]))
- goto out_unlock; */
- }
-
- sg_set_page(sgl, pages[0], 0, uaddr & ~PAGE_MASK);
- if (nr_pages > 1) {
- sgl[0].length = PAGE_SIZE - sgl[0].offset;
- count -= sgl[0].length;
- for (i=1; i < nr_pages ; i++)
- sg_set_page(&sgl[i], pages[i], count < PAGE_SIZE ? count : PAGE_SIZE, 0);
- }
- else {
- sgl[0].length = count;
- }
-
- kfree(pages);
- return nr_pages;
-
- out_unmap:
- if (res > 0) {
- for (j=0; j < res; j++)
- page_cache_release(pages[j]);
- res = 0;
- }
- kfree(pages);
- return res;
-}
-
-
-/* And unmap them... */
-static int
-st_unmap_user_pages(struct scatterlist *sgl, const unsigned int nr_pages,
- int dirtied)
-{
- int i;
-
- for (i=0; i < nr_pages; i++) {
- struct page *page = sg_page(&sgl[i]);
-
- if (dirtied)
- SetPageDirty(page);
- /* unlock_page(page); */
- /* FIXME: cache flush missing for rw==READ
- * FIXME: call the correct reference counting function
- */
- page_cache_release(page);
- }
-
- return 0;
-}
-
-/* ^^^^^^^^ above code borrowed from st driver's direct IO ^^^^^^^^^ */
-#endif
-
-
-/* Returns: -ve -> error, 0 -> done, 1 -> try indirect */
-static int
-sg_build_direct(Sg_request * srp, Sg_fd * sfp, int dxfer_len)
-{
-#ifdef SG_ALLOW_DIO_CODE
- sg_io_hdr_t *hp = &srp->header;
- Sg_scatter_hold *schp = &srp->data;
- int sg_tablesize = sfp->parentdp->sg_tablesize;
- int mx_sc_elems, res;
- struct scsi_device *sdev = sfp->parentdp->device;
-
- if (((unsigned long)hp->dxferp &
- queue_dma_alignment(sdev->request_queue)) != 0)
- return 1;
-
- mx_sc_elems = sg_build_sgat(schp, sfp, sg_tablesize);
- if (mx_sc_elems <= 0) {
- return 1;
- }
- res = st_map_user_pages(schp->buffer, mx_sc_elems,
- (unsigned long)hp->dxferp, dxfer_len,
- (SG_DXFER_TO_DEV == hp->dxfer_direction) ? 1 : 0);
- if (res <= 0) {
- sg_remove_scat(schp);
- return 1;
- }
- schp->k_use_sg = res;
- schp->dio_in_use = 1;
- hp->info |= SG_INFO_DIRECT_IO;
- return 0;
-#else
- return 1;
-#endif
-}
-
static int
sg_build_indirect(Sg_scatter_hold * schp, Sg_fd * sfp, int buff_size)
{
- struct scatterlist *sg;
- int ret_sz = 0, k, rem_sz, num, mx_sc_elems;
+ int ret_sz = 0, i, k, rem_sz, num, mx_sc_elems;
int sg_tablesize = sfp->parentdp->sg_tablesize;
- int blk_size = buff_size;
- struct page *p = NULL;
+ int blk_size = buff_size, order;
+ gfp_t gfp_mask = GFP_ATOMIC | __GFP_COMP | __GFP_NOWARN;
if (blk_size < 0)
return -EFAULT;
@@ -1881,15 +1757,26 @@ sg_build_indirect(Sg_scatter_hold * schp, Sg_fd * sfp, int buff_size)
} else
scatter_elem_sz_prev = num;
}
- for (k = 0, sg = schp->buffer, rem_sz = blk_size;
- (rem_sz > 0) && (k < mx_sc_elems);
- ++k, rem_sz -= ret_sz, sg = sg_next(sg)) {
-
+
+ if (sfp->low_dma)
+ gfp_mask |= GFP_DMA;
+
+ if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
+ gfp_mask |= __GFP_ZERO;
+
+ order = get_order(num);
+retry:
+ ret_sz = 1 << (PAGE_SHIFT + order);
+
+ for (k = 0, rem_sz = blk_size; rem_sz > 0 && k < mx_sc_elems;
+ k++, rem_sz -= ret_sz) {
+
num = (rem_sz > scatter_elem_sz_prev) ?
- scatter_elem_sz_prev : rem_sz;
- p = sg_page_malloc(num, sfp->low_dma, &ret_sz);
- if (!p)
- return -ENOMEM;
+ scatter_elem_sz_prev : rem_sz;
+
+ schp->pages[k] = alloc_pages(gfp_mask, order);
+ if (!schp->pages[k])
+ goto out;
if (num == scatter_elem_sz_prev) {
if (unlikely(ret_sz > scatter_elem_sz_prev)) {
@@ -1897,12 +1784,12 @@ sg_build_indirect(Sg_scatter_hold * schp, Sg_fd * sfp, int buff_size)
scatter_elem_sz_prev = ret_sz;
}
}
- sg_set_page(sg, p, (ret_sz > num) ? num : ret_sz, 0);
SCSI_LOG_TIMEOUT(5, printk("sg_build_indirect: k=%d, num=%d, "
"ret_sz=%d\n", k, num, ret_sz));
} /* end of for loop */
+ schp->page_order = order;
schp->k_use_sg = k;
SCSI_LOG_TIMEOUT(5, printk("sg_build_indirect: k_use_sg=%d, "
"rem_sz=%d\n", k, rem_sz));
@@ -1910,223 +1797,42 @@ sg_build_indirect(Sg_scatter_hold * schp, Sg_fd * sfp, int buff_size)
schp->bufflen = blk_size;
if (rem_sz > 0) /* must have failed */
return -ENOMEM;
-
return 0;
-}
-
-static int
-sg_write_xfer(Sg_request * srp)
-{
- sg_io_hdr_t *hp = &srp->header;
- Sg_scatter_hold *schp = &srp->data;
- struct scatterlist *sg = schp->buffer;
- int num_xfer = 0;
- int j, k, onum, usglen, ksglen, res;
- int iovec_count = (int) hp->iovec_count;
- int dxfer_dir = hp->dxfer_direction;
- unsigned char *p;
- unsigned char __user *up;
- int new_interface = ('\0' == hp->interface_id) ? 0 : 1;
-
- if ((SG_DXFER_UNKNOWN == dxfer_dir) || (SG_DXFER_TO_DEV == dxfer_dir) ||
- (SG_DXFER_TO_FROM_DEV == dxfer_dir)) {
- num_xfer = (int) (new_interface ? hp->dxfer_len : hp->flags);
- if (schp->bufflen < num_xfer)
- num_xfer = schp->bufflen;
- }
- if ((num_xfer <= 0) || (schp->dio_in_use) ||
- (new_interface
- && ((SG_FLAG_NO_DXFER | SG_FLAG_MMAP_IO) & hp->flags)))
- return 0;
-
- SCSI_LOG_TIMEOUT(4, printk("sg_write_xfer: num_xfer=%d, iovec_count=%d, k_use_sg=%d\n",
- num_xfer, iovec_count, schp->k_use_sg));
- if (iovec_count) {
- onum = iovec_count;
- if (!access_ok(VERIFY_READ, hp->dxferp, SZ_SG_IOVEC * onum))
- return -EFAULT;
- } else
- onum = 1;
-
- ksglen = sg->length;
- p = page_address(sg_page(sg));
- for (j = 0, k = 0; j < onum; ++j) {
- res = sg_u_iovec(hp, iovec_count, j, 1, &usglen, &up);
- if (res)
- return res;
-
- for (; p; sg = sg_next(sg), ksglen = sg->length,
- p = page_address(sg_page(sg))) {
- if (usglen <= 0)
- break;
- if (ksglen > usglen) {
- if (usglen >= num_xfer) {
- if (__copy_from_user(p, up, num_xfer))
- return -EFAULT;
- return 0;
- }
- if (__copy_from_user(p, up, usglen))
- return -EFAULT;
- p += usglen;
- ksglen -= usglen;
- break;
- } else {
- if (ksglen >= num_xfer) {
- if (__copy_from_user(p, up, num_xfer))
- return -EFAULT;
- return 0;
- }
- if (__copy_from_user(p, up, ksglen))
- return -EFAULT;
- up += ksglen;
- usglen -= ksglen;
- }
- ++k;
- if (k >= schp->k_use_sg)
- return 0;
- }
- }
-
- return 0;
-}
+out:
+ for (i = 0; i < k; i++)
+		__free_pages(schp->pages[i], order);
-static int
-sg_u_iovec(sg_io_hdr_t * hp, int sg_num, int ind,
- int wr_xf, int *countp, unsigned char __user **up)
-{
- int num_xfer = (int) hp->dxfer_len;
- unsigned char __user *p = hp->dxferp;
- int count;
+ if (--order >= 0)
+ goto retry;
- if (0 == sg_num) {
- if (wr_xf && ('\0' == hp->interface_id))
- count = (int) hp->flags; /* holds "old" input_size */
- else
- count = num_xfer;
- } else {
- sg_iovec_t iovec;
- if (__copy_from_user(&iovec, p + ind*SZ_SG_IOVEC, SZ_SG_IOVEC))
- return -EFAULT;
- p = iovec.iov_base;
- count = (int) iovec.iov_len;
- }
- if (!access_ok(wr_xf ? VERIFY_READ : VERIFY_WRITE, p, count))
- return -EFAULT;
- if (up)
- *up = p;
- if (countp)
- *countp = count;
- return 0;
+ return -ENOMEM;
}
static void
sg_remove_scat(Sg_scatter_hold * schp)
{
SCSI_LOG_TIMEOUT(4, printk("sg_remove_scat: k_use_sg=%d\n", schp->k_use_sg));
- if (schp->buffer && (schp->sglist_len > 0)) {
- struct scatterlist *sg = schp->buffer;
-
- if (schp->dio_in_use) {
-#ifdef SG_ALLOW_DIO_CODE
- st_unmap_user_pages(sg, schp->k_use_sg, TRUE);
-#endif
- } else {
+ if (schp->pages && schp->sglist_len > 0) {
+ if (!schp->dio_in_use) {
int k;
- for (k = 0; (k < schp->k_use_sg) && sg_page(sg);
- ++k, sg = sg_next(sg)) {
+ for (k = 0; k < schp->k_use_sg && schp->pages[k]; k++) {
SCSI_LOG_TIMEOUT(5, printk(
- "sg_remove_scat: k=%d, pg=0x%p, len=%d\n",
- k, sg_page(sg), sg->length));
- sg_page_free(sg_page(sg), sg->length);
+ "sg_remove_scat: k=%d, pg=0x%p\n",
+ k, schp->pages[k]));
+ __free_pages(schp->pages[k], schp->page_order);
}
- }
- kfree(schp->buffer);
- }
- memset(schp, 0, sizeof (*schp));
-}
-static int
-sg_read_xfer(Sg_request * srp)
-{
- sg_io_hdr_t *hp = &srp->header;
- Sg_scatter_hold *schp = &srp->data;
- struct scatterlist *sg = schp->buffer;
- int num_xfer = 0;
- int j, k, onum, usglen, ksglen, res;
- int iovec_count = (int) hp->iovec_count;
- int dxfer_dir = hp->dxfer_direction;
- unsigned char *p;
- unsigned char __user *up;
- int new_interface = ('\0' == hp->interface_id) ? 0 : 1;
-
- if ((SG_DXFER_UNKNOWN == dxfer_dir) || (SG_DXFER_FROM_DEV == dxfer_dir)
- || (SG_DXFER_TO_FROM_DEV == dxfer_dir)) {
- num_xfer = hp->dxfer_len;
- if (schp->bufflen < num_xfer)
- num_xfer = schp->bufflen;
- }
- if ((num_xfer <= 0) || (schp->dio_in_use) ||
- (new_interface
- && ((SG_FLAG_NO_DXFER | SG_FLAG_MMAP_IO) & hp->flags)))
- return 0;
-
- SCSI_LOG_TIMEOUT(4, printk("sg_read_xfer: num_xfer=%d, iovec_count=%d, k_use_sg=%d\n",
- num_xfer, iovec_count, schp->k_use_sg));
- if (iovec_count) {
- onum = iovec_count;
- if (!access_ok(VERIFY_READ, hp->dxferp, SZ_SG_IOVEC * onum))
- return -EFAULT;
- } else
- onum = 1;
-
- p = page_address(sg_page(sg));
- ksglen = sg->length;
- for (j = 0, k = 0; j < onum; ++j) {
- res = sg_u_iovec(hp, iovec_count, j, 0, &usglen, &up);
- if (res)
- return res;
-
- for (; p; sg = sg_next(sg), ksglen = sg->length,
- p = page_address(sg_page(sg))) {
- if (usglen <= 0)
- break;
- if (ksglen > usglen) {
- if (usglen >= num_xfer) {
- if (__copy_to_user(up, p, num_xfer))
- return -EFAULT;
- return 0;
- }
- if (__copy_to_user(up, p, usglen))
- return -EFAULT;
- p += usglen;
- ksglen -= usglen;
- break;
- } else {
- if (ksglen >= num_xfer) {
- if (__copy_to_user(up, p, num_xfer))
- return -EFAULT;
- return 0;
- }
- if (__copy_to_user(up, p, ksglen))
- return -EFAULT;
- up += ksglen;
- usglen -= ksglen;
- }
- ++k;
- if (k >= schp->k_use_sg)
- return 0;
+ kfree(schp->pages);
}
}
-
- return 0;
+ memset(schp, 0, sizeof (*schp));
}
static int
sg_read_oxfer(Sg_request * srp, char __user *outp, int num_read_xfer)
{
Sg_scatter_hold *schp = &srp->data;
- struct scatterlist *sg = schp->buffer;
int k, num;
SCSI_LOG_TIMEOUT(4, printk("sg_read_oxfer: num_read_xfer=%d\n",
@@ -2134,15 +1840,15 @@ sg_read_oxfer(Sg_request * srp, char __user *outp, int num_read_xfer)
if ((!outp) || (num_read_xfer <= 0))
return 0;
- for (k = 0; (k < schp->k_use_sg) && sg_page(sg); ++k, sg = sg_next(sg)) {
- num = sg->length;
+ num = 1 << (PAGE_SHIFT + schp->page_order);
+ for (k = 0; k < schp->k_use_sg && schp->pages[k]; k++) {
if (num > num_read_xfer) {
- if (__copy_to_user(outp, page_address(sg_page(sg)),
+ if (__copy_to_user(outp, page_address(schp->pages[k]),
num_read_xfer))
return -EFAULT;
break;
} else {
- if (__copy_to_user(outp, page_address(sg_page(sg)),
+ if (__copy_to_user(outp, page_address(schp->pages[k]),
num))
return -EFAULT;
num_read_xfer -= num;
@@ -2177,24 +1883,21 @@ sg_link_reserve(Sg_fd * sfp, Sg_request * srp, int size)
{
Sg_scatter_hold *req_schp = &srp->data;
Sg_scatter_hold *rsv_schp = &sfp->reserve;
- struct scatterlist *sg = rsv_schp->buffer;
int k, num, rem;
srp->res_used = 1;
SCSI_LOG_TIMEOUT(4, printk("sg_link_reserve: size=%d\n", size));
rem = size;
- for (k = 0; k < rsv_schp->k_use_sg; ++k, sg = sg_next(sg)) {
- num = sg->length;
+ num = 1 << (PAGE_SHIFT + rsv_schp->page_order);
+ for (k = 0; k < rsv_schp->k_use_sg; k++) {
if (rem <= num) {
- sfp->save_scat_len = num;
- sg->length = rem;
req_schp->k_use_sg = k + 1;
req_schp->sglist_len = rsv_schp->sglist_len;
- req_schp->buffer = rsv_schp->buffer;
+ req_schp->pages = rsv_schp->pages;
req_schp->bufflen = size;
- req_schp->b_malloc_len = rsv_schp->b_malloc_len;
+ req_schp->page_order = rsv_schp->page_order;
break;
} else
rem -= num;
@@ -2208,22 +1911,13 @@ static void
sg_unlink_reserve(Sg_fd * sfp, Sg_request * srp)
{
Sg_scatter_hold *req_schp = &srp->data;
- Sg_scatter_hold *rsv_schp = &sfp->reserve;
SCSI_LOG_TIMEOUT(4, printk("sg_unlink_reserve: req->k_use_sg=%d\n",
(int) req_schp->k_use_sg));
- if ((rsv_schp->k_use_sg > 0) && (req_schp->k_use_sg > 0)) {
- struct scatterlist *sg = rsv_schp->buffer;
-
- if (sfp->save_scat_len > 0)
- (sg + (req_schp->k_use_sg - 1))->length =
- (unsigned) sfp->save_scat_len;
- else
- SCSI_LOG_TIMEOUT(1, printk ("sg_unlink_reserve: BAD save_scat_len\n"));
- }
req_schp->k_use_sg = 0;
req_schp->bufflen = 0;
- req_schp->buffer = NULL;
+ req_schp->pages = NULL;
+ req_schp->page_order = 0;
req_schp->sglist_len = 0;
sfp->save_scat_len = 0;
srp->res_used = 0;
@@ -2481,53 +2175,6 @@ sg_res_in_use(Sg_fd * sfp)
return srp ? 1 : 0;
}
-/* The size fetched (value output via retSzp) set when non-NULL return */
-static struct page *
-sg_page_malloc(int rqSz, int lowDma, int *retSzp)
-{
- struct page *resp = NULL;
- gfp_t page_mask;
- int order, a_size;
- int resSz;
-
- if ((rqSz <= 0) || (NULL == retSzp))
- return resp;
-
- if (lowDma)
- page_mask = GFP_ATOMIC | GFP_DMA | __GFP_COMP | __GFP_NOWARN;
- else
- page_mask = GFP_ATOMIC | __GFP_COMP | __GFP_NOWARN;
-
- for (order = 0, a_size = PAGE_SIZE; a_size < rqSz;
- order++, a_size <<= 1) ;
- resSz = a_size; /* rounded up if necessary */
- resp = alloc_pages(page_mask, order);
- while ((!resp) && order) {
- --order;
- a_size >>= 1; /* divide by 2, until PAGE_SIZE */
- resp = alloc_pages(page_mask, order); /* try half */
- resSz = a_size;
- }
- if (resp) {
- if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
- memset(page_address(resp), 0, resSz);
- *retSzp = resSz;
- }
- return resp;
-}
-
-static void
-sg_page_free(struct page *page, int size)
-{
- int order, a_size;
-
- if (!page)
- return;
- for (order = 0, a_size = PAGE_SIZE; a_size < size;
- order++, a_size <<= 1) ;
- __free_pages(page, order);
-}
-
#ifdef CONFIG_SCSI_PROC_FS
static int
sg_idr_max_id(int id, void *p, void *data)
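Taken together, the sg.c changes let a suitably aligned user buffer be wired straight into the request rather than staged in the reserve buffer. Below is a userspace illustration of asking for that path; it assumes /dev/sg0 exists, the caller may open it, and the sg module was loaded with allow_dio=1 (otherwise the request quietly falls back to indirect IO). Whether direct IO actually happened is reported back in hdr.info.

#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <scsi/sg.h>

int main(void)
{
	unsigned char cdb[6] = { 0x12, 0, 0, 0, 96, 0 };	/* INQUIRY */
	unsigned char sense[32];
	unsigned char *buf;
	sg_io_hdr_t hdr;
	int fd = open("/dev/sg0", O_RDWR);

	if (fd < 0 || posix_memalign((void **)&buf, 4096, 96))
		return 1;

	memset(&hdr, 0, sizeof(hdr));
	hdr.interface_id = 'S';
	hdr.dxfer_direction = SG_DXFER_FROM_DEV;
	hdr.cmdp = cdb;
	hdr.cmd_len = sizeof(cdb);
	hdr.dxferp = buf;		/* page aligned, so eligible for direct IO */
	hdr.dxfer_len = 96;
	hdr.sbp = sense;
	hdr.mx_sb_len = sizeof(sense);
	hdr.timeout = 5000;		/* milliseconds */
	hdr.flags = SG_FLAG_DIRECT_IO;	/* request, but do not require, direct IO */

	if (ioctl(fd, SG_IO, &hdr) == 0)
		printf("status=%d, direct_io=%d\n", hdr.status,
		       !!(hdr.info & SG_INFO_DIRECT_IO));
	close(fd);
	return 0;
}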
diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c
index 27f5bfd1def..0f17009c99d 100644
--- a/drivers/scsi/sr.c
+++ b/drivers/scsi/sr.c
@@ -331,7 +331,7 @@ static int sr_done(struct scsi_cmnd *SCpnt)
static int sr_prep_fn(struct request_queue *q, struct request *rq)
{
- int block=0, this_count, s_size, timeout = SR_TIMEOUT;
+ int block = 0, this_count, s_size;
struct scsi_cd *cd;
struct scsi_cmnd *SCpnt;
struct scsi_device *sdp = q->queuedata;
@@ -461,7 +461,6 @@ static int sr_prep_fn(struct request_queue *q, struct request *rq)
SCpnt->transfersize = cd->device->sector_size;
SCpnt->underflow = this_count << 9;
SCpnt->allowed = MAX_RETRIES;
- SCpnt->timeout_per_command = timeout;
/*
* This indicates that the command is ready from our end to be
@@ -620,6 +619,8 @@ static int sr_probe(struct device *dev)
disk->fops = &sr_bdops;
disk->flags = GENHD_FL_CD;
+ blk_queue_rq_timeout(sdev->request_queue, SR_TIMEOUT);
+
cd->device = sdev;
cd->disk = disk;
cd->driver = &sr_template;
@@ -878,7 +879,7 @@ static void sr_kref_release(struct kref *kref)
struct gendisk *disk = cd->disk;
spin_lock(&sr_index_lock);
- clear_bit(disk->first_minor, sr_index_bits);
+ clear_bit(MINOR(disk_devt(disk)), sr_index_bits);
spin_unlock(&sr_index_lock);
unregister_cdrom(&cd->cdi);
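sr no longer stamps a timeout on each command: the prep function drops timeout_per_command and the probe path installs one default for the whole queue with blk_queue_rq_timeout(). A minimal hedged sketch of the same idiom; example_slave_configure() and the 30-second value are illustrative only, not taken from the patch.

#include <linux/blkdev.h>
#include <scsi/scsi_device.h>

/* hypothetical hook: one default timeout for every request on this queue */
static int example_slave_configure(struct scsi_device *sdev)
{
	blk_queue_rq_timeout(sdev->request_queue, 30 * HZ);
	return 0;
}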
diff --git a/drivers/scsi/sym53c8xx_2/sym_glue.c b/drivers/scsi/sym53c8xx_2/sym_glue.c
index d39107b7669..f4e6cde1fd0 100644
--- a/drivers/scsi/sym53c8xx_2/sym_glue.c
+++ b/drivers/scsi/sym53c8xx_2/sym_glue.c
@@ -519,8 +519,8 @@ static int sym53c8xx_queue_command(struct scsi_cmnd *cmd,
* Shorten our settle_time if needed for
* this command not to time out.
*/
- if (np->s.settle_time_valid && cmd->timeout_per_command) {
- unsigned long tlimit = jiffies + cmd->timeout_per_command;
+ if (np->s.settle_time_valid && cmd->request->timeout) {
+ unsigned long tlimit = jiffies + cmd->request->timeout;
tlimit -= SYM_CONF_TIMER_INTERVAL*2;
if (time_after(np->s.settle_time, tlimit)) {
np->s.settle_time = tlimit;
diff --git a/drivers/scsi/tmscsim.c b/drivers/scsi/tmscsim.c
index 1723d71cbf3..69ac6e590f1 100644
--- a/drivers/scsi/tmscsim.c
+++ b/drivers/scsi/tmscsim.c
@@ -2573,8 +2573,8 @@ static struct pci_driver dc390_driver = {
static int __init dc390_module_init(void)
{
if (!disable_clustering)
- printk(KERN_INFO "DC390: clustering now enabled by default. If you get problems load\n"
- "\twith \"disable_clustering=1\" and report to maintainers\n");
+		printk(KERN_INFO "DC390: clustering now enabled by default. If you get problems load\n"
+		       " with \"disable_clustering=1\" and report to maintainers\n");
if (tmscsim[0] == -1 || tmscsim[0] > 15) {
tmscsim[0] = 7;