Diffstat (limited to 'drivers/scsi/scsi_lib.c')
-rw-r--r--  drivers/scsi/scsi_lib.c   55
1 file changed, 25 insertions, 30 deletions
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index d18da21c9c5..7a91ca3d32a 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -44,7 +44,7 @@ struct scsi_host_sg_pool {
#endif
#define SP(x) { x, "sgpool-" #x }
-struct scsi_host_sg_pool scsi_sg_pools[] = {
+static struct scsi_host_sg_pool scsi_sg_pools[] = {
SP(8),
SP(16),
SP(32),
@@ -92,10 +92,12 @@ int scsi_insert_special_req(struct scsi_request *sreq, int at_head)
*/
sreq->sr_request->flags &= ~REQ_DONTPREP;
blk_insert_request(sreq->sr_device->request_queue, sreq->sr_request,
- at_head, sreq, 0);
+ at_head, sreq);
return 0;
}
+static void scsi_run_queue(struct request_queue *q);
+
/*
* Function: scsi_queue_insert()
*
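
Note: the hunk above drops the trailing 0 from the blk_insert_request() call; the old requeue/reinsert flag is gone from that interface, and requeues now go through blk_requeue_request() instead (see the later hunks of this patch). A rough user-space sketch of that design choice, with hypothetical insert_request()/requeue_request() helpers standing in for the block-layer calls:

/* Hypothetical stand-ins for blk_insert_request()/blk_requeue_request();
 * not kernel code, just an illustration of replacing a boolean requeue
 * argument with a dedicated entry point. */
#include <stdio.h>

struct toy_queue { int reqs[8]; int head, tail; };

/* Queue a brand-new request, at the head or the tail. */
static void insert_request(struct toy_queue *q, int req, int at_head)
{
	if (at_head)
		q->reqs[--q->head] = req;
	else
		q->reqs[q->tail++] = req;
}

/* Put an already-issued request back at the head of the queue. */
static void requeue_request(struct toy_queue *q, int req)
{
	q->reqs[--q->head] = req;
}

int main(void)
{
	struct toy_queue q = { .head = 4, .tail = 4 };

	insert_request(&q, 1, 0);	/* new request goes to the tail */
	requeue_request(&q, 2);		/* old request goes back to the head */
	printf("next=%d last=%d\n", q.reqs[q.head], q.reqs[q.tail - 1]);
	return 0;
}

A dedicated requeue entry point keeps the call sites self-describing and lets the two paths have different requirements, as the requeue path does here.
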
@@ -119,18 +121,14 @@ int scsi_queue_insert(struct scsi_cmnd *cmd, int reason)
{
struct Scsi_Host *host = cmd->device->host;
struct scsi_device *device = cmd->device;
+ struct request_queue *q = device->request_queue;
+ unsigned long flags;
SCSI_LOG_MLQUEUE(1,
printk("Inserting command %p into mlqueue\n", cmd));
/*
- * We are inserting the command into the ml queue. First, we
- * cancel the timer, so it doesn't time out.
- */
- scsi_delete_timer(cmd);
-
- /*
- * Next, set the appropriate busy bit for the device/host.
+ * Set the appropriate busy bit for the device/host.
*
* If the host/device isn't busy, assume that something actually
* completed, and that we should be able to queue a command now.
@@ -148,29 +146,28 @@ int scsi_queue_insert(struct scsi_cmnd *cmd, int reason)
device->device_blocked = device->max_device_blocked;
/*
- * Register the fact that we own the thing for now.
- */
- cmd->state = SCSI_STATE_MLQUEUE;
- cmd->owner = SCSI_OWNER_MIDLEVEL;
-
- /*
* Decrement the counters, since these commands are no longer
* active on the host/device.
*/
scsi_device_unbusy(device);
/*
- * Insert this command at the head of the queue for it's device.
- * It will go before all other commands that are already in the queue.
+ * Requeue this command. It will go before all other commands
+ * that are already in the queue.
*
* NOTE: there is magic here about the way the queue is plugged if
* we have no outstanding commands.
*
- * Although this *doesn't* plug the queue, it does call the request
+ * Although we *don't* plug the queue, we call the request
* function. The SCSI request function detects the blocked condition
* and plugs the queue appropriately.
- */
- blk_insert_request(device->request_queue, cmd->request, 1, cmd, 1);
+ */
+ spin_lock_irqsave(q->queue_lock, flags);
+ blk_requeue_request(q, cmd->request);
+ spin_unlock_irqrestore(q->queue_lock, flags);
+
+ scsi_run_queue(q);
+
return 0;
}
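
With the requeue flag gone, scsi_queue_insert() now takes q->queue_lock itself, puts the request back with blk_requeue_request(), drops the lock, and then calls scsi_run_queue() to get the queue moving again. Below is a rough user-space analogue of that lock / requeue / kick sequence; the toy_* names and the pthread mutex standing in for the queue lock are purely illustrative:

/* Rough analogue of "lock, put the request back at the head, unlock,
 * then kick the queue"; a pthread mutex stands in for q->queue_lock. */
#include <pthread.h>
#include <stdio.h>

#define QDEPTH 8

struct toy_queue {
	pthread_mutex_t lock;
	int reqs[QDEPTH];
	int count;
};

/* Put a request back at the front of the queue, under the lock. */
static void toy_requeue(struct toy_queue *q, int req)
{
	pthread_mutex_lock(&q->lock);
	for (int i = q->count; i > 0; i--)
		q->reqs[i] = q->reqs[i - 1];
	q->reqs[0] = req;
	q->count++;
	pthread_mutex_unlock(&q->lock);
}

/* Re-dispatch whatever is queued; the lock is not held while "issuing". */
static void toy_run_queue(struct toy_queue *q)
{
	pthread_mutex_lock(&q->lock);
	while (q->count > 0) {
		int req = q->reqs[0];

		for (int i = 1; i < q->count; i++)
			q->reqs[i - 1] = q->reqs[i];
		q->count--;
		pthread_mutex_unlock(&q->lock);

		printf("dispatching request %d\n", req);	/* issue unlocked */

		pthread_mutex_lock(&q->lock);
	}
	pthread_mutex_unlock(&q->lock);
}

int main(void)
{
	struct toy_queue q = { .lock = PTHREAD_MUTEX_INITIALIZER, .count = 0 };

	toy_requeue(&q, 42);	/* like blk_requeue_request() under queue_lock */
	toy_run_queue(&q);	/* like scsi_run_queue() after dropping the lock */
	return 0;
}

The toy dispatcher drops the lock around the actual issue step, mirroring the way scsi_run_queue() is only called after queue_lock has been released.
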
@@ -296,9 +293,7 @@ EXPORT_SYMBOL(scsi_wait_req);
*/
static int scsi_init_cmd_errh(struct scsi_cmnd *cmd)
{
- cmd->owner = SCSI_OWNER_MIDLEVEL;
cmd->serial_number = 0;
- cmd->abort_reason = 0;
memset(cmd->sense_buffer, 0, sizeof cmd->sense_buffer);
@@ -319,7 +314,6 @@ static int scsi_init_cmd_errh(struct scsi_cmnd *cmd)
memcpy(cmd->data_cmnd, cmd->cmnd, sizeof(cmd->cmnd));
cmd->buffer = cmd->request_buffer;
cmd->bufflen = cmd->request_bufflen;
- cmd->abort_reason = 0;
return 1;
}
@@ -485,8 +479,13 @@ static void scsi_run_queue(struct request_queue *q)
*/
static void scsi_requeue_command(struct request_queue *q, struct scsi_cmnd *cmd)
{
+ unsigned long flags;
+
cmd->request->flags &= ~REQ_DONTPREP;
- blk_insert_request(q, cmd->request, 1, cmd, 1);
+
+ spin_lock_irqsave(q->queue_lock, flags);
+ blk_requeue_request(q, cmd->request);
+ spin_unlock_irqrestore(q->queue_lock, flags);
scsi_run_queue(q);
}
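
scsi_requeue_command() additionally clears REQ_DONTPREP before the requeue, so the block layer's prep function runs again when the request is next dispatched. A small sketch of that prep-once flag, with an invented TOY_DONTPREP bit playing the role of REQ_DONTPREP:

/* Illustration of a "prepared, don't prep again" flag that has to be
 * cleared before a request is requeued; TOY_DONTPREP only mimics the
 * role REQ_DONTPREP plays in the real code. */
#include <stdio.h>

#define TOY_DONTPREP 0x1

struct toy_req {
	unsigned int flags;
	int prep_count;
};

/* Prepare the request unless it is already marked as prepared. */
static void toy_prep(struct toy_req *rq)
{
	if (rq->flags & TOY_DONTPREP)
		return;
	rq->prep_count++;
	rq->flags |= TOY_DONTPREP;
}

/* Requeue: clear the flag so preparation happens again on redispatch. */
static void toy_requeue(struct toy_req *rq)
{
	rq->flags &= ~TOY_DONTPREP;
}

int main(void)
{
	struct toy_req rq = { 0, 0 };

	toy_prep(&rq);		/* first dispatch: prepared once */
	toy_prep(&rq);		/* still queued: flag prevents re-prep */
	toy_requeue(&rq);	/* requeue clears the flag... */
	toy_prep(&rq);		/* ...so the next dispatch preps again */

	printf("prepared %d times\n", rq.prep_count);	/* prints 2 */
	return 0;
}
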
@@ -615,8 +614,6 @@ static struct scatterlist *scsi_alloc_sgtable(struct scsi_cmnd *cmd, int gfp_mas
sgp = scsi_sg_pools + cmd->sglist_len;
sgl = mempool_alloc(sgp->pool, gfp_mask);
- if (sgl)
- memset(sgl, 0, sgp->size);
return sgl;
}
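
scsi_alloc_sgtable() indexes scsi_sg_pools[] by cmd->sglist_len and, after this patch, no longer zeroes the table returned by mempool_alloc(), presumably because the callers initialize every scatterlist entry they use anyway. How sglist_len itself is chosen is not shown in this hunk; the sketch below simply assumes "smallest pool that fits", with pool sizes mirroring the sgpool-8/16/32/... table:

/* Sketch of picking the smallest scatterlist pool able to hold "nseg"
 * segments; the pool sizes mirror the sgpool table above, but the
 * selection logic here is only an assumption for illustration. */
#include <stdio.h>

static const int pool_size[] = { 8, 16, 32, 64, 128 };
#define NR_POOLS (int)(sizeof(pool_size) / sizeof(pool_size[0]))

/* Return the index of the smallest pool with room for nseg entries,
 * or -1 if the request is too large for any pool. */
static int pick_pool(int nseg)
{
	for (int i = 0; i < NR_POOLS; i++)
		if (nseg <= pool_size[i])
			return i;
	return -1;
}

int main(void)
{
	printf("3 segs   -> pool %d\n", pick_pool(3));		/* sgpool-8  */
	printf("20 segs  -> pool %d\n", pick_pool(20));		/* sgpool-32 */
	printf("200 segs -> pool %d\n", pick_pool(200));	/* no pool   */
	return 0;
}
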
@@ -624,7 +621,7 @@ static void scsi_free_sgtable(struct scatterlist *sgl, int index)
{
struct scsi_host_sg_pool *sgp;
- BUG_ON(index > SG_MEMPOOL_NR);
+ BUG_ON(index >= SG_MEMPOOL_NR);
sgp = scsi_sg_pools + index;
mempool_free(sgl, sgp->pool);
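
The BUG_ON() change is an off-by-one fix: with SG_MEMPOOL_NR pools the valid indices are 0 through SG_MEMPOOL_NR - 1, so the old test "index > SG_MEMPOOL_NR" let the out-of-range value SG_MEMPOOL_NR slip through. A tiny standalone illustration of why the check needs >=:

/* Why ">=" is the right bounds check: for an array of N entries the valid
 * indices are 0 .. N-1, so "index > N" fails to catch index == N. */
#include <assert.h>
#include <stdio.h>

#define NR_POOLS 5
static const char *pool_name[NR_POOLS] = {
	"sgpool-8", "sgpool-16", "sgpool-32", "sgpool-64", "sgpool-128",
};

static const char *lookup(int index)
{
	assert(index < NR_POOLS);	/* same idea as BUG_ON(index >= NR_POOLS) */
	return pool_name[index];
}

int main(void)
{
	printf("%s\n", lookup(4));	/* last valid index */
	/* lookup(5) would have passed an "index > NR_POOLS" style check but
	 * still read one element past the end of pool_name[]. */
	return 0;
}
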
@@ -941,10 +938,8 @@ static int scsi_init_io(struct scsi_cmnd *cmd)
* if sg table allocation fails, requeue request later.
*/
sgpnt = scsi_alloc_sgtable(cmd, GFP_ATOMIC);
- if (unlikely(!sgpnt)) {
- req->flags |= REQ_SPECIAL;
+ if (unlikely(!sgpnt))
return BLKPREP_DEFER;
- }
cmd->request_buffer = (char *) sgpnt;
cmd->request_bufflen = req->nr_sectors << 9;
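
The request_bufflen assignment converts the block layer's sector count into bytes: nr_sectors << 9 is nr_sectors * 512, since the block layer always counts in 512-byte sectors. For reference:

/* nr_sectors << 9 is just nr_sectors * 512: the block layer counts in
 * 512-byte sectors regardless of the device's real block size. */
#include <stdio.h>

int main(void)
{
	unsigned long nr_sectors = 8;			/* e.g. one 4 KiB page */
	unsigned long bytes = nr_sectors << 9;		/* 8 * 512 = 4096 */

	printf("%lu sectors = %lu bytes\n", nr_sectors, bytes);
	return 0;
}
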