author | Hank Janssen <hjanssen@microsoft.com> | 2009-07-13 15:33:02 -0700
committer | Greg Kroah-Hartman <gregkh@suse.de> | 2009-09-15 12:01:43 -0700
commit | f82bd0462f251ecbe13160a3f34bd48b5087666c (patch)
tree | 49d6139178d8dd3b23000cb54eef8419bb3a6c4d /drivers
parent | 3e7ee4902fe6996048f03433dd111426db3cfa92 (diff)
Staging: hv: add the Hyper-V virtual block driver
This is the virtual block driver used when running Linux on top of Hyper-V.
Signed-off-by: Hank Janssen <hjanssen@microsoft.com>
Signed-off-by: Haiyang Zhang <haiyangz@microsoft.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
Diffstat (limited to 'drivers')
-rw-r--r-- | drivers/staging/hv/BlkVsc.c | 107
-rw-r--r-- | drivers/staging/hv/blkvsc_drv.c | 1547
2 files changed, 1654 insertions, 0 deletions
diff --git a/drivers/staging/hv/BlkVsc.c b/drivers/staging/hv/BlkVsc.c
new file mode 100644
index 00000000000..58b96981cba
--- /dev/null
+++ b/drivers/staging/hv/BlkVsc.c
@@ -0,0 +1,107 @@
+/*
+ *
+ * Copyright (c) 2009, Microsoft Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
+ * Place - Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * Authors:
+ *   Hank Janssen  <hjanssen@microsoft.com>
+ *
+ */
+
+
+#include "../storvsc/StorVsc.c"
+
+static const char *gBlkDriverName = "blkvsc";
+
+//{32412632-86cb-44a2-9b5c-50d1417354f5}
+static const GUID gBlkVscDeviceType = {
+	.Data = {0x32, 0x26, 0x41, 0x32, 0xcb, 0x86, 0xa2, 0x44,
+		 0x9b, 0x5c, 0x50, 0xd1, 0x41, 0x73, 0x54, 0xf5}
+};
+
+// Static routines
+static int
+BlkVscOnDeviceAdd(
+	DEVICE_OBJECT	*Device,
+	void		*AdditionalInfo
+	);
+
+
+int
+BlkVscInitialize(
+	DRIVER_OBJECT *Driver
+	)
+{
+	STORVSC_DRIVER_OBJECT *storDriver = (STORVSC_DRIVER_OBJECT*)Driver;
+	int ret = 0;
+
+	DPRINT_ENTER(BLKVSC);
+
+	// Make sure we are at least 2 pages, since 1 page is used for control
+	ASSERT(storDriver->RingBufferSize >= (PAGE_SIZE << 1));
+
+	Driver->name = gBlkDriverName;
+	memcpy(&Driver->deviceType, &gBlkVscDeviceType, sizeof(GUID));
+
+	storDriver->RequestExtSize = sizeof(STORVSC_REQUEST_EXTENSION);
+
+	// Divide the ring buffer data size (which is 1 page less than the ring
+	// buffer size, since that page is reserved for the ring buffer indices)
+	// by the max request size (which is
+	// VMBUS_CHANNEL_PACKET_MULITPAGE_BUFFER + VSTOR_PACKET + UINT64)
+	storDriver->MaxOutstandingRequestsPerChannel =
+		((storDriver->RingBufferSize - PAGE_SIZE) /
+		 ALIGN_UP(MAX_MULTIPAGE_BUFFER_PACKET + sizeof(VSTOR_PACKET) + sizeof(UINT64),
+			  sizeof(UINT64)));
+
+	DPRINT_INFO(BLKVSC, "max io outstd %u", storDriver->MaxOutstandingRequestsPerChannel);
+
+	// Setup the dispatch table
+	storDriver->Base.OnDeviceAdd	= BlkVscOnDeviceAdd;
+	storDriver->Base.OnDeviceRemove	= StorVscOnDeviceRemove;
+	storDriver->Base.OnCleanup	= StorVscOnCleanup;
+
+	storDriver->OnIORequest		= StorVscOnIORequest;
+
+	DPRINT_EXIT(BLKVSC);
+
+	return ret;
+}
+
+int
+BlkVscOnDeviceAdd(
+	DEVICE_OBJECT	*Device,
+	void		*AdditionalInfo
+	)
+{
+	int ret = 0;
+	STORVSC_DEVICE_INFO *deviceInfo = (STORVSC_DEVICE_INFO*)AdditionalInfo;
+
+	DPRINT_ENTER(BLKVSC);
+
+	ret = StorVscOnDeviceAdd(Device, AdditionalInfo);
+
+	if (ret != 0)
+	{
+		DPRINT_EXIT(BLKVSC);
+
+		return ret;
+	}
+
+	// We need to use the device instance guid to set the path and target
+	// id. For IDE devices, the device instance id is formatted as
+	// <bus id> - <device id> - 8899 - 000000000000.
+	deviceInfo->PathId = Device->deviceInstance.Data[3] << 24 |
+			     Device->deviceInstance.Data[2] << 16 |
+			     Device->deviceInstance.Data[1] << 8  |
+			     Device->deviceInstance.Data[0];
+
+	deviceInfo->TargetId = Device->deviceInstance.Data[5] << 8 |
+			       Device->deviceInstance.Data[4];
+
+	DPRINT_EXIT(BLKVSC);
+
+	return ret;
+}
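The MaxOutstandingRequestsPerChannel computation above can be checked in isolation: the ring buffer's data area (ring size minus the one control page) is divided by the worst-case per-request packet footprint, rounded up to an 8-byte boundary. A minimal userspace sketch follows; the ring and packet sizes are made-up stand-ins, since the real values come from the StorVsc headers:

	#include <stdio.h>

	/* Round x up to the next multiple of a (a power of two),
	 * mirroring the ALIGN_UP() used above. */
	#define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((a) - 1))

	int main(void)
	{
		/* Hypothetical values; the real ones come from
		 * MAX_MULTIPAGE_BUFFER_PACKET and VSTOR_PACKET. */
		unsigned long page_size = 4096;
		unsigned long ring_size = 16 * page_size;
		unsigned long max_packet = 264;
		unsigned long vstor_packet = 64;

		unsigned long per_request =
			ALIGN_UP(max_packet + vstor_packet + sizeof(unsigned long long), 8);
		unsigned long max_outstanding = (ring_size - page_size) / per_request;

		printf("max outstanding requests: %lu\n", max_outstanding);
		return 0;
	}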
diff --git a/drivers/staging/hv/blkvsc_drv.c b/drivers/staging/hv/blkvsc_drv.c
new file mode 100644
index 00000000000..2f6798152cf
--- /dev/null
+++ b/drivers/staging/hv/blkvsc_drv.c
@@ -0,0 +1,1547 @@
+/*
+ *
+ * Copyright (c) 2009, Microsoft Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
+ * Place - Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * Authors:
+ *   Hank Janssen  <hjanssen@microsoft.com>
+ *
+ */
+
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/blkdev.h>
+#include <linux/major.h>
+#include <linux/delay.h>
+#include <linux/hdreg.h>
+
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_eh.h>
+#include <scsi/scsi_dbg.h>
+
+#include "logging.h"
+#include "vmbus.h"
+
+#include "StorVscApi.h"
+
+//
+// #defines
+//
+#define BLKVSC_MINORS	64
+
+//
+// Data types
+//
+enum blkvsc_device_type {
+	UNKNOWN_DEV_TYPE,
+	HARDDISK_TYPE,
+	DVD_TYPE,
+};
+
+// This request ties the struct request and struct blkvsc_request/STORVSC_REQUEST together.
+// A struct request may be represented by 1 or more struct blkvsc_request.
+struct blkvsc_request_group {
+	int			outstanding;
+	int			status;
+
+	struct list_head	blkvsc_req_list;	// list of blkvsc_requests
+};
+
+
+struct blkvsc_request {
+	struct list_head	req_entry;	// blkvsc_request_group.blkvsc_req_list
+
+	struct list_head	pend_entry;	// block_device_context.pending_list
+
+	struct request		*req;		// may be NULL if we generate a request internally
+	struct block_device_context	*dev;
+	struct blkvsc_request_group	*group;	// the group this request is part of; may be NULL
+
+	wait_queue_head_t	wevent;
+	int			cond;
+
+	int			write;
+	sector_t		sector_start;
+	unsigned long		sector_count;
+
+	unsigned char sense_buffer[SCSI_SENSE_BUFFERSIZE];
+	unsigned char cmd_len;
+	unsigned char cmnd[MAX_COMMAND_SIZE];
+
+	STORVSC_REQUEST request;
+	// !!!DO NOT ADD ANYTHING BELOW HERE!!! Otherwise memory can overlap,
+	// because the extension buffer falls right here and is pointed to by
+	// request.Extension.
+};
+
+// Per device structure
+struct block_device_context {
+	struct device_context	*device_ctx;	// points back to our device context
+	struct kmem_cache	*request_pool;
+	spinlock_t		lock;
+	struct gendisk		*gd;
+	enum blkvsc_device_type	device_type;
+	struct list_head	pending_list;
+
+	unsigned char	device_id[64];
+	unsigned int	device_id_len;
+	int		num_outstanding_reqs;
+	int		shutting_down;
+	int		media_not_present;
+	unsigned int	sector_size;
+	sector_t	capacity;
+	unsigned int	port;
+	unsigned char	path;
+	unsigned char	target;
+	int		users;
+};
+
+// Per driver
+struct blkvsc_driver_context {
+	// !! These must be the first 2 fields !!
+	struct driver_context	drv_ctx;
+	STORVSC_DRIVER_OBJECT	drv_obj;
+};
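The "must be the first 2 fields" comment is load-bearing: the vmbus layer only sees the embedded struct driver_context, and the driver recovers the outer structure by casting the pointer back, which is valid only while drv_ctx sits at offset 0 (blkvsc_probe does exactly this via driver_to_driver_context()). A minimal sketch of the idiom, with stand-in types:

	#include <stdio.h>

	struct driver_context { const char *name; };	/* stand-in */
	struct drv_obj        { int ring_size;    };	/* stand-in */

	struct blkvsc_driver_context {
		/* Must stay first: the cast below depends on it. */
		struct driver_context drv_ctx;
		struct drv_obj        drv_obj;
	};

	static void bus_callback(struct driver_context *ctx)
	{
		/* The bus layer only sees the inner type; because drv_ctx
		 * is at offset 0, casting back recovers the outer struct. */
		struct blkvsc_driver_context *outer =
			(struct blkvsc_driver_context *)ctx;
		printf("%s ring %d\n", outer->drv_ctx.name, outer->drv_obj.ring_size);
	}

	int main(void)
	{
		struct blkvsc_driver_context g = { { "blkvsc" }, { 20 * 4096 } };
		bus_callback(&g.drv_ctx);
		return 0;
	}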
+
+// Static decl
+static int blkvsc_probe(struct device *dev);
+static int blkvsc_remove(struct device *device);
+static void blkvsc_shutdown(struct device *device);
+
+static int blkvsc_open(struct inode *inode, struct file *filep);
+static int blkvsc_release(struct inode *inode, struct file *filep);
+static int blkvsc_media_changed(struct gendisk *gd);
+static int blkvsc_revalidate_disk(struct gendisk *gd);
+static int blkvsc_getgeo(struct block_device *bd, struct hd_geometry *hg);
+static int blkvsc_ioctl(struct inode *inode, struct file *filep, unsigned cmd, unsigned long arg);
+
+static void blkvsc_request(struct request_queue *queue);
+static void blkvsc_request_completion(STORVSC_REQUEST *request);
+static int blkvsc_do_request(struct block_device_context *blkdev, struct request *req);
+static int blkvsc_submit_request(struct blkvsc_request *blkvsc_req, void (*request_completion)(STORVSC_REQUEST*));
+static void blkvsc_init_rw(struct blkvsc_request *blkvsc_req);
+static void blkvsc_cmd_completion(STORVSC_REQUEST *request);
+static int blkvsc_do_inquiry(struct block_device_context *blkdev);
+static int blkvsc_do_read_capacity(struct block_device_context *blkdev);
+static int blkvsc_do_read_capacity16(struct block_device_context *blkdev);
+static int blkvsc_do_flush(struct block_device_context *blkdev);
+static int blkvsc_cancel_pending_reqs(struct block_device_context *blkdev);
+static int blkvsc_do_pending_reqs(struct block_device_context *blkdev);
+
+
+static int blkvsc_ringbuffer_size = BLKVSC_RING_BUFFER_SIZE;
+
+// The one and only one
+static struct blkvsc_driver_context g_blkvsc_drv;
+
+
+static struct block_device_operations block_ops =
+{
+	.owner = THIS_MODULE,
+	.open = blkvsc_open,
+	.release = blkvsc_release,
+	.media_changed = blkvsc_media_changed,
+	.revalidate_disk = blkvsc_revalidate_disk,
+	.getgeo = blkvsc_getgeo,
+	.ioctl = blkvsc_ioctl,
+};
+
+/*++
+
+Name:	blkvsc_drv_init()
+
+Desc:	BlkVsc driver initialization.
+
+--*/
+int blkvsc_drv_init(PFN_DRIVERINITIALIZE pfn_drv_init)
+{
+	int ret = 0;
+	STORVSC_DRIVER_OBJECT *storvsc_drv_obj = &g_blkvsc_drv.drv_obj;
+	struct driver_context *drv_ctx = &g_blkvsc_drv.drv_ctx;
+
+	DPRINT_ENTER(BLKVSC_DRV);
+
+	vmbus_get_interface(&storvsc_drv_obj->Base.VmbusChannelInterface);
+
+	storvsc_drv_obj->RingBufferSize = blkvsc_ringbuffer_size;
+
+	// Callback to client driver to complete the initialization
+	pfn_drv_init(&storvsc_drv_obj->Base);
+
+	drv_ctx->driver.name = storvsc_drv_obj->Base.name;
+	memcpy(&drv_ctx->class_id, &storvsc_drv_obj->Base.deviceType, sizeof(GUID));
+
+#if defined(KERNEL_2_6_5) || defined(KERNEL_2_6_9)
+	drv_ctx->driver.probe = blkvsc_probe;
+	drv_ctx->driver.remove = blkvsc_remove;
+#else
+	drv_ctx->probe = blkvsc_probe;
+	drv_ctx->remove = blkvsc_remove;
+	drv_ctx->shutdown = blkvsc_shutdown;
+#endif
+
+	// The driver belongs to vmbus
+	vmbus_child_driver_register(drv_ctx);
+
+	DPRINT_EXIT(BLKVSC_DRV);
+
+	return ret;
+}
+
+
+static int blkvsc_drv_exit_cb(struct device *dev, void *data)
+{
+	struct device **curr = (struct device **)data;
+	*curr = dev;
+	return 1;	// stop iterating
+}
+
+/*++
+
+Name:	blkvsc_drv_exit()
+
+Desc:	BlkVsc driver cleanup; unregister each device, then the driver itself.
+
+--*/
+void blkvsc_drv_exit(void)
+{
+	STORVSC_DRIVER_OBJECT *storvsc_drv_obj = &g_blkvsc_drv.drv_obj;
+	struct driver_context *drv_ctx = &g_blkvsc_drv.drv_ctx;
+
+	struct device *current_dev = NULL;
+
+#if defined(KERNEL_2_6_5) || defined(KERNEL_2_6_9)
+#define driver_for_each_device(drv, start, data, fn) \
+	struct list_head *ptr, *n; \
+	list_for_each_safe(ptr, n, &((drv)->devices)) { \
+		struct device *curr_dev; \
+		curr_dev = list_entry(ptr, struct device, driver_list); \
+		fn(curr_dev, data); \
+	}
+#endif // KERNEL_2_6_9
+
+	DPRINT_ENTER(BLKVSC_DRV);
+
+	while (1)
+	{
+		current_dev = NULL;
+
+		// Get the device
+		driver_for_each_device(&drv_ctx->driver, NULL, (void *)&current_dev, blkvsc_drv_exit_cb);
+
+		if (current_dev == NULL)
+			break;
+
+		// Initiate removal from the top-down
+		device_unregister(current_dev);
+	}
+
+	if (storvsc_drv_obj->Base.OnCleanup)
+		storvsc_drv_obj->Base.OnCleanup(&storvsc_drv_obj->Base);
+
+	vmbus_child_driver_unregister(drv_ctx);
+
+	DPRINT_EXIT(BLKVSC_DRV);
+
+	return;
+}
+
+/*++
+
+Name:	blkvsc_probe()
+
+Desc:	Add a new device for this driver
+
+--*/
+static int blkvsc_probe(struct device *device)
+{
+	int ret = 0;
+
+	struct driver_context *driver_ctx = driver_to_driver_context(device->driver);
+	struct blkvsc_driver_context *blkvsc_drv_ctx = (struct blkvsc_driver_context*)driver_ctx;
+	STORVSC_DRIVER_OBJECT *storvsc_drv_obj = &blkvsc_drv_ctx->drv_obj;
+
+	struct device_context *device_ctx = device_to_device_context(device);
+	DEVICE_OBJECT *device_obj = &device_ctx->device_obj;
+
+	struct block_device_context *blkdev = NULL;
+	STORVSC_DEVICE_INFO device_info;
+	int major = 0;
+	int devnum = 0;
+
+	static int ide0_registered = 0;
+	static int ide1_registered = 0;
+
+	DPRINT_ENTER(BLKVSC_DRV);
+
+	DPRINT_DBG(BLKVSC_DRV, "blkvsc_probe - enter");
+
+	if (!storvsc_drv_obj->Base.OnDeviceAdd)
+	{
+		DPRINT_ERR(BLKVSC_DRV, "OnDeviceAdd() not set");
+
+		ret = -1;
+		goto Cleanup;
+	}
+
+	blkdev = kzalloc(sizeof(struct block_device_context), GFP_KERNEL);
+	if (!blkdev)
+	{
+		ret = -ENOMEM;
+		goto Cleanup;
+	}
+
+	INIT_LIST_HEAD(&blkdev->pending_list);
+
+	// Initialize what we can here
+	spin_lock_init(&blkdev->lock);
+
+	ASSERT(sizeof(struct blkvsc_request_group) <= sizeof(struct blkvsc_request));
+
+#ifdef KERNEL_2_6_27
+	blkdev->request_pool = kmem_cache_create(device_ctx->device.bus_id,
+		sizeof(struct blkvsc_request) + storvsc_drv_obj->RequestExtSize, 0,
+		SLAB_HWCACHE_ALIGN, NULL);
+#else
+	blkdev->request_pool = kmem_cache_create(device_ctx->device.bus_id,
+		sizeof(struct blkvsc_request) + storvsc_drv_obj->RequestExtSize, 0,
+		SLAB_HWCACHE_ALIGN, NULL, NULL);
+#endif
+	if (!blkdev->request_pool)
+	{
+		ret = -ENOMEM;
+		goto Cleanup;
+	}
+
+
+	// Call to the vsc driver to add the device
+	ret = storvsc_drv_obj->Base.OnDeviceAdd(device_obj, &device_info);
+	if (ret != 0)
+	{
+		DPRINT_ERR(BLKVSC_DRV, "unable to add blkvsc device");
+		goto Cleanup;
+	}
+
+	blkdev->device_ctx = device_ctx;
+	blkdev->target = device_info.TargetId;	// this identifies the device (0 or 1)
+	blkdev->path = device_info.PathId;	// this identifies the IDE controller (0 or 1)
+
+	device->driver_data = blkdev;
+
+	// Calculate the major and device num
+	if (blkdev->path == 0)
+	{
+		major = IDE0_MAJOR;
+		devnum = blkdev->path + blkdev->target;		// 0 or 1
+
+		if (!ide0_registered)
+		{
+			ret = register_blkdev(major, "ide");
+			if (ret != 0)
+			{
+				DPRINT_ERR(BLKVSC_DRV, "register_blkdev() failed! ret %d", ret);
+				goto Remove;
+			}
+
+			ide0_registered = 1;
+		}
+	}
+	else if (blkdev->path == 1)
+	{
+		major = IDE1_MAJOR;
+		devnum = blkdev->path + blkdev->target + 1;	// 2 or 3
+
+		if (!ide1_registered)
+		{
+			ret = register_blkdev(major, "ide");
+			if (ret != 0)
+			{
+				DPRINT_ERR(BLKVSC_DRV, "register_blkdev() failed! ret %d", ret);
+				goto Remove;
+			}
+
+			ide1_registered = 1;
+		}
+	}
+	else
+	{
+		DPRINT_ERR(BLKVSC_DRV, "invalid pathid");
+		ret = -1;
+		goto Cleanup;
+	}
+
+	DPRINT_INFO(BLKVSC_DRV, "blkvsc registered for major %d!!", major);
+
+	blkdev->gd = alloc_disk(BLKVSC_MINORS);
+	if (!blkdev->gd)
+	{
+		DPRINT_ERR(BLKVSC_DRV, "alloc_disk() failed!");
+		ret = -1;
+		goto Cleanup;
+	}
+
+	blkdev->gd->queue = blk_init_queue(blkvsc_request, &blkdev->lock);
+
+	blk_queue_max_segment_size(blkdev->gd->queue, PAGE_SIZE);
+	blk_queue_max_phys_segments(blkdev->gd->queue, MAX_MULTIPAGE_BUFFER_COUNT);
+	blk_queue_max_hw_segments(blkdev->gd->queue, MAX_MULTIPAGE_BUFFER_COUNT);
+	blk_queue_segment_boundary(blkdev->gd->queue, PAGE_SIZE-1);
+	blk_queue_bounce_limit(blkdev->gd->queue, BLK_BOUNCE_ANY);
+	blk_queue_dma_alignment(blkdev->gd->queue, 511);
+
+	blkdev->gd->major = major;
+	if (devnum == 1 || devnum == 3)
+		blkdev->gd->first_minor = BLKVSC_MINORS;
+	else
+		blkdev->gd->first_minor = 0;
+	blkdev->gd->fops = &block_ops;
+	blkdev->gd->private_data = blkdev;
+	sprintf(blkdev->gd->disk_name, "hd%c", 'a' + devnum);
+
+	blkvsc_do_inquiry(blkdev);
+	if (blkdev->device_type == DVD_TYPE)
+	{
+		set_disk_ro(blkdev->gd, 1);
+		blkdev->gd->flags |= GENHD_FL_REMOVABLE;
+		blkvsc_do_read_capacity(blkdev);
+	}
+	else
+	{
+		blkvsc_do_read_capacity16(blkdev);
+	}
+
+	set_capacity(blkdev->gd, blkdev->capacity * (blkdev->sector_size/512));
+	blk_queue_hardsect_size(blkdev->gd->queue, blkdev->sector_size);
+	// go!
+	add_disk(blkdev->gd);
+
+	DPRINT_INFO(BLKVSC_DRV, "%s added!! capacity %llu sector_size %d",
+		    blkdev->gd->disk_name, blkdev->capacity, blkdev->sector_size);
+
+	return ret;
+
+Remove:
+	storvsc_drv_obj->Base.OnDeviceRemove(device_obj);
+
+Cleanup:
+	if (blkdev)
+	{
+		if (blkdev->request_pool)
+		{
+			kmem_cache_destroy(blkdev->request_pool);
+			blkdev->request_pool = NULL;
+		}
+		kfree(blkdev);
+		blkdev = NULL;
+	}
+
+	DPRINT_EXIT(BLKVSC_DRV);
+
+	return ret;
+}
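The probe path above maps the two emulated IDE controllers onto the legacy hd* namespace: path 0 uses IDE0_MAJOR with devnum 0/1, path 1 uses IDE1_MAJOR with devnum 2/3, and devnum then picks both the first minor and the hd[a-d] name. A small sketch of just that mapping, assuming the standard kernel majors (IDE0_MAJOR is 3, IDE1_MAJOR is 22):

	#include <stdio.h>

	#define BLKVSC_MINORS 64

	int main(void)
	{
		for (int path = 0; path <= 1; path++) {
			for (int target = 0; target <= 1; target++) {
				int major  = path ? 22 : 3;	/* IDE1_MAJOR : IDE0_MAJOR */
				int devnum = path ? path + target + 1 : path + target;
				int minor  = (devnum == 1 || devnum == 3) ? BLKVSC_MINORS : 0;
				printf("path %d target %d -> hd%c major %d first_minor %d\n",
				       path, target, 'a' + devnum, major, minor);
			}
		}
		return 0;
	}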
+
+static void blkvsc_shutdown(struct device *device)
+{
+	struct block_device_context *blkdev = (struct block_device_context*)device->driver_data;
+	unsigned long flags;
+
+	if (!blkdev)
+		return;
+
+	DPRINT_DBG(BLKVSC_DRV, "blkvsc_shutdown - users %d disk %s\n", blkdev->users, blkdev->gd->disk_name);
+
+	spin_lock_irqsave(&blkdev->lock, flags);
+
+	blkdev->shutting_down = 1;
+
+	blk_stop_queue(blkdev->gd->queue);
+
+	spin_unlock_irqrestore(&blkdev->lock, flags);
+
+	while (blkdev->num_outstanding_reqs)
+	{
+		DPRINT_INFO(STORVSC, "waiting for %d requests to complete...", blkdev->num_outstanding_reqs);
+
+		udelay(100);
+	}
+
+	blkvsc_do_flush(blkdev);
+
+	spin_lock_irqsave(&blkdev->lock, flags);
+
+	blkvsc_cancel_pending_reqs(blkdev);
+
+	spin_unlock_irqrestore(&blkdev->lock, flags);
+}
+
+static int blkvsc_do_flush(struct block_device_context *blkdev)
+{
+	struct blkvsc_request *blkvsc_req = NULL;
+
+	DPRINT_DBG(BLKVSC_DRV, "blkvsc_do_flush()\n");
+
+	if (blkdev->device_type != HARDDISK_TYPE)
+		return 0;
+
+	blkvsc_req = kmem_cache_alloc(blkdev->request_pool, GFP_KERNEL);
+	if (!blkvsc_req)
+	{
+		return -ENOMEM;
+	}
+
+	memset(blkvsc_req, 0, sizeof(struct blkvsc_request));
+	init_waitqueue_head(&blkvsc_req->wevent);
+	blkvsc_req->dev = blkdev;
+	blkvsc_req->req = NULL;
+	blkvsc_req->write = 0;
+
+	blkvsc_req->request.DataBuffer.PfnArray[0] = 0;
+	blkvsc_req->request.DataBuffer.Offset = 0;
+	blkvsc_req->request.DataBuffer.Length = 0;
+
+	blkvsc_req->cmnd[0] = SYNCHRONIZE_CACHE;
+	blkvsc_req->cmd_len = 10;
+
+	// Set this here since the completion routine may be invoked and
+	// completed before we return
+	blkvsc_req->cond = 0;
+	blkvsc_submit_request(blkvsc_req, blkvsc_cmd_completion);
+
+	wait_event_interruptible(blkvsc_req->wevent, blkvsc_req->cond);
+
+	kmem_cache_free(blkvsc_req->dev->request_pool, blkvsc_req);
+
+	return 0;
+}
+
+// Do a scsi INQUIRY cmd here to get the device type (i.e. disk or dvd)
+static int blkvsc_do_inquiry(struct block_device_context *blkdev)
+{
+	struct blkvsc_request *blkvsc_req = NULL;
+	struct page *page_buf;
+	unsigned char *buf;
+	unsigned char device_type;
+
+	DPRINT_DBG(BLKVSC_DRV, "blkvsc_do_inquiry()\n");
+
+	blkvsc_req = kmem_cache_alloc(blkdev->request_pool, GFP_KERNEL);
+	if (!blkvsc_req)
+	{
+		return -ENOMEM;
+	}
+
+	memset(blkvsc_req, 0, sizeof(struct blkvsc_request));
+	page_buf = alloc_page(GFP_KERNEL);
+	if (!page_buf)
+	{
+		kmem_cache_free(blkvsc_req->dev->request_pool, blkvsc_req);
+		return -ENOMEM;
+	}
+
+	init_waitqueue_head(&blkvsc_req->wevent);
+	blkvsc_req->dev = blkdev;
+	blkvsc_req->req = NULL;
+	blkvsc_req->write = 0;
+
+	blkvsc_req->request.DataBuffer.PfnArray[0] = page_to_pfn(page_buf);
+	blkvsc_req->request.DataBuffer.Offset = 0;
+	blkvsc_req->request.DataBuffer.Length = 64;
+
+	blkvsc_req->cmnd[0] = INQUIRY;
+	blkvsc_req->cmnd[1] = 0x1;	// EVPD: request vital product data
+	blkvsc_req->cmnd[2] = 0x83;	// VPD page 0x83 (device identification)
+	blkvsc_req->cmnd[4] = 64;
+	blkvsc_req->cmd_len = 6;
+
+	// Set this here since the completion routine may be invoked and
+	// completed before we return
+	blkvsc_req->cond = 0;
+
+	blkvsc_submit_request(blkvsc_req, blkvsc_cmd_completion);
+
+	DPRINT_DBG(BLKVSC_DRV, "waiting %p to complete - cond %d\n", blkvsc_req, blkvsc_req->cond);
+
+	wait_event_interruptible(blkvsc_req->wevent, blkvsc_req->cond);
+
+	buf = kmap(page_buf);
+
+	//PrintBytes(buf, 64);
+	device_type = buf[0] & 0x1F;	// peripheral device type
+
+	if (device_type == 0x0)
+	{
+		blkdev->device_type = HARDDISK_TYPE;
+	}
+	else if (device_type == 0x5)
+	{
+		blkdev->device_type = DVD_TYPE;
+	}
+	else
+	{
+		// TODO: this is currently an unsupported device type
+		blkdev->device_type = UNKNOWN_DEV_TYPE;
+	}
+
+	DPRINT_DBG(BLKVSC_DRV, "device type %d\n", device_type);
+
+	blkdev->device_id_len = buf[7];
+	if (blkdev->device_id_len > 64)
+		blkdev->device_id_len = 64;
+
+	memcpy(blkdev->device_id, &buf[8], blkdev->device_id_len);
+	//PrintBytes(blkdev->device_id, blkdev->device_id_len);
+
+	kunmap(page_buf);
+
+	__free_page(page_buf);
+
+	kmem_cache_free(blkvsc_req->dev->request_pool, blkvsc_req);
+
+	return 0;
+}
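The decode in blkvsc_do_inquiry() keys off two spots in the response buffer: the low five bits of byte 0 carry the SCSI peripheral device type (0x00 = direct-access disk, 0x05 = CD/DVD), and the code then takes byte 7 as an identifier length and copies the identifier starting at byte 8. A standalone sketch of the same decode against a canned, hypothetical buffer:

	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		/* Hypothetical 64-byte response: byte 0 = peripheral device
		 * type, byte 7 = id length, bytes 8.. = identifier. */
		unsigned char buf[64] = { 0x00, 0, 0, 0, 0, 0, 0, 4, 'd', 'i', 's', 'k' };
		unsigned char device_type = buf[0] & 0x1F;
		unsigned int id_len = buf[7] > 56 ? 56 : buf[7];
		char id[57];

		memcpy(id, &buf[8], id_len);
		id[id_len] = '\0';
		printf("type 0x%02x (%s) id \"%s\"\n", device_type,
		       device_type == 0x0 ? "disk" :
		       device_type == 0x5 ? "dvd" : "unknown", id);
		return 0;
	}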
DPRINT_DBG(BLKVSC_DRV, "waiting %p to complete - cond %d\n", blkvsc_req, blkvsc_req->cond); + + wait_event_interruptible(blkvsc_req->wevent, blkvsc_req->cond); + + buf = kmap(page_buf); + + //PrintBytes(buf, 64); + // be to le + device_type = buf[0] & 0x1F; + + if (device_type == 0x0) + { + blkdev->device_type = HARDDISK_TYPE; + } + else if (device_type == 0x5) + { + blkdev->device_type = DVD_TYPE; + } + else + { + // TODO: this is currently unsupported device type + blkdev->device_type = UNKNOWN_DEV_TYPE; + } + + DPRINT_DBG(BLKVSC_DRV, "device type %d \n", device_type); + + blkdev->device_id_len = buf[7]; + if (blkdev->device_id_len > 64) + blkdev->device_id_len = 64; + + memcpy(blkdev->device_id, &buf[8], blkdev->device_id_len); + //PrintBytes(blkdev->device_id, blkdev->device_id_len); + + kunmap(page_buf); + + __free_page(page_buf); + + kmem_cache_free(blkvsc_req->dev->request_pool, blkvsc_req); + + return 0; +} + +// Do a scsi READ_CAPACITY cmd here to get the size of the disk +static int blkvsc_do_read_capacity(struct block_device_context *blkdev) +{ + struct blkvsc_request *blkvsc_req=NULL; + struct page *page_buf; + unsigned char *buf; + struct scsi_sense_hdr sense_hdr; + + DPRINT_DBG(BLKVSC_DRV, "blkvsc_do_read_capacity()\n"); + + blkdev->sector_size = 0; + blkdev->capacity = 0; + blkdev->media_not_present = 0; // assume a disk is present + + blkvsc_req = kmem_cache_alloc(blkdev->request_pool, GFP_KERNEL); + if (!blkvsc_req) + { + return -ENOMEM; + } + + memset(blkvsc_req, 0, sizeof(struct blkvsc_request)); + page_buf = alloc_page(GFP_KERNEL); + if (!page_buf) + { + kmem_cache_free(blkvsc_req->dev->request_pool, blkvsc_req); + return -ENOMEM; + } + + init_waitqueue_head(&blkvsc_req->wevent); + blkvsc_req->dev = blkdev; + blkvsc_req->req = NULL; + blkvsc_req->write = 0; + + blkvsc_req->request.DataBuffer.PfnArray[0] = page_to_pfn(page_buf); + blkvsc_req->request.DataBuffer.Offset = 0; + blkvsc_req->request.DataBuffer.Length = 8; + + blkvsc_req->cmnd[0] = READ_CAPACITY; + blkvsc_req->cmd_len = 16; + + // Set this here since the completion routine may be invoked and completed before we return + blkvsc_req->cond =0; + + blkvsc_submit_request(blkvsc_req, blkvsc_cmd_completion); + + DPRINT_DBG(BLKVSC_DRV, "waiting %p to complete - cond %d\n", blkvsc_req, blkvsc_req->cond); + + wait_event_interruptible(blkvsc_req->wevent, blkvsc_req->cond); + + // check error + if (blkvsc_req->request.Status) + { + scsi_normalize_sense(blkvsc_req->sense_buffer, SCSI_SENSE_BUFFERSIZE, &sense_hdr); + + if (sense_hdr.asc == 0x3A) // Medium not present + { + blkdev->media_not_present = 1; + } + + return 0; + } + buf = kmap(page_buf); + + // be to le + blkdev->capacity = ((buf[0] << 24) | (buf[1] << 16) | (buf[2] << 8) | buf[3]) + 1; + blkdev->sector_size = (buf[4] << 24) | (buf[5] << 16) | (buf[6] << 8) | buf[7]; + + kunmap(page_buf); + + __free_page(page_buf); + + kmem_cache_free(blkvsc_req->dev->request_pool, blkvsc_req); + + return 0; +} + + +static int blkvsc_do_read_capacity16(struct block_device_context *blkdev) +{ + struct blkvsc_request *blkvsc_req=NULL; + struct page *page_buf; + unsigned char *buf; + struct scsi_sense_hdr sense_hdr; + + DPRINT_DBG(BLKVSC_DRV, "blkvsc_do_read_capacity16()\n"); + + blkdev->sector_size = 0; + blkdev->capacity = 0; + blkdev->media_not_present = 0; // assume a disk is present + + blkvsc_req = kmem_cache_alloc(blkdev->request_pool, GFP_KERNEL); + if (!blkvsc_req) + { + return -ENOMEM; + } + + memset(blkvsc_req, 0, sizeof(struct blkvsc_request)); + page_buf = 
+
+/*++
+
+Name:	blkvsc_remove()
+
+Desc:	Callback when our device is removed
+
+--*/
+static int blkvsc_remove(struct device *device)
+{
+	int ret = 0;
+
+	struct driver_context *driver_ctx = driver_to_driver_context(device->driver);
+	struct blkvsc_driver_context *blkvsc_drv_ctx = (struct blkvsc_driver_context*)driver_ctx;
+	STORVSC_DRIVER_OBJECT *storvsc_drv_obj = &blkvsc_drv_ctx->drv_obj;
+
+	struct device_context *device_ctx = device_to_device_context(device);
+	DEVICE_OBJECT *device_obj = &device_ctx->device_obj;
+	struct block_device_context *blkdev = (struct block_device_context*)device->driver_data;
+	unsigned long flags;
+
+	DPRINT_ENTER(BLKVSC_DRV);
+
+	DPRINT_DBG(BLKVSC_DRV, "blkvsc_remove()\n");
+
+	if (!storvsc_drv_obj->Base.OnDeviceRemove)
+	{
+		DPRINT_EXIT(BLKVSC_DRV);
+		return -1;
+	}
+
+	// Call to the vsc driver to let it know that the device is being removed
+	ret = storvsc_drv_obj->Base.OnDeviceRemove(device_obj);
+	if (ret != 0)
+	{
+		// TODO:
+		DPRINT_ERR(BLKVSC_DRV, "unable to remove blkvsc device (ret %d)", ret);
+	}
+
+	// Get to a known state
+	spin_lock_irqsave(&blkdev->lock, flags);
+
+	blkdev->shutting_down = 1;
+
+	blk_stop_queue(blkdev->gd->queue);
+
+	spin_unlock_irqrestore(&blkdev->lock, flags);
+
+	while (blkdev->num_outstanding_reqs)
+	{
+		DPRINT_INFO(STORVSC, "waiting for %d requests to complete...", blkdev->num_outstanding_reqs);
+
+		udelay(100);
+	}
+
+	blkvsc_do_flush(blkdev);
+
+	spin_lock_irqsave(&blkdev->lock, flags);
+
+	blkvsc_cancel_pending_reqs(blkdev);
+
+	spin_unlock_irqrestore(&blkdev->lock, flags);
+
+	blk_cleanup_queue(blkdev->gd->queue);
+
+	del_gendisk(blkdev->gd);
+
+	kmem_cache_destroy(blkdev->request_pool);
+
+	kfree(blkdev);
+
+	DPRINT_EXIT(BLKVSC_DRV);
+
+	return ret;
+}
+
+static void blkvsc_init_rw(struct blkvsc_request *blkvsc_req)
+{
+	ASSERT(blkvsc_req->req);
+	ASSERT(blkvsc_req->sector_count <= (MAX_MULTIPAGE_BUFFER_COUNT*8));
+
+	blkvsc_req->cmd_len = 16;
+
+	if (blkvsc_req->sector_start > 0xffffffff)
+	{
+		if (rq_data_dir(blkvsc_req->req))
+		{
+			blkvsc_req->write = 1;
+			blkvsc_req->cmnd[0] = WRITE_16;
+		}
+		else
+		{
+			blkvsc_req->write = 0;
+			blkvsc_req->cmnd[0] = READ_16;
+		}
+
+		blkvsc_req->cmnd[1] |= blk_fua_rq(blkvsc_req->req) ? 0x8 : 0;
+
+		*(unsigned long long*)&blkvsc_req->cmnd[2] = cpu_to_be64(blkvsc_req->sector_start);
+		*(unsigned int*)&blkvsc_req->cmnd[10] = cpu_to_be32(blkvsc_req->sector_count);
+	}
+	else if ((blkvsc_req->sector_count > 0xff) || (blkvsc_req->sector_start > 0x1fffff))
+	{
+		if (rq_data_dir(blkvsc_req->req))
+		{
+			blkvsc_req->write = 1;
+			blkvsc_req->cmnd[0] = WRITE_10;
+		}
+		else
+		{
+			blkvsc_req->write = 0;
+			blkvsc_req->cmnd[0] = READ_10;
+		}
+
+		blkvsc_req->cmnd[1] |= blk_fua_rq(blkvsc_req->req) ? 0x8 : 0;
+
+		*(unsigned int *)&blkvsc_req->cmnd[2] = cpu_to_be32(blkvsc_req->sector_start);
+		*(unsigned short*)&blkvsc_req->cmnd[7] = cpu_to_be16(blkvsc_req->sector_count);
+	}
+	else
+	{
+		if (rq_data_dir(blkvsc_req->req))
+		{
+			blkvsc_req->write = 1;
+			blkvsc_req->cmnd[0] = WRITE_6;
+		}
+		else
+		{
+			blkvsc_req->write = 0;
+			blkvsc_req->cmnd[0] = READ_6;
+		}
+
+		*(unsigned int *)&blkvsc_req->cmnd[1] = cpu_to_be32(blkvsc_req->sector_start) >> 8;
+		blkvsc_req->cmnd[1] &= 0x1f;
+		blkvsc_req->cmnd[4] = (unsigned char)blkvsc_req->sector_count;
+	}
+}
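blkvsc_init_rw() picks the smallest CDB that can address the transfer: READ/WRITE(16) once the start sector no longer fits in 32 bits, READ/WRITE(10) when the count exceeds 8 bits or the LBA exceeds 21 bits, otherwise READ/WRITE(6). A sketch of just that selection logic:

	#include <stdio.h>

	static const char *pick_cdb(unsigned long long lba, unsigned long count)
	{
		/* Thresholds mirror blkvsc_init_rw() above. */
		if (lba > 0xffffffffULL)
			return "READ/WRITE(16)";	/* 64-bit LBA, 32-bit count */
		if (count > 0xff || lba > 0x1fffff)
			return "READ/WRITE(10)";	/* 32-bit LBA, 16-bit count */
		return "READ/WRITE(6)";			/* 21-bit LBA,  8-bit count */
	}

	int main(void)
	{
		printf("%s\n", pick_cdb(100, 8));		/* (6)  */
		printf("%s\n", pick_cdb(0x200000ULL, 8));	/* (10) */
		printf("%s\n", pick_cdb(0x100000000ULL, 1024));	/* (16) */
		return 0;
	}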
+
+static int blkvsc_submit_request(struct blkvsc_request *blkvsc_req, void (*request_completion)(STORVSC_REQUEST*))
+{
+	struct block_device_context *blkdev = blkvsc_req->dev;
+	struct device_context *device_ctx = blkdev->device_ctx;
+	struct driver_context *driver_ctx = driver_to_driver_context(device_ctx->device.driver);
+	struct blkvsc_driver_context *blkvsc_drv_ctx = (struct blkvsc_driver_context*)driver_ctx;
+	STORVSC_DRIVER_OBJECT *storvsc_drv_obj = &blkvsc_drv_ctx->drv_obj;
+	int ret = 0;
+
+	STORVSC_REQUEST *storvsc_req;
+
+	DPRINT_DBG(BLKVSC_DRV, "blkvsc_submit_request() - req %p type %s start_sector %llu count %d offset %d len %d\n",
+		   blkvsc_req,
+		   (blkvsc_req->write) ? "WRITE" : "READ",
+		   blkvsc_req->sector_start,
+		   blkvsc_req->sector_count,
+		   blkvsc_req->request.DataBuffer.Offset,
+		   blkvsc_req->request.DataBuffer.Length);
+
+	/*for (i=0; i < (blkvsc_req->request.DataBuffer.Length >> 12); i++)
+	{
+		DPRINT_DBG(BLKVSC_DRV, "blkvsc_submit_request() - req %p pfn[%d] %llx\n",
+			   blkvsc_req, i,
+			   blkvsc_req->request.DataBuffer.PfnArray[i]);
+	}*/
+
+	storvsc_req = &blkvsc_req->request;
+	storvsc_req->Extension = (void*)((unsigned long)blkvsc_req + sizeof(struct blkvsc_request));
+
+	storvsc_req->Type = blkvsc_req->write ? WRITE_TYPE : READ_TYPE;
+
+	storvsc_req->OnIOCompletion = request_completion;
+	storvsc_req->Context = blkvsc_req;
+
+	storvsc_req->Host = blkdev->port;
+	storvsc_req->Bus = blkdev->path;
+	storvsc_req->TargetId = blkdev->target;
+	storvsc_req->LunId = 0;		// this is not really used at all
+
+	storvsc_req->CdbLen = blkvsc_req->cmd_len;
+	storvsc_req->Cdb = blkvsc_req->cmnd;
+
+	storvsc_req->SenseBuffer = blkvsc_req->sense_buffer;
+	storvsc_req->SenseBufferSize = SCSI_SENSE_BUFFERSIZE;
+
+	ret = storvsc_drv_obj->OnIORequest(&blkdev->device_ctx->device_obj, &blkvsc_req->request);
+	if (ret == 0)
+	{
+		blkdev->num_outstanding_reqs++;
+	}
+
+	return ret;
+}
+
+//
+// We break the request into 1 or more blkvsc_requests and submit them.
+// If we can't submit them all, we put them on the pending_list. The
+// blkvsc_request() function will work on the pending_list.
+//
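The Extension pointer computed in blkvsc_submit_request() explains both the kmem cache size in blkvsc_probe() (sizeof(struct blkvsc_request) plus the RequestExtSize the storvsc layer asked for) and the "DO NOT ADD ANYTHING BELOW HERE" warning on the structure: the extension lives immediately past the struct inside the same allocation. A sketch of this trailing-extension idiom, with malloc standing in for the kmem cache and made-up types:

	#include <stdio.h>
	#include <stdlib.h>

	struct request_hdr {		/* stand-in for struct blkvsc_request */
		int   write;
		void *extension;	/* points into the same allocation */
	};

	int main(void)
	{
		size_t ext_size = 32;	/* stand-in for RequestExtSize */
		struct request_hdr *req = malloc(sizeof(*req) + ext_size);

		if (!req)
			return 1;
		/* The extension lives immediately after the header, exactly
		 * as blkvsc_submit_request() computes request.Extension. */
		req->extension = (char *)req + sizeof(*req);
		printf("hdr %p ext %p (+%zu bytes)\n", (void *)req, req->extension,
		       (size_t)((char *)req->extension - (char *)req));
		free(req);
		return 0;
	}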
+static int blkvsc_do_request(struct block_device_context *blkdev, struct request *req)
+{
+	struct bio *bio = NULL;
+	struct bio_vec *bvec = NULL;
+	struct bio_vec *prev_bvec = NULL;
+
+	struct blkvsc_request *blkvsc_req = NULL;
+	struct blkvsc_request *tmp;
+	int databuf_idx = 0;
+	int seg_idx = 0;
+
+	sector_t start_sector;
+	unsigned long num_sectors = 0;
+	int ret = 0;
+	int pending = 0;
+	struct blkvsc_request_group *group = NULL;
+
+	DPRINT_DBG(BLKVSC_DRV, "blkdev %p req %p sect %llu\n", blkdev, req, req->sector);
+
+	// Create a group to tie req to list of blkvsc_reqs
+	group = (struct blkvsc_request_group*)kmem_cache_alloc(blkdev->request_pool, GFP_ATOMIC);
+	if (!group)
+	{
+		return -ENOMEM;
+	}
+
+	INIT_LIST_HEAD(&group->blkvsc_req_list);
+	group->outstanding = group->status = 0;
+
+	start_sector = req->sector;
+
+	// foreach bio in the request
+	if (req->bio)
+	for (bio = req->bio; bio; bio = bio->bi_next)
+	{
+		// Map this bio into an existing or new storvsc request
+		bio_for_each_segment(bvec, bio, seg_idx)
+		{
+			DPRINT_DBG(BLKVSC_DRV, "bio_for_each_segment() - req %p bio %p bvec %p seg_idx %d databuf_idx %d\n",
+				   req, bio, bvec, seg_idx, databuf_idx);
+
+			// Get a new storvsc request
+			if ((!blkvsc_req) ||					// 1st time
+			    (databuf_idx >= MAX_MULTIPAGE_BUFFER_COUNT) ||	// pfn array is full
+			    (bvec->bv_offset != 0) ||				// hole at the beginning of the page
+			    (prev_bvec && (prev_bvec->bv_len != PAGE_SIZE)))	// hole at the end of the page
+			{
+				// submit the prev one
+				if (blkvsc_req)
+				{
+					blkvsc_req->sector_start = start_sector;
+					sector_div(blkvsc_req->sector_start, (blkdev->sector_size >> 9));
+
+					blkvsc_req->sector_count = num_sectors / (blkdev->sector_size >> 9);
+
+					blkvsc_init_rw(blkvsc_req);
+				}
+
+				// Create new blkvsc_req to represent the current bvec
+				blkvsc_req = kmem_cache_alloc(blkdev->request_pool, GFP_ATOMIC);
+				if (!blkvsc_req)
+				{
+					// free up everything
+					list_for_each_entry_safe(blkvsc_req, tmp, &group->blkvsc_req_list, req_entry)
+					{
+						list_del(&blkvsc_req->req_entry);
+						kmem_cache_free(blkdev->request_pool, blkvsc_req);
+					}
+
+					kmem_cache_free(blkdev->request_pool, group);
+					return -ENOMEM;
+				}
+
+				memset(blkvsc_req, 0, sizeof(struct blkvsc_request));
+
+				blkvsc_req->dev = blkdev;
+				blkvsc_req->req = req;
+				blkvsc_req->request.DataBuffer.Offset = bvec->bv_offset;
+				blkvsc_req->request.DataBuffer.Length = 0;
+
+				// Add to the group
+				blkvsc_req->group = group;
+				blkvsc_req->group->outstanding++;
+				list_add_tail(&blkvsc_req->req_entry, &blkvsc_req->group->blkvsc_req_list);
+
+				start_sector += num_sectors;
+				num_sectors = 0;
+				databuf_idx = 0;
+			}
+
+			// Add the curr bvec/segment to the curr blkvsc_req
+			blkvsc_req->request.DataBuffer.PfnArray[databuf_idx] = page_to_pfn(bvec->bv_page);
+			blkvsc_req->request.DataBuffer.Length += bvec->bv_len;
+
+			prev_bvec = bvec;
+
+			databuf_idx++;
+			num_sectors += bvec->bv_len >> 9;
+
+		} // bio_for_each_segment
+
+	} // rq_for_each_bio
+
+	// Handle the last one
+	if (blkvsc_req)
+	{
+		DPRINT_DBG(BLKVSC_DRV, "blkdev %p req %p group %p count %d\n",
+			   blkdev, req, blkvsc_req->group, blkvsc_req->group->outstanding);
+
+		blkvsc_req->sector_start = start_sector;
+		sector_div(blkvsc_req->sector_start, (blkdev->sector_size >> 9));
+
+		blkvsc_req->sector_count = num_sectors / (blkdev->sector_size >> 9);
+
+		blkvsc_init_rw(blkvsc_req);
+	}
+
+	list_for_each_entry(blkvsc_req, &group->blkvsc_req_list, req_entry)
+	{
+		if (pending)
+		{
+			DPRINT_DBG(BLKVSC_DRV, "adding blkvsc_req to pending_list - blkvsc_req %p start_sect %llu sect_count %d (%llu %d)\n",
+				   blkvsc_req, blkvsc_req->sector_start, blkvsc_req->sector_count, start_sector, num_sectors);
+
+			list_add_tail(&blkvsc_req->pend_entry, &blkdev->pending_list);
+		}
+		else
+		{
+			ret = blkvsc_submit_request(blkvsc_req, blkvsc_request_completion);
+			if (ret == -1)
+			{
+				pending = 1;
+				list_add_tail(&blkvsc_req->pend_entry, &blkdev->pending_list);
+			}
+
+			DPRINT_DBG(BLKVSC_DRV, "submitted blkvsc_req %p start_sect %llu sect_count %d (%llu %d) ret %d\n",
+				   blkvsc_req, blkvsc_req->sector_start, blkvsc_req->sector_count, start_sector, num_sectors, ret);
+		}
+	}
+
+	return pending;
+}
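The splitting rule in blkvsc_do_request() is that a bvec may be appended to the current storvsc request only while the data stays physically contiguous page by page: a new request is started on the first segment, when the PFN array is full, when a segment does not start at offset 0, or when the previous segment did not fill its page. A sketch of that predicate in isolation:

	#include <stdio.h>

	#define PAGE_SIZE 4096
	#define MAX_MULTIPAGE_BUFFER_COUNT 32	/* value assumed for illustration */

	struct seg { unsigned int offset, len; };

	/* Mirrors the "get a new storvsc request" test in blkvsc_do_request(). */
	static int needs_new_request(int have_req, int databuf_idx,
				     const struct seg *cur, const struct seg *prev)
	{
		return !have_req ||
		       databuf_idx >= MAX_MULTIPAGE_BUFFER_COUNT ||
		       cur->offset != 0 ||			/* hole at start of page */
		       (prev && prev->len != PAGE_SIZE);	/* hole at end of page   */
	}

	int main(void)
	{
		struct seg full = { 0, PAGE_SIZE }, partial = { 0, 1024 };

		printf("%d\n", needs_new_request(1, 1, &full, &full));		/* 0: merge */
		printf("%d\n", needs_new_request(1, 1, &full, &partial));	/* 1: split */
		return 0;
	}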
+
+static void blkvsc_cmd_completion(STORVSC_REQUEST *request)
+{
+	struct blkvsc_request *blkvsc_req = (struct blkvsc_request*)request->Context;
+	struct block_device_context *blkdev = (struct block_device_context*)blkvsc_req->dev;
+
+	struct scsi_sense_hdr sense_hdr;
+
+	DPRINT_DBG(BLKVSC_DRV, "blkvsc_cmd_completion() - req %p\n", blkvsc_req);
+
+	blkdev->num_outstanding_reqs--;
+
+	if (blkvsc_req->request.Status)
+	{
+		if (scsi_normalize_sense(blkvsc_req->sense_buffer, SCSI_SENSE_BUFFERSIZE, &sense_hdr))
+		{
+			scsi_print_sense_hdr("blkvsc", &sense_hdr);
+		}
+	}
+
+	blkvsc_req->cond = 1;
+	wake_up_interruptible(&blkvsc_req->wevent);
+}
+
+static void blkvsc_request_completion(STORVSC_REQUEST *request)
+{
+	struct blkvsc_request *blkvsc_req = (struct blkvsc_request*)request->Context;
+	struct block_device_context *blkdev = (struct block_device_context*)blkvsc_req->dev;
+	unsigned long flags;
+	struct blkvsc_request *comp_req, *tmp;
+
+	ASSERT(blkvsc_req->group);
+
+	DPRINT_DBG(BLKVSC_DRV, "blkdev %p blkvsc_req %p group %p type %s sect_start %llu sect_count %d len %d group outstd %d total outstd %d\n",
+		   blkdev,
+		   blkvsc_req,
+		   blkvsc_req->group,
+		   (blkvsc_req->write) ? "WRITE" : "READ",
+		   blkvsc_req->sector_start,
+		   blkvsc_req->sector_count,
+		   blkvsc_req->request.DataBuffer.Length,
+		   blkvsc_req->group->outstanding,
+		   blkdev->num_outstanding_reqs);
+
+	spin_lock_irqsave(&blkdev->lock, flags);
+
+	blkdev->num_outstanding_reqs--;
+	blkvsc_req->group->outstanding--;
+
+	// Only start processing when all the blkvsc_reqs are completed. This
+	// guarantees no out-of-order blkvsc_req completion when calling
+	// end_that_request_first()
+	if (blkvsc_req->group->outstanding == 0)
+	{
+		list_for_each_entry_safe(comp_req, tmp, &blkvsc_req->group->blkvsc_req_list, req_entry)
+		{
+			DPRINT_DBG(BLKVSC_DRV, "completing blkvsc_req %p sect_start %llu sect_count %d\n",
+				   comp_req,
+				   comp_req->sector_start,
+				   comp_req->sector_count);
+
+			list_del(&comp_req->req_entry);
+
+#ifdef KERNEL_2_6_27
+			if (!__blk_end_request(
+				comp_req->req,
+				(!comp_req->request.Status ? 0 : -EIO),
+				comp_req->sector_count * blkdev->sector_size))
+			{
+				// All the sectors have been transferred, i.e. the request is done
+				DPRINT_DBG(BLKVSC_DRV, "req %p COMPLETED\n", comp_req->req);
+				kmem_cache_free(blkdev->request_pool, comp_req->group);
+			}
+#else
+			if (!end_that_request_first(comp_req->req, !comp_req->request.Status,
+						    (comp_req->sector_count * (blkdev->sector_size >> 9))))
+			{
+				// All the sectors have been transferred, i.e. the request is done
+				DPRINT_DBG(BLKVSC_DRV, "req %p COMPLETED\n", comp_req->req);
+
+				end_that_request_last(comp_req->req, !comp_req->request.Status);
+
+				kmem_cache_free(blkdev->request_pool, comp_req->group);
+			}
+#endif
+
+			kmem_cache_free(blkdev->request_pool, comp_req);
+		}
+
+		if (!blkdev->shutting_down)
+		{
+			blkvsc_do_pending_reqs(blkdev);
+			blk_start_queue(blkdev->gd->queue);
+			blkvsc_request(blkdev->gd->queue);
+		}
+	}
+
+	spin_unlock_irqrestore(&blkdev->lock, flags);
+}
+
+static int blkvsc_cancel_pending_reqs(struct block_device_context *blkdev)
+{
+	struct blkvsc_request *pend_req, *tmp;
+	struct blkvsc_request *comp_req, *tmp2;
+
+	int ret = 0;
+
+	DPRINT_DBG(BLKVSC_DRV, "blkvsc_cancel_pending_reqs()");
+
+	// Flush the pending list first
+	list_for_each_entry_safe(pend_req, tmp, &blkdev->pending_list, pend_entry)
+	{
+		// The pend_req could be part of a partially completed request.
+		// If so, complete those reqs first until we hit the pend_req
+		list_for_each_entry_safe(comp_req, tmp2, &pend_req->group->blkvsc_req_list, req_entry)
+		{
+			DPRINT_DBG(BLKVSC_DRV, "completing blkvsc_req %p sect_start %llu sect_count %d\n",
+				   comp_req,
+				   comp_req->sector_start,
+				   comp_req->sector_count);
+
+			if (comp_req == pend_req)
+				break;
+
+			list_del(&comp_req->req_entry);
+
+			if (comp_req->req)
+			{
+#ifdef KERNEL_2_6_27
+				ret = __blk_end_request(
+					comp_req->req,
+					(!comp_req->request.Status ? 0 : -EIO),
+					comp_req->sector_count * blkdev->sector_size);
+#else
+				ret = end_that_request_first(comp_req->req, !comp_req->request.Status,
+							     (comp_req->sector_count * (blkdev->sector_size >> 9)));
+#endif
+				ASSERT(ret != 0);
+			}
+
+			kmem_cache_free(blkdev->request_pool, comp_req);
+		}
+
+		DPRINT_DBG(BLKVSC_DRV, "cancelling pending request - %p\n", pend_req);
+
+		list_del(&pend_req->pend_entry);
+
+		list_del(&pend_req->req_entry);
+
+		if (pend_req->req)
+		{
+#ifdef KERNEL_2_6_27
+			if (!__blk_end_request(
+				pend_req->req,
+				-EIO,
+				pend_req->sector_count * blkdev->sector_size))
+			{
+				// All the sectors have been transferred, i.e. the request is done
+				DPRINT_DBG(BLKVSC_DRV, "blkvsc_cancel_pending_reqs() - req %p COMPLETED\n", pend_req->req);
+				kmem_cache_free(blkdev->request_pool, pend_req->group);
+			}
+#else
+			if (!end_that_request_first(pend_req->req, 0,
+						    (pend_req->sector_count * (blkdev->sector_size >> 9))))
+			{
+				// All the sectors have been transferred, i.e. the request is done
+				DPRINT_DBG(BLKVSC_DRV, "blkvsc_cancel_pending_reqs() - req %p COMPLETED\n", pend_req->req);
+
+				end_that_request_last(pend_req->req, 0);
+
+				kmem_cache_free(blkdev->request_pool, pend_req->group);
+			}
+#endif
+		}
+
+		kmem_cache_free(blkdev->request_pool, pend_req);
+	}
+
+	return ret;
+}
+
+static int blkvsc_do_pending_reqs(struct block_device_context *blkdev)
+{
+	struct blkvsc_request *pend_req, *tmp;
+	int ret = 0;
+
+	// Flush the pending list first
+	list_for_each_entry_safe(pend_req, tmp, &blkdev->pending_list, pend_entry)
+	{
+		DPRINT_DBG(BLKVSC_DRV, "working off pending_list - %p\n", pend_req);
+
+		ret = blkvsc_submit_request(pend_req, blkvsc_request_completion);
+		if (ret != 0)
+		{
+			break;
+		}
+		else
+		{
+			list_del(&pend_req->pend_entry);
+		}
+	}
+
+	return ret;
+}
+
+static void blkvsc_request(struct request_queue *queue)
+{
+	struct block_device_context *blkdev = NULL;
+	struct request *req;
+	int ret = 0;
+
+	DPRINT_DBG(BLKVSC_DRV, "- enter\n");
+	while ((req = elv_next_request(queue)) != NULL)
+	{
+		DPRINT_DBG(BLKVSC_DRV, "- req %p\n", req);
+
+		blkdev = req->rq_disk->private_data;
+		if (blkdev->shutting_down || !blk_fs_request(req) || blkdev->media_not_present) {
+			end_request(req, 0);
+			continue;
+		}
+
+		ret = blkvsc_do_pending_reqs(blkdev);
+
+		if (ret != 0)
+		{
+			DPRINT_DBG(BLKVSC_DRV, "- stop queue - pending_list not empty\n");
+			blk_stop_queue(queue);
+			break;
+		}
+
+		blkdev_dequeue_request(req);
+
+		ret = blkvsc_do_request(blkdev, req);
+		if (ret > 0)
+		{
+			DPRINT_DBG(BLKVSC_DRV, "- stop queue - no room\n");
+			blk_stop_queue(queue);
+			break;
+		}
+		else if (ret < 0)
+		{
+			DPRINT_DBG(BLKVSC_DRV, "- stop queue - no mem\n");
+			blk_requeue_request(queue, req);
+			blk_stop_queue(queue);
+			break;
+		}
+	}
+}
+
+static int blkvsc_open(struct inode *inode, struct file *filep)
+{
+	struct block_device_context *blkdev = inode->i_bdev->bd_disk->private_data;
+
+	DPRINT_DBG(BLKVSC_DRV, "- users %d disk %s\n", blkdev->users, blkdev->gd->disk_name);
+
+	spin_lock(&blkdev->lock);
+
+	if (!blkdev->users && blkdev->device_type == DVD_TYPE)
+	{
+		spin_unlock(&blkdev->lock);
+		check_disk_change(inode->i_bdev);
+		spin_lock(&blkdev->lock);
+	}
+
+	blkdev->users++;
+
+	spin_unlock(&blkdev->lock);
+	return 0;
+}
+
+static int blkvsc_release(struct inode *inode, struct file *filep)
+{
+	struct block_device_context *blkdev = inode->i_bdev->bd_disk->private_data;
+
+	DPRINT_DBG(BLKVSC_DRV, "- users %d disk %s\n", blkdev->users, blkdev->gd->disk_name);
+
+	spin_lock(&blkdev->lock);
+	if (blkdev->users == 1)
+	{
+		spin_unlock(&blkdev->lock);
+		blkvsc_do_flush(blkdev);
+		spin_lock(&blkdev->lock);
+	}
+
+	blkdev->users--;
+
+	spin_unlock(&blkdev->lock);
+	return 0;
+}
+
+static int blkvsc_media_changed(struct gendisk *gd)
+{
+	DPRINT_DBG(BLKVSC_DRV, "- enter\n");
+
+	return 1;
+}
+
+static int blkvsc_revalidate_disk(struct gendisk *gd)
+{
+	struct block_device_context *blkdev = gd->private_data;
+
+	DPRINT_DBG(BLKVSC_DRV, "- enter\n");
+
+	if (blkdev->device_type == DVD_TYPE)
+	{
+		blkvsc_do_read_capacity(blkdev);
+		set_capacity(blkdev->gd, blkdev->capacity * (blkdev->sector_size/512));
+		blk_queue_hardsect_size(gd->queue, blkdev->sector_size);
+	}
+	return 0;
+}
+
+static int blkvsc_getgeo(struct block_device *bd, struct hd_geometry *hg)
+{
+	sector_t total_sectors = get_capacity(bd->bd_disk);
+	sector_t cylinder_times_heads = 0;
+	sector_t temp = 0;
+
+	int sectors_per_track = 0;
+	int heads = 0;
+	int cylinders = 0;
+	int rem = 0;
+
+	if (total_sectors > (65535 * 16 * 255)) {
+		total_sectors = (65535 * 16 * 255);
+	}
+
+	if (total_sectors >= (65535 * 16 * 63)) {
+		sectors_per_track = 255;
+		heads = 16;
+
+		cylinder_times_heads = total_sectors;
+		// sector_div() stores the quotient in its first argument
+		rem = sector_div(cylinder_times_heads, sectors_per_track);
+	}
+	else
+	{
+		sectors_per_track = 17;
+
+		cylinder_times_heads = total_sectors;
+		rem = sector_div(cylinder_times_heads, sectors_per_track);
+
+		temp = cylinder_times_heads + 1023;
+		rem = sector_div(temp, 1024);
+
+		heads = temp;
+
+		if (heads < 4) {
+			heads = 4;
+		}
+
+		if (cylinder_times_heads >= (heads * 1024) || (heads > 16)) {
+			sectors_per_track = 31;
+			heads = 16;
+
+			cylinder_times_heads = total_sectors;
+			rem = sector_div(cylinder_times_heads, sectors_per_track);
+		}
+
+		if (cylinder_times_heads >= (heads * 1024)) {
+			sectors_per_track = 63;
+			heads = 16;
+
+			cylinder_times_heads = total_sectors;
+			rem = sector_div(cylinder_times_heads, sectors_per_track);
+		}
+	}
+
+	temp = cylinder_times_heads;
+	rem = sector_div(temp, heads);
+	cylinders = temp;
+
+	hg->heads = heads;
+	hg->sectors = sectors_per_track;
+	hg->cylinders = cylinders;
+
+	DPRINT_INFO(BLKVSC_DRV, "CHS (%d, %d, %d)", cylinders, heads, sectors_per_track);
+
+	return 0;
+}
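blkvsc_getgeo() fabricates a CHS geometry much like older IDE drivers did: clamp to 65535x16x255, use 255 sectors/track and 16 heads for large disks, and otherwise start from 17 sectors/track, derive a head count, and fall back to 31 and then 63 sectors/track until the cylinder count fits. A userspace rendering of the same algorithm, with plain 64-bit division standing in for sector_div():

	#include <stdio.h>

	static void fake_chs(unsigned long long total_sectors)
	{
		unsigned long long cth, temp;
		int spt, heads, cylinders;

		if (total_sectors > 65535ULL * 16 * 255)
			total_sectors = 65535ULL * 16 * 255;

		if (total_sectors >= 65535ULL * 16 * 63) {
			spt = 255;
			heads = 16;
			cth = total_sectors / spt;
		} else {
			spt = 17;
			cth = total_sectors / spt;
			temp = (cth + 1023) / 1024;
			heads = temp < 4 ? 4 : (int)temp;

			if (cth >= (unsigned long long)heads * 1024 || heads > 16) {
				spt = 31;
				heads = 16;
				cth = total_sectors / spt;
			}
			if (cth >= (unsigned long long)heads * 1024) {
				spt = 63;
				heads = 16;
				cth = total_sectors / spt;
			}
		}
		cylinders = (int)(cth / heads);
		printf("%llu sectors -> CHS (%d, %d, %d)\n",
		       total_sectors, cylinders, heads, spt);
	}

	int main(void)
	{
		fake_chs(2048ULL * 1024 * 2);	/* a 2 GiB disk in 512-byte sectors */
		fake_chs(65535ULL * 16 * 255);
		return 0;
	}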
+
+static int blkvsc_ioctl(struct inode *inode, struct file *filep, unsigned cmd, unsigned long arg)
+{
+	struct block_device *bd = inode->i_bdev;
+	struct block_device_context *blkdev = bd->bd_disk->private_data;
+	int ret = 0;
+
+	switch (cmd)
+	{
+	// TODO: I think there is a certain format for HDIO_GET_IDENTITY rather
+	// than just a GUID. Commented it out for now.
+	/*case HDIO_GET_IDENTITY:
+		DPRINT_INFO(BLKVSC_DRV, "HDIO_GET_IDENTITY\n");
+
+		if (copy_to_user((void __user *)arg, blkdev->device_id, blkdev->device_id_len))
+		{
+			ret = -EFAULT;
+		}
+
+		break;*/
+	default:
+		ret = -EINVAL;
+		break;
+	}
+
+	return ret;
+}
+
+
+MODULE_LICENSE("GPL");
+
+static int __init blkvsc_init(void)
+{
+	int ret;
+
+	ASSERT(sizeof(sector_t) == 8);	// Make sure CONFIG_LBD is set
+
+	DPRINT_ENTER(BLKVSC_DRV);
+
+	DPRINT_INFO(BLKVSC_DRV, "Blkvsc initializing....");
+
+	ret = blkvsc_drv_init(BlkVscInitialize);
+
+	DPRINT_EXIT(BLKVSC_DRV);
+
+	return ret;
+}
+
+static void __exit blkvsc_exit(void)
+{
+	DPRINT_ENTER(BLKVSC_DRV);
+
+	blkvsc_drv_exit();
+
+	DPRINT_EXIT(BLKVSC_DRV);
+}
+
+module_param(blkvsc_ringbuffer_size, int, S_IRUGO);
+
+module_init(blkvsc_init);
+module_exit(blkvsc_exit);
+
+// eof
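One usage note on the parameter declared at the end: blkvsc_ringbuffer_size is registered with S_IRUGO only, so it is readable in sysfs but can only be set at load time, e.g. as blkvsc_ringbuffer_size=<bytes> on the module command line (the exact module name depends on how the staging Makefile builds this file). BlkVscInitialize() then asserts that the chosen size is at least two pages, since one page is reserved for the ring buffer indices.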