Diffstat (limited to 'drivers/block')
-rw-r--r--  drivers/block/Kconfig            75
-rw-r--r--  drivers/block/Makefile            7
-rw-r--r--  drivers/block/acsi.c           1825
-rw-r--r--  drivers/block/acsi_slm.c       1032
-rw-r--r--  drivers/block/amiflop.c           2
-rw-r--r--  drivers/block/aoe/aoeblk.c        4
-rw-r--r--  drivers/block/cciss.c             4
-rw-r--r--  drivers/block/cciss_scsi.c       75
-rw-r--r--  drivers/block/lguest_blk.c      275
-rw-r--r--  drivers/block/loop.c             75
-rw-r--r--  drivers/block/nbd.c              24
-rw-r--r--  drivers/block/pktcdvd.c           7
-rw-r--r--  drivers/block/ps3disk.c         630
-rw-r--r--  drivers/block/sunvdc.c          887
-rw-r--r--  drivers/block/sx8.c               3
-rw-r--r--  drivers/block/ub.c               14
-rw-r--r--  drivers/block/umem.c             58
-rw-r--r--  drivers/block/viodasd.c           4
-rw-r--r--  drivers/block/xen-blkfront.c    988
-rw-r--r--  drivers/block/xsysace.c        1164
-rw-r--r--  drivers/block/z2ram.c             7
21 files changed, 4074 insertions, 3086 deletions
diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig
index b4c8319138b..a4a31199240 100644
--- a/drivers/block/Kconfig
+++ b/drivers/block/Kconfig
@@ -2,9 +2,12 @@
# Block device driver configuration
#
-if BLOCK
+menuconfig BLK_DEV
+ bool "Block devices"
+ depends on BLOCK
+ default y
-menu "Block devices"
+if BLK_DEV
config BLK_DEV_FD
tristate "Normal floppy disk support"
@@ -56,48 +59,6 @@ config AMIGA_Z2RAM
To compile this driver as a module, choose M here: the
module will be called z2ram.
-config ATARI_ACSI
- tristate "Atari ACSI support"
- depends on ATARI && BROKEN
- ---help---
- This enables support for the Atari ACSI interface. The driver
- supports hard disks and CD-ROMs, which have 512-byte sectors, or can
- be switched to that mode. Due to the ACSI command format, only disks
- up to 1 GB are supported. Special support for certain ACSI to SCSI
- adapters, which could relax that, isn't included yet. The ACSI
- driver is also the basis for certain other drivers for devices
- attached to the ACSI bus: Atari SLM laser printer, BioNet-100
- Ethernet, and PAMsNet Ethernet. If you want to use one of these
- devices, you need ACSI support, too.
-
- To compile this driver as a module, choose M here: the
- module will be called acsi.
-
-comment "Some devices (e.g. CD jukebox) support multiple LUNs"
- depends on ATARI && ATARI_ACSI
-
-config ACSI_MULTI_LUN
- bool "Probe all LUNs on each ACSI device"
- depends on ATARI_ACSI
- help
- If you have an ACSI device that supports more than one LUN (Logical
- Unit Number), e.g. a CD jukebox, you should say Y here so that all
- will be found by the ACSI driver. An ACSI device with multiple LUNs
- acts logically like multiple ACSI devices. The vast majority of ACSI
- devices have only one LUN, and so most people can say N here and
- should in fact do so, because it is safer.
-
-config ATARI_SLM
- tristate "Atari SLM laser printer support"
- depends on ATARI && ATARI_ACSI!=n
- help
- If you have an Atari SLM laser printer, say Y to include support for
- it in the kernel. Otherwise, say N. This driver is also available as
- a module ( = code which can be inserted in and removed from the
- running kernel whenever you want). The module will be called
- acsi_slm. Be warned: the driver needs much ST-RAM and can cause
- problems due to that fact!
-
config BLK_DEV_XD
tristate "XT hard disk support"
depends on ISA && ISA_DMA_API
@@ -141,7 +102,7 @@ source "drivers/block/paride/Kconfig"
config BLK_CPQ_DA
tristate "Compaq SMART2 support"
- depends on PCI
+ depends on PCI && VIRT_TO_BUS
help
This is the driver for Compaq Smart Array controllers. Everyone
using these boards should say Y here. See the file
@@ -451,8 +412,28 @@ config ATA_OVER_ETH
This driver provides Support for ATA over Ethernet block
devices like the Coraid EtherDrive (R) Storage Blade.
+config SUNVDC
+ tristate "Sun Virtual Disk Client support"
+ depends on SUN_LDOMS
+ help
+ Support for virtual disk devices as a client under Sun
+ Logical Domains.
+
source "drivers/s390/block/Kconfig"
-endmenu
+config XILINX_SYSACE
+ tristate "Xilinx SystemACE support"
+ depends on 4xx
+ help
+ Include support for the Xilinx SystemACE CompactFlash interface
+
+config XEN_BLKDEV_FRONTEND
+ tristate "Xen virtual block device support"
+ depends on XEN
+ default y
+ help
+ This driver implements the front-end of the Xen virtual
+ block device driver. It communicates with a back-end driver
+ in another domain which drives the actual block device.
-endif
+endif # BLK_DEV
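
The structural effect of this hunk is that the old "Block devices" menu guarded by "if BLOCK" becomes a single menuconfig symbol, BLK_DEV, and every per-driver option in the file now sits between "if BLK_DEV" and "endif # BLK_DEV". A minimal sketch of the resulting layout follows; BLK_DEV_EXAMPLE is a hypothetical entry used only for illustration and is not part of this patch.

menuconfig BLK_DEV
	bool "Block devices"
	depends on BLOCK
	default y

if BLK_DEV

config BLK_DEV_EXAMPLE
	tristate "Hypothetical block driver"
	help
	  Placeholder entry. With BLK_DEV set to N, this and every other
	  option inside the if/endif block drops out of the configuration
	  menus in one step, which the old menu/endmenu pair could not do.

endif # BLK_DEV
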
diff --git a/drivers/block/Makefile b/drivers/block/Makefile
index dd88e33c1eb..a7a099027fc 100644
--- a/drivers/block/Makefile
+++ b/drivers/block/Makefile
@@ -8,9 +8,8 @@
obj-$(CONFIG_MAC_FLOPPY) += swim3.o
obj-$(CONFIG_BLK_DEV_FD) += floppy.o
obj-$(CONFIG_AMIGA_FLOPPY) += amiflop.o
+obj-$(CONFIG_PS3_DISK) += ps3disk.o
obj-$(CONFIG_ATARI_FLOPPY) += ataflop.o
-obj-$(CONFIG_ATARI_ACSI) += acsi.o
-obj-$(CONFIG_ATARI_SLM) += acsi_slm.o
obj-$(CONFIG_AMIGA_Z2RAM) += z2ram.o
obj-$(CONFIG_BLK_DEV_RAM) += rd.o
obj-$(CONFIG_BLK_DEV_LOOP) += loop.o
@@ -19,7 +18,9 @@ obj-$(CONFIG_BLK_DEV_XD) += xd.o
obj-$(CONFIG_BLK_CPQ_DA) += cpqarray.o
obj-$(CONFIG_BLK_CPQ_CISS_DA) += cciss.o
obj-$(CONFIG_BLK_DEV_DAC960) += DAC960.o
+obj-$(CONFIG_XILINX_SYSACE) += xsysace.o
obj-$(CONFIG_CDROM_PKTCDVD) += pktcdvd.o
+obj-$(CONFIG_SUNVDC) += sunvdc.o
obj-$(CONFIG_BLK_DEV_UMEM) += umem.o
obj-$(CONFIG_BLK_DEV_NBD) += nbd.o
@@ -29,3 +30,5 @@ obj-$(CONFIG_VIODASD) += viodasd.o
obj-$(CONFIG_BLK_DEV_SX8) += sx8.o
obj-$(CONFIG_BLK_DEV_UB) += ub.o
+obj-$(CONFIG_XEN_BLKDEV_FRONTEND) += xen-blkfront.o
+obj-$(CONFIG_LGUEST_GUEST) += lguest_blk.o
diff --git a/drivers/block/acsi.c b/drivers/block/acsi.c
deleted file mode 100644
index e3d9152e231..00000000000
--- a/drivers/block/acsi.c
+++ /dev/null
@@ -1,1825 +0,0 @@
-/*
- * acsi.c -- Device driver for Atari ACSI hard disks
- *
- * Copyright 1994 Roman Hodek <Roman.Hodek@informatik.uni-erlangen.de>
- *
- * Some parts are based on hd.c by Linus Torvalds
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file COPYING in the main directory of this archive for
- * more details.
- *
- */
-
-/*
- * Still to do in this file:
- * - If a command ends with an error status (!= 0), the following
- * REQUEST SENSE commands (4 to fill the ST-DMA FIFO) are done by
- * polling the _IRQ signal (not interrupt-driven). This should be
- *   avoided in the future because it takes up a non-negligible time in
- * the interrupt service routine while interrupts are disabled.
- * Maybe a timer interrupt will get lost :-(
- */
-
-/*
- * General notes:
- *
- * - All ACSI devices (disks, CD-ROMs, ...) use major number 28.
- * Minors are organized like it is with SCSI: The upper 4 bits
- * identify the device, the lower 4 bits the partition.
- * The device numbers (the upper 4 bits) are given in the same
- * order as the devices are found on the bus.
- * - Up to 8 LUNs are supported for each target (if CONFIG_ACSI_MULTI_LUN
- * is defined), but only a total of 16 devices (due to minor
- * numbers...). Note that Atari allows only a maximum of 4 targets
- * (i.e. controllers, not devices) on the ACSI bus!
- * - An optimizing scheme similar to SCSI scatter-gather is implemented.
- * - Removable media are supported. After a medium change the device
- * is reinitialized (partition check etc.). Also, if the device
- * knows the PREVENT/ALLOW MEDIUM REMOVAL command, the door should
- * be locked and unlocked when mounting the first or unmounting the
- * last filesystem on the device. The code is untested, because I
- * don't have a removable hard disk.
- *
- */
-
-#include <linux/module.h>
-#include <linux/errno.h>
-#include <linux/signal.h>
-#include <linux/timer.h>
-#include <linux/fs.h>
-#include <linux/kernel.h>
-#include <linux/genhd.h>
-#include <linux/delay.h>
-#include <linux/mm.h>
-#include <linux/major.h>
-#include <linux/slab.h>
-#include <linux/interrupt.h>
-#include <scsi/scsi.h> /* for SCSI_IOCTL_GET_IDLUN */
-#include <scsi/scsi_ioctl.h>
-#include <linux/hdreg.h> /* for HDIO_GETGEO */
-#include <linux/blkpg.h>
-#include <linux/buffer_head.h>
-#include <linux/blkdev.h>
-
-#include <asm/setup.h>
-#include <asm/pgtable.h>
-#include <asm/system.h>
-#include <asm/uaccess.h>
-#include <asm/atarihw.h>
-#include <asm/atariints.h>
-#include <asm/atari_acsi.h>
-#include <asm/atari_stdma.h>
-#include <asm/atari_stram.h>
-
-static void (*do_acsi)(void) = NULL;
-static struct request_queue *acsi_queue;
-#define QUEUE (acsi_queue)
-#define CURRENT elv_next_request(acsi_queue)
-
-#define DEBUG
-#undef DEBUG_DETECT
-#undef NO_WRITE
-
-#define MAX_ERRORS 8 /* Max read/write errors/sector */
-#define MAX_LUN 8 /* Max LUNs per target */
-#define MAX_DEV 16
-
-#define ACSI_BUFFER_SIZE (16*1024) /* "normal" ACSI buffer size */
-#define ACSI_BUFFER_MINSIZE (2048) /* min. buf size if ext. DMA */
-#define ACSI_BUFFER_SIZE_ORDER 2 /* order size for above */
-#define ACSI_BUFFER_MINSIZE_ORDER 0 /* order size for above */
-#define ACSI_BUFFER_SECTORS (ACSI_BUFFER_SIZE/512)
-
-#define ACSI_BUFFER_ORDER \
- (ATARIHW_PRESENT(EXTD_DMA) ? \
- ACSI_BUFFER_MINSIZE_ORDER : \
- ACSI_BUFFER_SIZE_ORDER)
-
-#define ACSI_TIMEOUT (4*HZ)
-
-/* minimum delay between two commands */
-
-#define COMMAND_DELAY 500
-
-typedef enum {
- NONE, HARDDISK, CDROM
-} ACSI_TYPE;
-
-struct acsi_info_struct {
- ACSI_TYPE type; /* type of device */
- unsigned target; /* target number */
- unsigned lun; /* LUN in target controller */
- unsigned removable : 1; /* Flag for removable media */
- unsigned read_only : 1; /* Flag for read only devices */
- unsigned old_atari_disk : 1; /* Is an old Atari disk */
- unsigned changed : 1; /* Medium has been changed */
- unsigned long size; /* #blocks */
- int access_count;
-} acsi_info[MAX_DEV];
-
-/*
- * SENSE KEYS
- */
-
-#define NO_SENSE 0x00
-#define RECOVERED_ERROR 0x01
-#define NOT_READY 0x02
-#define MEDIUM_ERROR 0x03
-#define HARDWARE_ERROR 0x04
-#define ILLEGAL_REQUEST 0x05
-#define UNIT_ATTENTION 0x06
-#define DATA_PROTECT 0x07
-#define BLANK_CHECK 0x08
-#define COPY_ABORTED 0x0a
-#define ABORTED_COMMAND 0x0b
-#define VOLUME_OVERFLOW 0x0d
-#define MISCOMPARE 0x0e
-
-
-/*
- * DEVICE TYPES
- */
-
-#define TYPE_DISK 0x00
-#define TYPE_TAPE 0x01
-#define TYPE_WORM 0x04
-#define TYPE_ROM 0x05
-#define TYPE_MOD 0x07
-#define TYPE_NO_LUN 0x7f
-
-/* The data returned by MODE SENSE differ between the old Atari
- * hard disks and SCSI disks connected to ACSI. In the following, both
- * formats are defined and some macros to operate on them portably.
- */
-
-typedef struct {
- unsigned long dummy[2];
- unsigned long sector_size;
- unsigned char format_code;
-#define ATARI_SENSE_FORMAT_FIX 1
-#define ATARI_SENSE_FORMAT_CHNG 2
- unsigned char cylinders_h;
- unsigned char cylinders_l;
- unsigned char heads;
- unsigned char reduced_h;
- unsigned char reduced_l;
- unsigned char precomp_h;
- unsigned char precomp_l;
- unsigned char landing_zone;
- unsigned char steprate;
- unsigned char type;
-#define ATARI_SENSE_TYPE_FIXCHNG_MASK 4
-#define ATARI_SENSE_TYPE_SOFTHARD_MASK 8
-#define ATARI_SENSE_TYPE_FIX 4
-#define ATARI_SENSE_TYPE_CHNG 0
-#define ATARI_SENSE_TYPE_SOFT 0
-#define ATARI_SENSE_TYPE_HARD 8
- unsigned char sectors;
-} ATARI_SENSE_DATA;
-
-#define ATARI_CAPACITY(sd) \
- (((int)((sd).cylinders_h<<8)|(sd).cylinders_l) * \
- (sd).heads * (sd).sectors)
-
-
-typedef struct {
- unsigned char dummy1;
- unsigned char medium_type;
- unsigned char dummy2;
- unsigned char descriptor_size;
- unsigned long block_count;
- unsigned long sector_size;
- /* Page 0 data */
- unsigned char page_code;
- unsigned char page_size;
- unsigned char page_flags;
- unsigned char qualifier;
-} SCSI_SENSE_DATA;
-
-#define SCSI_CAPACITY(sd) ((sd).block_count & 0xffffff)
-
-
-typedef union {
- ATARI_SENSE_DATA atari;
- SCSI_SENSE_DATA scsi;
-} SENSE_DATA;
-
-#define SENSE_TYPE_UNKNOWN 0
-#define SENSE_TYPE_ATARI 1
-#define SENSE_TYPE_SCSI 2
-
-#define SENSE_TYPE(sd) \
- (((sd).atari.dummy[0] == 8 && \
- ((sd).atari.format_code == 1 || \
- (sd).atari.format_code == 2)) ? SENSE_TYPE_ATARI : \
- ((sd).scsi.dummy1 >= 11) ? SENSE_TYPE_SCSI : \
- SENSE_TYPE_UNKNOWN)
-
-#define CAPACITY(sd) \
- (SENSE_TYPE(sd) == SENSE_TYPE_ATARI ? \
- ATARI_CAPACITY((sd).atari) : \
- SCSI_CAPACITY((sd).scsi))
-
-#define SECTOR_SIZE(sd) \
- (SENSE_TYPE(sd) == SENSE_TYPE_ATARI ? \
- (sd).atari.sector_size : \
- (sd).scsi.sector_size & 0xffffff)
-
-/* Default size if capacity cannot be determined (1 GByte) */
-#define DEFAULT_SIZE 0x1fffff
-
-#define CARTRCH_STAT(aip,buf) \
- (aip->old_atari_disk ? \
- (((buf)[0] & 0x7f) == 0x28) : \
- ((((buf)[0] & 0x70) == 0x70) ? \
- (((buf)[2] & 0x0f) == 0x06) : \
- (((buf)[0] & 0x0f) == 0x06))) \
-
-/* These two are also exported to other drivers that work on the ACSI bus and
- * need an ST-RAM buffer. */
-char *acsi_buffer;
-unsigned long phys_acsi_buffer;
-
-static int NDevices;
-
-static int CurrentNReq;
-static int CurrentNSect;
-static char *CurrentBuffer;
-
-static DEFINE_SPINLOCK(acsi_lock);
-
-
-#define SET_TIMER() mod_timer(&acsi_timer, jiffies + ACSI_TIMEOUT)
-#define CLEAR_TIMER() del_timer(&acsi_timer)
-
-static unsigned long STramMask;
-#define STRAM_ADDR(a) (((a) & STramMask) == 0)
-
-
-
-/* ACSI commands */
-
-static char tur_cmd[6] = { 0x00, 0, 0, 0, 0, 0 };
-static char modesense_cmd[6] = { 0x1a, 0, 0, 0, 24, 0 };
-static char modeselect_cmd[6] = { 0x15, 0, 0, 0, 12, 0 };
-static char inquiry_cmd[6] = { 0x12, 0, 0, 0,255, 0 };
-static char reqsense_cmd[6] = { 0x03, 0, 0, 0, 4, 0 };
-static char read_cmd[6] = { 0x08, 0, 0, 0, 0, 0 };
-static char write_cmd[6] = { 0x0a, 0, 0, 0, 0, 0 };
-static char pa_med_rem_cmd[6] = { 0x1e, 0, 0, 0, 0, 0 };
-
-#define CMDSET_TARG_LUN(cmd,targ,lun) \
- do { \
- cmd[0] = (cmd[0] & ~0xe0) | (targ)<<5; \
- cmd[1] = (cmd[1] & ~0xe0) | (lun)<<5; \
- } while(0)
-
-#define CMDSET_BLOCK(cmd,blk) \
- do { \
- unsigned long __blk = (blk); \
- cmd[3] = __blk; __blk >>= 8; \
- cmd[2] = __blk; __blk >>= 8; \
- cmd[1] = (cmd[1] & 0xe0) | (__blk & 0x1f); \
- } while(0)
-
-#define CMDSET_LEN(cmd,len) \
- do { \
- cmd[4] = (len); \
- } while(0)
-
-/* ACSI errors (from REQUEST SENSE); There are two tables, one for the
- * old Atari disks and one for SCSI on ACSI disks.
- */
-
-struct acsi_error {
- unsigned char code;
- const char *text;
-} atari_acsi_errors[] = {
- { 0x00, "No error (??)" },
- { 0x01, "No index pulses" },
- { 0x02, "Seek not complete" },
- { 0x03, "Write fault" },
- { 0x04, "Drive not ready" },
- { 0x06, "No Track 00 signal" },
- { 0x10, "ECC error in ID field" },
- { 0x11, "Uncorrectable data error" },
- { 0x12, "ID field address mark not found" },
- { 0x13, "Data field address mark not found" },
- { 0x14, "Record not found" },
- { 0x15, "Seek error" },
- { 0x18, "Data check in no retry mode" },
- { 0x19, "ECC error during verify" },
- { 0x1a, "Access to bad block" },
- { 0x1c, "Unformatted or bad format" },
- { 0x20, "Invalid command" },
- { 0x21, "Invalid block address" },
- { 0x23, "Volume overflow" },
- { 0x24, "Invalid argument" },
- { 0x25, "Invalid drive number" },
- { 0x26, "Byte zero parity check" },
- { 0x28, "Cartridge changed" },
- { 0x2c, "Error count overflow" },
- { 0x30, "Controller selftest failed" }
-},
-
- scsi_acsi_errors[] = {
- { 0x00, "No error (??)" },
- { 0x01, "Recovered error" },
- { 0x02, "Drive not ready" },
- { 0x03, "Uncorrectable medium error" },
- { 0x04, "Hardware error" },
- { 0x05, "Illegal request" },
- { 0x06, "Unit attention (Reset or cartridge changed)" },
- { 0x07, "Data protection" },
- { 0x08, "Blank check" },
- { 0x0b, "Aborted Command" },
- { 0x0d, "Volume overflow" }
-};
-
-
-
-/***************************** Prototypes *****************************/
-
-static int acsicmd_dma( const char *cmd, char *buffer, int blocks, int
- rwflag, int enable);
-static int acsi_reqsense( char *buffer, int targ, int lun);
-static void acsi_print_error(const unsigned char *errblk, struct acsi_info_struct *aip);
-static irqreturn_t acsi_interrupt (int irq, void *data);
-static void unexpected_acsi_interrupt( void );
-static void bad_rw_intr( void );
-static void read_intr( void );
-static void write_intr( void);
-static void acsi_times_out( unsigned long dummy );
-static void copy_to_acsibuffer( void );
-static void copy_from_acsibuffer( void );
-static void do_end_requests( void );
-static void do_acsi_request( request_queue_t * );
-static void redo_acsi_request( void );
-static int acsi_ioctl( struct inode *inode, struct file *file, unsigned int
- cmd, unsigned long arg );
-static int acsi_open( struct inode * inode, struct file * filp );
-static int acsi_release( struct inode * inode, struct file * file );
-static void acsi_prevent_removal(struct acsi_info_struct *aip, int flag );
-static int acsi_change_blk_size( int target, int lun);
-static int acsi_mode_sense( int target, int lun, SENSE_DATA *sd );
-static int acsi_revalidate (struct gendisk *disk);
-
-/************************* End of Prototypes **************************/
-
-
-DEFINE_TIMER(acsi_timer, acsi_times_out, 0, 0);
-
-
-#ifdef CONFIG_ATARI_SLM
-
-extern int attach_slm( int target, int lun );
-extern int slm_init( void );
-
-#endif
-
-
-
-/***********************************************************************
- *
- * ACSI primitives
- *
- **********************************************************************/
-
-
-/*
- * The following two functions wait for _IRQ to become Low or High,
- * resp., with a timeout. The 'timeout' parameter is in jiffies
- * (10ms).
- * If the functions are called with timer interrupts on (int level <
- * 6), the timeout is based on the 'jiffies' variable to provide exact
- * timeouts for device probing etc.
- * If interrupts are disabled, the number of tries is based on the
- * 'loops_per_jiffy' variable. A rough estimation is sufficient here...
- */
-
-#define INT_LEVEL \
- ({ unsigned __sr; \
- __asm__ __volatile__ ( "movew %/sr,%0" : "=dm" (__sr) ); \
- (__sr >> 8) & 7; \
- })
-
-int acsi_wait_for_IRQ( unsigned timeout )
-
-{
- if (INT_LEVEL < 6) {
- unsigned long maxjif = jiffies + timeout;
- while (time_before(jiffies, maxjif))
- if (!(mfp.par_dt_reg & 0x20)) return( 1 );
- }
- else {
- long tries = loops_per_jiffy / 8 * timeout;
- while( --tries >= 0 )
- if (!(mfp.par_dt_reg & 0x20)) return( 1 );
- }
- return( 0 ); /* timeout! */
-}
-
-
-int acsi_wait_for_noIRQ( unsigned timeout )
-
-{
- if (INT_LEVEL < 6) {
- unsigned long maxjif = jiffies + timeout;
- while (time_before(jiffies, maxjif))
- if (mfp.par_dt_reg & 0x20) return( 1 );
- }
- else {
- long tries = loops_per_jiffy * timeout / 8;
- while( tries-- >= 0 )
- if (mfp.par_dt_reg & 0x20) return( 1 );
- }
- return( 0 ); /* timeout! */
-}
-
-static struct timeval start_time;
-
-void
-acsi_delay_start(void)
-{
- do_gettimeofday(&start_time);
-}
-
-/* wait until 'usec' microseconds (<1E6) have passed since acsi_delay_start */
-
-void
-acsi_delay_end(long usec)
-{
- struct timeval end_time;
- long deltau,deltas;
- do_gettimeofday(&end_time);
- deltau=end_time.tv_usec - start_time.tv_usec;
- deltas=end_time.tv_sec - start_time.tv_sec;
- if (deltas > 1 || deltas < 0)
- return;
- if (deltas > 0)
- deltau += 1000*1000;
- if (deltau >= usec)
- return;
- udelay(usec-deltau);
-}
-
-/* acsicmd_dma() sends an ACSI command and sets up the DMA to transfer
- * 'blocks' blocks of 512 bytes from/to 'buffer'.
- * Because the _IRQ signal is used for handshaking the command bytes,
- * the ACSI interrupt has to be disabled in this function. If the end
- * of the operation should be signalled by a real interrupt, it has to be
- * reenabled afterwards.
- */
-
-static int acsicmd_dma( const char *cmd, char *buffer, int blocks, int rwflag, int enable)
-
-{ unsigned long flags, paddr;
- int i;
-
-#ifdef NO_WRITE
- if (rwflag || *cmd == 0x0a) {
- printk( "ACSI: Write commands disabled!\n" );
- return( 0 );
- }
-#endif
-
- rwflag = rwflag ? 0x100 : 0;
- paddr = virt_to_phys( buffer );
-
- acsi_delay_end(COMMAND_DELAY);
- DISABLE_IRQ();
-
- local_irq_save(flags);
- /* Low on A1 */
- dma_wd.dma_mode_status = 0x88 | rwflag;
- MFPDELAY();
-
- /* set DMA address */
- dma_wd.dma_lo = (unsigned char)paddr;
- paddr >>= 8;
- MFPDELAY();
- dma_wd.dma_md = (unsigned char)paddr;
- paddr >>= 8;
- MFPDELAY();
- if (ATARIHW_PRESENT(EXTD_DMA))
- st_dma_ext_dmahi = (unsigned short)paddr;
- else
- dma_wd.dma_hi = (unsigned char)paddr;
- MFPDELAY();
- local_irq_restore(flags);
-
- /* send the command bytes except the last */
- for( i = 0; i < 5; ++i ) {
- DMA_LONG_WRITE( *cmd++, 0x8a | rwflag );
- udelay(20);
- if (!acsi_wait_for_IRQ( HZ/2 )) return( 0 ); /* timeout */
- }
-
- /* Clear FIFO and switch DMA to correct direction */
- dma_wd.dma_mode_status = 0x92 | (rwflag ^ 0x100);
- MFPDELAY();
- dma_wd.dma_mode_status = 0x92 | rwflag;
- MFPDELAY();
-
- /* How many sectors for DMA */
- dma_wd.fdc_acces_seccount = blocks;
- MFPDELAY();
-
- /* send last command byte */
- dma_wd.dma_mode_status = 0x8a | rwflag;
- MFPDELAY();
- DMA_LONG_WRITE( *cmd++, 0x0a | rwflag );
- if (enable)
- ENABLE_IRQ();
- udelay(80);
-
- return( 1 );
-}
-
-
-/*
- * acsicmd_nodma() sends an ACSI command that requires no DMA.
- */
-
-int acsicmd_nodma( const char *cmd, int enable)
-
-{ int i;
-
- acsi_delay_end(COMMAND_DELAY);
- DISABLE_IRQ();
-
- /* send first command byte */
- dma_wd.dma_mode_status = 0x88;
- MFPDELAY();
- DMA_LONG_WRITE( *cmd++, 0x8a );
- udelay(20);
- if (!acsi_wait_for_IRQ( HZ/2 )) return( 0 ); /* timeout */
-
- /* send the intermediate command bytes */
- for( i = 0; i < 4; ++i ) {
- DMA_LONG_WRITE( *cmd++, 0x8a );
- udelay(20);
- if (!acsi_wait_for_IRQ( HZ/2 )) return( 0 ); /* timeout */
- }
-
- /* send last command byte */
- DMA_LONG_WRITE( *cmd++, 0x0a );
- if (enable)
- ENABLE_IRQ();
- udelay(80);
-
- return( 1 );
- /* Note that the ACSI interrupt is still disabled after this
- * function. If you want to get the IRQ delivered, enable it manually!
- */
-}
-
-
-static int acsi_reqsense( char *buffer, int targ, int lun)
-
-{
- CMDSET_TARG_LUN( reqsense_cmd, targ, lun);
- if (!acsicmd_dma( reqsense_cmd, buffer, 1, 0, 0 )) return( 0 );
- if (!acsi_wait_for_IRQ( 10 )) return( 0 );
- acsi_getstatus();
- if (!acsicmd_nodma( reqsense_cmd, 0 )) return( 0 );
- if (!acsi_wait_for_IRQ( 10 )) return( 0 );
- acsi_getstatus();
- if (!acsicmd_nodma( reqsense_cmd, 0 )) return( 0 );
- if (!acsi_wait_for_IRQ( 10 )) return( 0 );
- acsi_getstatus();
- if (!acsicmd_nodma( reqsense_cmd, 0 )) return( 0 );
- if (!acsi_wait_for_IRQ( 10 )) return( 0 );
- acsi_getstatus();
- dma_cache_maintenance( virt_to_phys(buffer), 16, 0 );
-
- return( 1 );
-}
-
-
-/*
- * ACSI status phase: get the status byte from the bus
- *
- * I've seen several times that a 0xff status is read, probably due to
- * a timing error. In this case, the procedure is repeated after the
- * next _IRQ edge.
- */
-
-int acsi_getstatus( void )
-
-{ int status;
-
- DISABLE_IRQ();
- for(;;) {
- if (!acsi_wait_for_IRQ( 100 )) {
- acsi_delay_start();
- return( -1 );
- }
- dma_wd.dma_mode_status = 0x8a;
- MFPDELAY();
- status = dma_wd.fdc_acces_seccount;
- if (status != 0xff) break;
-#ifdef DEBUG
- printk("ACSI: skipping 0xff status byte\n" );
-#endif
- udelay(40);
- acsi_wait_for_noIRQ( 20 );
- }
- dma_wd.dma_mode_status = 0x80;
- udelay(40);
- acsi_wait_for_noIRQ( 20 );
-
- acsi_delay_start();
- return( status & 0x1f ); /* mask off the device# */
-}
-
-
-#if (defined(CONFIG_ATARI_SLM) || defined(CONFIG_ATARI_SLM_MODULE))
-
-/* Receive data in an extended status phase. Needed by SLM printer. */
-
-int acsi_extstatus( char *buffer, int cnt )
-
-{ int status;
-
- DISABLE_IRQ();
- udelay(80);
- while( cnt-- > 0 ) {
- if (!acsi_wait_for_IRQ( 40 )) return( 0 );
- dma_wd.dma_mode_status = 0x8a;
- MFPDELAY();
- status = dma_wd.fdc_acces_seccount;
- MFPDELAY();
- *buffer++ = status & 0xff;
- udelay(40);
- }
- return( 1 );
-}
-
-
-/* Finish an extended status phase */
-
-void acsi_end_extstatus( void )
-
-{
- dma_wd.dma_mode_status = 0x80;
- udelay(40);
- acsi_wait_for_noIRQ( 20 );
- acsi_delay_start();
-}
-
-
-/* Send data in an extended command phase */
-
-int acsi_extcmd( unsigned char *buffer, int cnt )
-
-{
- while( cnt-- > 0 ) {
- DMA_LONG_WRITE( *buffer++, 0x8a );
- udelay(20);
- if (!acsi_wait_for_IRQ( HZ/2 )) return( 0 ); /* timeout */
- }
- return( 1 );
-}
-
-#endif
-
-
-static void acsi_print_error(const unsigned char *errblk, struct acsi_info_struct *aip)
-
-{ int atari_err, i, errcode;
- struct acsi_error *arr;
-
- atari_err = aip->old_atari_disk;
- if (atari_err)
- errcode = errblk[0] & 0x7f;
- else
- if ((errblk[0] & 0x70) == 0x70)
- errcode = errblk[2] & 0x0f;
- else
- errcode = errblk[0] & 0x0f;
-
- printk( KERN_ERR "ACSI error 0x%02x", errcode );
-
- if (errblk[0] & 0x80)
- printk( " for sector %d",
- ((errblk[1] & 0x1f) << 16) |
- (errblk[2] << 8) | errblk[0] );
-
- arr = atari_err ? atari_acsi_errors : scsi_acsi_errors;
- i = atari_err ? sizeof(atari_acsi_errors)/sizeof(*atari_acsi_errors) :
- sizeof(scsi_acsi_errors)/sizeof(*scsi_acsi_errors);
-
- for( --i; i >= 0; --i )
- if (arr[i].code == errcode) break;
- if (i >= 0)
- printk( ": %s\n", arr[i].text );
-}
-
-/*******************************************************************
- *
- * ACSI interrupt routine
- * Test if this is an ACSI interrupt and call the irq handler.
- * Otherwise ignore this interrupt.
- *
- *******************************************************************/
-
-static irqreturn_t acsi_interrupt(int irq, void *data )
-
-{ void (*acsi_irq_handler)(void) = do_acsi;
-
- do_acsi = NULL;
- CLEAR_TIMER();
-
- if (!acsi_irq_handler)
- acsi_irq_handler = unexpected_acsi_interrupt;
- acsi_irq_handler();
- return IRQ_HANDLED;
-}
-
-
-/******************************************************************
- *
- * The Interrupt handlers
- *
- *******************************************************************/
-
-
-static void unexpected_acsi_interrupt( void )
-
-{
- printk( KERN_WARNING "Unexpected ACSI interrupt\n" );
-}
-
-
-/* This function is called in case of errors. Because we cannot reset
- * the ACSI bus or a single device, there is no other choice than
- * retrying several times :-(
- */
-
-static void bad_rw_intr( void )
-
-{
- if (!CURRENT)
- return;
-
- if (++CURRENT->errors >= MAX_ERRORS)
- end_request(CURRENT, 0);
- /* Otherwise just retry */
-}
-
-
-static void read_intr( void )
-
-{ int status;
-
- status = acsi_getstatus();
- if (status != 0) {
- struct gendisk *disk = CURRENT->rq_disk;
- struct acsi_info_struct *aip = disk->private_data;
- printk(KERN_ERR "%s: ", disk->disk_name);
- if (!acsi_reqsense(acsi_buffer, aip->target, aip->lun))
- printk( "ACSI error and REQUEST SENSE failed (status=0x%02x)\n", status );
- else {
- acsi_print_error(acsi_buffer, aip);
- if (CARTRCH_STAT(aip, acsi_buffer))
- aip->changed = 1;
- }
- ENABLE_IRQ();
- bad_rw_intr();
- redo_acsi_request();
- return;
- }
-
- dma_cache_maintenance( virt_to_phys(CurrentBuffer), CurrentNSect*512, 0 );
- if (CurrentBuffer == acsi_buffer)
- copy_from_acsibuffer();
-
- do_end_requests();
- redo_acsi_request();
-}
-
-
-static void write_intr(void)
-
-{ int status;
-
- status = acsi_getstatus();
- if (status != 0) {
- struct gendisk *disk = CURRENT->rq_disk;
- struct acsi_info_struct *aip = disk->private_data;
- printk( KERN_ERR "%s: ", disk->disk_name);
- if (!acsi_reqsense( acsi_buffer, aip->target, aip->lun))
- printk( "ACSI error and REQUEST SENSE failed (status=0x%02x)\n", status );
- else {
- acsi_print_error(acsi_buffer, aip);
- if (CARTRCH_STAT(aip, acsi_buffer))
- aip->changed = 1;
- }
- bad_rw_intr();
- redo_acsi_request();
- return;
- }
-
- do_end_requests();
- redo_acsi_request();
-}
-
-
-static void acsi_times_out( unsigned long dummy )
-
-{
- DISABLE_IRQ();
- if (!do_acsi) return;
-
- do_acsi = NULL;
- printk( KERN_ERR "ACSI timeout\n" );
- if (!CURRENT)
- return;
- if (++CURRENT->errors >= MAX_ERRORS) {
-#ifdef DEBUG
- printk( KERN_ERR "ACSI: too many errors.\n" );
-#endif
- end_request(CURRENT, 0);
- }
-
- redo_acsi_request();
-}
-
-
-
-/***********************************************************************
- *
- * Scatter-gather utility functions
- *
- ***********************************************************************/
-
-
-static void copy_to_acsibuffer( void )
-
-{ int i;
- char *src, *dst;
- struct buffer_head *bh;
-
- src = CURRENT->buffer;
- dst = acsi_buffer;
- bh = CURRENT->bh;
-
- if (!bh)
- memcpy( dst, src, CurrentNSect*512 );
- else
- for( i = 0; i < CurrentNReq; ++i ) {
- memcpy( dst, src, bh->b_size );
- dst += bh->b_size;
- if ((bh = bh->b_reqnext))
- src = bh->b_data;
- }
-}
-
-
-static void copy_from_acsibuffer( void )
-
-{ int i;
- char *src, *dst;
- struct buffer_head *bh;
-
- dst = CURRENT->buffer;
- src = acsi_buffer;
- bh = CURRENT->bh;
-
- if (!bh)
- memcpy( dst, src, CurrentNSect*512 );
- else
- for( i = 0; i < CurrentNReq; ++i ) {
- memcpy( dst, src, bh->b_size );
- src += bh->b_size;
- if ((bh = bh->b_reqnext))
- dst = bh->b_data;
- }
-}
-
-
-static void do_end_requests( void )
-
-{ int i, n;
-
- if (!CURRENT->bh) {
- CURRENT->nr_sectors -= CurrentNSect;
- CURRENT->current_nr_sectors -= CurrentNSect;
- CURRENT->sector += CurrentNSect;
- if (CURRENT->nr_sectors == 0)
- end_request(CURRENT, 1);
- }
- else {
- for( i = 0; i < CurrentNReq; ++i ) {
- n = CURRENT->bh->b_size >> 9;
- CURRENT->nr_sectors -= n;
- CURRENT->current_nr_sectors -= n;
- CURRENT->sector += n;
- end_request(CURRENT, 1);
- }
- }
-}
-
-
-
-
-/***********************************************************************
- *
- * do_acsi_request and friends
- *
- ***********************************************************************/
-
-static void do_acsi_request( request_queue_t * q )
-
-{
- stdma_lock( acsi_interrupt, NULL );
- redo_acsi_request();
-}
-
-
-static void redo_acsi_request( void )
-{
- unsigned block, target, lun, nsect;
- char *buffer;
- unsigned long pbuffer;
- struct buffer_head *bh;
- struct gendisk *disk;
- struct acsi_info_struct *aip;
-
- repeat:
- CLEAR_TIMER();
-
- if (do_acsi)
- return;
-
- if (!CURRENT) {
- do_acsi = NULL;
- ENABLE_IRQ();
- stdma_release();
- return;
- }
-
- disk = CURRENT->rq_disk;
- aip = disk->private_data;
- if (CURRENT->bh) {
- if (!CURRENT->bh && !buffer_locked(CURRENT->bh))
- panic("ACSI: block not locked");
- }
-
- block = CURRENT->sector;
- if (block+CURRENT->nr_sectors >= get_capacity(disk)) {
-#ifdef DEBUG
- printk( "%s: attempted access for blocks %d...%ld past end of device at block %ld.\n",
- disk->disk_name,
- block, block + CURRENT->nr_sectors - 1,
- get_capacity(disk));
-#endif
- end_request(CURRENT, 0);
- goto repeat;
- }
- if (aip->changed) {
- printk( KERN_NOTICE "%s: request denied because cartridge has "
- "been changed.\n", disk->disk_name);
- end_request(CURRENT, 0);
- goto repeat;
- }
-
- target = aip->target;
- lun = aip->lun;
-
- /* Find out how many sectors should be transferred from/to
- * consecutive buffers and thus can be done with a single command.
- */
- buffer = CURRENT->buffer;
- pbuffer = virt_to_phys(buffer);
- nsect = CURRENT->current_nr_sectors;
- CurrentNReq = 1;
-
- if ((bh = CURRENT->bh) && bh != CURRENT->bhtail) {
- if (!STRAM_ADDR(pbuffer)) {
- /* If transfer is done via the ACSI buffer anyway, we can
- * assemble as many bh's as fit in the buffer.
- */
- while( (bh = bh->b_reqnext) ) {
- if (nsect + (bh->b_size>>9) > ACSI_BUFFER_SECTORS) break;
- nsect += bh->b_size >> 9;
- ++CurrentNReq;
- if (bh == CURRENT->bhtail) break;
- }
- buffer = acsi_buffer;
- pbuffer = phys_acsi_buffer;
- }
- else {
- unsigned long pendadr, pnewadr;
- pendadr = pbuffer + nsect*512;
- while( (bh = bh->b_reqnext) ) {
- pnewadr = virt_to_phys(bh->b_data);
- if (!STRAM_ADDR(pnewadr) || pendadr != pnewadr) break;
- nsect += bh->b_size >> 9;
- pendadr = pnewadr + bh->b_size;
- ++CurrentNReq;
- if (bh == CURRENT->bhtail) break;
- }
- }
- }
- else {
- if (!STRAM_ADDR(pbuffer)) {
- buffer = acsi_buffer;
- pbuffer = phys_acsi_buffer;
- if (nsect > ACSI_BUFFER_SECTORS)
- nsect = ACSI_BUFFER_SECTORS;
- }
- }
- CurrentBuffer = buffer;
- CurrentNSect = nsect;
-
- if (rq_data_dir(CURRENT) == WRITE) {
- CMDSET_TARG_LUN( write_cmd, target, lun );
- CMDSET_BLOCK( write_cmd, block );
- CMDSET_LEN( write_cmd, nsect );
- if (buffer == acsi_buffer)
- copy_to_acsibuffer();
- dma_cache_maintenance( pbuffer, nsect*512, 1 );
- do_acsi = write_intr;
- if (!acsicmd_dma( write_cmd, buffer, nsect, 1, 1)) {
- do_acsi = NULL;
- printk( KERN_ERR "ACSI (write): Timeout in command block\n" );
- bad_rw_intr();
- goto repeat;
- }
- SET_TIMER();
- return;
- }
- if (rq_data_dir(CURRENT) == READ) {
- CMDSET_TARG_LUN( read_cmd, target, lun );
- CMDSET_BLOCK( read_cmd, block );
- CMDSET_LEN( read_cmd, nsect );
- do_acsi = read_intr;
- if (!acsicmd_dma( read_cmd, buffer, nsect, 0, 1)) {
- do_acsi = NULL;
- printk( KERN_ERR "ACSI (read): Timeout in command block\n" );
- bad_rw_intr();
- goto repeat;
- }
- SET_TIMER();
- return;
- }
- panic("unknown ACSI command");
-}
-
-
-
-/***********************************************************************
- *
- * Misc functions: ioctl, open, release, check_change, ...
- *
- ***********************************************************************/
-
-static int acsi_getgeo(struct block_device *bdev, struct hd_geometry *geo)
-{
- struct acsi_info_struct *aip = bdev->bd_disk->private_data;
-
- /*
- * Just fake some geometry here, it's nonsense anyway
- * To make it easy, use Adaptec's usual 64/32 mapping
- */
- geo->heads = 64;
- geo->sectors = 32;
- geo->cylinders = aip->size >> 11;
- return 0;
-}
-
-static int acsi_ioctl( struct inode *inode, struct file *file,
- unsigned int cmd, unsigned long arg )
-{
- struct gendisk *disk = inode->i_bdev->bd_disk;
- struct acsi_info_struct *aip = disk->private_data;
- switch (cmd) {
- case SCSI_IOCTL_GET_IDLUN:
- /* SCSI compatible GET_IDLUN call to get target's ID and LUN number */
- put_user( aip->target | (aip->lun << 8),
- &((Scsi_Idlun *) arg)->dev_id );
- put_user( 0, &((Scsi_Idlun *) arg)->host_unique_id );
- return 0;
- default:
- return -EINVAL;
- }
-}
-
-
-/*
- * Open a device, check for read-only and lock the medium if it is
- * removable.
- *
- * Changes by Martin Rogge, 9th Aug 1995:
- * Check whether check_disk_change (and therefore revalidate_acsidisk)
- * was successful. They fail when there is no medium in the drive.
- *
- * The problem of media being changed during an operation can be
- * ignored because of the prevent_removal code.
- *
- * Added check for the validity of the device number.
- *
- */
-
-static int acsi_open( struct inode * inode, struct file * filp )
-{
- struct gendisk *disk = inode->i_bdev->bd_disk;
- struct acsi_info_struct *aip = disk->private_data;
-
- if (aip->access_count == 0 && aip->removable) {
-#if 0
- aip->changed = 1; /* safety first */
-#endif
- check_disk_change( inode->i_bdev );
- if (aip->changed) /* revalidate was not successful (no medium) */
- return -ENXIO;
- acsi_prevent_removal(aip, 1);
- }
- aip->access_count++;
-
- if (filp && filp->f_mode) {
- check_disk_change( inode->i_bdev );
- if (filp->f_mode & 2) {
- if (aip->read_only) {
- acsi_release( inode, filp );
- return -EROFS;
- }
- }
- }
-
- return 0;
-}
-
-/*
- * Releasing a block device means we sync() it, so that it can safely
- * be forgotten about...
- */
-
-static int acsi_release( struct inode * inode, struct file * file )
-{
- struct gendisk *disk = inode->i_bdev->bd_disk;
- struct acsi_info_struct *aip = disk->private_data;
- if (--aip->access_count == 0 && aip->removable)
- acsi_prevent_removal(aip, 0);
- return( 0 );
-}
-
-/*
- * Prevent or allow a media change for removable devices.
- */
-
-static void acsi_prevent_removal(struct acsi_info_struct *aip, int flag)
-{
- stdma_lock( NULL, NULL );
-
- CMDSET_TARG_LUN(pa_med_rem_cmd, aip->target, aip->lun);
- CMDSET_LEN( pa_med_rem_cmd, flag );
-
- if (acsicmd_nodma(pa_med_rem_cmd, 0) && acsi_wait_for_IRQ(3*HZ))
- acsi_getstatus();
- /* Do not report errors -- some devices may not know this command. */
-
- ENABLE_IRQ();
- stdma_release();
-}
-
-static int acsi_media_change(struct gendisk *disk)
-{
- struct acsi_info_struct *aip = disk->private_data;
-
- if (!aip->removable)
- return 0;
-
- if (aip->changed)
- /* We can be sure that the medium has been changed -- REQUEST
- * SENSE has reported this earlier.
- */
- return 1;
-
- /* If the flag isn't set, make a test by reading block 0.
- * If errors happen, it seems to be better to say "changed"...
- */
- stdma_lock( NULL, NULL );
- CMDSET_TARG_LUN(read_cmd, aip->target, aip->lun);
- CMDSET_BLOCK( read_cmd, 0 );
- CMDSET_LEN( read_cmd, 1 );
- if (acsicmd_dma(read_cmd, acsi_buffer, 1, 0, 0) &&
- acsi_wait_for_IRQ(3*HZ)) {
- if (acsi_getstatus()) {
- if (acsi_reqsense(acsi_buffer, aip->target, aip->lun)) {
- if (CARTRCH_STAT(aip, acsi_buffer))
- aip->changed = 1;
- }
- else {
- printk( KERN_ERR "%s: REQUEST SENSE failed in test for "
- "medium change; assuming a change\n", disk->disk_name );
- aip->changed = 1;
- }
- }
- }
- else {
- printk( KERN_ERR "%s: Test for medium changed timed out; "
- "assuming a change\n", disk->disk_name);
- aip->changed = 1;
- }
- ENABLE_IRQ();
- stdma_release();
-
- /* Now, after reading a block, the changed status is surely valid. */
- return aip->changed;
-}
-
-
-static int acsi_change_blk_size( int target, int lun)
-
-{ int i;
-
- for (i=0; i<12; i++)
- acsi_buffer[i] = 0;
-
- acsi_buffer[3] = 8;
- acsi_buffer[10] = 2;
- CMDSET_TARG_LUN( modeselect_cmd, target, lun);
-
- if (!acsicmd_dma( modeselect_cmd, acsi_buffer, 1,1,0) ||
- !acsi_wait_for_IRQ( 3*HZ ) ||
- acsi_getstatus() != 0 ) {
- return(0);
- }
- return(1);
-}
-
-
-static int acsi_mode_sense( int target, int lun, SENSE_DATA *sd )
-
-{
- int page;
-
- CMDSET_TARG_LUN( modesense_cmd, target, lun );
- for (page=0; page<4; page++) {
- modesense_cmd[2] = page;
- if (!acsicmd_dma( modesense_cmd, acsi_buffer, 1, 0, 0 ) ||
- !acsi_wait_for_IRQ( 3*HZ ) ||
- acsi_getstatus())
- continue;
-
- /* read twice to jump over the second 16-byte border! */
- udelay(300);
- if (acsi_wait_for_noIRQ( 20 ) &&
- acsicmd_nodma( modesense_cmd, 0 ) &&
- acsi_wait_for_IRQ( 3*HZ ) &&
- acsi_getstatus() == 0)
- break;
- }
- if (page == 4) {
- return(0);
- }
-
- dma_cache_maintenance( phys_acsi_buffer, sizeof(SENSE_DATA), 0 );
- *sd = *(SENSE_DATA *)acsi_buffer;
-
- /* Validity check, depending on type of data */
-
- switch( SENSE_TYPE(*sd) ) {
-
- case SENSE_TYPE_ATARI:
- if (CAPACITY(*sd) == 0)
- goto invalid_sense;
- break;
-
- case SENSE_TYPE_SCSI:
- if (sd->scsi.descriptor_size != 8)
- goto invalid_sense;
- break;
-
- case SENSE_TYPE_UNKNOWN:
-
- printk( KERN_ERR "ACSI target %d, lun %d: Cannot interpret "
- "sense data\n", target, lun );
-
- invalid_sense:
-
-#ifdef DEBUG
- { int i;
- printk( "Mode sense data for ACSI target %d, lun %d seem not valid:",
- target, lun );
- for( i = 0; i < sizeof(SENSE_DATA); ++i )
- printk( "%02x ", (unsigned char)acsi_buffer[i] );
- printk( "\n" );
- }
-#endif
- return( 0 );
- }
-
- return( 1 );
-}
-
-
-
-/*******************************************************************
- *
- * Initialization
- *
- ********************************************************************/
-
-
-extern struct block_device_operations acsi_fops;
-
-static struct gendisk *acsi_gendisk[MAX_DEV];
-
-#define MAX_SCSI_DEVICE_CODE 10
-
-static const char *const scsi_device_types[MAX_SCSI_DEVICE_CODE] =
-{
- "Direct-Access ",
- "Sequential-Access",
- "Printer ",
- "Processor ",
- "WORM ",
- "CD-ROM ",
- "Scanner ",
- "Optical Device ",
- "Medium Changer ",
- "Communications "
-};
-
-static void print_inquiry(unsigned char *data)
-{
- int i;
-
- printk(KERN_INFO " Vendor: ");
- for (i = 8; i < 16; i++)
- {
- if (data[i] >= 0x20 && i < data[4] + 5)
- printk("%c", data[i]);
- else
- printk(" ");
- }
-
- printk(" Model: ");
- for (i = 16; i < 32; i++)
- {
- if (data[i] >= 0x20 && i < data[4] + 5)
- printk("%c", data[i]);
- else
- printk(" ");
- }
-
- printk(" Rev: ");
- for (i = 32; i < 36; i++)
- {
- if (data[i] >= 0x20 && i < data[4] + 5)
- printk("%c", data[i]);
- else
- printk(" ");
- }
-
- printk("\n");
-
- i = data[0] & 0x1f;
-
- printk(KERN_INFO " Type: %s ", (i < MAX_SCSI_DEVICE_CODE
- ? scsi_device_types[i]
- : "Unknown "));
- printk(" ANSI SCSI revision: %02x", data[2] & 0x07);
- if ((data[2] & 0x07) == 1 && (data[3] & 0x0f) == 1)
- printk(" CCS\n");
- else
- printk("\n");
-}
-
-
-/*
- * Changes by Martin Rogge, 9th Aug 1995:
- * acsi_devinit has been taken out of acsi_geninit, because it needs
- * to be called from revalidate_acsidisk. The result of request sense
- * is now checked for DRIVE NOT READY.
- *
- * The structure *aip is only valid when acsi_devinit returns
- * DEV_SUPPORTED.
- *
- */
-
-#define DEV_NONE 0
-#define DEV_UNKNOWN 1
-#define DEV_SUPPORTED 2
-#define DEV_SLM 3
-
-static int acsi_devinit(struct acsi_info_struct *aip)
-{
- int status, got_inquiry;
- SENSE_DATA sense;
- unsigned char reqsense, extsense;
-
- /*****************************************************************/
- /* Do a TEST UNIT READY command to test the presence of a device */
- /*****************************************************************/
-
- CMDSET_TARG_LUN(tur_cmd, aip->target, aip->lun);
- if (!acsicmd_nodma(tur_cmd, 0)) {
- /* timed out -> no device here */
-#ifdef DEBUG_DETECT
- printk("target %d lun %d: timeout\n", aip->target, aip->lun);
-#endif
- return DEV_NONE;
- }
-
- /*************************/
- /* Read the ACSI status. */
- /*************************/
-
- status = acsi_getstatus();
- if (status) {
- if (status == 0x12) {
- /* The SLM printer should be the only device that
- * responds with the error code in the status byte. In
- * correct status bytes, bit 4 is never set.
- */
- printk( KERN_INFO "Detected SLM printer at id %d lun %d\n",
- aip->target, aip->lun);
- return DEV_SLM;
- }
- /* ignore CHECK CONDITION, since some devices send a
- UNIT ATTENTION */
- if ((status & 0x1e) != 0x2) {
-#ifdef DEBUG_DETECT
- printk("target %d lun %d: status %d\n",
- aip->target, aip->lun, status);
-#endif
- return DEV_UNKNOWN;
- }
- }
-
- /*******************************/
- /* Do a REQUEST SENSE command. */
- /*******************************/
-
- if (!acsi_reqsense(acsi_buffer, aip->target, aip->lun)) {
- printk( KERN_WARNING "acsi_reqsense failed\n");
- acsi_buffer[0] = 0;
- acsi_buffer[2] = UNIT_ATTENTION;
- }
- reqsense = acsi_buffer[0];
- extsense = acsi_buffer[2] & 0xf;
- if (status) {
- if ((reqsense & 0x70) == 0x70) { /* extended sense */
- if (extsense != UNIT_ATTENTION &&
- extsense != NOT_READY) {
-#ifdef DEBUG_DETECT
- printk("target %d lun %d: extended sense %d\n",
- aip->target, aip->lun, extsense);
-#endif
- return DEV_UNKNOWN;
- }
- }
- else {
- if (reqsense & 0x7f) {
-#ifdef DEBUG_DETECT
- printk("target %d lun %d: sense %d\n",
- aip->target, aip->lun, reqsense);
-#endif
- return DEV_UNKNOWN;
- }
- }
- }
- else
- if (reqsense == 0x4) { /* SH204 Bug workaround */
-#ifdef DEBUG_DETECT
- printk("target %d lun %d status=0 sense=4\n",
- aip->target, aip->lun);
-#endif
- return DEV_UNKNOWN;
- }
-
- /***********************************************************/
- /* Do an INQUIRY command to get more infos on this device. */
- /***********************************************************/
-
- /* Assume default values */
- aip->removable = 1;
- aip->read_only = 0;
- aip->old_atari_disk = 0;
- aip->changed = (extsense == NOT_READY); /* medium inserted? */
- aip->size = DEFAULT_SIZE;
- got_inquiry = 0;
- /* Fake inquiry result for old atari disks */
- memcpy(acsi_buffer, "\000\000\001\000 Adaptec 40xx"
- " ", 40);
- CMDSET_TARG_LUN(inquiry_cmd, aip->target, aip->lun);
- if (acsicmd_dma(inquiry_cmd, acsi_buffer, 1, 0, 0) &&
- acsi_getstatus() == 0) {
- acsicmd_nodma(inquiry_cmd, 0);
- acsi_getstatus();
- dma_cache_maintenance( phys_acsi_buffer, 256, 0 );
- got_inquiry = 1;
- aip->removable = !!(acsi_buffer[1] & 0x80);
- }
- if (aip->type == NONE) /* only at boot time */
- print_inquiry(acsi_buffer);
- switch(acsi_buffer[0]) {
- case TYPE_DISK:
- aip->type = HARDDISK;
- break;
- case TYPE_ROM:
- aip->type = CDROM;
- aip->read_only = 1;
- break;
- default:
- return DEV_UNKNOWN;
- }
- /****************************/
- /* Do a MODE SENSE command. */
- /****************************/
-
- if (!acsi_mode_sense(aip->target, aip->lun, &sense)) {
- printk( KERN_WARNING "No mode sense data.\n" );
- return DEV_UNKNOWN;
- }
- if ((SECTOR_SIZE(sense) != 512) &&
- ((aip->type != CDROM) ||
- !acsi_change_blk_size(aip->target, aip->lun) ||
- !acsi_mode_sense(aip->target, aip->lun, &sense) ||
- (SECTOR_SIZE(sense) != 512))) {
- printk( KERN_WARNING "Sector size != 512 not supported.\n" );
- return DEV_UNKNOWN;
- }
- /* There are disks out there that claim to have 0 sectors... */
- if (CAPACITY(sense))
- aip->size = CAPACITY(sense); /* else keep DEFAULT_SIZE */
- if (!got_inquiry && SENSE_TYPE(sense) == SENSE_TYPE_ATARI) {
- /* If INQUIRY failed and the sense data suggest an old
- * Atari disk (SH20x, Megafile), the disk is not removable
- */
- aip->removable = 0;
- aip->old_atari_disk = 1;
- }
-
- /******************/
- /* We've done it. */
- /******************/
-
- return DEV_SUPPORTED;
-}
-
-EXPORT_SYMBOL(acsi_delay_start);
-EXPORT_SYMBOL(acsi_delay_end);
-EXPORT_SYMBOL(acsi_wait_for_IRQ);
-EXPORT_SYMBOL(acsi_wait_for_noIRQ);
-EXPORT_SYMBOL(acsicmd_nodma);
-EXPORT_SYMBOL(acsi_getstatus);
-EXPORT_SYMBOL(acsi_buffer);
-EXPORT_SYMBOL(phys_acsi_buffer);
-
-#ifdef CONFIG_ATARI_SLM_MODULE
-void acsi_attach_SLMs( int (*attach_func)( int, int ) );
-
-EXPORT_SYMBOL(acsi_extstatus);
-EXPORT_SYMBOL(acsi_end_extstatus);
-EXPORT_SYMBOL(acsi_extcmd);
-EXPORT_SYMBOL(acsi_attach_SLMs);
-
-/* to remember IDs of SLM devices, SLM module is loaded later
- * (index is target#, contents is lun#, -1 means "no SLM") */
-int SLM_devices[8];
-#endif
-
-static struct block_device_operations acsi_fops = {
- .owner = THIS_MODULE,
- .open = acsi_open,
- .release = acsi_release,
- .ioctl = acsi_ioctl,
- .getgeo = acsi_getgeo,
- .media_changed = acsi_media_change,
- .revalidate_disk= acsi_revalidate,
-};
-
-#ifdef CONFIG_ATARI_SLM_MODULE
-/* call attach_slm() for each device that is a printer; needed for init of SLM
- * driver as a module, since it's not yet present when acsi.c is initialized and
- * the bus gets scanned. */
-void acsi_attach_SLMs( int (*attach_func)( int, int ) )
-{
- int i, n = 0;
-
- for( i = 0; i < 8; ++i )
- if (SLM_devices[i] >= 0)
- n += (*attach_func)( i, SLM_devices[i] );
- printk( KERN_INFO "Found %d SLM printer(s) total.\n", n );
-}
-#endif /* CONFIG_ATARI_SLM_MODULE */
-
-
-int acsi_init( void )
-{
- int err = 0;
- int i, target, lun;
- struct acsi_info_struct *aip;
-#ifdef CONFIG_ATARI_SLM
- int n_slm = 0;
-#endif
- if (!MACH_IS_ATARI || !ATARIHW_PRESENT(ACSI))
- return 0;
- if (register_blkdev(ACSI_MAJOR, "ad")) {
- err = -EBUSY;
- goto out1;
- }
- if (!(acsi_buffer =
- (char *)atari_stram_alloc(ACSI_BUFFER_SIZE, "acsi"))) {
- err = -ENOMEM;
- printk( KERN_ERR "Unable to get ACSI ST-Ram buffer.\n" );
- goto out2;
- }
- phys_acsi_buffer = virt_to_phys( acsi_buffer );
- STramMask = ATARIHW_PRESENT(EXTD_DMA) ? 0x00000000 : 0xff000000;
-
- acsi_queue = blk_init_queue(do_acsi_request, &acsi_lock);
- if (!acsi_queue) {
- err = -ENOMEM;
- goto out2a;
- }
-#ifdef CONFIG_ATARI_SLM
- err = slm_init();
-#endif
- if (err)
- goto out3;
-
- printk( KERN_INFO "Probing ACSI devices:\n" );
- NDevices = 0;
-#ifdef CONFIG_ATARI_SLM_MODULE
- for( i = 0; i < 8; ++i )
- SLM_devices[i] = -1;
-#endif
- stdma_lock(NULL, NULL);
-
- for (target = 0; target < 8 && NDevices < MAX_DEV; ++target) {
- lun = 0;
- do {
- aip = &acsi_info[NDevices];
- aip->type = NONE;
- aip->target = target;
- aip->lun = lun;
- i = acsi_devinit(aip);
- switch (i) {
- case DEV_SUPPORTED:
- printk( KERN_INFO "Detected ");
- switch (aip->type) {
- case HARDDISK:
- printk("disk");
- break;
- case CDROM:
- printk("cdrom");
- break;
- default:
- }
- printk(" ad%c at id %d lun %d ",
- 'a' + NDevices, target, lun);
- if (aip->removable)
- printk("(removable) ");
- if (aip->read_only)
- printk("(read-only) ");
- if (aip->size == DEFAULT_SIZE)
- printk(" unknown size, using default ");
- printk("%ld MByte\n",
- (aip->size*512+1024*1024/2)/(1024*1024));
- NDevices++;
- break;
- case DEV_SLM:
-#ifdef CONFIG_ATARI_SLM
- n_slm += attach_slm( target, lun );
- break;
-#endif
-#ifdef CONFIG_ATARI_SLM_MODULE
- SLM_devices[target] = lun;
- break;
-#endif
- /* neither of the above: fall through to unknown device */
- case DEV_UNKNOWN:
- printk( KERN_INFO "Detected unsupported device at "
- "id %d lun %d\n", target, lun);
- break;
- }
- }
-#ifdef CONFIG_ACSI_MULTI_LUN
- while (i != DEV_NONE && ++lun < MAX_LUN);
-#else
- while (0);
-#endif
- }
-
- /* reenable interrupt */
- ENABLE_IRQ();
- stdma_release();
-
-#ifndef CONFIG_ATARI_SLM
- printk( KERN_INFO "Found %d ACSI device(s) total.\n", NDevices );
-#else
- printk( KERN_INFO "Found %d ACSI device(s) and %d SLM printer(s) total.\n",
- NDevices, n_slm );
-#endif
- err = -ENOMEM;
- for( i = 0; i < NDevices; ++i ) {
- acsi_gendisk[i] = alloc_disk(16);
- if (!acsi_gendisk[i])
- goto out4;
- }
-
- for( i = 0; i < NDevices; ++i ) {
- struct gendisk *disk = acsi_gendisk[i];
- sprintf(disk->disk_name, "ad%c", 'a'+i);
- aip = &acsi_info[NDevices];
- disk->major = ACSI_MAJOR;
- disk->first_minor = i << 4;
- if (acsi_info[i].type != HARDDISK)
- disk->minors = 1;
- disk->fops = &acsi_fops;
- disk->private_data = &acsi_info[i];
- set_capacity(disk, acsi_info[i].size);
- disk->queue = acsi_queue;
- add_disk(disk);
- }
- return 0;
-out4:
- while (i--)
- put_disk(acsi_gendisk[i]);
-out3:
- blk_cleanup_queue(acsi_queue);
-out2a:
- atari_stram_free( acsi_buffer );
-out2:
- unregister_blkdev( ACSI_MAJOR, "ad" );
-out1:
- return err;
-}
-
-
-#ifdef MODULE
-
-MODULE_LICENSE("GPL");
-
-int init_module(void)
-{
- int err;
-
- if ((err = acsi_init()))
- return( err );
- printk( KERN_INFO "ACSI driver loaded as module.\n");
- return( 0 );
-}
-
-void cleanup_module(void)
-{
- int i;
- del_timer( &acsi_timer );
- blk_cleanup_queue(acsi_queue);
- atari_stram_free( acsi_buffer );
-
- if (unregister_blkdev( ACSI_MAJOR, "ad" ) != 0)
- printk( KERN_ERR "acsi: cleanup_module failed\n");
-
- for (i = 0; i < NDevices; i++) {
- del_gendisk(acsi_gendisk[i]);
- put_disk(acsi_gendisk[i]);
- }
-}
-#endif
-
-/*
- * This routine is called to flush all partitions and partition tables
- * for a changed scsi disk, and then re-read the new partition table.
- * If we are revalidating a disk because of a media change, then we
- * enter with usage == 0. If we are using an ioctl, we automatically have
- * usage == 1 (we need an open channel to use an ioctl :-), so this
- * is our limit.
- *
- * Changes by Martin Rogge, 9th Aug 1995:
- * got cd-roms to work by calling acsi_devinit. There are only two problems:
- * First, if there is no medium inserted, the status will remain "changed".
- * That is no problem at all, but it conflicts with our design of three-valued
- * logic (medium changed, medium not changed, no medium inserted).
- * Secondly the check could fail completely and the drive could deliver
- * nonsensical data, which could mess up the acsi_info[] structure. In
- * that case we try to make the entry safe.
- *
- */
-
-static int acsi_revalidate(struct gendisk *disk)
-{
- struct acsi_info_struct *aip = disk->private_data;
- stdma_lock( NULL, NULL );
- if (acsi_devinit(aip) != DEV_SUPPORTED) {
- printk( KERN_ERR "ACSI: revalidate failed for target %d lun %d\n",
- aip->target, aip->lun);
- aip->size = 0;
- aip->read_only = 1;
- aip->removable = 1;
- aip->changed = 1; /* next acsi_open will try again... */
- }
-
- ENABLE_IRQ();
- stdma_release();
- set_capacity(disk, aip->size);
- return 0;
-}
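
The 1 GB limit mentioned in the removed Kconfig help text follows directly from the CMDSET_* macros deleted above: the block number is packed into bytes 1-3 of the 6-byte command, with only the low 5 bits of byte 1 available (its top 3 bits carry the LUN), leaving 21 bits of block address. The user-space C sketch below re-derives this; build_read_cmd is a hypothetical helper written for illustration, not a function from the driver.

#include <stdint.h>
#include <stdio.h>

/* Mirror of the deleted CMDSET_TARG_LUN/CMDSET_BLOCK/CMDSET_LEN macros:
 * byte 0 = opcode plus target in the top 3 bits, byte 1 = LUN in the top
 * 3 bits plus block bits 20..16, bytes 2-3 = block bits 15..0, byte 4 =
 * sector count, byte 5 = control. */
static void build_read_cmd(uint8_t cmd[6], unsigned targ, unsigned lun,
                           unsigned long blk, unsigned len)
{
	cmd[0] = 0x08 | (targ << 5);            /* READ(6) opcode + target */
	cmd[1] = (lun << 5) | ((blk >> 16) & 0x1f);
	cmd[2] = (blk >> 8) & 0xff;
	cmd[3] = blk & 0xff;
	cmd[4] = len;                           /* number of 512-byte sectors */
	cmd[5] = 0;
}

int main(void)
{
	uint8_t cmd[6];

	build_read_cmd(cmd, 2, 0, 0x1ffff, 16);
	printf("cmd: %02x %02x %02x %02x %02x %02x\n",
	       cmd[0], cmd[1], cmd[2], cmd[3], cmd[4], cmd[5]);

	/* 5 + 8 + 8 = 21 address bits; 2^21 sectors of 512 bytes = 1 GiB,
	 * the capacity ceiling the removed help text refers to. */
	printf("max capacity: %lu MiB\n",
	       ((1UL << 21) * 512UL) / (1024 * 1024));
	return 0;
}
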
diff --git a/drivers/block/acsi_slm.c b/drivers/block/acsi_slm.c
deleted file mode 100644
index 1d9d9b4f48c..00000000000
--- a/drivers/block/acsi_slm.c
+++ /dev/null
@@ -1,1032 +0,0 @@
-/*
- * acsi_slm.c -- Device driver for the Atari SLM laser printer
- *
- * Copyright 1995 Roman Hodek <Roman.Hodek@informatik.uni-erlangen.de>
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file COPYING in the main directory of this archive for
- * more details.
- *
- */
-
-/*
-
-Notes:
-
-The major number for SLM printers is 28 (like ACSI), but as a character
-device, not a block device. The minor number is the number of the printer (if
-you have more than one SLM; currently max. 2 (#define-constant) SLMs are
-supported). The device can be opened for reading and writing. If reading it,
-you get some status infos (MODE SENSE data). Writing mode is used for the data
-to be printed. Some ioctls allow to get the printer status and to tune printer
-modes and some internal variables.
-
-A special problem of the SLM driver is the timing and thus the buffering of
-the print data. The problem is that all the data for one page must be present
-in memory when printing starts, else --when swapping occurs-- the timing could
-not be guaranteed. There are several ways to assure this:
-
- 1) Reserve a buffer of 1196k (maximum page size) statically by
- atari_stram_alloc(). The data are collected there until they're complete,
- and then printing starts. Since the buffer is reserved, no further
- considerations about memory and swapping are needed. So this is the
- simplest method, but it needs a lot of memory for just the SLM.
-
- A striking advantage of this method is (provided the SLM_CONT_CNT_REPROG
- method works, see there) that there are no timing problems with the DMA
- anymore.
-
- 2) The other method would be to reserve the buffer dynamically each time
- printing is required. I could think of looking at mem_map where the
- largest unallocted ST-RAM area is, taking the area, and then extending it
- largest unallocated ST-RAM area is, taking the area, and then extending it
- by swapping out the neighboring pages, until the needed size is reached.
- be pages that cannot be swapped out (reserved pages)...
-
- 3) Another possibility would be to leave the real data in user space and to
- work with two dribble buffers of about 32k in the driver: While the one
- buffer is DMAed to the SLM, the other can be filled with new data. But
- to keep the timing, that requires that the user data remain in memory and
- are not swapped out. Requires mm hacking, too, but maybe not so bad as
- are not swapped out. Requires mm hacking, too, but maybe not as bad as
-
-*/
-
-#include <linux/module.h>
-
-#include <linux/errno.h>
-#include <linux/sched.h>
-#include <linux/timer.h>
-#include <linux/fs.h>
-#include <linux/major.h>
-#include <linux/kernel.h>
-#include <linux/delay.h>
-#include <linux/interrupt.h>
-#include <linux/time.h>
-#include <linux/mm.h>
-#include <linux/slab.h>
-
-#include <asm/pgtable.h>
-#include <asm/system.h>
-#include <asm/uaccess.h>
-#include <asm/atarihw.h>
-#include <asm/atariints.h>
-#include <asm/atari_acsi.h>
-#include <asm/atari_stdma.h>
-#include <asm/atari_stram.h>
-#include <asm/atari_SLM.h>
-
-
-#undef DEBUG
-
-/* Define this if the page data are contiguous in physical memory. That
- * requires less reprogramming of the ST-DMA */
-#define SLM_CONTINUOUS_DMA
-
-/* Use continuous reprogramming of the ST-DMA counter register. This is
- * --strictly speaking-- not allowed, Atari recommends not to look at the
- * counter register while a DMA is going on. But I don't know if that applies
- * only for reading the register, or also writing to it. Writing only works
- * fine for me... The advantage is that the timing becomes absolutely
- * uncritical: just update the counter reg to its maximum every 200 ms or so,
- * and the DMA will work until the status byte interrupt occurs.
- */
-#define SLM_CONT_CNT_REPROG
-
-#define CMDSET_TARG_LUN(cmd,targ,lun) \
- do { \
- cmd[0] = (cmd[0] & ~0xe0) | (targ)<<5; \
- cmd[1] = (cmd[1] & ~0xe0) | (lun)<<5; \
- } while(0)
-
-#define START_TIMER(to) mod_timer(&slm_timer, jiffies + (to))
-#define STOP_TIMER() del_timer(&slm_timer)
-
-
-static char slmreqsense_cmd[6] = { 0x03, 0, 0, 0, 0, 0 };
-static char slmprint_cmd[6] = { 0x0a, 0, 0, 0, 0, 0 };
-static char slminquiry_cmd[6] = { 0x12, 0, 0, 0, 0, 0x80 };
-static char slmmsense_cmd[6] = { 0x1a, 0, 0, 0, 255, 0 };
-#if 0
-static char slmmselect_cmd[6] = { 0x15, 0, 0, 0, 0, 0 };
-#endif
-
-
-#define MAX_SLM 2
-
-static struct slm {
- unsigned target; /* target number */
- unsigned lun; /* LUN in target controller */
- atomic_t wr_ok; /* set to 0 if output part busy */
- atomic_t rd_ok; /* set to 0 if status part busy */
-} slm_info[MAX_SLM];
-
-int N_SLM_Printers = 0;
-
-/* printer buffer */
-static unsigned char *SLMBuffer; /* start of buffer */
-static unsigned char *BufferP; /* current position in buffer */
-static int BufferSize; /* length of buffer for page size */
-
-typedef enum { IDLE, FILLING, PRINTING } SLMSTATE;
-static SLMSTATE SLMState;
-static int SLMBufOwner; /* SLM# currently using the buffer */
-
-/* DMA variables */
-#ifndef SLM_CONT_CNT_REPROG
-static unsigned long SLMCurAddr; /* current base addr of DMA chunk */
-static unsigned long SLMEndAddr; /* expected end addr */
-static unsigned long SLMSliceSize; /* size of one DMA chunk */
-#endif
-static int SLMError;
-
-/* wait queues */
-static DECLARE_WAIT_QUEUE_HEAD(slm_wait); /* waiting for buffer */
-static DECLARE_WAIT_QUEUE_HEAD(print_wait); /* waiting for printing finished */
-
-/* status codes */
-#define SLMSTAT_OK 0x00
-#define SLMSTAT_ORNERY 0x02
-#define SLMSTAT_TONER 0x03
-#define SLMSTAT_WARMUP 0x04
-#define SLMSTAT_PAPER 0x05
-#define SLMSTAT_DRUM 0x06
-#define SLMSTAT_INJAM 0x07
-#define SLMSTAT_THRJAM 0x08
-#define SLMSTAT_OUTJAM 0x09
-#define SLMSTAT_COVER 0x0a
-#define SLMSTAT_FUSER 0x0b
-#define SLMSTAT_IMAGER 0x0c
-#define SLMSTAT_MOTOR 0x0d
-#define SLMSTAT_VIDEO 0x0e
-#define SLMSTAT_SYSTO 0x10
-#define SLMSTAT_OPCODE 0x12
-#define SLMSTAT_DEVNUM 0x15
-#define SLMSTAT_PARAM 0x1a
-#define SLMSTAT_ACSITO 0x1b /* driver defined */
-#define SLMSTAT_NOTALL 0x1c /* driver defined */
-
-static char *SLMErrors[] = {
- /* 0x00 */ "OK and ready",
- /* 0x01 */ NULL,
- /* 0x02 */ "ornery printer",
- /* 0x03 */ "toner empty",
- /* 0x04 */ "warming up",
- /* 0x05 */ "paper empty",
- /* 0x06 */ "drum empty",
- /* 0x07 */ "input jam",
- /* 0x08 */ "through jam",
- /* 0x09 */ "output jam",
- /* 0x0a */ "cover open",
- /* 0x0b */ "fuser malfunction",
- /* 0x0c */ "imager malfunction",
- /* 0x0d */ "motor malfunction",
- /* 0x0e */ "video malfunction",
- /* 0x0f */ NULL,
- /* 0x10 */ "printer system timeout",
- /* 0x11 */ NULL,
- /* 0x12 */ "invalid operation code",
- /* 0x13 */ NULL,
- /* 0x14 */ NULL,
- /* 0x15 */ "invalid device number",
- /* 0x16 */ NULL,
- /* 0x17 */ NULL,
- /* 0x18 */ NULL,
- /* 0x19 */ NULL,
- /* 0x1a */ "invalid parameter list",
- /* 0x1b */ "ACSI timeout",
- /* 0x1c */ "not all printed"
-};
-
-#define N_ERRORS (sizeof(SLMErrors)/sizeof(*SLMErrors))
-
-/* real (driver caused) error? */
-#define IS_REAL_ERROR(x) (x > 0x10)
-
-
-static struct {
- char *name;
- int w, h;
-} StdPageSize[] = {
- { "Letter", 2400, 3180 },
- { "Legal", 2400, 4080 },
- { "A4", 2336, 3386 },
- { "B5", 2016, 2914 }
-};
-
-#define N_STD_SIZES (sizeof(StdPageSize)/sizeof(*StdPageSize))
-
-#define SLM_BUFFER_SIZE (2336*3386/8) /* A4 for now */
-#define SLM_DMA_AMOUNT 255 /* #sectors to program the DMA for */
-
-#ifdef SLM_CONTINUOUS_DMA
-# define SLM_DMA_INT_OFFSET 0 /* DMA goes until seccnt 0, no offs */
-# define SLM_DMA_END_OFFSET 32 /* 32 Byte ST-DMA FIFO */
-# define SLM_SLICE_SIZE(w) (255*512)
-#else
-# define SLM_DMA_INT_OFFSET 32 /* 32 Byte ST-DMA FIFO */
-# define SLM_DMA_END_OFFSET 32 /* 32 Byte ST-DMA FIFO */
-# define SLM_SLICE_SIZE(w) ((254*512)/(w/8)*(w/8))
-#endif
-
-/* calculate the number of jiffies to wait for 'n' bytes */
-#ifdef SLM_CONT_CNT_REPROG
-#define DMA_TIME_FOR(n) 50
-#define DMA_STARTUP_TIME 0
-#else
-#define DMA_TIME_FOR(n) (n/1400-1)
-#define DMA_STARTUP_TIME 650
-#endif
-
-/***************************** Prototypes *****************************/
-
-static char *slm_errstr( int stat );
-static int slm_getstats( char *buffer, int device );
-static ssize_t slm_read( struct file* file, char *buf, size_t count, loff_t
- *ppos );
-static void start_print( int device );
-static irqreturn_t slm_interrupt(int irc, void *data);
-static void slm_test_ready( unsigned long dummy );
-static void set_dma_addr( unsigned long paddr );
-static unsigned long get_dma_addr( void );
-static ssize_t slm_write( struct file *file, const char *buf, size_t count,
- loff_t *ppos );
-static int slm_ioctl( struct inode *inode, struct file *file, unsigned int
- cmd, unsigned long arg );
-static int slm_open( struct inode *inode, struct file *file );
-static int slm_release( struct inode *inode, struct file *file );
-static int slm_req_sense( int device );
-static int slm_mode_sense( int device, char *buffer, int abs_flag );
-#if 0
-static int slm_mode_select( int device, char *buffer, int len, int
- default_flag );
-#endif
-static int slm_get_pagesize( int device, int *w, int *h );
-
-/************************* End of Prototypes **************************/
-
-
-static DEFINE_TIMER(slm_timer, slm_test_ready, 0, 0);
-
-static const struct file_operations slm_fops = {
- .owner = THIS_MODULE,
- .read = slm_read,
- .write = slm_write,
- .ioctl = slm_ioctl,
- .open = slm_open,
- .release = slm_release,
-};
-
-
-/* ---------------------------------------------------------------------- */
-/* Status Functions */
-
-
-static char *slm_errstr( int stat )
-
-{ char *p;
- static char str[22];
-
- stat &= 0x1f;
- if (stat >= 0 && stat < N_ERRORS && (p = SLMErrors[stat]))
- return( p );
- sprintf( str, "unknown status 0x%02x", stat );
- return( str );
-}
-
-
-static int slm_getstats( char *buffer, int device )
-
-{ int len = 0, stat, i, w, h;
- unsigned char buf[256];
-
- stat = slm_mode_sense( device, buf, 0 );
- if (IS_REAL_ERROR(stat))
- return( -EIO );
-
-#define SHORTDATA(i) ((buf[i] << 8) | buf[i+1])
-#define BOOLDATA(i,mask) ((buf[i] & mask) ? "on" : "off")
-
- w = SHORTDATA( 3 );
- h = SHORTDATA( 1 );
-
- len += sprintf( buffer+len, "Status\t\t%s\n",
- slm_errstr( stat ) );
- len += sprintf( buffer+len, "Page Size\t%dx%d",
- w, h );
-
- for( i = 0; i < N_STD_SIZES; ++i ) {
- if (w == StdPageSize[i].w && h == StdPageSize[i].h)
- break;
- }
- if (i < N_STD_SIZES)
- len += sprintf( buffer+len, " (%s)", StdPageSize[i].name );
- buffer[len++] = '\n';
-
- len += sprintf( buffer+len, "Top/Left Margin\t%d/%d\n",
- SHORTDATA( 5 ), SHORTDATA( 7 ) );
- len += sprintf( buffer+len, "Manual Feed\t%s\n",
- BOOLDATA( 9, 0x01 ) );
- len += sprintf( buffer+len, "Input Select\t%d\n",
- (buf[9] >> 1) & 7 );
- len += sprintf( buffer+len, "Auto Select\t%s\n",
- BOOLDATA( 9, 0x10 ) );
- len += sprintf( buffer+len, "Prefeed Paper\t%s\n",
- BOOLDATA( 9, 0x20 ) );
- len += sprintf( buffer+len, "Thick Pixels\t%s\n",
- BOOLDATA( 9, 0x40 ) );
- len += sprintf( buffer+len, "H/V Resol.\t%d/%d dpi\n",
- SHORTDATA( 12 ), SHORTDATA( 10 ) );
- len += sprintf( buffer+len, "System Timeout\t%d\n",
- buf[14] );
- len += sprintf( buffer+len, "Scan Time\t%d\n",
- SHORTDATA( 15 ) );
- len += sprintf( buffer+len, "Page Count\t%d\n",
- SHORTDATA( 17 ) );
- len += sprintf( buffer+len, "In/Out Cap.\t%d/%d\n",
- SHORTDATA( 19 ), SHORTDATA( 21 ) );
- len += sprintf( buffer+len, "Stagger Output\t%s\n",
- BOOLDATA( 23, 0x01 ) );
- len += sprintf( buffer+len, "Output Select\t%d\n",
- (buf[23] >> 1) & 7 );
- len += sprintf( buffer+len, "Duplex Print\t%s\n",
- BOOLDATA( 23, 0x10 ) );
- len += sprintf( buffer+len, "Color Sep.\t%s\n",
- BOOLDATA( 23, 0x20 ) );
-
- return( len );
-}
-
-
-static ssize_t slm_read( struct file *file, char *buf, size_t count,
- loff_t *ppos )
-
-{
- struct inode *node = file->f_path.dentry->d_inode;
- unsigned long page;
- int length;
- int end;
-
- if (!(page = __get_free_page( GFP_KERNEL )))
- return( -ENOMEM );
-
- length = slm_getstats( (char *)page, iminor(node) );
- if (length < 0) {
- count = length;
- goto out;
- }
- if (file->f_pos >= length) {
- count = 0;
- goto out;
- }
- if (count + file->f_pos > length)
- count = length - file->f_pos;
- end = count + file->f_pos;
- if (copy_to_user(buf, (char *)page + file->f_pos, count)) {
- count = -EFAULT;
- goto out;
- }
- file->f_pos = end;
-out: free_page( page );
- return( count );
-}
-
-
-/* ---------------------------------------------------------------------- */
-/* Printing */
-
-
-static void start_print( int device )
-
-{ struct slm *sip = &slm_info[device];
- unsigned char *cmd;
- unsigned long paddr;
- int i;
-
- stdma_lock( slm_interrupt, NULL );
-
- CMDSET_TARG_LUN( slmprint_cmd, sip->target, sip->lun );
- cmd = slmprint_cmd;
- paddr = virt_to_phys( SLMBuffer );
- dma_cache_maintenance( paddr, virt_to_phys(BufferP)-paddr, 1 );
- DISABLE_IRQ();
-
- /* Low on A1 */
- dma_wd.dma_mode_status = 0x88;
- MFPDELAY();
-
- /* send the command bytes except the last */
- for( i = 0; i < 5; ++i ) {
- DMA_LONG_WRITE( *cmd++, 0x8a );
- udelay(20);
- if (!acsi_wait_for_IRQ( HZ/2 )) {
- SLMError = 1;
- return; /* timeout */
- }
- }
- /* last command byte */
- DMA_LONG_WRITE( *cmd++, 0x82 );
- MFPDELAY();
- /* set DMA address */
- set_dma_addr( paddr );
- /* program DMA for write and select sector counter reg */
- dma_wd.dma_mode_status = 0x192;
- MFPDELAY();
- /* program for 255*512 bytes and start DMA */
- DMA_LONG_WRITE( SLM_DMA_AMOUNT, 0x112 );
-
-#ifndef SLM_CONT_CNT_REPROG
- SLMCurAddr = paddr;
- SLMEndAddr = paddr + SLMSliceSize + SLM_DMA_INT_OFFSET;
-#endif
- START_TIMER( DMA_STARTUP_TIME + DMA_TIME_FOR( SLMSliceSize ));
-#if !defined(SLM_CONT_CNT_REPROG) && defined(DEBUG)
- printk( "SLM: CurAddr=%#lx EndAddr=%#lx timer=%ld\n",
- SLMCurAddr, SLMEndAddr, DMA_TIME_FOR( SLMSliceSize ) );
-#endif
-
- ENABLE_IRQ();
-}
-
-
-/* Only called when an error happened or at the end of a page */
-
-static irqreturn_t slm_interrupt(int irc, void *data)
-
-{ unsigned long addr;
- int stat;
-
- STOP_TIMER();
- addr = get_dma_addr();
- stat = acsi_getstatus();
- SLMError = (stat < 0) ? SLMSTAT_ACSITO :
- (addr < virt_to_phys(BufferP)) ? SLMSTAT_NOTALL :
- stat;
-
- dma_wd.dma_mode_status = 0x80;
- MFPDELAY();
-#ifdef DEBUG
- printk( "SLM: interrupt, addr=%#lx, error=%d\n", addr, SLMError );
-#endif
-
- wake_up( &print_wait );
- stdma_release();
- ENABLE_IRQ();
- return IRQ_HANDLED;
-}
-
-
-static void slm_test_ready( unsigned long dummy )
-
-{
-#ifdef SLM_CONT_CNT_REPROG
- /* program for 255*512 bytes again */
- dma_wd.fdc_acces_seccount = SLM_DMA_AMOUNT;
- START_TIMER( DMA_TIME_FOR(0) );
-#ifdef DEBUG
- printk( "SLM: reprogramming timer for %d jiffies, addr=%#lx\n",
- DMA_TIME_FOR(0), get_dma_addr() );
-#endif
-
-#else /* !SLM_CONT_CNT_REPROG */
-
- unsigned long flags, addr;
- int d, ti;
-#ifdef DEBUG
- struct timeval start_tm, end_tm;
- int did_wait = 0;
-#endif
-
- local_irq_save(flags);
-
- addr = get_dma_addr();
- if ((d = SLMEndAddr - addr) > 0) {
- local_irq_restore(flags);
-
- /* slice not yet finished, decide whether to start another timer or to
- * busy-wait */
- ti = DMA_TIME_FOR( d );
- if (ti > 0) {
-#ifdef DEBUG
- printk( "SLM: reprogramming timer for %d jiffies, rest %d bytes\n",
- ti, d );
-#endif
- START_TIMER( ti );
- return;
- }
- /* wait for desired end address to be reached */
-#ifdef DEBUG
- do_gettimeofday( &start_tm );
- did_wait = 1;
-#endif
- local_irq_disable();
- while( get_dma_addr() < SLMEndAddr )
- barrier();
- }
-
- /* slice finished, start next one */
- SLMCurAddr += SLMSliceSize;
-
-#ifdef SLM_CONTINUOUS_DMA
- /* program for 255*512 bytes again */
- dma_wd.fdc_acces_seccount = SLM_DMA_AMOUNT;
-#else
- /* set DMA address;
- * add 2 bytes for the ones in the SLM controller FIFO! */
- set_dma_addr( SLMCurAddr + 2 );
- /* toggle DMA to write and select sector counter reg */
- dma_wd.dma_mode_status = 0x92;
- MFPDELAY();
- dma_wd.dma_mode_status = 0x192;
- MFPDELAY();
- /* program for 255*512 bytes and start DMA */
- DMA_LONG_WRITE( SLM_DMA_AMOUNT, 0x112 );
-#endif
-
- local_irq_restore(flags);
-
-#ifdef DEBUG
- if (did_wait) {
- int ms;
- do_gettimeofday( &end_tm );
- ms = (end_tm.tv_sec*1000000+end_tm.tv_usec) -
- (start_tm.tv_sec*1000000+start_tm.tv_usec);
- printk( "SLM: did %ld.%ld ms busy waiting for %d bytes\n",
- ms/1000, ms%1000, d );
- }
- else
- printk( "SLM: didn't wait (!)\n" );
-#endif
-
- if ((unsigned char *)PTOV( SLMCurAddr + SLMSliceSize ) >= BufferP) {
- /* will be last slice, no timer necessary */
-#ifdef DEBUG
- printk( "SLM: CurAddr=%#lx EndAddr=%#lx last slice -> no timer\n",
- SLMCurAddr, SLMEndAddr );
-#endif
- }
- else {
- /* not last slice */
- SLMEndAddr = SLMCurAddr + SLMSliceSize + SLM_DMA_INT_OFFSET;
- START_TIMER( DMA_TIME_FOR( SLMSliceSize ));
-#ifdef DEBUG
- printk( "SLM: CurAddr=%#lx EndAddr=%#lx timer=%ld\n",
- SLMCurAddr, SLMEndAddr, DMA_TIME_FOR( SLMSliceSize ) );
-#endif
- }
-#endif /* SLM_CONT_CNT_REPROG */
-}
-
-
-static void set_dma_addr( unsigned long paddr )
-
-{ unsigned long flags;
-
- local_irq_save(flags);
- dma_wd.dma_lo = (unsigned char)paddr;
- paddr >>= 8;
- MFPDELAY();
- dma_wd.dma_md = (unsigned char)paddr;
- paddr >>= 8;
- MFPDELAY();
- if (ATARIHW_PRESENT( EXTD_DMA ))
- st_dma_ext_dmahi = (unsigned short)paddr;
- else
- dma_wd.dma_hi = (unsigned char)paddr;
- MFPDELAY();
- local_irq_restore(flags);
-}
-
-
-static unsigned long get_dma_addr( void )
-
-{ unsigned long addr;
-
- addr = dma_wd.dma_lo & 0xff;
- MFPDELAY();
- addr |= (dma_wd.dma_md & 0xff) << 8;
- MFPDELAY();
- addr |= (dma_wd.dma_hi & 0xff) << 16;
- MFPDELAY();
-
- return( addr );
-}
-
-
-static ssize_t slm_write( struct file *file, const char *buf, size_t count,
- loff_t *ppos )
-
-{
- struct inode *node = file->f_path.dentry->d_inode;
- int device = iminor(node);
- int n, filled, w, h;
-
- while( SLMState == PRINTING ||
- (SLMState == FILLING && SLMBufOwner != device) ) {
- interruptible_sleep_on( &slm_wait );
- if (signal_pending(current))
- return( -ERESTARTSYS );
- }
- if (SLMState == IDLE) {
- /* first data of page: get current page size */
- if (slm_get_pagesize( device, &w, &h ))
- return( -EIO );
- BufferSize = w*h/8;
- if (BufferSize > SLM_BUFFER_SIZE)
- return( -ENOMEM );
-
- SLMState = FILLING;
- SLMBufOwner = device;
- }
-
- n = count;
- filled = BufferP - SLMBuffer;
- if (filled + n > BufferSize)
- n = BufferSize - filled;
-
- if (copy_from_user(BufferP, buf, n))
- return -EFAULT;
- BufferP += n;
- filled += n;
-
- if (filled == BufferSize) {
-		/* Check the paper size again! The user may have switched it between
-		 * starting the data and finishing it; that would result in a garbled
-		 * page... */
- if (slm_get_pagesize( device, &w, &h ))
- return( -EIO );
- if (BufferSize != w*h/8) {
- printk( KERN_NOTICE "slm%d: page size changed while printing\n",
- device );
- return( -EAGAIN );
- }
-
- SLMState = PRINTING;
- /* choose a slice size that is a multiple of the line size */
-#ifndef SLM_CONT_CNT_REPROG
- SLMSliceSize = SLM_SLICE_SIZE(w);
-#endif
-
- start_print( device );
- sleep_on( &print_wait );
- if (SLMError && IS_REAL_ERROR(SLMError)) {
- printk( KERN_ERR "slm%d: %s\n", device, slm_errstr(SLMError) );
- n = -EIO;
- }
-
- SLMState = IDLE;
- BufferP = SLMBuffer;
- wake_up_interruptible( &slm_wait );
- }
-
- return( n );
-}
-
-
-/* ---------------------------------------------------------------------- */
-/* ioctl Functions */
-
-
-static int slm_ioctl( struct inode *inode, struct file *file,
- unsigned int cmd, unsigned long arg )
-
-{ int device = iminor(inode), err;
-
- /* I can think of setting:
- * - manual feed
- * - paper format
- * - copy count
- * - ...
- * but haven't implemented that yet :-)
-	 * BTW, does anybody have better docs on the MODE SENSE/MODE SELECT data?
- */
- switch( cmd ) {
-
- case SLMIORESET: /* reset buffer, i.e. empty the buffer */
- if (!(file->f_mode & 2))
- return( -EINVAL );
- if (SLMState == PRINTING)
- return( -EBUSY );
- SLMState = IDLE;
- BufferP = SLMBuffer;
- wake_up_interruptible( &slm_wait );
- return( 0 );
-
- case SLMIOGSTAT: { /* get status */
- int stat;
- char *str;
-
- stat = slm_req_sense( device );
- if (arg) {
- str = slm_errstr( stat );
- if (put_user(stat,
- (long *)&((struct SLM_status *)arg)->stat))
- return -EFAULT;
- if (copy_to_user( ((struct SLM_status *)arg)->str, str,
- strlen(str) + 1))
- return -EFAULT;
- }
- return( stat );
- }
-
- case SLMIOGPSIZE: { /* get paper size */
- int w, h;
-
- if ((err = slm_get_pagesize( device, &w, &h ))) return( err );
-
- if (put_user(w, (long *)&((struct SLM_paper_size *)arg)->width))
- return -EFAULT;
- if (put_user(h, (long *)&((struct SLM_paper_size *)arg)->height))
- return -EFAULT;
- return( 0 );
- }
-
- case SLMIOGMFEED: /* get manual feed */
- return( -EINVAL );
-
- case SLMIOSPSIZE: /* set paper size */
- return( -EINVAL );
-
- case SLMIOSMFEED: /* set manual feed */
- return( -EINVAL );
-
- }
- return( -EINVAL );
-}
-
-
-/* ---------------------------------------------------------------------- */
-/* Opening and Closing */
-
-
-static int slm_open( struct inode *inode, struct file *file )
-
-{ int device;
- struct slm *sip;
-
- device = iminor(inode);
- if (device >= N_SLM_Printers)
- return( -ENXIO );
- sip = &slm_info[device];
-
- if (file->f_mode & 2) {
- /* open for writing is exclusive */
- if ( !atomic_dec_and_test(&sip->wr_ok) ) {
- atomic_inc(&sip->wr_ok);
- return( -EBUSY );
- }
- }
- if (file->f_mode & 1) {
- /* open for reading is exclusive */
- if ( !atomic_dec_and_test(&sip->rd_ok) ) {
- atomic_inc(&sip->rd_ok);
- return( -EBUSY );
- }
- }
-
- return( 0 );
-}
-
-
-static int slm_release( struct inode *inode, struct file *file )
-
-{ int device;
- struct slm *sip;
-
- device = iminor(inode);
- sip = &slm_info[device];
-
- if (file->f_mode & 2)
- atomic_inc( &sip->wr_ok );
- if (file->f_mode & 1)
- atomic_inc( &sip->rd_ok );
-
- return( 0 );
-}
-
-
-/* ---------------------------------------------------------------------- */
-/* ACSI Primitives for the SLM */
-
-
-static int slm_req_sense( int device )
-
-{ int stat, rv;
- struct slm *sip = &slm_info[device];
-
- stdma_lock( NULL, NULL );
-
- CMDSET_TARG_LUN( slmreqsense_cmd, sip->target, sip->lun );
- if (!acsicmd_nodma( slmreqsense_cmd, 0 ) ||
- (stat = acsi_getstatus()) < 0)
- rv = SLMSTAT_ACSITO;
- else
- rv = stat & 0x1f;
-
- ENABLE_IRQ();
- stdma_release();
- return( rv );
-}
-
-
-static int slm_mode_sense( int device, char *buffer, int abs_flag )
-
-{ unsigned char stat, len;
- int rv = 0;
- struct slm *sip = &slm_info[device];
-
- stdma_lock( NULL, NULL );
-
- CMDSET_TARG_LUN( slmmsense_cmd, sip->target, sip->lun );
- slmmsense_cmd[5] = abs_flag ? 0x80 : 0;
- if (!acsicmd_nodma( slmmsense_cmd, 0 )) {
- rv = SLMSTAT_ACSITO;
- goto the_end;
- }
-
- if (!acsi_extstatus( &stat, 1 )) {
- acsi_end_extstatus();
- rv = SLMSTAT_ACSITO;
- goto the_end;
- }
-
- if (!acsi_extstatus( &len, 1 )) {
- acsi_end_extstatus();
- rv = SLMSTAT_ACSITO;
- goto the_end;
- }
- buffer[0] = len;
- if (!acsi_extstatus( buffer+1, len )) {
- acsi_end_extstatus();
- rv = SLMSTAT_ACSITO;
- goto the_end;
- }
-
- acsi_end_extstatus();
- rv = stat & 0x1f;
-
- the_end:
- ENABLE_IRQ();
- stdma_release();
- return( rv );
-}
-
-
-#if 0
-/* currently unused */
-static int slm_mode_select( int device, char *buffer, int len,
- int default_flag )
-
-{ int stat, rv;
- struct slm *sip = &slm_info[device];
-
- stdma_lock( NULL, NULL );
-
- CMDSET_TARG_LUN( slmmselect_cmd, sip->target, sip->lun );
- slmmselect_cmd[5] = default_flag ? 0x80 : 0;
- if (!acsicmd_nodma( slmmselect_cmd, 0 )) {
- rv = SLMSTAT_ACSITO;
- goto the_end;
- }
-
- if (!default_flag) {
- unsigned char c = len;
- if (!acsi_extcmd( &c, 1 )) {
- rv = SLMSTAT_ACSITO;
- goto the_end;
- }
- if (!acsi_extcmd( buffer, len )) {
- rv = SLMSTAT_ACSITO;
- goto the_end;
- }
- }
-
- stat = acsi_getstatus();
- rv = (stat < 0 ? SLMSTAT_ACSITO : stat);
-
- the_end:
- ENABLE_IRQ();
- stdma_release();
- return( rv );
-}
-#endif
-
-
-static int slm_get_pagesize( int device, int *w, int *h )
-
-{ char buf[256];
- int stat;
-
- stat = slm_mode_sense( device, buf, 0 );
- ENABLE_IRQ();
- stdma_release();
-
- if (stat != SLMSTAT_OK)
- return( -EIO );
-
- *w = (buf[3] << 8) | buf[4];
- *h = (buf[1] << 8) | buf[2];
- return( 0 );
-}
-
-
-/* ---------------------------------------------------------------------- */
-/* Initialization */
-
-
-int attach_slm( int target, int lun )
-
-{ static int did_register;
- int len;
-
- if (N_SLM_Printers >= MAX_SLM) {
-		printk( KERN_WARNING "Too many SLMs\n" );
- return( 0 );
- }
-
- /* do an INQUIRY */
- udelay(100);
- CMDSET_TARG_LUN( slminquiry_cmd, target, lun );
- if (!acsicmd_nodma( slminquiry_cmd, 0 )) {
- inq_timeout:
- printk( KERN_ERR "SLM inquiry command timed out.\n" );
- inq_fail:
- acsi_end_extstatus();
- return( 0 );
- }
- /* read status and header of return data */
- if (!acsi_extstatus( SLMBuffer, 6 ))
- goto inq_timeout;
-
- if (SLMBuffer[1] != 2) { /* device type == printer? */
- printk( KERN_ERR "SLM inquiry returned device type != printer\n" );
- goto inq_fail;
- }
- len = SLMBuffer[5];
-
- /* read id string */
- if (!acsi_extstatus( SLMBuffer, len ))
- goto inq_timeout;
- acsi_end_extstatus();
- SLMBuffer[len] = 0;
-
- if (!did_register) {
- did_register = 1;
- }
-
- slm_info[N_SLM_Printers].target = target;
- slm_info[N_SLM_Printers].lun = lun;
- atomic_set(&slm_info[N_SLM_Printers].wr_ok, 1 );
- atomic_set(&slm_info[N_SLM_Printers].rd_ok, 1 );
-
- printk( KERN_INFO " Printer: %s\n", SLMBuffer );
- printk( KERN_INFO "Detected slm%d at id %d lun %d\n",
- N_SLM_Printers, target, lun );
- N_SLM_Printers++;
- return( 1 );
-}
-
-int slm_init( void )
-
-{
- int i;
- if (register_chrdev( ACSI_MAJOR, "slm", &slm_fops )) {
- printk( KERN_ERR "Unable to get major %d for ACSI SLM\n", ACSI_MAJOR );
- return -EBUSY;
- }
-
- if (!(SLMBuffer = atari_stram_alloc( SLM_BUFFER_SIZE, "SLM" ))) {
- printk( KERN_ERR "Unable to get SLM ST-Ram buffer.\n" );
- unregister_chrdev( ACSI_MAJOR, "slm" );
- return -ENOMEM;
- }
- BufferP = SLMBuffer;
- SLMState = IDLE;
-
- return 0;
-}
-
-#ifdef MODULE
-
-/* from acsi.c */
-void acsi_attach_SLMs( int (*attach_func)( int, int ) );
-
-int init_module(void)
-{
- int err;
-
- if ((err = slm_init()))
- return( err );
- /* This calls attach_slm() for every target/lun where acsi.c detected a
- * printer */
- acsi_attach_SLMs( attach_slm );
- return( 0 );
-}
-
-void cleanup_module(void)
-{
- if (unregister_chrdev( ACSI_MAJOR, "slm" ) != 0)
- printk( KERN_ERR "acsi_slm: cleanup_module failed\n");
- atari_stram_free( SLMBuffer );
-}
-#endif
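
For scale, the page buffer that the removed SLM driver reserves statically is sized for A4 at the printer's fixed raster: SLM_BUFFER_SIZE = 2336 * 3386 / 8 = 988,712 bytes, i.e. roughly 966 KiB of contiguous ST-RAM per page. That figure is what makes the buffer-allocation strategies weighed in the driver's header comment above worth discussing.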
diff --git a/drivers/block/amiflop.c b/drivers/block/amiflop.c
index 27a139025ce..6ce8b897e26 100644
--- a/drivers/block/amiflop.c
+++ b/drivers/block/amiflop.c
@@ -1363,7 +1363,7 @@ static void redo_fd_request(void)
#ifdef DEBUG
printk("fd: sector %ld + %d requested for %s\n",
CURRENT->sector,cnt,
- (CURRENT->cmd==READ)?"read":"write");
+ (rq_data_dir(CURRENT) == READ) ? "read" : "write");
#endif
block = CURRENT->sector + cnt;
if ((int)block > floppy->blocks) {
diff --git a/drivers/block/aoe/aoeblk.c b/drivers/block/aoe/aoeblk.c
index 478489c568a..4f598270fa3 100644
--- a/drivers/block/aoe/aoeblk.c
+++ b/drivers/block/aoe/aoeblk.c
@@ -257,9 +257,9 @@ aoeblk_exit(void)
int __init
aoeblk_init(void)
{
- buf_pool_cache = kmem_cache_create("aoe_bufs",
+ buf_pool_cache = kmem_cache_create("aoe_bufs",
sizeof(struct buf),
- 0, 0, NULL, NULL);
+ 0, 0, NULL);
if (buf_pool_cache == NULL)
return -ENOMEM;
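
The aoeblk hunk above only tracks a slab API change: kmem_cache_create() dropped its destructor argument in this kernel series, leaving (name, size, align, flags, ctor). A minimal sketch of the updated cache lifecycle; the struct and cache name are hypothetical, purely for illustration:

#include <linux/slab.h>
#include <linux/list.h>

struct my_buf {				/* hypothetical object type */
	struct list_head list;
	void *data;
};

static struct kmem_cache *my_buf_cache;

static int my_cache_init(void)
{
	/* No destructor argument any more; the constructor may be NULL. */
	my_buf_cache = kmem_cache_create("my_bufs", sizeof(struct my_buf),
					 0, 0, NULL);
	if (!my_buf_cache)
		return -ENOMEM;
	return 0;
}

static void my_cache_exit(void)
{
	kmem_cache_destroy(my_buf_cache);
}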
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
index 5acc6c44aea..a2d6612b80d 100644
--- a/drivers/block/cciss.c
+++ b/drivers/block/cciss.c
@@ -87,6 +87,7 @@ static const struct pci_device_id cciss_pci_device_id[] = {
{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD, 0x103C, 0x3214},
{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD, 0x103C, 0x3215},
{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC, 0x103C, 0x3237},
+ {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC, 0x103C, 0x323D},
{PCI_VENDOR_ID_HP, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
PCI_CLASS_STORAGE_RAID << 8, 0xffff << 8, 0},
{0,}
@@ -119,6 +120,7 @@ static struct board_type products[] = {
{0x3214103C, "Smart Array E200i", &SA5_access, 120},
{0x3215103C, "Smart Array E200i", &SA5_access, 120},
{0x3237103C, "Smart Array E500", &SA5_access, 512},
+ {0x323D103C, "Smart Array P700m", &SA5_access, 512},
{0xFFFF103C, "Unknown Smart Array", &SA5_access, 120},
};
@@ -1168,7 +1170,7 @@ static int cciss_ioctl(struct inode *inode, struct file *filep,
case SG_EMULATED_HOST:
case SG_IO:
case SCSI_IOCTL_SEND_COMMAND:
- return scsi_cmd_ioctl(filep, disk, cmd, argp);
+ return scsi_cmd_ioctl(filep, disk->queue, disk, cmd, argp);
/* scsi_cmd_ioctl would normally handle these, below, but */
/* they aren't a good fit for cciss, as CD-ROMs are */
diff --git a/drivers/block/cciss_scsi.c b/drivers/block/cciss_scsi.c
index 90961a8ea89..4aca7ddfddd 100644
--- a/drivers/block/cciss_scsi.c
+++ b/drivers/block/cciss_scsi.c
@@ -555,7 +555,6 @@ complete_scsi_command( CommandList_struct *cp, int timeout, __u32 tag)
{
struct scsi_cmnd *cmd;
ctlr_info_t *ctlr;
- u64bit addr64;
ErrorInfo_struct *ei;
ei = cp->err_info;
@@ -569,20 +568,7 @@ complete_scsi_command( CommandList_struct *cp, int timeout, __u32 tag)
cmd = (struct scsi_cmnd *) cp->scsi_cmd;
ctlr = hba[cp->ctlr];
- /* undo the DMA mappings */
-
- if (cmd->use_sg) {
- pci_unmap_sg(ctlr->pdev,
- cmd->request_buffer, cmd->use_sg,
- cmd->sc_data_direction);
- }
- else if (cmd->request_bufflen) {
- addr64.val32.lower = cp->SG[0].Addr.lower;
- addr64.val32.upper = cp->SG[0].Addr.upper;
- pci_unmap_single(ctlr->pdev, (dma_addr_t) addr64.val,
- cmd->request_bufflen,
- cmd->sc_data_direction);
- }
+ scsi_dma_unmap(cmd);
cmd->result = (DID_OK << 16); /* host byte */
cmd->result |= (COMMAND_COMPLETE << 8); /* msg byte */
@@ -597,7 +583,7 @@ complete_scsi_command( CommandList_struct *cp, int timeout, __u32 tag)
ei->SenseLen > SCSI_SENSE_BUFFERSIZE ?
SCSI_SENSE_BUFFERSIZE :
ei->SenseLen);
- cmd->resid = ei->ResidualCnt;
+ scsi_set_resid(cmd, ei->ResidualCnt);
if(ei->CommandStatus != 0)
{ /* an error has occurred */
@@ -1204,46 +1190,29 @@ cciss_scatter_gather(struct pci_dev *pdev,
CommandList_struct *cp,
struct scsi_cmnd *cmd)
{
- unsigned int use_sg, nsegs=0, len;
- struct scatterlist *scatter = (struct scatterlist *) cmd->request_buffer;
+ unsigned int len;
+ struct scatterlist *sg;
__u64 addr64;
-
- /* is it just one virtual address? */
- if (!cmd->use_sg) {
- if (cmd->request_bufflen) { /* anything to xfer? */
-
- addr64 = (__u64) pci_map_single(pdev,
- cmd->request_buffer,
- cmd->request_bufflen,
- cmd->sc_data_direction);
-
- cp->SG[0].Addr.lower =
- (__u32) (addr64 & (__u64) 0x00000000FFFFFFFF);
- cp->SG[0].Addr.upper =
- (__u32) ((addr64 >> 32) & (__u64) 0x00000000FFFFFFFF);
- cp->SG[0].Len = cmd->request_bufflen;
- nsegs=1;
- }
- } /* else, must be a list of virtual addresses.... */
- else if (cmd->use_sg <= MAXSGENTRIES) { /* not too many addrs? */
-
- use_sg = pci_map_sg(pdev, cmd->request_buffer, cmd->use_sg,
- cmd->sc_data_direction);
-
- for (nsegs=0; nsegs < use_sg; nsegs++) {
- addr64 = (__u64) sg_dma_address(&scatter[nsegs]);
- len = sg_dma_len(&scatter[nsegs]);
- cp->SG[nsegs].Addr.lower =
- (__u32) (addr64 & (__u64) 0x00000000FFFFFFFF);
- cp->SG[nsegs].Addr.upper =
- (__u32) ((addr64 >> 32) & (__u64) 0x00000000FFFFFFFF);
- cp->SG[nsegs].Len = len;
- cp->SG[nsegs].Ext = 0; // we are not chaining
+ int use_sg, i;
+
+ BUG_ON(scsi_sg_count(cmd) > MAXSGENTRIES);
+
+ use_sg = scsi_dma_map(cmd);
+ if (use_sg) { /* not too many addrs? */
+ scsi_for_each_sg(cmd, sg, use_sg, i) {
+ addr64 = (__u64) sg_dma_address(sg);
+ len = sg_dma_len(sg);
+ cp->SG[i].Addr.lower =
+ (__u32) (addr64 & (__u64) 0x00000000FFFFFFFF);
+ cp->SG[i].Addr.upper =
+ (__u32) ((addr64 >> 32) & (__u64) 0x00000000FFFFFFFF);
+ cp->SG[i].Len = len;
+ cp->SG[i].Ext = 0; // we are not chaining
}
- } else BUG();
+ }
- cp->Header.SGList = (__u8) nsegs; /* no. SGs contig in this cmd */
- cp->Header.SGTotal = (__u16) nsegs; /* total sgs in this cmd list */
+ cp->Header.SGList = (__u8) use_sg; /* no. SGs contig in this cmd */
+ cp->Header.SGTotal = (__u16) use_sg; /* total sgs in this cmd list */
return;
}
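
The cciss_scsi rework above is an instance of the generic scsi_dma_map()/scsi_dma_unmap() conversion that replaced open-coded pci_map_sg()/pci_map_single() handling in drivers of this era. A minimal sketch of the queuecommand-side pattern; struct hw_sg and fill_hw_sg() are hypothetical stand-ins for a controller-specific SG entry (cciss uses cp->SG[i] with split lower/upper address words):

#include <linux/types.h>
#include <scsi/scsi_cmnd.h>

struct hw_sg {				/* hypothetical controller SG entry */
	u32 addr_lo;
	u32 addr_hi;
	u32 len;
};

static int fill_hw_sg(struct scsi_cmnd *cmd, struct hw_sg *table)
{
	struct scatterlist *sg;
	int nseg, i;

	nseg = scsi_dma_map(cmd);	/* 0 = no data, < 0 = mapping failure */
	if (nseg <= 0)
		return nseg;

	scsi_for_each_sg(cmd, sg, nseg, i) {
		u64 addr = sg_dma_address(sg);

		table[i].addr_lo = (u32)(addr & 0xffffffff);
		table[i].addr_hi = (u32)(addr >> 32);
		table[i].len = sg_dma_len(sg);
	}
	return nseg;	/* the completion path undoes this with scsi_dma_unmap(cmd) */
}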
diff --git a/drivers/block/lguest_blk.c b/drivers/block/lguest_blk.c
new file mode 100644
index 00000000000..1634c2dd25e
--- /dev/null
+++ b/drivers/block/lguest_blk.c
@@ -0,0 +1,275 @@
+/* A simple block driver for lguest.
+ *
+ * Copyright 2006 Rusty Russell <rusty@rustcorp.com.au> IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+//#define DEBUG
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/blkdev.h>
+#include <linux/interrupt.h>
+#include <linux/lguest_bus.h>
+
+static char next_block_index = 'a';
+
+struct blockdev
+{
+ spinlock_t lock;
+
+ /* The disk structure for the kernel. */
+ struct gendisk *disk;
+
+ /* The major number for this disk. */
+ int major;
+ int irq;
+
+ unsigned long phys_addr;
+ /* The mapped block page. */
+ struct lguest_block_page *lb_page;
+
+ /* We only have a single request outstanding at a time. */
+ struct lguest_dma dma;
+ struct request *req;
+};
+
+/* Jens gave me this nice helper to end all chunks of a request. */
+static void end_entire_request(struct request *req, int uptodate)
+{
+ if (end_that_request_first(req, uptodate, req->hard_nr_sectors))
+ BUG();
+ add_disk_randomness(req->rq_disk);
+ blkdev_dequeue_request(req);
+ end_that_request_last(req, uptodate);
+}
+
+static irqreturn_t lgb_irq(int irq, void *_bd)
+{
+ struct blockdev *bd = _bd;
+ unsigned long flags;
+
+ if (!bd->req) {
+ pr_debug("No work!\n");
+ return IRQ_NONE;
+ }
+
+ if (!bd->lb_page->result) {
+ pr_debug("No result!\n");
+ return IRQ_NONE;
+ }
+
+ spin_lock_irqsave(&bd->lock, flags);
+ end_entire_request(bd->req, bd->lb_page->result == 1);
+ bd->req = NULL;
+ bd->dma.used_len = 0;
+ blk_start_queue(bd->disk->queue);
+ spin_unlock_irqrestore(&bd->lock, flags);
+ return IRQ_HANDLED;
+}
+
+static unsigned int req_to_dma(struct request *req, struct lguest_dma *dma)
+{
+ unsigned int i = 0, idx, len = 0;
+ struct bio *bio;
+
+ rq_for_each_bio(bio, req) {
+ struct bio_vec *bvec;
+ bio_for_each_segment(bvec, bio, idx) {
+ BUG_ON(i == LGUEST_MAX_DMA_SECTIONS);
+ BUG_ON(!bvec->bv_len);
+ dma->addr[i] = page_to_phys(bvec->bv_page)
+ + bvec->bv_offset;
+ dma->len[i] = bvec->bv_len;
+ len += bvec->bv_len;
+ i++;
+ }
+ }
+ if (i < LGUEST_MAX_DMA_SECTIONS)
+ dma->len[i] = 0;
+ return len;
+}
+
+static void empty_dma(struct lguest_dma *dma)
+{
+ dma->len[0] = 0;
+}
+
+static void setup_req(struct blockdev *bd,
+ int type, struct request *req, struct lguest_dma *dma)
+{
+ bd->lb_page->type = type;
+ bd->lb_page->sector = req->sector;
+ bd->lb_page->result = 0;
+ bd->req = req;
+ bd->lb_page->bytes = req_to_dma(req, dma);
+}
+
+static void do_write(struct blockdev *bd, struct request *req)
+{
+ struct lguest_dma send;
+
+ pr_debug("lgb: WRITE sector %li\n", (long)req->sector);
+ setup_req(bd, 1, req, &send);
+
+ lguest_send_dma(bd->phys_addr, &send);
+}
+
+static void do_read(struct blockdev *bd, struct request *req)
+{
+ struct lguest_dma ping;
+
+ pr_debug("lgb: READ sector %li\n", (long)req->sector);
+ setup_req(bd, 0, req, &bd->dma);
+
+ empty_dma(&ping);
+ lguest_send_dma(bd->phys_addr, &ping);
+}
+
+static void do_lgb_request(request_queue_t *q)
+{
+ struct blockdev *bd;
+ struct request *req;
+
+again:
+ req = elv_next_request(q);
+ if (!req)
+ return;
+
+ bd = req->rq_disk->private_data;
+ /* Sometimes we get repeated requests after blk_stop_queue. */
+ if (bd->req)
+ return;
+
+ if (!blk_fs_request(req)) {
+ pr_debug("Got non-command 0x%08x\n", req->cmd_type);
+ req->errors++;
+ end_entire_request(req, 0);
+ goto again;
+ }
+
+ if (rq_data_dir(req) == WRITE)
+ do_write(bd, req);
+ else
+ do_read(bd, req);
+
+ /* Wait for interrupt to tell us it's done. */
+ blk_stop_queue(q);
+}
+
+static struct block_device_operations lguestblk_fops = {
+ .owner = THIS_MODULE,
+};
+
+static int lguestblk_probe(struct lguest_device *lgdev)
+{
+ struct blockdev *bd;
+ int err;
+ int irqflags = IRQF_SHARED;
+
+ bd = kmalloc(sizeof(*bd), GFP_KERNEL);
+ if (!bd)
+ return -ENOMEM;
+
+ spin_lock_init(&bd->lock);
+ bd->irq = lgdev_irq(lgdev);
+ bd->req = NULL;
+ bd->dma.used_len = 0;
+ bd->dma.len[0] = 0;
+ bd->phys_addr = (lguest_devices[lgdev->index].pfn << PAGE_SHIFT);
+
+ bd->lb_page = lguest_map(bd->phys_addr, 1);
+ if (!bd->lb_page) {
+ err = -ENOMEM;
+ goto out_free_bd;
+ }
+
+ bd->major = register_blkdev(0, "lguestblk");
+ if (bd->major < 0) {
+ err = bd->major;
+ goto out_unmap;
+ }
+
+ bd->disk = alloc_disk(1);
+ if (!bd->disk) {
+ err = -ENOMEM;
+ goto out_unregister_blkdev;
+ }
+
+ bd->disk->queue = blk_init_queue(do_lgb_request, &bd->lock);
+ if (!bd->disk->queue) {
+ err = -ENOMEM;
+ goto out_put_disk;
+ }
+
+ /* We can only handle a certain number of sg entries */
+ blk_queue_max_hw_segments(bd->disk->queue, LGUEST_MAX_DMA_SECTIONS);
+ /* Buffers must not cross page boundaries */
+ blk_queue_segment_boundary(bd->disk->queue, PAGE_SIZE-1);
+
+ sprintf(bd->disk->disk_name, "lgb%c", next_block_index++);
+ if (lguest_devices[lgdev->index].features & LGUEST_DEVICE_F_RANDOMNESS)
+ irqflags |= IRQF_SAMPLE_RANDOM;
+ err = request_irq(bd->irq, lgb_irq, irqflags, bd->disk->disk_name, bd);
+ if (err)
+ goto out_cleanup_queue;
+
+ err = lguest_bind_dma(bd->phys_addr, &bd->dma, 1, bd->irq);
+ if (err)
+ goto out_free_irq;
+
+ bd->disk->major = bd->major;
+ bd->disk->first_minor = 0;
+ bd->disk->private_data = bd;
+ bd->disk->fops = &lguestblk_fops;
+ /* This is initialized to the disk size by the other end. */
+ set_capacity(bd->disk, bd->lb_page->num_sectors);
+ add_disk(bd->disk);
+
+ printk(KERN_INFO "%s: device %i at major %d\n",
+ bd->disk->disk_name, lgdev->index, bd->major);
+
+ lgdev->private = bd;
+ return 0;
+
+out_free_irq:
+ free_irq(bd->irq, bd);
+out_cleanup_queue:
+ blk_cleanup_queue(bd->disk->queue);
+out_put_disk:
+ put_disk(bd->disk);
+out_unregister_blkdev:
+ unregister_blkdev(bd->major, "lguestblk");
+out_unmap:
+ lguest_unmap(bd->lb_page);
+out_free_bd:
+ kfree(bd);
+ return err;
+}
+
+static struct lguest_driver lguestblk_drv = {
+ .name = "lguestblk",
+ .owner = THIS_MODULE,
+ .device_type = LGUEST_DEVICE_T_BLOCK,
+ .probe = lguestblk_probe,
+};
+
+static __init int lguestblk_init(void)
+{
+ return register_lguest_driver(&lguestblk_drv);
+}
+module_init(lguestblk_init);
+
+MODULE_DESCRIPTION("Lguest block driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index 0ed5470d253..e425daa1eac 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -68,12 +68,14 @@
#include <linux/loop.h>
#include <linux/compat.h>
#include <linux/suspend.h>
+#include <linux/freezer.h>
#include <linux/writeback.h>
#include <linux/buffer_head.h> /* for invalidate_bdev() */
#include <linux/completion.h>
#include <linux/highmem.h>
#include <linux/gfp.h>
#include <linux/kthread.h>
+#include <linux/splice.h>
#include <asm/uaccess.h>
@@ -401,50 +403,73 @@ struct lo_read_data {
};
static int
-lo_read_actor(read_descriptor_t *desc, struct page *page,
- unsigned long offset, unsigned long size)
+lo_splice_actor(struct pipe_inode_info *pipe, struct pipe_buffer *buf,
+ struct splice_desc *sd)
{
- unsigned long count = desc->count;
- struct lo_read_data *p = desc->arg.data;
+ struct lo_read_data *p = sd->u.data;
struct loop_device *lo = p->lo;
+ struct page *page = buf->page;
sector_t IV;
+ size_t size;
+ int ret;
- IV = ((sector_t) page->index << (PAGE_CACHE_SHIFT - 9))+(offset >> 9);
+ ret = buf->ops->confirm(pipe, buf);
+ if (unlikely(ret))
+ return ret;
- if (size > count)
- size = count;
+ IV = ((sector_t) page->index << (PAGE_CACHE_SHIFT - 9)) +
+ (buf->offset >> 9);
+ size = sd->len;
+ if (size > p->bsize)
+ size = p->bsize;
- if (lo_do_transfer(lo, READ, page, offset, p->page, p->offset, size, IV)) {
- size = 0;
+ if (lo_do_transfer(lo, READ, page, buf->offset, p->page, p->offset, size, IV)) {
printk(KERN_ERR "loop: transfer error block %ld\n",
page->index);
- desc->error = -EINVAL;
+ size = -EINVAL;
}
flush_dcache_page(p->page);
- desc->count = count - size;
- desc->written += size;
- p->offset += size;
+ if (size > 0)
+ p->offset += size;
+
return size;
}
static int
+lo_direct_splice_actor(struct pipe_inode_info *pipe, struct splice_desc *sd)
+{
+ return __splice_from_pipe(pipe, sd, lo_splice_actor);
+}
+
+static int
do_lo_receive(struct loop_device *lo,
struct bio_vec *bvec, int bsize, loff_t pos)
{
struct lo_read_data cookie;
+ struct splice_desc sd;
struct file *file;
- int retval;
+ long retval;
cookie.lo = lo;
cookie.page = bvec->bv_page;
cookie.offset = bvec->bv_offset;
cookie.bsize = bsize;
+
+ sd.len = 0;
+ sd.total_len = bvec->bv_len;
+ sd.flags = 0;
+ sd.pos = pos;
+ sd.u.data = &cookie;
+
file = lo->lo_backing_file;
- retval = file->f_op->sendfile(file, &pos, bvec->bv_len,
- lo_read_actor, &cookie);
- return (retval < 0)? retval: 0;
+ retval = splice_direct_to_actor(file, &sd, lo_direct_splice_actor);
+
+ if (retval < 0)
+ return retval;
+
+ return 0;
}
static int
@@ -576,13 +601,6 @@ static int loop_thread(void *data)
struct loop_device *lo = data;
struct bio *bio;
- /*
- * loop can be used in an encrypted device,
- * hence, it mustn't be stopped at all
- * because it could be indirectly used during suspension
- */
- current->flags |= PF_NOFREEZE;
-
set_user_nice(current, -20);
while (!kthread_should_stop() || lo->lo_bio) {
@@ -679,8 +697,8 @@ static int loop_change_fd(struct loop_device *lo, struct file *lo_file,
if (!S_ISREG(inode->i_mode) && !S_ISBLK(inode->i_mode))
goto out_putf;
- /* new backing store needs to support loop (eg sendfile) */
- if (!inode->i_fop->sendfile)
+ /* new backing store needs to support loop (eg splice_read) */
+ if (!inode->i_fop->splice_read)
goto out_putf;
/* size of the new backing store needs to be the same */
@@ -760,7 +778,7 @@ static int loop_set_fd(struct loop_device *lo, struct file *lo_file,
* If we can't read - sorry. If we only can't write - well,
* it's going to be read-only.
*/
- if (!file->f_op->sendfile)
+ if (!file->f_op->splice_read)
goto out_putf;
if (aops->prepare_write && aops->commit_write)
lo_flags |= LO_FLAGS_USE_AOPS;
@@ -1550,8 +1568,7 @@ static void __exit loop_exit(void)
loop_del_one(lo);
blk_unregister_region(MKDEV(LOOP_MAJOR, 0), range);
- if (unregister_blkdev(LOOP_MAJOR, "loop"))
- printk(KERN_WARNING "loop: cannot unregister blkdev\n");
+ unregister_blkdev(LOOP_MAJOR, "loop");
}
module_init(loop_init);
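
For reference, loop's sendfile-to-splice conversion follows the standard splice_direct_to_actor() shape: a per-buffer actor consumes pipe pages, and a thin "direct" actor feeds the pipe through __splice_from_pipe(). A stripped-down sketch of that shape; consume_page() is a hypothetical stand-in for the driver's per-page work:

#include <linux/fs.h>
#include <linux/splice.h>
#include <linux/pipe_fs_i.h>

/* Hypothetical per-page worker: returns bytes consumed or a negative error. */
static int consume_page(void *cookie, struct page *page,
			unsigned int offset, unsigned int len);

static int my_splice_actor(struct pipe_inode_info *pipe,
			   struct pipe_buffer *buf, struct splice_desc *sd)
{
	int ret;

	/* Make sure the page behind this pipe buffer is up to date. */
	ret = buf->ops->confirm(pipe, buf);
	if (unlikely(ret))
		return ret;

	return consume_page(sd->u.data, buf->page, buf->offset, sd->len);
}

static int my_direct_splice_actor(struct pipe_inode_info *pipe,
				  struct splice_desc *sd)
{
	return __splice_from_pipe(pipe, sd, my_splice_actor);
}

static long read_via_splice(struct file *file, void *cookie,
			    size_t len, loff_t pos)
{
	struct splice_desc sd = {
		.total_len	= len,
		.flags		= 0,
		.pos		= pos,
		.u.data		= cookie,
	};

	return splice_direct_to_actor(file, &sd, my_direct_splice_actor);
}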
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 069ae39a9cd..c1295102409 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -122,17 +122,12 @@ static int sock_xmit(struct socket *sock, int send, void *buf, int size,
int result;
struct msghdr msg;
struct kvec iov;
- unsigned long flags;
- sigset_t oldset;
+ sigset_t blocked, oldset;
/* Allow interception of SIGKILL only
* Don't allow other signals to interrupt the transmission */
- spin_lock_irqsave(&current->sighand->siglock, flags);
- oldset = current->blocked;
- sigfillset(&current->blocked);
- sigdelsetmask(&current->blocked, sigmask(SIGKILL));
- recalc_sigpending();
- spin_unlock_irqrestore(&current->sighand->siglock, flags);
+ siginitsetinv(&blocked, sigmask(SIGKILL));
+ sigprocmask(SIG_SETMASK, &blocked, &oldset);
do {
sock->sk->sk_allocation = GFP_NOIO;
@@ -151,11 +146,9 @@ static int sock_xmit(struct socket *sock, int send, void *buf, int size,
if (signal_pending(current)) {
siginfo_t info;
- spin_lock_irqsave(&current->sighand->siglock, flags);
printk(KERN_WARNING "nbd (pid %d: %s) got signal %d\n",
- current->pid, current->comm,
- dequeue_signal(current, &current->blocked, &info));
- spin_unlock_irqrestore(&current->sighand->siglock, flags);
+ current->pid, current->comm,
+ dequeue_signal_lock(current, &current->blocked, &info));
result = -EINTR;
break;
}
@@ -169,10 +162,7 @@ static int sock_xmit(struct socket *sock, int send, void *buf, int size,
buf += result;
} while (size > 0);
- spin_lock_irqsave(&current->sighand->siglock, flags);
- current->blocked = oldset;
- recalc_sigpending();
- spin_unlock_irqrestore(&current->sighand->siglock, flags);
+ sigprocmask(SIG_SETMASK, &oldset, NULL);
return result;
}
@@ -416,7 +406,7 @@ static void nbd_clear_que(struct nbd_device *lo)
/*
* We always wait for result of write, for now. It would be nice to make it optional
* in future
- * if ((req->cmd == WRITE) && (lo->flags & NBD_WRITE_NOCHK))
+ * if ((rq_data_dir(req) == WRITE) && (lo->flags & NBD_WRITE_NOCHK))
* { printk( "Warning: Ignoring result!\n"); nbd_end_request( req ); }
*/
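
The nbd change above swaps the open-coded siglock manipulation for the standard helper pair. A minimal sketch of blocking every signal except SIGKILL around a blocking operation and then restoring the caller's mask:

#include <linux/signal.h>

static int do_io_with_signals_blocked(void)
{
	sigset_t blocked, oldset;
	int result = 0;

	/* Allow only SIGKILL to interrupt us while the I/O is in flight. */
	siginitsetinv(&blocked, sigmask(SIGKILL));
	sigprocmask(SIG_SETMASK, &blocked, &oldset);

	/* ... perform the interruptible socket I/O here ... */

	/* Restore the caller's original signal mask. */
	sigprocmask(SIG_SETMASK, &oldset, NULL);
	return result;
}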
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index f1b9dd7d47d..31be33e4f11 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -146,8 +146,7 @@ static void pkt_kobj_release(struct kobject *kobj)
**********************************************************/
#define DEF_ATTR(_obj,_name,_mode) \
- static struct attribute _obj = { \
- .name = _name, .owner = THIS_MODULE, .mode = _mode }
+ static struct attribute _obj = { .name = _name, .mode = _mode }
/**********************************************************
/sys/class/pktcdvd/pktcdvd[0-7]/
@@ -1594,6 +1593,7 @@ static int kcdrwd(void *foobar)
long min_sleep_time, residue;
set_user_nice(current, -20);
+ set_freezable();
for (;;) {
DECLARE_WAITQUEUE(wait, current);
@@ -1653,9 +1653,6 @@ static int kcdrwd(void *foobar)
}
}
- if (signal_pending(current)) {
- flush_signals(current);
- }
if (kthread_should_stop())
break;
}
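
The kcdrwd change reflects the opt-in freezer model introduced around this release: kernel threads are no longer frozen by default, so a thread that should suspend with the system calls set_freezable() once and no longer needs to flush stray signals by hand. The usual pattern pairs that with a try_to_freeze() check in the work loop; a hedged sketch of the loop shape, with the work handling elided:

#include <linux/kthread.h>
#include <linux/freezer.h>

static int my_worker(void *data)
{
	set_freezable();		/* opt in to being frozen at suspend */

	while (!kthread_should_stop()) {
		try_to_freeze();	/* park here while the system suspends */

		/* ... wait for and process queued work ... */
	}
	return 0;
}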
diff --git a/drivers/block/ps3disk.c b/drivers/block/ps3disk.c
new file mode 100644
index 00000000000..170fb33dba9
--- /dev/null
+++ b/drivers/block/ps3disk.c
@@ -0,0 +1,630 @@
+/*
+ * PS3 Disk Storage Driver
+ *
+ * Copyright (C) 2007 Sony Computer Entertainment Inc.
+ * Copyright 2007 Sony Corp.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include <linux/ata.h>
+#include <linux/blkdev.h>
+
+#include <asm/lv1call.h>
+#include <asm/ps3stor.h>
+#include <asm/firmware.h>
+
+
+#define DEVICE_NAME "ps3disk"
+
+#define BOUNCE_SIZE (64*1024)
+
+#define PS3DISK_MAX_DISKS 16
+#define PS3DISK_MINORS 16
+
+
+#define PS3DISK_NAME "ps3d%c"
+
+
+struct ps3disk_private {
+ spinlock_t lock; /* Request queue spinlock */
+ struct request_queue *queue;
+ struct gendisk *gendisk;
+ unsigned int blocking_factor;
+ struct request *req;
+ u64 raw_capacity;
+ unsigned char model[ATA_ID_PROD_LEN+1];
+};
+
+
+#define LV1_STORAGE_SEND_ATA_COMMAND (2)
+#define LV1_STORAGE_ATA_HDDOUT (0x23)
+
+struct lv1_ata_cmnd_block {
+ u16 features;
+ u16 sector_count;
+ u16 LBA_low;
+ u16 LBA_mid;
+ u16 LBA_high;
+ u8 device;
+ u8 command;
+ u32 is_ext;
+ u32 proto;
+ u32 in_out;
+ u32 size;
+ u64 buffer;
+ u32 arglen;
+};
+
+enum lv1_ata_proto {
+ NON_DATA_PROTO = 0,
+ PIO_DATA_IN_PROTO = 1,
+ PIO_DATA_OUT_PROTO = 2,
+ DMA_PROTO = 3
+};
+
+enum lv1_ata_in_out {
+ DIR_WRITE = 0, /* memory -> device */
+ DIR_READ = 1 /* device -> memory */
+};
+
+static int ps3disk_major;
+
+
+static struct block_device_operations ps3disk_fops = {
+ .owner = THIS_MODULE,
+};
+
+
+static void ps3disk_scatter_gather(struct ps3_storage_device *dev,
+ struct request *req, int gather)
+{
+ unsigned int offset = 0;
+ struct bio *bio;
+ sector_t sector;
+ struct bio_vec *bvec;
+ unsigned int i = 0, j;
+ size_t size;
+ void *buf;
+
+ rq_for_each_bio(bio, req) {
+ sector = bio->bi_sector;
+ dev_dbg(&dev->sbd.core,
+ "%s:%u: bio %u: %u segs %u sectors from %lu\n",
+ __func__, __LINE__, i, bio_segments(bio),
+ bio_sectors(bio), sector);
+ bio_for_each_segment(bvec, bio, j) {
+ size = bvec->bv_len;
+ buf = __bio_kmap_atomic(bio, j, KM_IRQ0);
+ if (gather)
+ memcpy(dev->bounce_buf+offset, buf, size);
+ else
+ memcpy(buf, dev->bounce_buf+offset, size);
+ offset += size;
+ flush_kernel_dcache_page(bio_iovec_idx(bio, j)->bv_page);
+ __bio_kunmap_atomic(bio, KM_IRQ0);
+ }
+ i++;
+ }
+}
+
+static int ps3disk_submit_request_sg(struct ps3_storage_device *dev,
+ struct request *req)
+{
+ struct ps3disk_private *priv = dev->sbd.core.driver_data;
+ int write = rq_data_dir(req), res;
+ const char *op = write ? "write" : "read";
+ u64 start_sector, sectors;
+ unsigned int region_id = dev->regions[dev->region_idx].id;
+
+#ifdef DEBUG
+ unsigned int n = 0;
+ struct bio *bio;
+
+ rq_for_each_bio(bio, req)
+ n++;
+ dev_dbg(&dev->sbd.core,
+ "%s:%u: %s req has %u bios for %lu sectors %lu hard sectors\n",
+ __func__, __LINE__, op, n, req->nr_sectors,
+ req->hard_nr_sectors);
+#endif
+
+ start_sector = req->sector * priv->blocking_factor;
+ sectors = req->nr_sectors * priv->blocking_factor;
+ dev_dbg(&dev->sbd.core, "%s:%u: %s %lu sectors starting at %lu\n",
+ __func__, __LINE__, op, sectors, start_sector);
+
+ if (write) {
+ ps3disk_scatter_gather(dev, req, 1);
+
+ res = lv1_storage_write(dev->sbd.dev_id, region_id,
+ start_sector, sectors, 0,
+ dev->bounce_lpar, &dev->tag);
+ } else {
+ res = lv1_storage_read(dev->sbd.dev_id, region_id,
+ start_sector, sectors, 0,
+ dev->bounce_lpar, &dev->tag);
+ }
+ if (res) {
+ dev_err(&dev->sbd.core, "%s:%u: %s failed %d\n", __func__,
+ __LINE__, op, res);
+ end_request(req, 0);
+ return 0;
+ }
+
+ priv->req = req;
+ return 1;
+}
+
+static int ps3disk_submit_flush_request(struct ps3_storage_device *dev,
+ struct request *req)
+{
+ struct ps3disk_private *priv = dev->sbd.core.driver_data;
+ u64 res;
+
+ dev_dbg(&dev->sbd.core, "%s:%u: flush request\n", __func__, __LINE__);
+
+ res = lv1_storage_send_device_command(dev->sbd.dev_id,
+ LV1_STORAGE_ATA_HDDOUT, 0, 0, 0,
+ 0, &dev->tag);
+ if (res) {
+ dev_err(&dev->sbd.core, "%s:%u: sync cache failed 0x%lx\n",
+ __func__, __LINE__, res);
+ end_request(req, 0);
+ return 0;
+ }
+
+ priv->req = req;
+ return 1;
+}
+
+static void ps3disk_do_request(struct ps3_storage_device *dev,
+ request_queue_t *q)
+{
+ struct request *req;
+
+ dev_dbg(&dev->sbd.core, "%s:%u\n", __func__, __LINE__);
+
+ while ((req = elv_next_request(q))) {
+ if (blk_fs_request(req)) {
+ if (ps3disk_submit_request_sg(dev, req))
+ break;
+ } else if (req->cmd_type == REQ_TYPE_FLUSH) {
+ if (ps3disk_submit_flush_request(dev, req))
+ break;
+ } else {
+ blk_dump_rq_flags(req, DEVICE_NAME " bad request");
+ end_request(req, 0);
+ continue;
+ }
+ }
+}
+
+static void ps3disk_request(request_queue_t *q)
+{
+ struct ps3_storage_device *dev = q->queuedata;
+ struct ps3disk_private *priv = dev->sbd.core.driver_data;
+
+ if (priv->req) {
+ dev_dbg(&dev->sbd.core, "%s:%u busy\n", __func__, __LINE__);
+ return;
+ }
+
+ ps3disk_do_request(dev, q);
+}
+
+static irqreturn_t ps3disk_interrupt(int irq, void *data)
+{
+ struct ps3_storage_device *dev = data;
+ struct ps3disk_private *priv;
+ struct request *req;
+ int res, read, uptodate;
+ u64 tag, status;
+ unsigned long num_sectors;
+ const char *op;
+
+ res = lv1_storage_get_async_status(dev->sbd.dev_id, &tag, &status);
+
+ if (tag != dev->tag)
+ dev_err(&dev->sbd.core,
+ "%s:%u: tag mismatch, got %lx, expected %lx\n",
+ __func__, __LINE__, tag, dev->tag);
+
+ if (res) {
+ dev_err(&dev->sbd.core, "%s:%u: res=%d status=0x%lx\n",
+ __func__, __LINE__, res, status);
+ return IRQ_HANDLED;
+ }
+
+ priv = dev->sbd.core.driver_data;
+ req = priv->req;
+ if (!req) {
+ dev_dbg(&dev->sbd.core,
+ "%s:%u non-block layer request completed\n", __func__,
+ __LINE__);
+ dev->lv1_status = status;
+ complete(&dev->done);
+ return IRQ_HANDLED;
+ }
+
+ if (req->cmd_type == REQ_TYPE_FLUSH) {
+ read = 0;
+ num_sectors = req->hard_cur_sectors;
+ op = "flush";
+ } else {
+ read = !rq_data_dir(req);
+ num_sectors = req->nr_sectors;
+ op = read ? "read" : "write";
+ }
+ if (status) {
+ dev_dbg(&dev->sbd.core, "%s:%u: %s failed 0x%lx\n", __func__,
+ __LINE__, op, status);
+ uptodate = 0;
+ } else {
+ dev_dbg(&dev->sbd.core, "%s:%u: %s completed\n", __func__,
+ __LINE__, op);
+ uptodate = 1;
+ if (read)
+ ps3disk_scatter_gather(dev, req, 0);
+ }
+
+ spin_lock(&priv->lock);
+ if (!end_that_request_first(req, uptodate, num_sectors)) {
+ add_disk_randomness(req->rq_disk);
+ blkdev_dequeue_request(req);
+ end_that_request_last(req, uptodate);
+ }
+ priv->req = NULL;
+ ps3disk_do_request(dev, priv->queue);
+ spin_unlock(&priv->lock);
+
+ return IRQ_HANDLED;
+}
+
+static int ps3disk_sync_cache(struct ps3_storage_device *dev)
+{
+ u64 res;
+
+ dev_dbg(&dev->sbd.core, "%s:%u: sync cache\n", __func__, __LINE__);
+
+ res = ps3stor_send_command(dev, LV1_STORAGE_ATA_HDDOUT, 0, 0, 0, 0);
+ if (res) {
+ dev_err(&dev->sbd.core, "%s:%u: sync cache failed 0x%lx\n",
+ __func__, __LINE__, res);
+ return -EIO;
+ }
+ return 0;
+}
+
+
+/* ATA helpers copied from drivers/ata/libata-core.c */
+
+static void swap_buf_le16(u16 *buf, unsigned int buf_words)
+{
+#ifdef __BIG_ENDIAN
+ unsigned int i;
+
+ for (i = 0; i < buf_words; i++)
+ buf[i] = le16_to_cpu(buf[i]);
+#endif /* __BIG_ENDIAN */
+}
+
+static u64 ata_id_n_sectors(const u16 *id)
+{
+ if (ata_id_has_lba(id)) {
+ if (ata_id_has_lba48(id))
+ return ata_id_u64(id, 100);
+ else
+ return ata_id_u32(id, 60);
+ } else {
+ if (ata_id_current_chs_valid(id))
+ return ata_id_u32(id, 57);
+ else
+ return id[1] * id[3] * id[6];
+ }
+}
+
+static void ata_id_string(const u16 *id, unsigned char *s, unsigned int ofs,
+ unsigned int len)
+{
+ unsigned int c;
+
+ while (len > 0) {
+ c = id[ofs] >> 8;
+ *s = c;
+ s++;
+
+ c = id[ofs] & 0xff;
+ *s = c;
+ s++;
+
+ ofs++;
+ len -= 2;
+ }
+}
+
+static void ata_id_c_string(const u16 *id, unsigned char *s, unsigned int ofs,
+ unsigned int len)
+{
+ unsigned char *p;
+
+ WARN_ON(!(len & 1));
+
+ ata_id_string(id, s, ofs, len - 1);
+
+ p = s + strnlen(s, len - 1);
+ while (p > s && p[-1] == ' ')
+ p--;
+ *p = '\0';
+}
+
+static int ps3disk_identify(struct ps3_storage_device *dev)
+{
+ struct ps3disk_private *priv = dev->sbd.core.driver_data;
+ struct lv1_ata_cmnd_block ata_cmnd;
+ u16 *id = dev->bounce_buf;
+ u64 res;
+
+ dev_dbg(&dev->sbd.core, "%s:%u: identify disk\n", __func__, __LINE__);
+
+ memset(&ata_cmnd, 0, sizeof(struct lv1_ata_cmnd_block));
+ ata_cmnd.command = ATA_CMD_ID_ATA;
+ ata_cmnd.sector_count = 1;
+ ata_cmnd.size = ata_cmnd.arglen = ATA_ID_WORDS * 2;
+ ata_cmnd.buffer = dev->bounce_lpar;
+ ata_cmnd.proto = PIO_DATA_IN_PROTO;
+ ata_cmnd.in_out = DIR_READ;
+
+ res = ps3stor_send_command(dev, LV1_STORAGE_SEND_ATA_COMMAND,
+ ps3_mm_phys_to_lpar(__pa(&ata_cmnd)),
+ sizeof(ata_cmnd), ata_cmnd.buffer,
+ ata_cmnd.arglen);
+ if (res) {
+ dev_err(&dev->sbd.core, "%s:%u: identify disk failed 0x%lx\n",
+ __func__, __LINE__, res);
+ return -EIO;
+ }
+
+ swap_buf_le16(id, ATA_ID_WORDS);
+
+ /* All we're interested in are raw capacity and model name */
+ priv->raw_capacity = ata_id_n_sectors(id);
+ ata_id_c_string(id, priv->model, ATA_ID_PROD, sizeof(priv->model));
+ return 0;
+}
+
+static void ps3disk_prepare_flush(request_queue_t *q, struct request *req)
+{
+ struct ps3_storage_device *dev = q->queuedata;
+
+ dev_dbg(&dev->sbd.core, "%s:%u\n", __func__, __LINE__);
+
+ memset(req->cmd, 0, sizeof(req->cmd));
+ req->cmd_type = REQ_TYPE_FLUSH;
+}
+
+static int ps3disk_issue_flush(request_queue_t *q, struct gendisk *gendisk,
+ sector_t *sector)
+{
+ struct ps3_storage_device *dev = q->queuedata;
+ struct request *req;
+ int res;
+
+ dev_dbg(&dev->sbd.core, "%s:%u\n", __func__, __LINE__);
+
+ req = blk_get_request(q, WRITE, __GFP_WAIT);
+ ps3disk_prepare_flush(q, req);
+ res = blk_execute_rq(q, gendisk, req, 0);
+ if (res)
+ dev_err(&dev->sbd.core, "%s:%u: flush request failed %d\n",
+ __func__, __LINE__, res);
+ blk_put_request(req);
+ return res;
+}
+
+
+static unsigned long ps3disk_mask;
+
+static DEFINE_MUTEX(ps3disk_mask_mutex);
+
+static int __devinit ps3disk_probe(struct ps3_system_bus_device *_dev)
+{
+ struct ps3_storage_device *dev = to_ps3_storage_device(&_dev->core);
+ struct ps3disk_private *priv;
+ int error;
+ unsigned int devidx;
+ struct request_queue *queue;
+ struct gendisk *gendisk;
+
+ if (dev->blk_size < 512) {
+ dev_err(&dev->sbd.core,
+ "%s:%u: cannot handle block size %lu\n", __func__,
+ __LINE__, dev->blk_size);
+ return -EINVAL;
+ }
+
+ BUILD_BUG_ON(PS3DISK_MAX_DISKS > BITS_PER_LONG);
+ mutex_lock(&ps3disk_mask_mutex);
+ devidx = find_first_zero_bit(&ps3disk_mask, PS3DISK_MAX_DISKS);
+ if (devidx >= PS3DISK_MAX_DISKS) {
+ dev_err(&dev->sbd.core, "%s:%u: Too many disks\n", __func__,
+ __LINE__);
+ mutex_unlock(&ps3disk_mask_mutex);
+ return -ENOSPC;
+ }
+ __set_bit(devidx, &ps3disk_mask);
+ mutex_unlock(&ps3disk_mask_mutex);
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv) {
+ error = -ENOMEM;
+ goto fail;
+ }
+
+ dev->sbd.core.driver_data = priv;
+ spin_lock_init(&priv->lock);
+
+ dev->bounce_size = BOUNCE_SIZE;
+ dev->bounce_buf = kmalloc(BOUNCE_SIZE, GFP_DMA);
+ if (!dev->bounce_buf) {
+ error = -ENOMEM;
+ goto fail_free_priv;
+ }
+
+ error = ps3stor_setup(dev, ps3disk_interrupt);
+ if (error)
+ goto fail_free_bounce;
+
+ ps3disk_identify(dev);
+
+ queue = blk_init_queue(ps3disk_request, &priv->lock);
+ if (!queue) {
+ dev_err(&dev->sbd.core, "%s:%u: blk_init_queue failed\n",
+ __func__, __LINE__);
+ error = -ENOMEM;
+ goto fail_teardown;
+ }
+
+ priv->queue = queue;
+ queue->queuedata = dev;
+
+ blk_queue_bounce_limit(queue, BLK_BOUNCE_HIGH);
+
+ blk_queue_max_sectors(queue, dev->bounce_size >> 9);
+ blk_queue_segment_boundary(queue, -1UL);
+ blk_queue_dma_alignment(queue, dev->blk_size-1);
+ blk_queue_hardsect_size(queue, dev->blk_size);
+
+ blk_queue_issue_flush_fn(queue, ps3disk_issue_flush);
+ blk_queue_ordered(queue, QUEUE_ORDERED_DRAIN_FLUSH,
+ ps3disk_prepare_flush);
+
+ blk_queue_max_phys_segments(queue, -1);
+ blk_queue_max_hw_segments(queue, -1);
+ blk_queue_max_segment_size(queue, dev->bounce_size);
+
+ gendisk = alloc_disk(PS3DISK_MINORS);
+ if (!gendisk) {
+ dev_err(&dev->sbd.core, "%s:%u: alloc_disk failed\n", __func__,
+ __LINE__);
+ error = -ENOMEM;
+ goto fail_cleanup_queue;
+ }
+
+ priv->gendisk = gendisk;
+ gendisk->major = ps3disk_major;
+ gendisk->first_minor = devidx * PS3DISK_MINORS;
+ gendisk->fops = &ps3disk_fops;
+ gendisk->queue = queue;
+ gendisk->private_data = dev;
+ gendisk->driverfs_dev = &dev->sbd.core;
+ snprintf(gendisk->disk_name, sizeof(gendisk->disk_name), PS3DISK_NAME,
+ devidx+'a');
+ priv->blocking_factor = dev->blk_size >> 9;
+ set_capacity(gendisk,
+ dev->regions[dev->region_idx].size*priv->blocking_factor);
+
+ dev_info(&dev->sbd.core,
+ "%s is a %s (%lu MiB total, %lu MiB for OtherOS)\n",
+ gendisk->disk_name, priv->model, priv->raw_capacity >> 11,
+ get_capacity(gendisk) >> 11);
+
+ add_disk(gendisk);
+ return 0;
+
+fail_cleanup_queue:
+ blk_cleanup_queue(queue);
+fail_teardown:
+ ps3stor_teardown(dev);
+fail_free_bounce:
+ kfree(dev->bounce_buf);
+fail_free_priv:
+ kfree(priv);
+ dev->sbd.core.driver_data = NULL;
+fail:
+ mutex_lock(&ps3disk_mask_mutex);
+ __clear_bit(devidx, &ps3disk_mask);
+ mutex_unlock(&ps3disk_mask_mutex);
+ return error;
+}
+
+static int ps3disk_remove(struct ps3_system_bus_device *_dev)
+{
+ struct ps3_storage_device *dev = to_ps3_storage_device(&_dev->core);
+ struct ps3disk_private *priv = dev->sbd.core.driver_data;
+
+ mutex_lock(&ps3disk_mask_mutex);
+ __clear_bit(priv->gendisk->first_minor / PS3DISK_MINORS,
+ &ps3disk_mask);
+ mutex_unlock(&ps3disk_mask_mutex);
+ del_gendisk(priv->gendisk);
+ blk_cleanup_queue(priv->queue);
+ put_disk(priv->gendisk);
+ dev_notice(&dev->sbd.core, "Synchronizing disk cache\n");
+ ps3disk_sync_cache(dev);
+ ps3stor_teardown(dev);
+ kfree(dev->bounce_buf);
+ kfree(priv);
+ dev->sbd.core.driver_data = NULL;
+ return 0;
+}
+
+static struct ps3_system_bus_driver ps3disk = {
+ .match_id = PS3_MATCH_ID_STOR_DISK,
+ .core.name = DEVICE_NAME,
+ .core.owner = THIS_MODULE,
+ .probe = ps3disk_probe,
+ .remove = ps3disk_remove,
+ .shutdown = ps3disk_remove,
+};
+
+
+static int __init ps3disk_init(void)
+{
+ int error;
+
+ if (!firmware_has_feature(FW_FEATURE_PS3_LV1))
+ return -ENODEV;
+
+ error = register_blkdev(0, DEVICE_NAME);
+ if (error <= 0) {
+ printk(KERN_ERR "%s:%u: register_blkdev failed %d\n", __func__,
+ __LINE__, error);
+ return error;
+ }
+ ps3disk_major = error;
+
+ pr_info("%s:%u: registered block device major %d\n", __func__,
+ __LINE__, ps3disk_major);
+
+ error = ps3_system_bus_driver_register(&ps3disk);
+ if (error)
+ unregister_blkdev(ps3disk_major, DEVICE_NAME);
+
+ return error;
+}
+
+static void __exit ps3disk_exit(void)
+{
+ ps3_system_bus_driver_unregister(&ps3disk);
+ unregister_blkdev(ps3disk_major, DEVICE_NAME);
+}
+
+module_init(ps3disk_init);
+module_exit(ps3disk_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("PS3 Disk Storage Driver");
+MODULE_AUTHOR("Sony Corporation");
+MODULE_ALIAS(PS3_MODULE_ALIAS_STOR_DISK);
diff --git a/drivers/block/sunvdc.c b/drivers/block/sunvdc.c
new file mode 100644
index 00000000000..d50b8238115
--- /dev/null
+++ b/drivers/block/sunvdc.c
@@ -0,0 +1,887 @@
+/* sunvdc.c: Sun LDOM Virtual Disk Client.
+ *
+ * Copyright (C) 2007 David S. Miller <davem@davemloft.net>
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/blkdev.h>
+#include <linux/hdreg.h>
+#include <linux/genhd.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/completion.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/list.h>
+
+#include <asm/vio.h>
+#include <asm/ldc.h>
+
+#define DRV_MODULE_NAME "sunvdc"
+#define PFX DRV_MODULE_NAME ": "
+#define DRV_MODULE_VERSION "1.0"
+#define DRV_MODULE_RELDATE "June 25, 2007"
+
+static char version[] __devinitdata =
+ DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
+MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
+MODULE_DESCRIPTION("Sun LDOM virtual disk client driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_MODULE_VERSION);
+
+#define VDC_TX_RING_SIZE 256
+
+#define WAITING_FOR_LINK_UP 0x01
+#define WAITING_FOR_TX_SPACE 0x02
+#define WAITING_FOR_GEN_CMD 0x04
+#define WAITING_FOR_ANY -1
+
+struct vdc_req_entry {
+ struct request *req;
+};
+
+struct vdc_port {
+ struct vio_driver_state vio;
+
+ struct gendisk *disk;
+
+ struct vdc_completion *cmp;
+
+ u64 req_id;
+ u64 seq;
+ struct vdc_req_entry rq_arr[VDC_TX_RING_SIZE];
+
+ unsigned long ring_cookies;
+
+ u64 max_xfer_size;
+ u32 vdisk_block_size;
+
+ /* The server fills these in for us in the disk attribute
+ * ACK packet.
+ */
+ u64 operations;
+ u32 vdisk_size;
+ u8 vdisk_type;
+
+ char disk_name[32];
+
+ struct vio_disk_geom geom;
+ struct vio_disk_vtoc label;
+};
+
+static inline struct vdc_port *to_vdc_port(struct vio_driver_state *vio)
+{
+ return container_of(vio, struct vdc_port, vio);
+}
+
+/* Ordered from largest major to lowest */
+static struct vio_version vdc_versions[] = {
+ { .major = 1, .minor = 0 },
+};
+
+#define VDCBLK_NAME "vdisk"
+static int vdc_major;
+#define PARTITION_SHIFT 3
+
+static inline u32 vdc_tx_dring_avail(struct vio_dring_state *dr)
+{
+ return vio_dring_avail(dr, VDC_TX_RING_SIZE);
+}
+
+static int vdc_getgeo(struct block_device *bdev, struct hd_geometry *geo)
+{
+ struct gendisk *disk = bdev->bd_disk;
+ struct vdc_port *port = disk->private_data;
+
+ geo->heads = (u8) port->geom.num_hd;
+ geo->sectors = (u8) port->geom.num_sec;
+ geo->cylinders = port->geom.num_cyl;
+
+ return 0;
+}
+
+static struct block_device_operations vdc_fops = {
+ .owner = THIS_MODULE,
+ .getgeo = vdc_getgeo,
+};
+
+static void vdc_finish(struct vio_driver_state *vio, int err, int waiting_for)
+{
+ if (vio->cmp &&
+ (waiting_for == -1 ||
+ vio->cmp->waiting_for == waiting_for)) {
+ vio->cmp->err = err;
+ complete(&vio->cmp->com);
+ vio->cmp = NULL;
+ }
+}
+
+static void vdc_handshake_complete(struct vio_driver_state *vio)
+{
+ vdc_finish(vio, 0, WAITING_FOR_LINK_UP);
+}
+
+static int vdc_handle_unknown(struct vdc_port *port, void *arg)
+{
+ struct vio_msg_tag *pkt = arg;
+
+ printk(KERN_ERR PFX "Received unknown msg [%02x:%02x:%04x:%08x]\n",
+ pkt->type, pkt->stype, pkt->stype_env, pkt->sid);
+ printk(KERN_ERR PFX "Resetting connection.\n");
+
+ ldc_disconnect(port->vio.lp);
+
+ return -ECONNRESET;
+}
+
+static int vdc_send_attr(struct vio_driver_state *vio)
+{
+ struct vdc_port *port = to_vdc_port(vio);
+ struct vio_disk_attr_info pkt;
+
+ memset(&pkt, 0, sizeof(pkt));
+
+ pkt.tag.type = VIO_TYPE_CTRL;
+ pkt.tag.stype = VIO_SUBTYPE_INFO;
+ pkt.tag.stype_env = VIO_ATTR_INFO;
+ pkt.tag.sid = vio_send_sid(vio);
+
+ pkt.xfer_mode = VIO_DRING_MODE;
+ pkt.vdisk_block_size = port->vdisk_block_size;
+ pkt.max_xfer_size = port->max_xfer_size;
+
+ viodbg(HS, "SEND ATTR xfer_mode[0x%x] blksz[%u] max_xfer[%lu]\n",
+ pkt.xfer_mode, pkt.vdisk_block_size, pkt.max_xfer_size);
+
+ return vio_ldc_send(&port->vio, &pkt, sizeof(pkt));
+}
+
+static int vdc_handle_attr(struct vio_driver_state *vio, void *arg)
+{
+ struct vdc_port *port = to_vdc_port(vio);
+ struct vio_disk_attr_info *pkt = arg;
+
+ viodbg(HS, "GOT ATTR stype[0x%x] ops[%lx] disk_size[%lu] disk_type[%x] "
+ "xfer_mode[0x%x] blksz[%u] max_xfer[%lu]\n",
+ pkt->tag.stype, pkt->operations,
+ pkt->vdisk_size, pkt->vdisk_type,
+ pkt->xfer_mode, pkt->vdisk_block_size,
+ pkt->max_xfer_size);
+
+ if (pkt->tag.stype == VIO_SUBTYPE_ACK) {
+ switch (pkt->vdisk_type) {
+ case VD_DISK_TYPE_DISK:
+ case VD_DISK_TYPE_SLICE:
+ break;
+
+ default:
+ printk(KERN_ERR PFX "%s: Bogus vdisk_type 0x%x\n",
+ vio->name, pkt->vdisk_type);
+ return -ECONNRESET;
+ }
+
+ if (pkt->vdisk_block_size > port->vdisk_block_size) {
+ printk(KERN_ERR PFX "%s: BLOCK size increased "
+ "%u --> %u\n",
+ vio->name,
+ port->vdisk_block_size, pkt->vdisk_block_size);
+ return -ECONNRESET;
+ }
+
+ port->operations = pkt->operations;
+ port->vdisk_size = pkt->vdisk_size;
+ port->vdisk_type = pkt->vdisk_type;
+ if (pkt->max_xfer_size < port->max_xfer_size)
+ port->max_xfer_size = pkt->max_xfer_size;
+ port->vdisk_block_size = pkt->vdisk_block_size;
+ return 0;
+ } else {
+ printk(KERN_ERR PFX "%s: Attribute NACK\n", vio->name);
+
+ return -ECONNRESET;
+ }
+}
+
+static void vdc_end_special(struct vdc_port *port, struct vio_disk_desc *desc)
+{
+ int err = desc->status;
+
+ vdc_finish(&port->vio, -err, WAITING_FOR_GEN_CMD);
+}
+
+static void vdc_end_request(struct request *req, int uptodate, int num_sectors)
+{
+ if (end_that_request_first(req, uptodate, num_sectors))
+ return;
+ add_disk_randomness(req->rq_disk);
+ end_that_request_last(req, uptodate);
+}
+
+static void vdc_end_one(struct vdc_port *port, struct vio_dring_state *dr,
+ unsigned int index)
+{
+ struct vio_disk_desc *desc = vio_dring_entry(dr, index);
+ struct vdc_req_entry *rqe = &port->rq_arr[index];
+ struct request *req;
+
+ if (unlikely(desc->hdr.state != VIO_DESC_DONE))
+ return;
+
+ ldc_unmap(port->vio.lp, desc->cookies, desc->ncookies);
+ desc->hdr.state = VIO_DESC_FREE;
+ dr->cons = (index + 1) & (VDC_TX_RING_SIZE - 1);
+
+ req = rqe->req;
+ if (req == NULL) {
+ vdc_end_special(port, desc);
+ return;
+ }
+
+ rqe->req = NULL;
+
+ vdc_end_request(req, !desc->status, desc->size >> 9);
+
+ if (blk_queue_stopped(port->disk->queue))
+ blk_start_queue(port->disk->queue);
+}
+
+static int vdc_ack(struct vdc_port *port, void *msgbuf)
+{
+ struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING];
+ struct vio_dring_data *pkt = msgbuf;
+
+ if (unlikely(pkt->dring_ident != dr->ident ||
+ pkt->start_idx != pkt->end_idx ||
+ pkt->start_idx >= VDC_TX_RING_SIZE))
+ return 0;
+
+ vdc_end_one(port, dr, pkt->start_idx);
+
+ return 0;
+}
+
+static int vdc_nack(struct vdc_port *port, void *msgbuf)
+{
+ /* XXX Implement me XXX */
+ return 0;
+}
+
+static void vdc_event(void *arg, int event)
+{
+ struct vdc_port *port = arg;
+ struct vio_driver_state *vio = &port->vio;
+ unsigned long flags;
+ int err;
+
+ spin_lock_irqsave(&vio->lock, flags);
+
+ if (unlikely(event == LDC_EVENT_RESET ||
+ event == LDC_EVENT_UP)) {
+ vio_link_state_change(vio, event);
+ spin_unlock_irqrestore(&vio->lock, flags);
+ return;
+ }
+
+ if (unlikely(event != LDC_EVENT_DATA_READY)) {
+ printk(KERN_WARNING PFX "Unexpected LDC event %d\n", event);
+ spin_unlock_irqrestore(&vio->lock, flags);
+ return;
+ }
+
+ err = 0;
+ while (1) {
+ union {
+ struct vio_msg_tag tag;
+ u64 raw[8];
+ } msgbuf;
+
+ err = ldc_read(vio->lp, &msgbuf, sizeof(msgbuf));
+ if (unlikely(err < 0)) {
+ if (err == -ECONNRESET)
+ vio_conn_reset(vio);
+ break;
+ }
+ if (err == 0)
+ break;
+ viodbg(DATA, "TAG [%02x:%02x:%04x:%08x]\n",
+ msgbuf.tag.type,
+ msgbuf.tag.stype,
+ msgbuf.tag.stype_env,
+ msgbuf.tag.sid);
+ err = vio_validate_sid(vio, &msgbuf.tag);
+ if (err < 0)
+ break;
+
+ if (likely(msgbuf.tag.type == VIO_TYPE_DATA)) {
+ if (msgbuf.tag.stype == VIO_SUBTYPE_ACK)
+ err = vdc_ack(port, &msgbuf);
+ else if (msgbuf.tag.stype == VIO_SUBTYPE_NACK)
+ err = vdc_nack(port, &msgbuf);
+ else
+ err = vdc_handle_unknown(port, &msgbuf);
+ } else if (msgbuf.tag.type == VIO_TYPE_CTRL) {
+ err = vio_control_pkt_engine(vio, &msgbuf);
+ } else {
+ err = vdc_handle_unknown(port, &msgbuf);
+ }
+ if (err < 0)
+ break;
+ }
+ if (err < 0)
+ vdc_finish(&port->vio, err, WAITING_FOR_ANY);
+ spin_unlock_irqrestore(&vio->lock, flags);
+}
+
+static int __vdc_tx_trigger(struct vdc_port *port)
+{
+ struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING];
+ struct vio_dring_data hdr = {
+ .tag = {
+ .type = VIO_TYPE_DATA,
+ .stype = VIO_SUBTYPE_INFO,
+ .stype_env = VIO_DRING_DATA,
+ .sid = vio_send_sid(&port->vio),
+ },
+ .dring_ident = dr->ident,
+ .start_idx = dr->prod,
+ .end_idx = dr->prod,
+ };
+ int err, delay;
+
+ hdr.seq = dr->snd_nxt;
+ delay = 1;
+ do {
+ err = vio_ldc_send(&port->vio, &hdr, sizeof(hdr));
+ if (err > 0) {
+ dr->snd_nxt++;
+ break;
+ }
+ udelay(delay);
+ if ((delay <<= 1) > 128)
+ delay = 128;
+ } while (err == -EAGAIN);
+
+ return err;
+}
+
+static int __send_request(struct request *req)
+{
+ struct vdc_port *port = req->rq_disk->private_data;
+ struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING];
+ struct scatterlist sg[port->ring_cookies];
+ struct vdc_req_entry *rqe;
+ struct vio_disk_desc *desc;
+ unsigned int map_perm;
+ int nsg, err, i;
+ u64 len;
+ u8 op;
+
+ map_perm = LDC_MAP_SHADOW | LDC_MAP_DIRECT | LDC_MAP_IO;
+
+ if (rq_data_dir(req) == READ) {
+ map_perm |= LDC_MAP_W;
+ op = VD_OP_BREAD;
+ } else {
+ map_perm |= LDC_MAP_R;
+ op = VD_OP_BWRITE;
+ }
+
+ nsg = blk_rq_map_sg(req->q, req, sg);
+
+ len = 0;
+ for (i = 0; i < nsg; i++)
+ len += sg[i].length;
+
+ if (unlikely(vdc_tx_dring_avail(dr) < 1)) {
+ blk_stop_queue(port->disk->queue);
+ err = -ENOMEM;
+ goto out;
+ }
+
+ desc = vio_dring_cur(dr);
+
+ err = ldc_map_sg(port->vio.lp, sg, nsg,
+ desc->cookies, port->ring_cookies,
+ map_perm);
+ if (err < 0) {
+ printk(KERN_ERR PFX "ldc_map_sg() failure, err=%d.\n", err);
+ return err;
+ }
+
+ rqe = &port->rq_arr[dr->prod];
+ rqe->req = req;
+
+ desc->hdr.ack = VIO_ACK_ENABLE;
+ desc->req_id = port->req_id;
+ desc->operation = op;
+ if (port->vdisk_type == VD_DISK_TYPE_DISK) {
+ desc->slice = 2;
+ } else {
+ desc->slice = 0;
+ }
+ desc->status = ~0;
+ desc->offset = (req->sector << 9) / port->vdisk_block_size;
+ desc->size = len;
+ desc->ncookies = err;
+
+ /* This has to be a non-SMP write barrier because we are writing
+ * to memory which is shared with the peer LDOM.
+ */
+ wmb();
+ desc->hdr.state = VIO_DESC_READY;
+
+ err = __vdc_tx_trigger(port);
+ if (err < 0) {
+ printk(KERN_ERR PFX "vdc_tx_trigger() failure, err=%d\n", err);
+ } else {
+ port->req_id++;
+ dr->prod = (dr->prod + 1) & (VDC_TX_RING_SIZE - 1);
+ }
+out:
+
+ return err;
+}
+
+static void do_vdc_request(request_queue_t *q)
+{
+ while (1) {
+ struct request *req = elv_next_request(q);
+
+ if (!req)
+ break;
+
+ blkdev_dequeue_request(req);
+ if (__send_request(req) < 0)
+ vdc_end_request(req, 0, req->hard_nr_sectors);
+ }
+}
+
+static int generic_request(struct vdc_port *port, u8 op, void *buf, int len)
+{
+ struct vio_dring_state *dr;
+ struct vio_completion comp;
+ struct vio_disk_desc *desc;
+ unsigned int map_perm;
+ unsigned long flags;
+ int op_len, err;
+ void *req_buf;
+
+ if (!(((u64)1 << ((u64)op - 1)) & port->operations))
+ return -EOPNOTSUPP;
+
+ switch (op) {
+ case VD_OP_BREAD:
+ case VD_OP_BWRITE:
+ default:
+ return -EINVAL;
+
+ case VD_OP_FLUSH:
+ op_len = 0;
+ map_perm = 0;
+ break;
+
+ case VD_OP_GET_WCE:
+ op_len = sizeof(u32);
+ map_perm = LDC_MAP_W;
+ break;
+
+ case VD_OP_SET_WCE:
+ op_len = sizeof(u32);
+ map_perm = LDC_MAP_R;
+ break;
+
+ case VD_OP_GET_VTOC:
+ op_len = sizeof(struct vio_disk_vtoc);
+ map_perm = LDC_MAP_W;
+ break;
+
+ case VD_OP_SET_VTOC:
+ op_len = sizeof(struct vio_disk_vtoc);
+ map_perm = LDC_MAP_R;
+ break;
+
+ case VD_OP_GET_DISKGEOM:
+ op_len = sizeof(struct vio_disk_geom);
+ map_perm = LDC_MAP_W;
+ break;
+
+ case VD_OP_SET_DISKGEOM:
+ op_len = sizeof(struct vio_disk_geom);
+ map_perm = LDC_MAP_R;
+ break;
+
+ case VD_OP_SCSICMD:
+ op_len = 16;
+ map_perm = LDC_MAP_RW;
+ break;
+
+ case VD_OP_GET_DEVID:
+ op_len = sizeof(struct vio_disk_devid);
+ map_perm = LDC_MAP_W;
+ break;
+
+ case VD_OP_GET_EFI:
+ case VD_OP_SET_EFI:
+ return -EOPNOTSUPP;
+ break;
+ };
+
+ map_perm |= LDC_MAP_SHADOW | LDC_MAP_DIRECT | LDC_MAP_IO;
+
+ op_len = (op_len + 7) & ~7;
+ req_buf = kzalloc(op_len, GFP_KERNEL);
+ if (!req_buf)
+ return -ENOMEM;
+
+ if (len > op_len)
+ len = op_len;
+
+ if (map_perm & LDC_MAP_R)
+ memcpy(req_buf, buf, len);
+
+ spin_lock_irqsave(&port->vio.lock, flags);
+
+ dr = &port->vio.drings[VIO_DRIVER_TX_RING];
+
+ /* XXX If we want to use this code generically we have to
+ * XXX handle TX ring exhaustion etc.
+ */
+ desc = vio_dring_cur(dr);
+
+ err = ldc_map_single(port->vio.lp, req_buf, op_len,
+ desc->cookies, port->ring_cookies,
+ map_perm);
+ if (err < 0) {
+ spin_unlock_irqrestore(&port->vio.lock, flags);
+ kfree(req_buf);
+ return err;
+ }
+
+ init_completion(&comp.com);
+ comp.waiting_for = WAITING_FOR_GEN_CMD;
+ port->vio.cmp = &comp;
+
+ desc->hdr.ack = VIO_ACK_ENABLE;
+ desc->req_id = port->req_id;
+ desc->operation = op;
+ desc->slice = 0;
+ desc->status = ~0;
+ desc->offset = 0;
+ desc->size = op_len;
+ desc->ncookies = err;
+
+ /* This has to be a non-SMP write barrier because we are writing
+ * to memory which is shared with the peer LDOM.
+ */
+ wmb();
+ desc->hdr.state = VIO_DESC_READY;
+
+ err = __vdc_tx_trigger(port);
+ if (err >= 0) {
+ port->req_id++;
+ dr->prod = (dr->prod + 1) & (VDC_TX_RING_SIZE - 1);
+ spin_unlock_irqrestore(&port->vio.lock, flags);
+
+ wait_for_completion(&comp.com);
+ err = comp.err;
+ } else {
+ port->vio.cmp = NULL;
+ spin_unlock_irqrestore(&port->vio.lock, flags);
+ }
+
+ if (map_perm & LDC_MAP_W)
+ memcpy(buf, req_buf, len);
+
+ kfree(req_buf);
+
+ return err;
+}
+
+static int __devinit vdc_alloc_tx_ring(struct vdc_port *port)
+{
+ struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING];
+ unsigned long len, entry_size;
+ int ncookies;
+ void *dring;
+
+ entry_size = sizeof(struct vio_disk_desc) +
+ (sizeof(struct ldc_trans_cookie) * port->ring_cookies);
+ len = (VDC_TX_RING_SIZE * entry_size);
+
+ ncookies = VIO_MAX_RING_COOKIES;
+ dring = ldc_alloc_exp_dring(port->vio.lp, len,
+ dr->cookies, &ncookies,
+ (LDC_MAP_SHADOW |
+ LDC_MAP_DIRECT |
+ LDC_MAP_RW));
+ if (IS_ERR(dring))
+ return PTR_ERR(dring);
+
+ dr->base = dring;
+ dr->entry_size = entry_size;
+ dr->num_entries = VDC_TX_RING_SIZE;
+ dr->prod = dr->cons = 0;
+ dr->pending = VDC_TX_RING_SIZE;
+ dr->ncookies = ncookies;
+
+ return 0;
+}
+
+static void vdc_free_tx_ring(struct vdc_port *port)
+{
+ struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING];
+
+ if (dr->base) {
+ ldc_free_exp_dring(port->vio.lp, dr->base,
+ (dr->entry_size * dr->num_entries),
+ dr->cookies, dr->ncookies);
+ dr->base = NULL;
+ dr->entry_size = 0;
+ dr->num_entries = 0;
+ dr->pending = 0;
+ dr->ncookies = 0;
+ }
+}
+
+static int probe_disk(struct vdc_port *port)
+{
+ struct vio_completion comp;
+ struct request_queue *q;
+ struct gendisk *g;
+ int err;
+
+ init_completion(&comp.com);
+ comp.err = 0;
+ comp.waiting_for = WAITING_FOR_LINK_UP;
+ port->vio.cmp = &comp;
+
+ vio_port_up(&port->vio);
+
+ wait_for_completion(&comp.com);
+ if (comp.err)
+ return comp.err;
+
+ err = generic_request(port, VD_OP_GET_VTOC,
+ &port->label, sizeof(port->label));
+ if (err < 0) {
+ printk(KERN_ERR PFX "VD_OP_GET_VTOC returns error %d\n", err);
+ return err;
+ }
+
+ err = generic_request(port, VD_OP_GET_DISKGEOM,
+ &port->geom, sizeof(port->geom));
+ if (err < 0) {
+ printk(KERN_ERR PFX "VD_OP_GET_DISKGEOM returns "
+ "error %d\n", err);
+ return err;
+ }
+
+ port->vdisk_size = ((u64)port->geom.num_cyl *
+ (u64)port->geom.num_hd *
+ (u64)port->geom.num_sec);
+
+ q = blk_init_queue(do_vdc_request, &port->vio.lock);
+ if (!q) {
+ printk(KERN_ERR PFX "%s: Could not allocate queue.\n",
+ port->vio.name);
+ return -ENOMEM;
+ }
+ g = alloc_disk(1 << PARTITION_SHIFT);
+ if (!g) {
+ printk(KERN_ERR PFX "%s: Could not allocate gendisk.\n",
+ port->vio.name);
+ blk_cleanup_queue(q);
+ return -ENOMEM;
+ }
+
+ port->disk = g;
+
+ blk_queue_max_hw_segments(q, port->ring_cookies);
+ blk_queue_max_phys_segments(q, port->ring_cookies);
+ blk_queue_max_sectors(q, port->max_xfer_size);
+ g->major = vdc_major;
+ g->first_minor = port->vio.vdev->dev_no << PARTITION_SHIFT;
+ strcpy(g->disk_name, port->disk_name);
+
+ g->fops = &vdc_fops;
+ g->queue = q;
+ g->private_data = port;
+ g->driverfs_dev = &port->vio.vdev->dev;
+
+ set_capacity(g, port->vdisk_size);
+
+ printk(KERN_INFO PFX "%s: %u sectors (%u MB)\n",
+ g->disk_name,
+ port->vdisk_size, (port->vdisk_size >> (20 - 9)));
+
+ add_disk(g);
+
+ return 0;
+}
+
+static struct ldc_channel_config vdc_ldc_cfg = {
+ .event = vdc_event,
+ .mtu = 64,
+ .mode = LDC_MODE_UNRELIABLE,
+};
+
+static struct vio_driver_ops vdc_vio_ops = {
+ .send_attr = vdc_send_attr,
+ .handle_attr = vdc_handle_attr,
+ .handshake_complete = vdc_handshake_complete,
+};
+
+static void print_version(void)
+{
+ static int version_printed;
+
+ if (version_printed++ == 0)
+ printk(KERN_INFO "%s", version);
+}
+
+static int __devinit vdc_port_probe(struct vio_dev *vdev,
+ const struct vio_device_id *id)
+{
+ struct mdesc_handle *hp;
+ struct vdc_port *port;
+ int err;
+
+ print_version();
+
+ hp = mdesc_grab();
+
+ err = -ENODEV;
+ if ((vdev->dev_no << PARTITION_SHIFT) & ~(u64)MINORMASK) {
+ printk(KERN_ERR PFX "Port id [%lu] too large.\n",
+ vdev->dev_no);
+ goto err_out_release_mdesc;
+ }
+
+ port = kzalloc(sizeof(*port), GFP_KERNEL);
+ err = -ENOMEM;
+ if (!port) {
+ printk(KERN_ERR PFX "Cannot allocate vdc_port.\n");
+ goto err_out_release_mdesc;
+ }
+
+ if (vdev->dev_no >= 26)
+ snprintf(port->disk_name, sizeof(port->disk_name),
+ VDCBLK_NAME "%c%c",
+ 'a' + ((int)vdev->dev_no / 26) - 1,
+ 'a' + ((int)vdev->dev_no % 26));
+ else
+ snprintf(port->disk_name, sizeof(port->disk_name),
+ VDCBLK_NAME "%c", 'a' + ((int)vdev->dev_no % 26));
+
+ err = vio_driver_init(&port->vio, vdev, VDEV_DISK,
+ vdc_versions, ARRAY_SIZE(vdc_versions),
+ &vdc_vio_ops, port->disk_name);
+ if (err)
+ goto err_out_free_port;
+
+ port->vdisk_block_size = 512;
+ port->max_xfer_size = ((128 * 1024) / port->vdisk_block_size);
+ port->ring_cookies = ((port->max_xfer_size *
+ port->vdisk_block_size) / PAGE_SIZE) + 2;
+
+ err = vio_ldc_alloc(&port->vio, &vdc_ldc_cfg, port);
+ if (err)
+ goto err_out_free_port;
+
+ err = vdc_alloc_tx_ring(port);
+ if (err)
+ goto err_out_free_ldc;
+
+ err = probe_disk(port);
+ if (err)
+ goto err_out_free_tx_ring;
+
+ dev_set_drvdata(&vdev->dev, port);
+
+ mdesc_release(hp);
+
+ return 0;
+
+err_out_free_tx_ring:
+ vdc_free_tx_ring(port);
+
+err_out_free_ldc:
+ vio_ldc_free(&port->vio);
+
+err_out_free_port:
+ kfree(port);
+
+err_out_release_mdesc:
+ mdesc_release(hp);
+ return err;
+}
+
+static int vdc_port_remove(struct vio_dev *vdev)
+{
+ struct vdc_port *port = dev_get_drvdata(&vdev->dev);
+
+ if (port) {
+ del_timer_sync(&port->vio.timer);
+
+ vdc_free_tx_ring(port);
+ vio_ldc_free(&port->vio);
+
+ dev_set_drvdata(&vdev->dev, NULL);
+
+ kfree(port);
+ }
+ return 0;
+}
+
+static struct vio_device_id vdc_port_match[] = {
+ {
+ .type = "vdc-port",
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(vio, vdc_port_match);
+
+static struct vio_driver vdc_port_driver = {
+ .id_table = vdc_port_match,
+ .probe = vdc_port_probe,
+ .remove = vdc_port_remove,
+ .driver = {
+ .name = "vdc_port",
+ .owner = THIS_MODULE,
+ }
+};
+
+static int __init vdc_init(void)
+{
+ int err;
+
+ err = register_blkdev(0, VDCBLK_NAME);
+ if (err < 0)
+ goto out_err;
+
+ vdc_major = err;
+
+ err = vio_register_driver(&vdc_port_driver);
+ if (err)
+ goto out_unregister_blkdev;
+
+ return 0;
+
+out_unregister_blkdev:
+ unregister_blkdev(vdc_major, VDCBLK_NAME);
+ vdc_major = 0;
+
+out_err:
+ return err;
+}
+
+static void __exit vdc_exit(void)
+{
+ vio_unregister_driver(&vdc_port_driver);
+ unregister_blkdev(vdc_major, VDCBLK_NAME);
+}
+
+module_init(vdc_init);
+module_exit(vdc_exit);
diff --git a/drivers/block/sx8.c b/drivers/block/sx8.c
index 54509eb3391..949ae93499e 100644
--- a/drivers/block/sx8.c
+++ b/drivers/block/sx8.c
@@ -1608,7 +1608,7 @@ static int carm_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
}
#endif
- host = kmalloc(sizeof(*host), GFP_KERNEL);
+ host = kzalloc(sizeof(*host), GFP_KERNEL);
if (!host) {
printk(KERN_ERR DRV_NAME "(%s): memory alloc failure\n",
pci_name(pdev));
@@ -1616,7 +1616,6 @@ static int carm_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_out_regions;
}
- memset(host, 0, sizeof(*host));
host->pdev = pdev;
host->flags = pci_dac ? FL_DAC : 0;
spin_lock_init(&host->lock);
diff --git a/drivers/block/ub.c b/drivers/block/ub.c
index 746a118a9b5..8b13d7d2cb6 100644
--- a/drivers/block/ub.c
+++ b/drivers/block/ub.c
@@ -1547,10 +1547,8 @@ static void ub_reset_enter(struct ub_dev *sc, int try)
#endif
#if 0 /* We let them stop themselves. */
- struct list_head *p;
struct ub_lun *lun;
- list_for_each(p, &sc->luns) {
- lun = list_entry(p, struct ub_lun, link);
+ list_for_each_entry(lun, &sc->luns, link) {
blk_stop_queue(lun->disk->queue);
}
#endif
@@ -1562,7 +1560,6 @@ static void ub_reset_task(struct work_struct *work)
{
struct ub_dev *sc = container_of(work, struct ub_dev, reset_work);
unsigned long flags;
- struct list_head *p;
struct ub_lun *lun;
int lkr, rc;
@@ -1608,8 +1605,7 @@ static void ub_reset_task(struct work_struct *work)
spin_lock_irqsave(sc->lock, flags);
sc->reset = 0;
tasklet_schedule(&sc->tasklet);
- list_for_each(p, &sc->luns) {
- lun = list_entry(p, struct ub_lun, link);
+ list_for_each_entry(lun, &sc->luns, link) {
blk_start_queue(lun->disk->queue);
}
wake_up(&sc->reset_wait);
@@ -1713,7 +1709,7 @@ static int ub_bd_ioctl(struct inode *inode, struct file *filp,
struct gendisk *disk = inode->i_bdev->bd_disk;
void __user *usermem = (void __user *) arg;
- return scsi_cmd_ioctl(filp, disk, cmd, usermem);
+ return scsi_cmd_ioctl(filp, disk->queue, disk, cmd, usermem);
}
/*
@@ -2348,7 +2344,6 @@ err_alloc:
static void ub_disconnect(struct usb_interface *intf)
{
struct ub_dev *sc = usb_get_intfdata(intf);
- struct list_head *p;
struct ub_lun *lun;
unsigned long flags;
@@ -2403,8 +2398,7 @@ static void ub_disconnect(struct usb_interface *intf)
/*
* Unregister the upper layer.
*/
- list_for_each (p, &sc->luns) {
- lun = list_entry(p, struct ub_lun, link);
+ list_for_each_entry(lun, &sc->luns, link) {
del_gendisk(lun->disk);
/*
* I wish I could do:
diff --git a/drivers/block/umem.c b/drivers/block/umem.c
index 6f5d6203d72..dec74bd2349 100644
--- a/drivers/block/umem.c
+++ b/drivers/block/umem.c
@@ -105,12 +105,6 @@ struct cardinfo {
unsigned long csr_base;
unsigned char __iomem *csr_remap;
unsigned long csr_len;
-#ifdef CONFIG_MM_MAP_MEMORY
- unsigned long mem_base;
- unsigned char __iomem *mem_remap;
- unsigned long mem_len;
-#endif
-
unsigned int win_size; /* PCI window size */
unsigned int mm_size; /* size in kbytes */
@@ -872,10 +866,6 @@ static int __devinit mm_pci_probe(struct pci_dev *dev, const struct pci_device_i
card->csr_base = pci_resource_start(dev, 0);
card->csr_len = pci_resource_len(dev, 0);
-#ifdef CONFIG_MM_MAP_MEMORY
- card->mem_base = pci_resource_start(dev, 1);
- card->mem_len = pci_resource_len(dev, 1);
-#endif
printk(KERN_INFO "Micro Memory(tm) controller #%d found at %02x:%02x (PCI Mem Module (Battery Backup))\n",
card->card_number, dev->bus->number, dev->devfn);
@@ -903,27 +893,6 @@ static int __devinit mm_pci_probe(struct pci_dev *dev, const struct pci_device_i
printk(KERN_INFO "MM%d: CSR 0x%08lx -> 0x%p (0x%lx)\n", card->card_number,
card->csr_base, card->csr_remap, card->csr_len);
-#ifdef CONFIG_MM_MAP_MEMORY
- if (!request_mem_region(card->mem_base, card->mem_len, "Micro Memory")) {
- printk(KERN_ERR "MM%d: Unable to request memory region\n", card->card_number);
- ret = -ENOMEM;
-
- goto failed_req_mem;
- }
-
- if (!(card->mem_remap = ioremap(card->mem_base, cards->mem_len))) {
- printk(KERN_ERR "MM%d: Unable to remap memory region\n", card->card_number);
- ret = -ENOMEM;
-
- goto failed_remap_mem;
- }
-
- printk(KERN_INFO "MM%d: MEM 0x%8lx -> 0x%8lx (0x%lx)\n", card->card_number,
- card->mem_base, card->mem_remap, card->mem_len);
-#else
- printk(KERN_INFO "MM%d: MEM area not remapped (CONFIG_MM_MAP_MEMORY not set)\n",
- card->card_number);
-#endif
switch(card->dev->device) {
case 0x5415:
card->flags |= UM_FLAG_NO_BYTE_STATUS | UM_FLAG_NO_BATTREG;
@@ -1091,12 +1060,6 @@ static int __devinit mm_pci_probe(struct pci_dev *dev, const struct pci_device_i
card->mm_pages[1].desc,
card->mm_pages[1].page_dma);
failed_magic:
-#ifdef CONFIG_MM_MAP_MEMORY
- iounmap(card->mem_remap);
- failed_remap_mem:
- release_mem_region(card->mem_base, card->mem_len);
- failed_req_mem:
-#endif
iounmap(card->csr_remap);
failed_remap_csr:
release_mem_region(card->csr_base, card->csr_len);
@@ -1116,10 +1079,6 @@ static void mm_pci_remove(struct pci_dev *dev)
tasklet_kill(&card->tasklet);
iounmap(card->csr_remap);
release_mem_region(card->csr_base, card->csr_len);
-#ifdef CONFIG_MM_MAP_MEMORY
- iounmap(card->mem_remap);
- release_mem_region(card->mem_base, card->mem_len);
-#endif
free_irq(card->irq, card);
if (card->mm_pages[0].desc)
@@ -1133,23 +1092,18 @@ static void mm_pci_remove(struct pci_dev *dev)
blk_cleanup_queue(card->queue);
}
-static const struct pci_device_id mm_pci_ids[] = { {
- .vendor = PCI_VENDOR_ID_MICRO_MEMORY,
- .device = PCI_DEVICE_ID_MICRO_MEMORY_5415CN,
- }, {
- .vendor = PCI_VENDOR_ID_MICRO_MEMORY,
- .device = PCI_DEVICE_ID_MICRO_MEMORY_5425CN,
- }, {
- .vendor = PCI_VENDOR_ID_MICRO_MEMORY,
- .device = PCI_DEVICE_ID_MICRO_MEMORY_6155,
- }, {
+static const struct pci_device_id mm_pci_ids[] = {
+ {PCI_DEVICE(PCI_VENDOR_ID_MICRO_MEMORY,PCI_DEVICE_ID_MICRO_MEMORY_5415CN)},
+ {PCI_DEVICE(PCI_VENDOR_ID_MICRO_MEMORY,PCI_DEVICE_ID_MICRO_MEMORY_5425CN)},
+ {PCI_DEVICE(PCI_VENDOR_ID_MICRO_MEMORY,PCI_DEVICE_ID_MICRO_MEMORY_6155)},
+ {
.vendor = 0x8086,
.device = 0xB555,
.subvendor= 0x1332,
.subdevice= 0x5460,
.class = 0x050000,
.class_mask= 0,
- }, { /* end: all zeroes */ }
+ }, { /* end: all zeroes */ }
};
MODULE_DEVICE_TABLE(pci, mm_pci_ids);
diff --git a/drivers/block/viodasd.c b/drivers/block/viodasd.c
index 68592c33601..dae39911a11 100644
--- a/drivers/block/viodasd.c
+++ b/drivers/block/viodasd.c
@@ -252,10 +252,10 @@ static int viodasd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
struct gendisk *disk = bdev->bd_disk;
struct viodasd_device *d = disk->private_data;
- geo->sectors = d->sectors ? d->sectors : 0;
+ geo->sectors = d->sectors ? d->sectors : 32;
geo->heads = d->tracks ? d->tracks : 64;
geo->cylinders = d->cylinders ? d->cylinders :
- get_capacity(disk) / (geo->cylinders * geo->heads);
+ get_capacity(disk) / (geo->sectors * geo->heads);
return 0;
}
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
new file mode 100644
index 00000000000..6746c29181f
--- /dev/null
+++ b/drivers/block/xen-blkfront.c
@@ -0,0 +1,988 @@
+/*
+ * blkfront.c
+ *
+ * XenLinux virtual block device driver.
+ *
+ * Copyright (c) 2003-2004, Keir Fraser & Steve Hand
+ * Modifications by Mark A. Williamson are (c) Intel Research Cambridge
+ * Copyright (c) 2004, Christian Limpach
+ * Copyright (c) 2004, Andrew Warfield
+ * Copyright (c) 2005, Christopher Clark
+ * Copyright (c) 2005, XenSource Ltd
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation; or, when distributed
+ * separately from the Linux kernel or incorporated into other
+ * software packages, subject to the following license:
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this source file (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy, modify,
+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/blkdev.h>
+#include <linux/module.h>
+
+#include <xen/xenbus.h>
+#include <xen/grant_table.h>
+#include <xen/events.h>
+#include <xen/page.h>
+
+#include <xen/interface/grant_table.h>
+#include <xen/interface/io/blkif.h>
+
+#include <asm/xen/hypervisor.h>
+
+enum blkif_state {
+ BLKIF_STATE_DISCONNECTED,
+ BLKIF_STATE_CONNECTED,
+ BLKIF_STATE_SUSPENDED,
+};
+
+struct blk_shadow {
+ struct blkif_request req;
+ unsigned long request;
+ unsigned long frame[BLKIF_MAX_SEGMENTS_PER_REQUEST];
+};
+
+static struct block_device_operations xlvbd_block_fops;
+
+#define BLK_RING_SIZE __RING_SIZE((struct blkif_sring *)0, PAGE_SIZE)
+
+/*
+ * We have one of these per vbd, whether ide, scsi or 'other'. They
+ * hang in private_data off the gendisk structure. We may end up
+ * putting all kinds of interesting stuff here :-)
+ */
+struct blkfront_info
+{
+ struct xenbus_device *xbdev;
+ dev_t dev;
+ struct gendisk *gd;
+ int vdevice;
+ blkif_vdev_t handle;
+ enum blkif_state connected;
+ int ring_ref;
+ struct blkif_front_ring ring;
+ unsigned int evtchn, irq;
+ struct request_queue *rq;
+ struct work_struct work;
+ struct gnttab_free_callback callback;
+ struct blk_shadow shadow[BLK_RING_SIZE];
+ unsigned long shadow_free;
+ int feature_barrier;
+
+ /**
+ * The number of people holding this device open. We won't allow a
+ * hot-unplug unless this is 0.
+ */
+ int users;
+};
+
+static DEFINE_SPINLOCK(blkif_io_lock);
+
+#define MAXIMUM_OUTSTANDING_BLOCK_REQS \
+ (BLKIF_MAX_SEGMENTS_PER_REQUEST * BLK_RING_SIZE)
+#define GRANT_INVALID_REF 0
+
+#define PARTS_PER_DISK 16
+
+#define BLKIF_MAJOR(dev) ((dev)>>8)
+#define BLKIF_MINOR(dev) ((dev) & 0xff)
+
+#define DEV_NAME "xvd" /* name in /dev */
+
+/* Information about our VBDs. */
+#define MAX_VBDS 64
+static LIST_HEAD(vbds_list);
+
+static int get_id_from_freelist(struct blkfront_info *info)
+{
+ unsigned long free = info->shadow_free;
+ BUG_ON(free > BLK_RING_SIZE);
+ info->shadow_free = info->shadow[free].req.id;
+ info->shadow[free].req.id = 0x0fffffee; /* debug */
+ return free;
+}
+
+static void add_id_to_freelist(struct blkfront_info *info,
+ unsigned long id)
+{
+ info->shadow[id].req.id = info->shadow_free;
+ info->shadow[id].request = 0;
+ info->shadow_free = id;
+}
+
+static void blkif_restart_queue_callback(void *arg)
+{
+ struct blkfront_info *info = (struct blkfront_info *)arg;
+ schedule_work(&info->work);
+}
+
+/*
+ * blkif_queue_request
+ *
+ * request block io
+ *
+ * id: for guest use only.
+ * operation: BLKIF_OP_{READ,WRITE,PROBE}
+ * buffer: buffer to read/write into. this should be a
+ * virtual address in the guest os.
+ */
+static int blkif_queue_request(struct request *req)
+{
+ struct blkfront_info *info = req->rq_disk->private_data;
+ unsigned long buffer_mfn;
+ struct blkif_request *ring_req;
+ struct bio *bio;
+ struct bio_vec *bvec;
+ int idx;
+ unsigned long id;
+ unsigned int fsect, lsect;
+ int ref;
+ grant_ref_t gref_head;
+
+ if (unlikely(info->connected != BLKIF_STATE_CONNECTED))
+ return 1;
+
+ if (gnttab_alloc_grant_references(
+ BLKIF_MAX_SEGMENTS_PER_REQUEST, &gref_head) < 0) {
+ gnttab_request_free_callback(
+ &info->callback,
+ blkif_restart_queue_callback,
+ info,
+ BLKIF_MAX_SEGMENTS_PER_REQUEST);
+ return 1;
+ }
+
+ /* Fill out a communications ring structure. */
+ ring_req = RING_GET_REQUEST(&info->ring, info->ring.req_prod_pvt);
+ id = get_id_from_freelist(info);
+ info->shadow[id].request = (unsigned long)req;
+
+ ring_req->id = id;
+ ring_req->sector_number = (blkif_sector_t)req->sector;
+ ring_req->handle = info->handle;
+
+ ring_req->operation = rq_data_dir(req) ?
+ BLKIF_OP_WRITE : BLKIF_OP_READ;
+ if (blk_barrier_rq(req))
+ ring_req->operation = BLKIF_OP_WRITE_BARRIER;
+
+ ring_req->nr_segments = 0;
+ rq_for_each_bio (bio, req) {
+ bio_for_each_segment (bvec, bio, idx) {
+ BUG_ON(ring_req->nr_segments
+ == BLKIF_MAX_SEGMENTS_PER_REQUEST);
+ buffer_mfn = pfn_to_mfn(page_to_pfn(bvec->bv_page));
+ fsect = bvec->bv_offset >> 9;
+ lsect = fsect + (bvec->bv_len >> 9) - 1;
+ /* install a grant reference. */
+ ref = gnttab_claim_grant_reference(&gref_head);
+ BUG_ON(ref == -ENOSPC);
+
+ gnttab_grant_foreign_access_ref(
+ ref,
+ info->xbdev->otherend_id,
+ buffer_mfn,
+ rq_data_dir(req) );
+
+ info->shadow[id].frame[ring_req->nr_segments] =
+ mfn_to_pfn(buffer_mfn);
+
+ ring_req->seg[ring_req->nr_segments] =
+ (struct blkif_request_segment) {
+ .gref = ref,
+ .first_sect = fsect,
+ .last_sect = lsect };
+
+ ring_req->nr_segments++;
+ }
+ }
+
+ info->ring.req_prod_pvt++;
+
+ /* Keep a private copy so we can reissue requests when recovering. */
+ info->shadow[id].req = *ring_req;
+
+ gnttab_free_grant_references(gref_head);
+
+ return 0;
+}
+
+
+static inline void flush_requests(struct blkfront_info *info)
+{
+ int notify;
+
+ RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&info->ring, notify);
+
+ if (notify)
+ notify_remote_via_irq(info->irq);
+}
+
+/*
+ * do_blkif_request
+ * read a block; request is in a request queue
+ */
+static void do_blkif_request(request_queue_t *rq)
+{
+ struct blkfront_info *info = NULL;
+ struct request *req;
+ int queued;
+
+ pr_debug("Entered do_blkif_request\n");
+
+ queued = 0;
+
+ while ((req = elv_next_request(rq)) != NULL) {
+ info = req->rq_disk->private_data;
+ if (!blk_fs_request(req)) {
+ end_request(req, 0);
+ continue;
+ }
+
+ if (RING_FULL(&info->ring))
+ goto wait;
+
+ pr_debug("do_blk_req %p: cmd %p, sec %lx, "
+ "(%u/%li) buffer:%p [%s]\n",
+ req, req->cmd, (unsigned long)req->sector,
+ req->current_nr_sectors,
+ req->nr_sectors, req->buffer,
+ rq_data_dir(req) ? "write" : "read");
+
+
+ blkdev_dequeue_request(req);
+ if (blkif_queue_request(req)) {
+ blk_requeue_request(rq, req);
+wait:
+ /* Avoid pointless unplugs. */
+ blk_stop_queue(rq);
+ break;
+ }
+
+ queued++;
+ }
+
+ if (queued != 0)
+ flush_requests(info);
+}
+
+static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size)
+{
+ request_queue_t *rq;
+
+ rq = blk_init_queue(do_blkif_request, &blkif_io_lock);
+ if (rq == NULL)
+ return -1;
+
+ elevator_init(rq, "noop");
+
+ /* Hard sector size and max sectors impersonate the equiv. hardware. */
+ blk_queue_hardsect_size(rq, sector_size);
+ blk_queue_max_sectors(rq, 512);
+
+ /* Each segment in a request is up to an aligned page in size. */
+ blk_queue_segment_boundary(rq, PAGE_SIZE - 1);
+ blk_queue_max_segment_size(rq, PAGE_SIZE);
+
+ /* Ensure a merged request will fit in a single I/O ring slot. */
+ blk_queue_max_phys_segments(rq, BLKIF_MAX_SEGMENTS_PER_REQUEST);
+ blk_queue_max_hw_segments(rq, BLKIF_MAX_SEGMENTS_PER_REQUEST);
+
+ /* Make sure buffer addresses are sector-aligned. */
+ blk_queue_dma_alignment(rq, 511);
+
+ gd->queue = rq;
+
+ return 0;
+}
+
+
+static int xlvbd_barrier(struct blkfront_info *info)
+{
+ int err;
+
+ err = blk_queue_ordered(info->rq,
+ info->feature_barrier ? QUEUE_ORDERED_DRAIN : QUEUE_ORDERED_NONE,
+ NULL);
+
+ if (err)
+ return err;
+
+ printk(KERN_INFO "blkfront: %s: barriers %s\n",
+ info->gd->disk_name,
+ info->feature_barrier ? "enabled" : "disabled");
+ return 0;
+}
+
+
+static int xlvbd_alloc_gendisk(int minor, blkif_sector_t capacity,
+ int vdevice, u16 vdisk_info, u16 sector_size,
+ struct blkfront_info *info)
+{
+ struct gendisk *gd;
+ int nr_minors = 1;
+ int err = -ENODEV;
+
+ BUG_ON(info->gd != NULL);
+ BUG_ON(info->rq != NULL);
+
+ if ((minor % PARTS_PER_DISK) == 0)
+ nr_minors = PARTS_PER_DISK;
+
+ gd = alloc_disk(nr_minors);
+ if (gd == NULL)
+ goto out;
+
+ if (nr_minors > 1)
+ sprintf(gd->disk_name, "%s%c", DEV_NAME,
+ 'a' + minor / PARTS_PER_DISK);
+ else
+ sprintf(gd->disk_name, "%s%c%d", DEV_NAME,
+ 'a' + minor / PARTS_PER_DISK,
+ minor % PARTS_PER_DISK);
+
+ gd->major = XENVBD_MAJOR;
+ gd->first_minor = minor;
+ gd->fops = &xlvbd_block_fops;
+ gd->private_data = info;
+ gd->driverfs_dev = &(info->xbdev->dev);
+ set_capacity(gd, capacity);
+
+ if (xlvbd_init_blk_queue(gd, sector_size)) {
+ del_gendisk(gd);
+ goto out;
+ }
+
+ info->rq = gd->queue;
+ info->gd = gd;
+
+ if (info->feature_barrier)
+ xlvbd_barrier(info);
+
+ if (vdisk_info & VDISK_READONLY)
+ set_disk_ro(gd, 1);
+
+ if (vdisk_info & VDISK_REMOVABLE)
+ gd->flags |= GENHD_FL_REMOVABLE;
+
+ if (vdisk_info & VDISK_CDROM)
+ gd->flags |= GENHD_FL_CD;
+
+ return 0;
+
+ out:
+ return err;
+}
+
+static void kick_pending_request_queues(struct blkfront_info *info)
+{
+ if (!RING_FULL(&info->ring)) {
+ /* Re-enable calldowns. */
+ blk_start_queue(info->rq);
+ /* Kick things off immediately. */
+ do_blkif_request(info->rq);
+ }
+}
+
+static void blkif_restart_queue(struct work_struct *work)
+{
+ struct blkfront_info *info = container_of(work, struct blkfront_info, work);
+
+ spin_lock_irq(&blkif_io_lock);
+ if (info->connected == BLKIF_STATE_CONNECTED)
+ kick_pending_request_queues(info);
+ spin_unlock_irq(&blkif_io_lock);
+}
+
+static void blkif_free(struct blkfront_info *info, int suspend)
+{
+ /* Prevent new requests being issued until we fix things up. */
+ spin_lock_irq(&blkif_io_lock);
+ info->connected = suspend ?
+ BLKIF_STATE_SUSPENDED : BLKIF_STATE_DISCONNECTED;
+ /* No more blkif_request(). */
+ if (info->rq)
+ blk_stop_queue(info->rq);
+ /* No more gnttab callback work. */
+ gnttab_cancel_free_callback(&info->callback);
+ spin_unlock_irq(&blkif_io_lock);
+
+ /* Flush gnttab callback work. Must be done with no locks held. */
+ flush_scheduled_work();
+
+ /* Free resources associated with old device channel. */
+ if (info->ring_ref != GRANT_INVALID_REF) {
+ gnttab_end_foreign_access(info->ring_ref, 0,
+ (unsigned long)info->ring.sring);
+ info->ring_ref = GRANT_INVALID_REF;
+ info->ring.sring = NULL;
+ }
+ if (info->irq)
+ unbind_from_irqhandler(info->irq, info);
+ info->evtchn = info->irq = 0;
+
+}
+
+static void blkif_completion(struct blk_shadow *s)
+{
+ int i;
+ for (i = 0; i < s->req.nr_segments; i++)
+ gnttab_end_foreign_access(s->req.seg[i].gref, 0, 0UL);
+}
+
+static irqreturn_t blkif_interrupt(int irq, void *dev_id)
+{
+ struct request *req;
+ struct blkif_response *bret;
+ RING_IDX i, rp;
+ unsigned long flags;
+ struct blkfront_info *info = (struct blkfront_info *)dev_id;
+ int uptodate;
+
+ spin_lock_irqsave(&blkif_io_lock, flags);
+
+ if (unlikely(info->connected != BLKIF_STATE_CONNECTED)) {
+ spin_unlock_irqrestore(&blkif_io_lock, flags);
+ return IRQ_HANDLED;
+ }
+
+ again:
+ rp = info->ring.sring->rsp_prod;
+ rmb(); /* Ensure we see queued responses up to 'rp'. */
+
+ for (i = info->ring.rsp_cons; i != rp; i++) {
+ unsigned long id;
+ int ret;
+
+ bret = RING_GET_RESPONSE(&info->ring, i);
+ id = bret->id;
+ req = (struct request *)info->shadow[id].request;
+
+ blkif_completion(&info->shadow[id]);
+
+ add_id_to_freelist(info, id);
+
+ uptodate = (bret->status == BLKIF_RSP_OKAY);
+ switch (bret->operation) {
+ case BLKIF_OP_WRITE_BARRIER:
+ if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) {
+ printk(KERN_WARNING "blkfront: %s: write barrier op failed\n",
+ info->gd->disk_name);
+ uptodate = -EOPNOTSUPP;
+ info->feature_barrier = 0;
+ xlvbd_barrier(info);
+ }
+ /* fall through */
+ case BLKIF_OP_READ:
+ case BLKIF_OP_WRITE:
+ if (unlikely(bret->status != BLKIF_RSP_OKAY))
+ dev_dbg(&info->xbdev->dev, "Bad return from blkdev data "
+ "request: %x\n", bret->status);
+
+ ret = end_that_request_first(req, uptodate,
+ req->hard_nr_sectors);
+ BUG_ON(ret);
+ end_that_request_last(req, uptodate);
+ break;
+ default:
+ BUG();
+ }
+ }
+
+ info->ring.rsp_cons = i;
+
+ if (i != info->ring.req_prod_pvt) {
+ int more_to_do;
+ RING_FINAL_CHECK_FOR_RESPONSES(&info->ring, more_to_do);
+ if (more_to_do)
+ goto again;
+ } else
+ info->ring.sring->rsp_event = i + 1;
+
+ kick_pending_request_queues(info);
+
+ spin_unlock_irqrestore(&blkif_io_lock, flags);
+
+ return IRQ_HANDLED;
+}
+
+
+static int setup_blkring(struct xenbus_device *dev,
+ struct blkfront_info *info)
+{
+ struct blkif_sring *sring;
+ int err;
+
+ info->ring_ref = GRANT_INVALID_REF;
+
+ sring = (struct blkif_sring *)__get_free_page(GFP_KERNEL);
+ if (!sring) {
+ xenbus_dev_fatal(dev, -ENOMEM, "allocating shared ring");
+ return -ENOMEM;
+ }
+ SHARED_RING_INIT(sring);
+ FRONT_RING_INIT(&info->ring, sring, PAGE_SIZE);
+
+ err = xenbus_grant_ring(dev, virt_to_mfn(info->ring.sring));
+ if (err < 0) {
+ free_page((unsigned long)sring);
+ info->ring.sring = NULL;
+ goto fail;
+ }
+ info->ring_ref = err;
+
+ err = xenbus_alloc_evtchn(dev, &info->evtchn);
+ if (err)
+ goto fail;
+
+ err = bind_evtchn_to_irqhandler(info->evtchn,
+ blkif_interrupt,
+ IRQF_SAMPLE_RANDOM, "blkif", info);
+ if (err <= 0) {
+ xenbus_dev_fatal(dev, err,
+ "bind_evtchn_to_irqhandler failed");
+ goto fail;
+ }
+ info->irq = err;
+
+ return 0;
+fail:
+ blkif_free(info, 0);
+ return err;
+}
+
+
+/* Common code used when first setting up, and when resuming. */
+static int talk_to_backend(struct xenbus_device *dev,
+ struct blkfront_info *info)
+{
+ const char *message = NULL;
+ struct xenbus_transaction xbt;
+ int err;
+
+ /* Create shared ring, alloc event channel. */
+ err = setup_blkring(dev, info);
+ if (err)
+ goto out;
+
+again:
+ err = xenbus_transaction_start(&xbt);
+ if (err) {
+ xenbus_dev_fatal(dev, err, "starting transaction");
+ goto destroy_blkring;
+ }
+
+ err = xenbus_printf(xbt, dev->nodename,
+ "ring-ref", "%u", info->ring_ref);
+ if (err) {
+ message = "writing ring-ref";
+ goto abort_transaction;
+ }
+ err = xenbus_printf(xbt, dev->nodename,
+ "event-channel", "%u", info->evtchn);
+ if (err) {
+ message = "writing event-channel";
+ goto abort_transaction;
+ }
+
+ err = xenbus_transaction_end(xbt, 0);
+ if (err) {
+ if (err == -EAGAIN)
+ goto again;
+ xenbus_dev_fatal(dev, err, "completing transaction");
+ goto destroy_blkring;
+ }
+
+ xenbus_switch_state(dev, XenbusStateInitialised);
+
+ return 0;
+
+ abort_transaction:
+ xenbus_transaction_end(xbt, 1);
+ if (message)
+ xenbus_dev_fatal(dev, err, "%s", message);
+ destroy_blkring:
+ blkif_free(info, 0);
+ out:
+ return err;
+}
+
+
+/**
+ * Entry point to this code when a new device is created. Allocate the basic
+ * structures and the ring buffer for communication with the backend, and
+ * inform the backend of the appropriate details for those. Switch to
+ * Initialised state.
+ */
+static int blkfront_probe(struct xenbus_device *dev,
+ const struct xenbus_device_id *id)
+{
+ int err, vdevice, i;
+ struct blkfront_info *info;
+
+ /* FIXME: Use dynamic device id if this is not set. */
+ err = xenbus_scanf(XBT_NIL, dev->nodename,
+ "virtual-device", "%i", &vdevice);
+ if (err != 1) {
+ xenbus_dev_fatal(dev, err, "reading virtual-device");
+ return err;
+ }
+
+ info = kzalloc(sizeof(*info), GFP_KERNEL);
+ if (!info) {
+ xenbus_dev_fatal(dev, -ENOMEM, "allocating info structure");
+ return -ENOMEM;
+ }
+
+ info->xbdev = dev;
+ info->vdevice = vdevice;
+ info->connected = BLKIF_STATE_DISCONNECTED;
+ INIT_WORK(&info->work, blkif_restart_queue);
+
+ for (i = 0; i < BLK_RING_SIZE; i++)
+ info->shadow[i].req.id = i+1;
+ info->shadow[BLK_RING_SIZE-1].req.id = 0x0fffffff;
+
+ /* Front end dir is a number, which is used as the id. */
+ info->handle = simple_strtoul(strrchr(dev->nodename, '/')+1, NULL, 0);
+ dev->dev.driver_data = info;
+
+ err = talk_to_backend(dev, info);
+ if (err) {
+ kfree(info);
+ dev->dev.driver_data = NULL;
+ return err;
+ }
+
+ return 0;
+}
+
+
+static int blkif_recover(struct blkfront_info *info)
+{
+ int i;
+ struct blkif_request *req;
+ struct blk_shadow *copy;
+ int j;
+
+ /* Stage 1: Make a safe copy of the shadow state. */
+ copy = kmalloc(sizeof(info->shadow), GFP_KERNEL);
+ if (!copy)
+ return -ENOMEM;
+ memcpy(copy, info->shadow, sizeof(info->shadow));
+
+ /* Stage 2: Set up free list. */
+ memset(&info->shadow, 0, sizeof(info->shadow));
+ for (i = 0; i < BLK_RING_SIZE; i++)
+ info->shadow[i].req.id = i+1;
+ info->shadow_free = info->ring.req_prod_pvt;
+ info->shadow[BLK_RING_SIZE-1].req.id = 0x0fffffff;
+
+ /* Stage 3: Find pending requests and requeue them. */
+ for (i = 0; i < BLK_RING_SIZE; i++) {
+ /* Not in use? */
+ if (copy[i].request == 0)
+ continue;
+
+ /* Grab a request slot and copy shadow state into it. */
+ req = RING_GET_REQUEST(&info->ring, info->ring.req_prod_pvt);
+ *req = copy[i].req;
+
+ /* We get a new request id, and must reset the shadow state. */
+ req->id = get_id_from_freelist(info);
+ memcpy(&info->shadow[req->id], &copy[i], sizeof(copy[i]));
+
+ /* Rewrite any grant references invalidated by susp/resume. */
+ for (j = 0; j < req->nr_segments; j++)
+ gnttab_grant_foreign_access_ref(
+ req->seg[j].gref,
+ info->xbdev->otherend_id,
+ pfn_to_mfn(info->shadow[req->id].frame[j]),
+ rq_data_dir(
+ (struct request *)
+ info->shadow[req->id].request));
+ info->shadow[req->id].req = *req;
+
+ info->ring.req_prod_pvt++;
+ }
+
+ kfree(copy);
+
+ xenbus_switch_state(info->xbdev, XenbusStateConnected);
+
+ spin_lock_irq(&blkif_io_lock);
+
+ /* Now safe for us to use the shared ring */
+ info->connected = BLKIF_STATE_CONNECTED;
+
+ /* Send off requeued requests */
+ flush_requests(info);
+
+ /* Kick any other new requests queued since we resumed */
+ kick_pending_request_queues(info);
+
+ spin_unlock_irq(&blkif_io_lock);
+
+ return 0;
+}
+
+/**
+ * We are reconnecting to the backend, due to a suspend/resume, or a backend
+ * driver restart. We tear down our blkif structure and recreate it, but
+ * leave the device-layer structures intact so that this is transparent to the
+ * rest of the kernel.
+ */
+static int blkfront_resume(struct xenbus_device *dev)
+{
+ struct blkfront_info *info = dev->dev.driver_data;
+ int err;
+
+ dev_dbg(&dev->dev, "blkfront_resume: %s\n", dev->nodename);
+
+ blkif_free(info, info->connected == BLKIF_STATE_CONNECTED);
+
+ err = talk_to_backend(dev, info);
+ if (info->connected == BLKIF_STATE_SUSPENDED && !err)
+ err = blkif_recover(info);
+
+ return err;
+}
+
+
+/*
+ * Invoked when the backend is finally 'ready' (and has provided the
+ * details about the physical device - #sectors, size, etc).
+ */
+static void blkfront_connect(struct blkfront_info *info)
+{
+ unsigned long long sectors;
+ unsigned long sector_size;
+ unsigned int binfo;
+ int err;
+
+ if ((info->connected == BLKIF_STATE_CONNECTED) ||
+ (info->connected == BLKIF_STATE_SUSPENDED) )
+ return;
+
+ dev_dbg(&info->xbdev->dev, "%s:%s.\n",
+ __func__, info->xbdev->otherend);
+
+ err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
+ "sectors", "%llu", &sectors,
+ "info", "%u", &binfo,
+ "sector-size", "%lu", &sector_size,
+ NULL);
+ if (err) {
+ xenbus_dev_fatal(info->xbdev, err,
+ "reading backend fields at %s",
+ info->xbdev->otherend);
+ return;
+ }
+
+ err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
+ "feature-barrier", "%lu", &info->feature_barrier,
+ NULL);
+ if (err)
+ info->feature_barrier = 0;
+
+ err = xlvbd_alloc_gendisk(BLKIF_MINOR(info->vdevice),
+ sectors, info->vdevice,
+ binfo, sector_size, info);
+ if (err) {
+ xenbus_dev_fatal(info->xbdev, err, "xlvbd_add at %s",
+ info->xbdev->otherend);
+ return;
+ }
+
+ xenbus_switch_state(info->xbdev, XenbusStateConnected);
+
+ /* Kick pending requests. */
+ spin_lock_irq(&blkif_io_lock);
+ info->connected = BLKIF_STATE_CONNECTED;
+ kick_pending_request_queues(info);
+ spin_unlock_irq(&blkif_io_lock);
+
+ add_disk(info->gd);
+}
+
+/**
+ * Handle the change of state of the backend to Closing. We must delete our
+ * device-layer structures now, to ensure that writes are flushed through to
+ * the backend. Once this is done, we can switch to Closed in
+ * acknowledgement.
+ */
+static void blkfront_closing(struct xenbus_device *dev)
+{
+ struct blkfront_info *info = dev->dev.driver_data;
+ unsigned long flags;
+
+ dev_dbg(&dev->dev, "blkfront_closing: %s removed\n", dev->nodename);
+
+ if (info->rq == NULL)
+ goto out;
+
+ spin_lock_irqsave(&blkif_io_lock, flags);
+
+ del_gendisk(info->gd);
+
+ /* No more blkif_request(). */
+ blk_stop_queue(info->rq);
+
+ /* No more gnttab callback work. */
+ gnttab_cancel_free_callback(&info->callback);
+ spin_unlock_irqrestore(&blkif_io_lock, flags);
+
+ /* Flush gnttab callback work. Must be done with no locks held. */
+ flush_scheduled_work();
+
+ blk_cleanup_queue(info->rq);
+ info->rq = NULL;
+
+ out:
+ xenbus_frontend_closed(dev);
+}
+
+/**
+ * Callback received when the backend's state changes.
+ */
+static void backend_changed(struct xenbus_device *dev,
+ enum xenbus_state backend_state)
+{
+ struct blkfront_info *info = dev->dev.driver_data;
+ struct block_device *bd;
+
+ dev_dbg(&dev->dev, "blkfront:backend_changed.\n");
+
+ switch (backend_state) {
+ case XenbusStateInitialising:
+ case XenbusStateInitWait:
+ case XenbusStateInitialised:
+ case XenbusStateUnknown:
+ case XenbusStateClosed:
+ break;
+
+ case XenbusStateConnected:
+ blkfront_connect(info);
+ break;
+
+ case XenbusStateClosing:
+ bd = bdget(info->dev);
+ if (bd == NULL)
+ xenbus_dev_fatal(dev, -ENODEV, "bdget failed");
+
+ mutex_lock(&bd->bd_mutex);
+ if (info->users > 0)
+ xenbus_dev_error(dev, -EBUSY,
+ "Device in use; refusing to close");
+ else
+ blkfront_closing(dev);
+ mutex_unlock(&bd->bd_mutex);
+ bdput(bd);
+ break;
+ }
+}
+
+static int blkfront_remove(struct xenbus_device *dev)
+{
+ struct blkfront_info *info = dev->dev.driver_data;
+
+ dev_dbg(&dev->dev, "blkfront_remove: %s removed\n", dev->nodename);
+
+ blkif_free(info, 0);
+
+ kfree(info);
+
+ return 0;
+}
+
+static int blkif_open(struct inode *inode, struct file *filep)
+{
+ struct blkfront_info *info = inode->i_bdev->bd_disk->private_data;
+ info->users++;
+ return 0;
+}
+
+static int blkif_release(struct inode *inode, struct file *filep)
+{
+ struct blkfront_info *info = inode->i_bdev->bd_disk->private_data;
+ info->users--;
+ if (info->users == 0) {
+ /* Check whether we have been instructed to close. We will
+ have ignored this request initially, as the device was
+ still mounted. */
+ struct xenbus_device *dev = info->xbdev;
+ enum xenbus_state state = xenbus_read_driver_state(dev->otherend);
+
+ if (state == XenbusStateClosing)
+ blkfront_closing(dev);
+ }
+ return 0;
+}
+
+static struct block_device_operations xlvbd_block_fops =
+{
+ .owner = THIS_MODULE,
+ .open = blkif_open,
+ .release = blkif_release,
+};
+
+
+static struct xenbus_device_id blkfront_ids[] = {
+ { "vbd" },
+ { "" }
+};
+
+static struct xenbus_driver blkfront = {
+ .name = "vbd",
+ .owner = THIS_MODULE,
+ .ids = blkfront_ids,
+ .probe = blkfront_probe,
+ .remove = blkfront_remove,
+ .resume = blkfront_resume,
+ .otherend_changed = backend_changed,
+};
+
+static int __init xlblk_init(void)
+{
+ if (!is_running_on_xen())
+ return -ENODEV;
+
+ if (register_blkdev(XENVBD_MAJOR, DEV_NAME)) {
+ printk(KERN_WARNING "xen_blk: can't get major %d with name %s\n",
+ XENVBD_MAJOR, DEV_NAME);
+ return -ENODEV;
+ }
+
+ return xenbus_register_frontend(&blkfront);
+}
+module_init(xlblk_init);
+
+
+static void xlblk_exit(void)
+{
+ return xenbus_unregister_driver(&blkfront);
+}
+module_exit(xlblk_exit);
+
+MODULE_DESCRIPTION("Xen virtual block device frontend");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_BLOCKDEV_MAJOR(XENVBD_MAJOR);
diff --git a/drivers/block/xsysace.c b/drivers/block/xsysace.c
new file mode 100644
index 00000000000..732ec63b6e9
--- /dev/null
+++ b/drivers/block/xsysace.c
@@ -0,0 +1,1164 @@
+/*
+ * Xilinx SystemACE device driver
+ *
+ * Copyright 2007 Secret Lab Technologies Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ */
+
+/*
+ * The SystemACE chip is designed to configure FPGAs by loading an FPGA
+ * bitstream from a file on a CF card and squirting it into FPGAs connected
+ * to the SystemACE JTAG chain. It also has the advantage of providing an
+ * MPU interface which can be used to control the FPGA configuration process
+ * and to use the attached CF card for general purpose storage.
+ *
+ * This driver is a block device driver for the SystemACE.
+ *
+ * Initialization:
+ * The driver registers itself as a platform_device driver at module
+ * load time. The platform bus will take care of calling the
+ * ace_probe() method for all SystemACE instances in the system. Any
+ * number of SystemACE instances are supported. ace_probe() calls
+ * ace_setup() which initializes all data structures, reads the CF
+ * id structure and registers the device.
+ *
+ * Processing:
+ * Just about all of the heavy lifting in this driver is performed by
+ * a Finite State Machine (FSM). The driver needs to wait on a number
+ * of events; some raised by interrupts, some which need to be polled
+ * for. Describing all of the behaviour in an FSM seems to be the
+ * easiest way to keep the complexity low and make it easy to
+ * understand what the driver is doing. If the block ops or the
+ * request function need to interact with the hardware, then they
+ * simply need to flag the request and kick off FSM processing.
+ *
+ * The FSM itself is atomic-safe code which can be run from any
+ * context. The general process flow is:
+ * 1. obtain the ace->lock spinlock.
+ * 2. loop on ace_fsm_dostate() until the ace->fsm_continue flag is
+ * cleared.
+ * 3. release the lock.
+ *
+ * Individual states do not sleep in any way. If a condition needs to
+ * be waited for then the state must clear the fsm_continue flag and
+ * either schedule the FSM to be run again at a later time, or expect
+ * an interrupt to call the FSM when the desired condition is met.
+ *
+ * In normal operation, the FSM is processed at interrupt context
+ * either when the driver's tasklet is scheduled, or when an irq is
+ * raised by the hardware. The tasklet can be scheduled at any time.
+ * The request method in particular schedules the tasklet when a new
+ * request has been indicated by the block layer. Once started, the
+ * FSM proceeds as far as it can processing the request until it
+ * needs to wait on a hardware event. At this point, it must yield execution.
+ *
+ * A state has two options when yielding execution:
+ * 1. ace_fsm_yield()
+ *      - Call if the state needs to poll for an event.
+ * - clears the fsm_continue flag to exit the processing loop
+ * - reschedules the tasklet to run again as soon as possible
+ * 2. ace_fsm_yieldirq()
+ * - Call if an irq is expected from the HW
+ * - clears the fsm_continue flag to exit the processing loop
+ * - does not reschedule the tasklet so the FSM will not be processed
+ * again until an irq is received.
+ * After calling a yield function, the state must return control back
+ * to the FSM main loop.
+ *
+ * Additionally, the driver maintains a kernel timer which can process
+ * the FSM. If the FSM gets stalled, typically due to a missed
+ * interrupt, then the kernel timer will expire and the driver can
+ * continue where it left off.
+ *
+ * To Do:
+ * - Add FPGA configuration control interface.
+ * - Request major number from lanana
+ */
+
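+/*
+ * Illustrative sketch (not part of this patch) of the locking pattern
+ * described above: take ace->lock, run ace_fsm_dostate() until a state
+ * clears fsm_continue_flag, then drop the lock.  ace_fsm_dostate()'s
+ * exact signature is assumed here; the driver runs this loop from its
+ * tasklet, its interrupt handler and its stall timer.
+ */
+#if 0
+static void ace_fsm_loop_sketch(struct ace_device *ace)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&ace->lock, flags);
+	ace->fsm_continue_flag = 1;
+	while (ace->fsm_continue_flag)
+		ace_fsm_dostate(ace);	/* one FSM step; may clear the flag */
+	spin_unlock_irqrestore(&ace->lock, flags);
+}
+#endif
+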
+#undef DEBUG
+
+#include <linux/module.h>
+#include <linux/ctype.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/blkdev.h>
+#include <linux/hdreg.h>
+#include <linux/platform_device.h>
+
+MODULE_AUTHOR("Grant Likely <grant.likely@secretlab.ca>");
+MODULE_DESCRIPTION("Xilinx SystemACE device driver");
+MODULE_LICENSE("GPL");
+
+/* SystemACE register definitions */
+#define ACE_BUSMODE (0x00)
+
+#define ACE_STATUS (0x04)
+#define ACE_STATUS_CFGLOCK (0x00000001)
+#define ACE_STATUS_MPULOCK (0x00000002)
+#define ACE_STATUS_CFGERROR (0x00000004) /* config controller error */
+#define ACE_STATUS_CFCERROR (0x00000008) /* CF controller error */
+#define ACE_STATUS_CFDETECT (0x00000010)
+#define ACE_STATUS_DATABUFRDY (0x00000020)
+#define ACE_STATUS_DATABUFMODE (0x00000040)
+#define ACE_STATUS_CFGDONE (0x00000080)
+#define ACE_STATUS_RDYFORCFCMD (0x00000100)
+#define ACE_STATUS_CFGMODEPIN (0x00000200)
+#define ACE_STATUS_CFGADDR_MASK (0x0000e000)
+#define ACE_STATUS_CFBSY (0x00020000)
+#define ACE_STATUS_CFRDY (0x00040000)
+#define ACE_STATUS_CFDWF (0x00080000)
+#define ACE_STATUS_CFDSC (0x00100000)
+#define ACE_STATUS_CFDRQ (0x00200000)
+#define ACE_STATUS_CFCORR (0x00400000)
+#define ACE_STATUS_CFERR (0x00800000)
+
+#define ACE_ERROR (0x08)
+#define ACE_CFGLBA (0x0c)
+#define ACE_MPULBA (0x10)
+
+#define ACE_SECCNTCMD (0x14)
+#define ACE_SECCNTCMD_RESET (0x0100)
+#define ACE_SECCNTCMD_IDENTIFY (0x0200)
+#define ACE_SECCNTCMD_READ_DATA (0x0300)
+#define ACE_SECCNTCMD_WRITE_DATA (0x0400)
+#define ACE_SECCNTCMD_ABORT (0x0600)
+
+#define ACE_VERSION (0x16)
+#define ACE_VERSION_REVISION_MASK (0x00FF)
+#define ACE_VERSION_MINOR_MASK (0x0F00)
+#define ACE_VERSION_MAJOR_MASK (0xF000)
+
+#define ACE_CTRL (0x18)
+#define ACE_CTRL_FORCELOCKREQ (0x0001)
+#define ACE_CTRL_LOCKREQ (0x0002)
+#define ACE_CTRL_FORCECFGADDR (0x0004)
+#define ACE_CTRL_FORCECFGMODE (0x0008)
+#define ACE_CTRL_CFGMODE (0x0010)
+#define ACE_CTRL_CFGSTART (0x0020)
+#define ACE_CTRL_CFGSEL (0x0040)
+#define ACE_CTRL_CFGRESET (0x0080)
+#define ACE_CTRL_DATABUFRDYIRQ (0x0100)
+#define ACE_CTRL_ERRORIRQ (0x0200)
+#define ACE_CTRL_CFGDONEIRQ (0x0400)
+#define ACE_CTRL_RESETIRQ (0x0800)
+#define ACE_CTRL_CFGPROG (0x1000)
+#define ACE_CTRL_CFGADDR_MASK (0xe000)
+
+#define ACE_FATSTAT (0x1c)
+
+#define ACE_NUM_MINORS 16
+#define ACE_SECTOR_SIZE (512)
+#define ACE_FIFO_SIZE (32)
+#define ACE_BUF_PER_SECTOR (ACE_SECTOR_SIZE / ACE_FIFO_SIZE)
+
+struct ace_reg_ops;
+
+struct ace_device {
+ /* driver state data */
+ int id;
+ int media_change;
+ int users;
+ struct list_head list;
+
+ /* finite state machine data */
+ struct tasklet_struct fsm_tasklet;
+ uint fsm_task; /* Current activity (ACE_TASK_*) */
+ uint fsm_state; /* Current state (ACE_FSM_STATE_*) */
+ uint fsm_continue_flag; /* cleared to exit FSM mainloop */
+ uint fsm_iter_num;
+ struct timer_list stall_timer;
+
+ /* Transfer state/result, used for both id and block requests */
+ struct request *req; /* request being processed */
+ void *data_ptr; /* pointer to I/O buffer */
+ int data_count; /* number of buffers remaining */
+ int data_result; /* Result of transfer; 0 := success */
+
+ int id_req_count; /* count of id requests */
+ int id_result;
+ struct completion id_completion; /* used when id req finishes */
+ int in_irq;
+
+ /* Details of hardware device */
+ unsigned long physaddr;
+ void *baseaddr;
+ int irq;
+ int bus_width; /* 0 := 8 bit; 1 := 16 bit */
+ struct ace_reg_ops *reg_ops;
+ int lock_count;
+
+ /* Block device data structures */
+ spinlock_t lock;
+ struct device *dev;
+ struct request_queue *queue;
+ struct gendisk *gd;
+
+ /* Inserted CF card parameters */
+ struct hd_driveid cf_id;
+};
+
+static int ace_major;
+
+/* ---------------------------------------------------------------------
+ * Low level register access
+ */
+
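+/* Register access is indirected through an ace_reg_ops table so the
+ * same code can drive 8-bit, 16-bit big-endian and 16-bit
+ * little-endian bus attachments; ace_setup() selects the table.
+ */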
+struct ace_reg_ops {
+ u16 (*in)(struct ace_device *ace, int reg);
+ void (*out)(struct ace_device *ace, int reg, u16 val);
+ void (*datain)(struct ace_device *ace);
+ void (*dataout)(struct ace_device *ace);
+};
+
+/* 8 Bit bus width */
+static u16 ace_in_8(struct ace_device *ace, int reg)
+{
+ void *r = ace->baseaddr + reg;
+ return in_8(r) | (in_8(r + 1) << 8);
+}
+
+static void ace_out_8(struct ace_device *ace, int reg, u16 val)
+{
+ void *r = ace->baseaddr + reg;
+ out_8(r, val);
+ out_8(r + 1, val >> 8);
+}
+
+static void ace_datain_8(struct ace_device *ace)
+{
+ void *r = ace->baseaddr + 0x40;
+ u8 *dst = ace->data_ptr;
+ int i = ACE_FIFO_SIZE;
+ while (i--)
+ *dst++ = in_8(r++);
+ ace->data_ptr = dst;
+}
+
+static void ace_dataout_8(struct ace_device *ace)
+{
+ void *r = ace->baseaddr + 0x40;
+ u8 *src = ace->data_ptr;
+ int i = ACE_FIFO_SIZE;
+ while (i--)
+ out_8(r++, *src++);
+ ace->data_ptr = src;
+}
+
+static struct ace_reg_ops ace_reg_8_ops = {
+ .in = ace_in_8,
+ .out = ace_out_8,
+ .datain = ace_datain_8,
+ .dataout = ace_dataout_8,
+};
+
+/* 16 bit big endian bus attachment */
+static u16 ace_in_be16(struct ace_device *ace, int reg)
+{
+ return in_be16(ace->baseaddr + reg);
+}
+
+static void ace_out_be16(struct ace_device *ace, int reg, u16 val)
+{
+ out_be16(ace->baseaddr + reg, val);
+}
+
+static void ace_datain_be16(struct ace_device *ace)
+{
+ int i = ACE_FIFO_SIZE / 2;
+ u16 *dst = ace->data_ptr;
+ while (i--)
+ *dst++ = in_le16(ace->baseaddr + 0x40);
+ ace->data_ptr = dst;
+}
+
+static void ace_dataout_be16(struct ace_device *ace)
+{
+ int i = ACE_FIFO_SIZE / 2;
+ u16 *src = ace->data_ptr;
+ while (i--)
+ out_le16(ace->baseaddr + 0x40, *src++);
+ ace->data_ptr = src;
+}
+
+/* 16 bit little endian bus attachment */
+static u16 ace_in_le16(struct ace_device *ace, int reg)
+{
+ return in_le16(ace->baseaddr + reg);
+}
+
+static void ace_out_le16(struct ace_device *ace, int reg, u16 val)
+{
+ out_le16(ace->baseaddr + reg, val);
+}
+
+static void ace_datain_le16(struct ace_device *ace)
+{
+ int i = ACE_FIFO_SIZE / 2;
+ u16 *dst = ace->data_ptr;
+ while (i--)
+ *dst++ = in_be16(ace->baseaddr + 0x40);
+ ace->data_ptr = dst;
+}
+
+static void ace_dataout_le16(struct ace_device *ace)
+{
+ int i = ACE_FIFO_SIZE / 2;
+ u16 *src = ace->data_ptr;
+ while (i--)
+ out_be16(ace->baseaddr + 0x40, *src++);
+ ace->data_ptr = src;
+}
+
+static struct ace_reg_ops ace_reg_be16_ops = {
+ .in = ace_in_be16,
+ .out = ace_out_be16,
+ .datain = ace_datain_be16,
+ .dataout = ace_dataout_be16,
+};
+
+static struct ace_reg_ops ace_reg_le16_ops = {
+ .in = ace_in_le16,
+ .out = ace_out_le16,
+ .datain = ace_datain_le16,
+ .dataout = ace_dataout_le16,
+};
+
+static inline u16 ace_in(struct ace_device *ace, int reg)
+{
+ return ace->reg_ops->in(ace, reg);
+}
+
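+/* 32-bit registers are accessed as two 16-bit halves, least
+ * significant halfword at the lower address. */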
+static inline u32 ace_in32(struct ace_device *ace, int reg)
+{
+ return ace_in(ace, reg) | (ace_in(ace, reg + 2) << 16);
+}
+
+static inline void ace_out(struct ace_device *ace, int reg, u16 val)
+{
+ ace->reg_ops->out(ace, reg, val);
+}
+
+static inline void ace_out32(struct ace_device *ace, int reg, u32 val)
+{
+ ace_out(ace, reg, val);
+ ace_out(ace, reg + 2, val >> 16);
+}
+
+/* ---------------------------------------------------------------------
+ * Debug support functions
+ */
+
+#if defined(DEBUG)
+static void ace_dump_mem(void *base, int len)
+{
+ const char *ptr = base;
+ int i, j;
+
+ for (i = 0; i < len; i += 16) {
+ printk(KERN_INFO "%.8x:", i);
+ for (j = 0; j < 16; j++) {
+ if (!(j % 4))
+ printk(" ");
+ printk("%.2x", ptr[i + j]);
+ }
+ printk(" ");
+ for (j = 0; j < 16; j++)
+ printk("%c", isprint(ptr[i + j]) ? ptr[i + j] : '.');
+ printk("\n");
+ }
+}
+#else
+static inline void ace_dump_mem(void *base, int len)
+{
+}
+#endif
+
+static void ace_dump_regs(struct ace_device *ace)
+{
+ dev_info(ace->dev, " ctrl: %.8x seccnt/cmd: %.4x ver:%.4x\n"
+ " status:%.8x mpu_lba:%.8x busmode:%4x\n"
+ " error: %.8x cfg_lba:%.8x fatstat:%.4x\n",
+ ace_in32(ace, ACE_CTRL),
+ ace_in(ace, ACE_SECCNTCMD),
+ ace_in(ace, ACE_VERSION),
+ ace_in32(ace, ACE_STATUS),
+ ace_in32(ace, ACE_MPULBA),
+ ace_in(ace, ACE_BUSMODE),
+ ace_in32(ace, ACE_ERROR),
+ ace_in32(ace, ACE_CFGLBA), ace_in(ace, ACE_FATSTAT));
+}
+
+void ace_fix_driveid(struct hd_driveid *id)
+{
+#if defined(__BIG_ENDIAN)
+ u16 *buf = (void *)id;
+ int i;
+
+ /* All half words have the wrong byte order; swap the bytes */
+ for (i = 0; i < sizeof(struct hd_driveid); i += 2, buf++)
+ *buf = le16_to_cpu(*buf);
+
+ /* Some of the data values are 32bit; swap the half words */
+ id->lba_capacity = ((id->lba_capacity >> 16) & 0x0000FFFF) |
+ ((id->lba_capacity << 16) & 0xFFFF0000);
+ id->spg = ((id->spg >> 16) & 0x0000FFFF) |
+ ((id->spg << 16) & 0xFFFF0000);
+#endif
+}
+
+/* ---------------------------------------------------------------------
+ * Finite State Machine (FSM) implementation
+ */
+
+/* FSM tasks; used to direct state transitions */
+#define ACE_TASK_IDLE 0
+#define ACE_TASK_IDENTIFY 1
+#define ACE_TASK_READ 2
+#define ACE_TASK_WRITE 3
+#define ACE_FSM_NUM_TASKS 4
+
+/* FSM state definitions */
+#define ACE_FSM_STATE_IDLE 0
+#define ACE_FSM_STATE_REQ_LOCK 1
+#define ACE_FSM_STATE_WAIT_LOCK 2
+#define ACE_FSM_STATE_WAIT_CFREADY 3
+#define ACE_FSM_STATE_IDENTIFY_PREPARE 4
+#define ACE_FSM_STATE_IDENTIFY_TRANSFER 5
+#define ACE_FSM_STATE_IDENTIFY_COMPLETE 6
+#define ACE_FSM_STATE_REQ_PREPARE 7
+#define ACE_FSM_STATE_REQ_TRANSFER 8
+#define ACE_FSM_STATE_REQ_COMPLETE 9
+#define ACE_FSM_STATE_ERROR 10
+#define ACE_FSM_NUM_STATES 11
+
+/* Set flag to exit FSM loop and reschedule tasklet */
+static inline void ace_fsm_yield(struct ace_device *ace)
+{
+ dev_dbg(ace->dev, "ace_fsm_yield()\n");
+ tasklet_schedule(&ace->fsm_tasklet);
+ ace->fsm_continue_flag = 0;
+}
+
+/* Set flag to exit FSM loop and wait for IRQ to reschedule tasklet */
+static inline void ace_fsm_yieldirq(struct ace_device *ace)
+{
+ dev_dbg(ace->dev, "ace_fsm_yieldirq()\n");
+
+ if (ace->irq == NO_IRQ)
+ /* No IRQ assigned, so need to poll */
+ tasklet_schedule(&ace->fsm_tasklet);
+ ace->fsm_continue_flag = 0;
+}
+
+/* Get the next read/write request, ending any requests that we don't handle */
+struct request *ace_get_next_request(request_queue_t * q)
+{
+ struct request *req;
+
+ while ((req = elv_next_request(q)) != NULL) {
+ if (blk_fs_request(req))
+ break;
+ end_request(req, 0);
+ }
+ return req;
+}
+
+static void ace_fsm_dostate(struct ace_device *ace)
+{
+ struct request *req;
+ u32 status;
+ u16 val;
+ int count;
+ int i;
+
+#if defined(DEBUG)
+ dev_dbg(ace->dev, "fsm_state=%i, id_req_count=%i\n",
+ ace->fsm_state, ace->id_req_count);
+#endif
+
+ switch (ace->fsm_state) {
+ case ACE_FSM_STATE_IDLE:
+ /* See if there is anything to do */
+ if (ace->id_req_count || ace_get_next_request(ace->queue)) {
+ ace->fsm_iter_num++;
+ ace->fsm_state = ACE_FSM_STATE_REQ_LOCK;
+ mod_timer(&ace->stall_timer, jiffies + HZ);
+ if (!timer_pending(&ace->stall_timer))
+ add_timer(&ace->stall_timer);
+ break;
+ }
+ del_timer(&ace->stall_timer);
+ ace->fsm_continue_flag = 0;
+ break;
+
+ case ACE_FSM_STATE_REQ_LOCK:
+ if (ace_in(ace, ACE_STATUS) & ACE_STATUS_MPULOCK) {
+ /* Already have the lock, jump to next state */
+ ace->fsm_state = ACE_FSM_STATE_WAIT_CFREADY;
+ break;
+ }
+
+ /* Request the lock */
+ val = ace_in(ace, ACE_CTRL);
+ ace_out(ace, ACE_CTRL, val | ACE_CTRL_LOCKREQ);
+ ace->fsm_state = ACE_FSM_STATE_WAIT_LOCK;
+ break;
+
+ case ACE_FSM_STATE_WAIT_LOCK:
+ if (ace_in(ace, ACE_STATUS) & ACE_STATUS_MPULOCK) {
+ /* got the lock; move to next state */
+ ace->fsm_state = ACE_FSM_STATE_WAIT_CFREADY;
+ break;
+ }
+
+ /* wait a bit for the lock */
+ ace_fsm_yield(ace);
+ break;
+
+ case ACE_FSM_STATE_WAIT_CFREADY:
+ status = ace_in32(ace, ACE_STATUS);
+ if (!(status & ACE_STATUS_RDYFORCFCMD) ||
+ (status & ACE_STATUS_CFBSY)) {
+ /* CF card isn't ready; it needs to be polled */
+ ace_fsm_yield(ace);
+ break;
+ }
+
+ /* Device is ready for command; determine what to do next */
+ if (ace->id_req_count)
+ ace->fsm_state = ACE_FSM_STATE_IDENTIFY_PREPARE;
+ else
+ ace->fsm_state = ACE_FSM_STATE_REQ_PREPARE;
+ break;
+
+ case ACE_FSM_STATE_IDENTIFY_PREPARE:
+ /* Send identify command */
+ ace->fsm_task = ACE_TASK_IDENTIFY;
+ ace->data_ptr = &ace->cf_id;
+ ace->data_count = ACE_BUF_PER_SECTOR;
+ ace_out(ace, ACE_SECCNTCMD, ACE_SECCNTCMD_IDENTIFY);
+
+ /* As per datasheet, put config controller in reset */
+ val = ace_in(ace, ACE_CTRL);
+ ace_out(ace, ACE_CTRL, val | ACE_CTRL_CFGRESET);
+
+ /* irq handler takes over from this point; wait for the
+ * transfer to complete */
+ ace->fsm_state = ACE_FSM_STATE_IDENTIFY_TRANSFER;
+ ace_fsm_yieldirq(ace);
+ break;
+
+ case ACE_FSM_STATE_IDENTIFY_TRANSFER:
+ /* Check that the sysace is ready to receive data */
+ status = ace_in32(ace, ACE_STATUS);
+ if (status & ACE_STATUS_CFBSY) {
+ dev_dbg(ace->dev, "CFBSY set; t=%i iter=%i dc=%i\n",
+ ace->fsm_task, ace->fsm_iter_num,
+ ace->data_count);
+ ace_fsm_yield(ace);
+ break;
+ }
+ if (!(status & ACE_STATUS_DATABUFRDY)) {
+ ace_fsm_yield(ace);
+ break;
+ }
+
+ /* Transfer the next buffer */
+ ace->reg_ops->datain(ace);
+ ace->data_count--;
+
+ /* If there are still buffers to be transferred, jump out here */
+ if (ace->data_count != 0) {
+ ace_fsm_yieldirq(ace);
+ break;
+ }
+
+ /* transfer finished; kick state machine */
+ dev_dbg(ace->dev, "identify finished\n");
+ ace->fsm_state = ACE_FSM_STATE_IDENTIFY_COMPLETE;
+ break;
+
+ case ACE_FSM_STATE_IDENTIFY_COMPLETE:
+ ace_fix_driveid(&ace->cf_id);
+ ace_dump_mem(&ace->cf_id, 512); /* Debug: Dump out disk ID */
+
+ if (ace->data_result) {
+ /* Error occurred, disable the disk */
+ ace->media_change = 1;
+ set_capacity(ace->gd, 0);
+ dev_err(ace->dev, "error fetching CF id (%i)\n",
+ ace->data_result);
+ } else {
+ ace->media_change = 0;
+
+ /* Record disk parameters */
+ set_capacity(ace->gd, ace->cf_id.lba_capacity);
+ dev_info(ace->dev, "capacity: %i sectors\n",
+ ace->cf_id.lba_capacity);
+ }
+
+ /* We're done, drop to IDLE state and notify waiters */
+ ace->fsm_state = ACE_FSM_STATE_IDLE;
+ ace->id_result = ace->data_result;
+ while (ace->id_req_count) {
+ complete(&ace->id_completion);
+ ace->id_req_count--;
+ }
+ break;
+
+ case ACE_FSM_STATE_REQ_PREPARE:
+ req = ace_get_next_request(ace->queue);
+ if (!req) {
+ ace->fsm_state = ACE_FSM_STATE_IDLE;
+ break;
+ }
+
+ /* Okay, it's a data request, set it up for transfer */
+ dev_dbg(ace->dev,
+ "request: sec=%lx hcnt=%lx, ccnt=%x, dir=%i\n",
+ req->sector, req->hard_nr_sectors,
+ req->current_nr_sectors, rq_data_dir(req));
+
+ ace->req = req;
+ ace->data_ptr = req->buffer;
+ ace->data_count = req->current_nr_sectors * ACE_BUF_PER_SECTOR;
+ ace_out32(ace, ACE_MPULBA, req->sector & 0x0FFFFFFF);
+
+ count = req->hard_nr_sectors;
+ if (rq_data_dir(req)) {
+ /* Kick off write request */
+ dev_dbg(ace->dev, "write data\n");
+ ace->fsm_task = ACE_TASK_WRITE;
+ ace_out(ace, ACE_SECCNTCMD,
+ count | ACE_SECCNTCMD_WRITE_DATA);
+ } else {
+ /* Kick off read request */
+ dev_dbg(ace->dev, "read data\n");
+ ace->fsm_task = ACE_TASK_READ;
+ ace_out(ace, ACE_SECCNTCMD,
+ count | ACE_SECCNTCMD_READ_DATA);
+ }
+
+ /* As per datasheet, put config controller in reset */
+ val = ace_in(ace, ACE_CTRL);
+ ace_out(ace, ACE_CTRL, val | ACE_CTRL_CFGRESET);
+
+ /* Move to the transfer state. The systemace will raise
+ * an interrupt once there is something to do
+ */
+ ace->fsm_state = ACE_FSM_STATE_REQ_TRANSFER;
+ if (ace->fsm_task == ACE_TASK_READ)
+ ace_fsm_yieldirq(ace); /* wait for data ready */
+ break;
+
+ case ACE_FSM_STATE_REQ_TRANSFER:
+ /* Check that the sysace is ready to receive data */
+ status = ace_in32(ace, ACE_STATUS);
+ if (status & ACE_STATUS_CFBSY) {
+ dev_dbg(ace->dev,
+ "CFBSY set; t=%i iter=%i c=%i dc=%i irq=%i\n",
+ ace->fsm_task, ace->fsm_iter_num,
+ ace->req->current_nr_sectors * 16,
+ ace->data_count, ace->in_irq);
+ ace_fsm_yield(ace); /* need to poll CFBSY bit */
+ break;
+ }
+ if (!(status & ACE_STATUS_DATABUFRDY)) {
+ dev_dbg(ace->dev,
+ "DATABUF not set; t=%i iter=%i c=%i dc=%i irq=%i\n",
+ ace->fsm_task, ace->fsm_iter_num,
+ ace->req->current_nr_sectors * 16,
+ ace->data_count, ace->in_irq);
+ ace_fsm_yieldirq(ace);
+ break;
+ }
+
+ /* Transfer the next buffer */
+ i = 16;
+ if (ace->fsm_task == ACE_TASK_WRITE)
+ ace->reg_ops->dataout(ace);
+ else
+ ace->reg_ops->datain(ace);
+ ace->data_count--;
+
+ /* If there are still buffers to be transferred, jump out here */
+ if (ace->data_count != 0) {
+ ace_fsm_yieldirq(ace);
+ break;
+ }
+
+ /* bio finished; is there another one? */
+ i = ace->req->current_nr_sectors;
+ if (end_that_request_first(ace->req, 1, i)) {
+ /* dev_dbg(ace->dev, "next block; h=%li c=%i\n",
+ * ace->req->hard_nr_sectors,
+ * ace->req->current_nr_sectors);
+ */
+ ace->data_ptr = ace->req->buffer;
+ ace->data_count = ace->req->current_nr_sectors * 16;
+ ace_fsm_yieldirq(ace);
+ break;
+ }
+
+ ace->fsm_state = ACE_FSM_STATE_REQ_COMPLETE;
+ break;
+
+ case ACE_FSM_STATE_REQ_COMPLETE:
+ /* Complete the block request */
+ blkdev_dequeue_request(ace->req);
+ end_that_request_last(ace->req, 1);
+ ace->req = NULL;
+
+ /* Finished request; go to idle state */
+ ace->fsm_state = ACE_FSM_STATE_IDLE;
+ break;
+
+ default:
+ ace->fsm_state = ACE_FSM_STATE_IDLE;
+ break;
+ }
+}
+
+static void ace_fsm_tasklet(unsigned long data)
+{
+ struct ace_device *ace = (void *)data;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ace->lock, flags);
+
+ /* Loop over state machine until told to stop */
+ ace->fsm_continue_flag = 1;
+ while (ace->fsm_continue_flag)
+ ace_fsm_dostate(ace);
+
+ spin_unlock_irqrestore(&ace->lock, flags);
+}
+
+static void ace_stall_timer(unsigned long data)
+{
+ struct ace_device *ace = (void *)data;
+ unsigned long flags;
+
+ dev_warn(ace->dev,
+ "kicking stalled fsm; state=%i task=%i iter=%i dc=%i\n",
+ ace->fsm_state, ace->fsm_task, ace->fsm_iter_num,
+ ace->data_count);
+ spin_lock_irqsave(&ace->lock, flags);
+
+ /* Rearm the stall timer *before* entering FSM (which may then
+ * delete the timer) */
+ mod_timer(&ace->stall_timer, jiffies + HZ);
+
+ /* Loop over state machine until told to stop */
+ ace->fsm_continue_flag = 1;
+ while (ace->fsm_continue_flag)
+ ace_fsm_dostate(ace);
+
+ spin_unlock_irqrestore(&ace->lock, flags);
+}
+
+/* ---------------------------------------------------------------------
+ * Interrupt handling routines
+ */
+static int ace_interrupt_checkstate(struct ace_device *ace)
+{
+ u32 sreg = ace_in32(ace, ACE_STATUS);
+ u16 creg = ace_in(ace, ACE_CTRL);
+
+ /* Check for error occurrence */
+ if ((sreg & (ACE_STATUS_CFGERROR | ACE_STATUS_CFCERROR)) &&
+ (creg & ACE_CTRL_ERRORIRQ)) {
+ dev_err(ace->dev, "transfer failure\n");
+ ace_dump_regs(ace);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static irqreturn_t ace_interrupt(int irq, void *dev_id)
+{
+ u16 creg;
+ struct ace_device *ace = dev_id;
+
+ /* be safe and get the lock */
+ spin_lock(&ace->lock);
+ ace->in_irq = 1;
+
+ /* clear the interrupt */
+ creg = ace_in(ace, ACE_CTRL);
+ ace_out(ace, ACE_CTRL, creg | ACE_CTRL_RESETIRQ);
+ ace_out(ace, ACE_CTRL, creg);
+
+ /* check for IO failures */
+ if (ace_interrupt_checkstate(ace))
+ ace->data_result = -EIO;
+
+ if (ace->fsm_task == 0) {
+ dev_err(ace->dev,
+ "spurious irq; stat=%.8x ctrl=%.8x cmd=%.4x\n",
+ ace_in32(ace, ACE_STATUS), ace_in32(ace, ACE_CTRL),
+ ace_in(ace, ACE_SECCNTCMD));
+ dev_err(ace->dev, "fsm_task=%i fsm_state=%i data_count=%i\n",
+ ace->fsm_task, ace->fsm_state, ace->data_count);
+ }
+
+ /* Loop over state machine until told to stop */
+ ace->fsm_continue_flag = 1;
+ while (ace->fsm_continue_flag)
+ ace_fsm_dostate(ace);
+
+ /* done with interrupt; drop the lock */
+ ace->in_irq = 0;
+ spin_unlock(&ace->lock);
+
+ return IRQ_HANDLED;
+}
+
+/* ---------------------------------------------------------------------
+ * Block ops
+ */
+static void ace_request(request_queue_t * q)
+{
+ struct request *req;
+ struct ace_device *ace;
+
+ req = ace_get_next_request(q);
+
+ if (req) {
+ ace = req->rq_disk->private_data;
+ tasklet_schedule(&ace->fsm_tasklet);
+ }
+}
+
+static int ace_media_changed(struct gendisk *gd)
+{
+ struct ace_device *ace = gd->private_data;
+ dev_dbg(ace->dev, "ace_media_changed(): %i\n", ace->media_change);
+
+ return ace->media_change;
+}
+
+static int ace_revalidate_disk(struct gendisk *gd)
+{
+ struct ace_device *ace = gd->private_data;
+ unsigned long flags;
+
+ dev_dbg(ace->dev, "ace_revalidate_disk()\n");
+
+ if (ace->media_change) {
+ dev_dbg(ace->dev, "requesting cf id and scheduling tasklet\n");
+
+ spin_lock_irqsave(&ace->lock, flags);
+ ace->id_req_count++;
+ spin_unlock_irqrestore(&ace->lock, flags);
+
+ tasklet_schedule(&ace->fsm_tasklet);
+ wait_for_completion(&ace->id_completion);
+ }
+
+ dev_dbg(ace->dev, "revalidate complete\n");
+ return ace->id_result;
+}
+
+static int ace_open(struct inode *inode, struct file *filp)
+{
+ struct ace_device *ace = inode->i_bdev->bd_disk->private_data;
+ unsigned long flags;
+
+ dev_dbg(ace->dev, "ace_open() users=%i\n", ace->users + 1);
+
+ filp->private_data = ace;
+ spin_lock_irqsave(&ace->lock, flags);
+ ace->users++;
+ spin_unlock_irqrestore(&ace->lock, flags);
+
+ check_disk_change(inode->i_bdev);
+ return 0;
+}
+
+static int ace_release(struct inode *inode, struct file *filp)
+{
+ struct ace_device *ace = inode->i_bdev->bd_disk->private_data;
+ unsigned long flags;
+ u16 val;
+
+ dev_dbg(ace->dev, "ace_release() users=%i\n", ace->users - 1);
+
+ spin_lock_irqsave(&ace->lock, flags);
+ ace->users--;
+ if (ace->users == 0) {
+ val = ace_in(ace, ACE_CTRL);
+ ace_out(ace, ACE_CTRL, val & ~ACE_CTRL_LOCKREQ);
+ }
+ spin_unlock_irqrestore(&ace->lock, flags);
+ return 0;
+}
+
+static int ace_ioctl(struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg)
+{
+ struct ace_device *ace = inode->i_bdev->bd_disk->private_data;
+ struct hd_geometry __user *geo = (struct hd_geometry __user *)arg;
+ struct hd_geometry g;
+ dev_dbg(ace->dev, "ace_ioctl()\n");
+
+ switch (cmd) {
+ case HDIO_GETGEO:
+ g.heads = ace->cf_id.heads;
+ g.sectors = ace->cf_id.sectors;
+ g.cylinders = ace->cf_id.cyls;
+ g.start = 0;
+ return copy_to_user(geo, &g, sizeof(g)) ? -EFAULT : 0;
+
+ default:
+ return -ENOTTY;
+ }
+ return -ENOTTY;
+}
+
+static struct block_device_operations ace_fops = {
+ .owner = THIS_MODULE,
+ .open = ace_open,
+ .release = ace_release,
+ .media_changed = ace_media_changed,
+ .revalidate_disk = ace_revalidate_disk,
+ .ioctl = ace_ioctl,
+};
+
+/* --------------------------------------------------------------------
+ * SystemACE device setup/teardown code
+ */
+static int __devinit ace_setup(struct ace_device *ace)
+{
+ u16 version;
+ u16 val;
+
+ int rc;
+
+ spin_lock_init(&ace->lock);
+ init_completion(&ace->id_completion);
+
+ /*
+ * Map the device
+ */
+ ace->baseaddr = ioremap(ace->physaddr, 0x80);
+ if (!ace->baseaddr)
+ goto err_ioremap;
+
+ if (ace->irq != NO_IRQ) {
+ rc = request_irq(ace->irq, ace_interrupt, 0, "systemace", ace);
+ if (rc) {
+ /* Failure - fall back to polled mode */
+ dev_err(ace->dev, "request_irq failed\n");
+ ace->irq = NO_IRQ;
+ }
+ }
+
+ /*
+ * Initialize the state machine tasklet and stall timer
+ */
+ tasklet_init(&ace->fsm_tasklet, ace_fsm_tasklet, (unsigned long)ace);
+ setup_timer(&ace->stall_timer, ace_stall_timer, (unsigned long)ace);
+
+ /*
+ * Initialize the request queue
+ */
+ ace->queue = blk_init_queue(ace_request, &ace->lock);
+ if (ace->queue == NULL)
+ goto err_blk_initq;
+ blk_queue_hardsect_size(ace->queue, 512);
+
+ /*
+ * Allocate and initialize GD structure
+ */
+ ace->gd = alloc_disk(ACE_NUM_MINORS);
+ if (!ace->gd)
+ goto err_alloc_disk;
+
+ ace->gd->major = ace_major;
+ ace->gd->first_minor = ace->id * ACE_NUM_MINORS;
+ ace->gd->fops = &ace_fops;
+ ace->gd->queue = ace->queue;
+ ace->gd->private_data = ace;
+ snprintf(ace->gd->disk_name, 32, "xs%c", ace->id + 'a');
+
+ /* set bus width */
+ if (ace->bus_width == 1) {
+ /* 0x0101 should work regardless of endianness */
+ ace_out_le16(ace, ACE_BUSMODE, 0x0101);
+
+ /* read it back to determine endianness */
+ if (ace_in_le16(ace, ACE_BUSMODE) == 0x0001)
+ ace->reg_ops = &ace_reg_le16_ops;
+ else
+ ace->reg_ops = &ace_reg_be16_ops;
+ } else {
+ ace_out_8(ace, ACE_BUSMODE, 0x00);
+ ace->reg_ops = &ace_reg_8_ops;
+ }
+
+ /* Make sure version register is sane */
+ version = ace_in(ace, ACE_VERSION);
+ if ((version == 0) || (version == 0xFFFF))
+ goto err_read;
+
+ /* Put sysace in a sane state by clearing most control reg bits */
+ ace_out(ace, ACE_CTRL, ACE_CTRL_FORCECFGMODE |
+ ACE_CTRL_DATABUFRDYIRQ | ACE_CTRL_ERRORIRQ);
+
+ /* Enable interrupts */
+ val = ace_in(ace, ACE_CTRL);
+ val |= ACE_CTRL_DATABUFRDYIRQ | ACE_CTRL_ERRORIRQ;
+ ace_out(ace, ACE_CTRL, val);
+
+ /* Print the identification */
+ dev_info(ace->dev, "Xilinx SystemACE revision %i.%i.%i\n",
+ (version >> 12) & 0xf, (version >> 8) & 0x0f, version & 0xff);
+ dev_dbg(ace->dev, "physaddr 0x%lx, mapped to 0x%p, irq=%i\n",
+ ace->physaddr, ace->baseaddr, ace->irq);
+
+ ace->media_change = 1;
+ ace_revalidate_disk(ace->gd);
+
+ /* Make the sysace device 'live' */
+ add_disk(ace->gd);
+
+ return 0;
+
+ err_read:
+ put_disk(ace->gd);
+ err_alloc_disk:
+ blk_cleanup_queue(ace->queue);
+ err_blk_initq:
+ iounmap(ace->baseaddr);
+ if (ace->irq != NO_IRQ)
+ free_irq(ace->irq, ace);
+ err_ioremap:
+ printk(KERN_INFO "xsysace: error initializing device at 0x%lx\n",
+ ace->physaddr);
+ return -ENOMEM;
+}
+
+static void __devexit ace_teardown(struct ace_device *ace)
+{
+ if (ace->gd) {
+ del_gendisk(ace->gd);
+ put_disk(ace->gd);
+ }
+
+ if (ace->queue)
+ blk_cleanup_queue(ace->queue);
+
+ tasklet_kill(&ace->fsm_tasklet);
+
+ if (ace->irq != NO_IRQ)
+ free_irq(ace->irq, ace);
+
+ iounmap(ace->baseaddr);
+}
+
+/* ---------------------------------------------------------------------
+ * Platform Bus Support
+ */
+
+static int __devinit ace_probe(struct device *device)
+{
+ struct platform_device *dev = to_platform_device(device);
+ struct ace_device *ace;
+ int i;
+
+ dev_dbg(device, "ace_probe(%p)\n", device);
+
+ /*
+ * Allocate the ace device structure
+ */
+ ace = kzalloc(sizeof(struct ace_device), GFP_KERNEL);
+ if (!ace)
+ goto err_alloc;
+
+ ace->dev = device;
+ ace->id = dev->id;
+ ace->irq = NO_IRQ;
+
+ for (i = 0; i < dev->num_resources; i++) {
+ if (dev->resource[i].flags & IORESOURCE_MEM)
+ ace->physaddr = dev->resource[i].start;
+ if (dev->resource[i].flags & IORESOURCE_IRQ)
+ ace->irq = dev->resource[i].start;
+ }
+
+ /* FIXME: Should get bus_width from the platform_device struct */
+ ace->bus_width = 1;
+
+ dev_set_drvdata(&dev->dev, ace);
+
+ /* Call the bus-independent setup code */
+ if (ace_setup(ace) != 0)
+ goto err_setup;
+
+ return 0;
+
+ err_setup:
+ dev_set_drvdata(&dev->dev, NULL);
+ kfree(ace);
+ err_alloc:
+ printk(KERN_ERR "xsysace: could not initialize device\n");
+ return -ENOMEM;
+}
+
+/*
+ * Platform bus remove() method
+ */
+static int __devexit ace_remove(struct device *device)
+{
+ struct ace_device *ace = dev_get_drvdata(device);
+
+ dev_dbg(device, "ace_remove(%p)\n", device);
+
+ if (ace) {
+ ace_teardown(ace);
+ kfree(ace);
+ }
+
+ return 0;
+}
+
+static struct device_driver ace_driver = {
+ .name = "xsysace",
+ .bus = &platform_bus_type,
+ .probe = ace_probe,
+ .remove = __devexit_p(ace_remove),
+};
+
+/* ---------------------------------------------------------------------
+ * Module init/exit routines
+ */
+static int __init ace_init(void)
+{
+ ace_major = register_blkdev(ace_major, "xsysace");
+ if (ace_major <= 0) {
+ printk(KERN_WARNING "xsysace: register_blkdev() failed\n");
+ return ace_major;
+ }
+
+ pr_debug("Registering Xilinx SystemACE driver, major=%i\n", ace_major);
+ return driver_register(&ace_driver);
+}
+
+static void __exit ace_exit(void)
+{
+ pr_debug("Unregistering Xilinx SystemACE driver\n");
+ driver_unregister(&ace_driver);
+ unregister_blkdev(ace_major, "xsysace");
+}
+
+module_init(ace_init);
+module_exit(ace_exit);
diff --git a/drivers/block/z2ram.c b/drivers/block/z2ram.c
index 7cc2685ca84..e40fa98842e 100644
--- a/drivers/block/z2ram.c
+++ b/drivers/block/z2ram.c
@@ -44,9 +44,6 @@
extern int m68k_realnum_memory;
extern struct mem_info m68k_memory[NUM_MEMINFO];
-#define TRUE (1)
-#define FALSE (0)
-
#define Z2MINOR_COMBINED (0)
#define Z2MINOR_Z2ONLY (1)
#define Z2MINOR_CHIPONLY (2)
@@ -374,9 +371,7 @@ static void __exit z2_exit(void)
{
int i, j;
blk_unregister_region(MKDEV(Z2RAM_MAJOR, 0), 256);
- if ( unregister_blkdev( Z2RAM_MAJOR, DEVICE_NAME ) != 0 )
- printk( KERN_ERR DEVICE_NAME ": unregister of device failed\n");
-
+ unregister_blkdev(Z2RAM_MAJOR, DEVICE_NAME);
del_gendisk(z2ram_gendisk);
put_disk(z2ram_gendisk);
blk_cleanup_queue(z2_queue);