author     Jeff Garzik <jeff@garzik.org>  2006-02-23 21:16:27 -0500
committer  Jeff Garzik <jeff@garzik.org>  2006-02-23 21:16:27 -0500
commit     7b0386921db20add25afd8678ed34a9253e512fc (patch)
tree       8c6b888b76211e38e6c1c3006553dc4b2b24c75e /drivers
parent     c5580a7ecb859c6821dd761c95fa150ec7695ae1 (diff)
parent     22fe472cb430ce45c4fb9b6d13060dd724d6dbc8 (diff)
Merge branch 'upstream-fixes'
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/block/pktcdvd.c              |   82
-rw-r--r--  drivers/char/agp/Kconfig             |   55
-rw-r--r--  drivers/char/agp/amd64-agp.c         |    6
-rw-r--r--  drivers/char/agp/sworks-agp.c        |    4
-rw-r--r--  drivers/char/drm/i915_irq.c          |    5
-rw-r--r--  drivers/char/drm/r300_cmdbuf.c       |   48
-rw-r--r--  drivers/char/drm/r300_reg.h          |    3
-rw-r--r--  drivers/char/drm/radeon_drv.h        |    3
-rw-r--r--  drivers/char/sysrq.c                 |    2
-rw-r--r--  drivers/crypto/padlock-aes.c         |    8
-rw-r--r--  drivers/fc4/fc.c                     |    2
-rw-r--r--  drivers/ieee1394/sbp2.c              |  126
-rw-r--r--  drivers/ieee1394/sbp2.h              |   64
-rw-r--r--  drivers/mtd/chips/cfi_cmdset_0001.c  |   14
-rw-r--r--  drivers/net/irda/irda-usb.c          |   90
-rw-r--r--  drivers/net/irda/irda-usb.h          |    7
-rw-r--r--  drivers/net/r8169.c                  |  189
-rw-r--r--  drivers/net/skge.c                   |   75
-rw-r--r--  drivers/net/skge.h                   |    1
-rw-r--r--  drivers/net/sky2.c                   |  173
-rw-r--r--  drivers/net/sky2.h                   |   83
-rw-r--r--  drivers/net/wireless/ipw2200.c       |    6
-rw-r--r--  drivers/s390/block/Kconfig           |   14
-rw-r--r--  drivers/s390/block/Makefile          |    2
-rw-r--r--  drivers/s390/block/dasd.c            |   97
-rw-r--r--  drivers/s390/block/dasd_3990_erp.c   |    3
-rw-r--r--  drivers/s390/block/dasd_eckd.h       |    1
-rw-r--r--  drivers/s390/block/dasd_eer.c        | 1090
-rw-r--r--  drivers/s390/block/dasd_int.h        |   38
-rw-r--r--  drivers/s390/cio/qdio.c              |   13
-rw-r--r--  drivers/scsi/esp.c                   |    4
-rw-r--r--  drivers/scsi/libata-core.c           |   15
-rw-r--r--  drivers/scsi/sata_qstor.c            |    2
-rw-r--r--  drivers/spi/spi.c                    |    5
-rw-r--r--  drivers/video/aty/radeon_pm.c        |   19
35 files changed, 730 insertions(+), 1619 deletions(-)
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index 93e44d0292a..bc9b2bcd7db 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -58,6 +58,7 @@
#include <linux/suspend.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_ioctl.h>
+#include <scsi/scsi.h>
#include <asm/uaccess.h>
@@ -380,6 +381,7 @@ static int pkt_generic_packet(struct pktcdvd_device *pd, struct packet_command *
memcpy(rq->cmd, cgc->cmd, CDROM_PACKET_SIZE);
if (sizeof(rq->cmd) > CDROM_PACKET_SIZE)
memset(rq->cmd + CDROM_PACKET_SIZE, 0, sizeof(rq->cmd) - CDROM_PACKET_SIZE);
+ rq->cmd_len = COMMAND_SIZE(rq->cmd[0]);
rq->ref_count++;
rq->flags |= REQ_NOMERGE;
@@ -1495,40 +1497,42 @@ static int pkt_set_write_settings(struct pktcdvd_device *pd)
}
/*
- * 0 -- we can write to this track, 1 -- we can't
+ * 1 -- we can write to this track, 0 -- we can't
*/
-static int pkt_good_track(track_information *ti)
+static int pkt_writable_track(struct pktcdvd_device *pd, track_information *ti)
{
- /*
- * only good for CD-RW at the moment, not DVD-RW
- */
+ switch (pd->mmc3_profile) {
+ case 0x1a: /* DVD+RW */
+ case 0x12: /* DVD-RAM */
+ /* The track is always writable on DVD+RW/DVD-RAM */
+ return 1;
+ default:
+ break;
+ }
- /*
- * FIXME: only for FP
- */
- if (ti->fp == 0)
+ if (!ti->packet || !ti->fp)
return 0;
/*
* "good" settings as per Mt Fuji.
*/
- if (ti->rt == 0 && ti->blank == 0 && ti->packet == 1)
- return 0;
+ if (ti->rt == 0 && ti->blank == 0)
+ return 1;
- if (ti->rt == 0 && ti->blank == 1 && ti->packet == 1)
- return 0;
+ if (ti->rt == 0 && ti->blank == 1)
+ return 1;
- if (ti->rt == 1 && ti->blank == 0 && ti->packet == 1)
- return 0;
+ if (ti->rt == 1 && ti->blank == 0)
+ return 1;
printk("pktcdvd: bad state %d-%d-%d\n", ti->rt, ti->blank, ti->packet);
- return 1;
+ return 0;
}
/*
- * 0 -- we can write to this disc, 1 -- we can't
+ * 1 -- we can write to this disc, 0 -- we can't
*/
-static int pkt_good_disc(struct pktcdvd_device *pd, disc_information *di)
+static int pkt_writable_disc(struct pktcdvd_device *pd, disc_information *di)
{
switch (pd->mmc3_profile) {
case 0x0a: /* CD-RW */
@@ -1537,10 +1541,10 @@ static int pkt_good_disc(struct pktcdvd_device *pd, disc_information *di)
case 0x1a: /* DVD+RW */
case 0x13: /* DVD-RW */
case 0x12: /* DVD-RAM */
- return 0;
+ return 1;
default:
VPRINTK("pktcdvd: Wrong disc profile (%x)\n", pd->mmc3_profile);
- return 1;
+ return 0;
}
/*
@@ -1549,25 +1553,25 @@ static int pkt_good_disc(struct pktcdvd_device *pd, disc_information *di)
*/
if (di->disc_type == 0xff) {
printk("pktcdvd: Unknown disc. No track?\n");
- return 1;
+ return 0;
}
if (di->disc_type != 0x20 && di->disc_type != 0) {
printk("pktcdvd: Wrong disc type (%x)\n", di->disc_type);
- return 1;
+ return 0;
}
if (di->erasable == 0) {
printk("pktcdvd: Disc not erasable\n");
- return 1;
+ return 0;
}
if (di->border_status == PACKET_SESSION_RESERVED) {
printk("pktcdvd: Can't write to last track (reserved)\n");
- return 1;
+ return 0;
}
- return 0;
+ return 1;
}
static int pkt_probe_settings(struct pktcdvd_device *pd)
@@ -1592,23 +1596,9 @@ static int pkt_probe_settings(struct pktcdvd_device *pd)
return ret;
}
- if (pkt_good_disc(pd, &di))
- return -ENXIO;
+ if (!pkt_writable_disc(pd, &di))
+ return -EROFS;
- switch (pd->mmc3_profile) {
- case 0x1a: /* DVD+RW */
- printk("pktcdvd: inserted media is DVD+RW\n");
- break;
- case 0x13: /* DVD-RW */
- printk("pktcdvd: inserted media is DVD-RW\n");
- break;
- case 0x12: /* DVD-RAM */
- printk("pktcdvd: inserted media is DVD-RAM\n");
- break;
- default:
- printk("pktcdvd: inserted media is CD-R%s\n", di.erasable ? "W" : "");
- break;
- }
pd->type = di.erasable ? PACKET_CDRW : PACKET_CDR;
track = 1; /* (di.last_track_msb << 8) | di.last_track_lsb; */
@@ -1617,9 +1607,9 @@ static int pkt_probe_settings(struct pktcdvd_device *pd)
return ret;
}
- if (pkt_good_track(&ti)) {
+ if (!pkt_writable_track(pd, &ti)) {
printk("pktcdvd: can't write to this track\n");
- return -ENXIO;
+ return -EROFS;
}
/*
@@ -1633,7 +1623,7 @@ static int pkt_probe_settings(struct pktcdvd_device *pd)
}
if (pd->settings.size > PACKET_MAX_SECTORS) {
printk("pktcdvd: packet size is too big\n");
- return -ENXIO;
+ return -EROFS;
}
pd->settings.fp = ti.fp;
pd->offset = (be32_to_cpu(ti.track_start) << 2) & (pd->settings.size - 1);
@@ -1675,7 +1665,7 @@ static int pkt_probe_settings(struct pktcdvd_device *pd)
break;
default:
printk("pktcdvd: unknown data mode\n");
- return 1;
+ return -EROFS;
}
return 0;
}
@@ -1886,7 +1876,7 @@ static int pkt_open_write(struct pktcdvd_device *pd)
if ((ret = pkt_probe_settings(pd))) {
VPRINTK("pktcdvd: %s failed probe\n", pd->name);
- return -EROFS;
+ return ret;
}
if ((ret = pkt_set_write_settings(pd))) {
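The sign of the return values flips throughout the pktcdvd hunks above: the old pkt_good_track()/pkt_good_disc() returned 0 for "writable" and 1 for "not writable", the renamed pkt_writable_track()/pkt_writable_disc() return 1 for "writable", and callers now report -EROFS instead of -ENXIO. A condensed, standalone model of the new track test (illustrative only, not the literal patched function; the profile values are the ones named in the hunk):

#include <stdio.h>

/* Sketch of the pkt_writable_track() truth table: 1 = writable, 0 = not. */
struct ti_bits { unsigned rt:1, blank:1, packet:1, fp:1; };

static int writable_track(unsigned mmc3_profile, struct ti_bits t)
{
	if (mmc3_profile == 0x1a || mmc3_profile == 0x12)   /* DVD+RW, DVD-RAM */
		return 1;                                    /* always writable */
	if (!t.packet || !t.fp)                              /* need fixed packet mode */
		return 0;
	return !(t.rt && t.blank);   /* only rt=1,blank=1 is the rejected "bad state" */
}

int main(void)
{
	struct ti_bits t = { .rt = 0, .blank = 0, .packet = 1, .fp = 1 };
	printf("plain fixed-packet CD-RW track writable: %d\n", writable_track(0x0a, t));
	return 0;
}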
diff --git a/drivers/char/agp/Kconfig b/drivers/char/agp/Kconfig
index 486ed8a11b5..a4d425d2dce 100644
--- a/drivers/char/agp/Kconfig
+++ b/drivers/char/agp/Kconfig
@@ -15,22 +15,23 @@ config AGP
due to kernel allocation issues), you could use PCI accesses
and have up to a couple gigs of texture space.
- Note that this is the only means to have XFree4/GLX use
+ Note that this is the only means to have X/GLX use
write-combining with MTRR support on the AGP bus. Without it, OpenGL
direct rendering will be a lot slower but still faster than PIO.
- You should say Y here if you use XFree86 3.3.6 or 4.x and want to
- use GLX or DRI. If unsure, say N.
-
To compile this driver as a module, choose M here: the
module will be called agpgart.
+ You should say Y here if you want to use GLX or DRI.
+
+ If unsure, say N.
+
config AGP_ALI
tristate "ALI chipset support"
depends on AGP && X86_32
---help---
This option gives you AGP support for the GLX component of
- XFree86 4.x on the following ALi chipsets. The supported chipsets
+ X on the following ALi chipsets. The supported chipsets
include M1541, M1621, M1631, M1632, M1641,M1647,and M1651.
For the ALi-chipset question, ALi suggests you refer to
<http://www.ali.com.tw/eng/support/index.shtml>.
@@ -40,28 +41,19 @@ config AGP_ALI
timing issues, this chipset cannot do AGP 2x with the G200.
This is a hardware limitation. AGP 1x seems to be fine, though.
- You should say Y here if you use XFree86 3.3.6 or 4.x and want to
- use GLX or DRI. If unsure, say N.
-
config AGP_ATI
tristate "ATI chipset support"
depends on AGP && X86_32
---help---
- This option gives you AGP support for the GLX component of
- XFree86 4.x on the ATI RadeonIGP family of chipsets.
-
- You should say Y here if you use XFree86 3.3.6 or 4.x and want to
- use GLX or DRI. If unsure, say N.
+ This option gives you AGP support for the GLX component of
+ X on the ATI RadeonIGP family of chipsets.
config AGP_AMD
tristate "AMD Irongate, 761, and 762 chipset support"
depends on AGP && X86_32
help
This option gives you AGP support for the GLX component of
- XFree86 4.x on AMD Irongate, 761, and 762 chipsets.
-
- You should say Y here if you use XFree86 3.3.6 or 4.x and want to
- use GLX or DRI. If unsure, say N.
+ X on AMD Irongate, 761, and 762 chipsets.
config AGP_AMD64
tristate "AMD Opteron/Athlon64 on-CPU GART support" if !GART_IOMMU
@@ -69,45 +61,38 @@ config AGP_AMD64
default y if GART_IOMMU
help
This option gives you AGP support for the GLX component of
- XFree86 4.x using the on-CPU northbridge of the AMD Athlon64/Opteron CPUs.
+ X using the on-CPU northbridge of the AMD Athlon64/Opteron CPUs.
You still need an external AGP bridge like the AMD 8151, VIA
K8T400M, SiS755. It may also support other AGP bridges when loaded
with agp_try_unsupported=1.
- You should say Y here if you use XFree86 3.3.6 or 4.x and want to
- use GLX or DRI. If unsure, say Y
config AGP_INTEL
tristate "Intel 440LX/BX/GX, I8xx and E7x05 chipset support"
depends on AGP && X86
help
- This option gives you AGP support for the GLX component of XFree86 4.x
+ This option gives you AGP support for the GLX component of X
on Intel 440LX/BX/GX, 815, 820, 830, 840, 845, 850, 860, 875,
- E7205 and E7505 chipsets and full support for the 810, 815, 830M, 845G,
- 852GM, 855GM, 865G and I915 integrated graphics chipsets.
+ E7205 and E7505 chipsets and full support for the 810, 815, 830M,
+ 845G, 852GM, 855GM, 865G and I915 integrated graphics chipsets.
+
- You should say Y here if you use XFree86 3.3.6 or 4.x and want to
- use GLX or DRI, or if you have any Intel integrated graphics
- chipsets. If unsure, say Y.
config AGP_NVIDIA
tristate "NVIDIA nForce/nForce2 chipset support"
depends on AGP && X86_32
help
This option gives you AGP support for the GLX component of
- XFree86 4.x on the following NVIDIA chipsets. The supported chipsets
- include nForce and nForce2
+ X on NVIDIA chipsets including nForce and nForce2
config AGP_SIS
tristate "SiS chipset support"
depends on AGP && X86_32
help
This option gives you AGP support for the GLX component of
- XFree86 4.x on Silicon Integrated Systems [SiS] chipsets.
+ X on Silicon Integrated Systems [SiS] chipsets.
Note that 5591/5592 AGP chipsets are NOT supported.
- You should say Y here if you use XFree86 3.3.6 or 4.x and want to
- use GLX or DRI. If unsure, say N.
config AGP_SWORKS
tristate "Serverworks LE/HE chipset support"
@@ -121,10 +106,7 @@ config AGP_VIA
depends on AGP && X86_32
help
This option gives you AGP support for the GLX component of
- XFree86 4.x on VIA MVP3/Apollo Pro chipsets.
-
- You should say Y here if you use XFree86 3.3.6 or 4.x and want to
- use GLX or DRI. If unsure, say N.
+ X on VIA MVP3/Apollo Pro chipsets.
config AGP_I460
tristate "Intel 460GX chipset support"
@@ -159,9 +141,6 @@ config AGP_EFFICEON
This option gives you AGP support for the Transmeta Efficeon
series processors with integrated northbridges.
- You should say Y here if you use XFree86 3.3.6 or 4.x and want to
- use GLX or DRI. If unsure, say Y.
-
config AGP_SGI_TIOCA
tristate "SGI TIO chipset AGP support"
depends on AGP && (IA64_SGI_SN2 || IA64_GENERIC)
diff --git a/drivers/char/agp/amd64-agp.c b/drivers/char/agp/amd64-agp.c
index 9964c508c11..1251b2515bb 100644
--- a/drivers/char/agp/amd64-agp.c
+++ b/drivers/char/agp/amd64-agp.c
@@ -516,8 +516,10 @@ static int __devinit nforce3_agp_init(struct pci_dev *pdev)
pci_read_config_dword (hammers[0], AMD64_GARTAPERTUREBASE, &apbase);
/* if x86-64 aperture base is beyond 4G, exit here */
- if ( (apbase & 0x7fff) >> (32 - 25) )
- return -ENODEV;
+ if ( (apbase & 0x7fff) >> (32 - 25) ) {
+ printk(KERN_INFO PFX "aperture base > 4G\n");
+ return -ENODEV;
+ }
apbase = (apbase & 0x7fff) << 25;
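The test being annotated above, (apbase & 0x7fff) >> (32 - 25), reads as "aperture base is at or beyond 4 GB": the register holds the base in 32 MB units (hence the later << 25), so any value of 128 or more — any bit at position 7 or higher of the 15-bit field — pushes the byte address past 4 GB. A small standalone sketch of the same arithmetic, assuming that register layout:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t apbase_reg = 0x0080;            /* example value: 128 * 32 MB = 4 GB */
	uint32_t units = apbase_reg & 0x7fff;    /* 15-bit base, in 32 MB units */
	uint64_t base  = (uint64_t)units << 25;  /* byte address of the aperture */

	if (units >> (32 - 25))                  /* any bit >= 2^7  =>  base >= 4 GB */
		printf("aperture base 0x%llx is beyond 4G, bail out with -ENODEV\n",
		       (unsigned long long)base);
	return 0;
}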
diff --git a/drivers/char/agp/sworks-agp.c b/drivers/char/agp/sworks-agp.c
index 268f78d926d..efef9999f1c 100644
--- a/drivers/char/agp/sworks-agp.c
+++ b/drivers/char/agp/sworks-agp.c
@@ -468,9 +468,7 @@ static int __devinit agp_serverworks_probe(struct pci_dev *pdev,
switch (pdev->device) {
case 0x0006:
- /* ServerWorks CNB20HE
- Fail silently.*/
- printk (KERN_ERR PFX "Detected ServerWorks CNB20HE chipset: No AGP present.\n");
+ printk (KERN_ERR PFX "ServerWorks CNB20HE is unsupported due to lack of documentation.\n");
return -ENODEV;
case PCI_DEVICE_ID_SERVERWORKS_HE:
diff --git a/drivers/char/drm/i915_irq.c b/drivers/char/drm/i915_irq.c
index a1381c61aa6..d3879ac9970 100644
--- a/drivers/char/drm/i915_irq.c
+++ b/drivers/char/drm/i915_irq.c
@@ -202,10 +202,15 @@ void i915_driver_irq_postinstall(drm_device_t * dev)
void i915_driver_irq_uninstall(drm_device_t * dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ u16 temp;
+
if (!dev_priv)
return;
I915_WRITE16(I915REG_HWSTAM, 0xffff);
I915_WRITE16(I915REG_INT_MASK_R, 0xffff);
I915_WRITE16(I915REG_INT_ENABLE_R, 0x0);
+
+ temp = I915_READ16(I915REG_INT_IDENTITY_R);
+ I915_WRITE16(I915REG_INT_IDENTITY_R, temp);
}
diff --git a/drivers/char/drm/r300_cmdbuf.c b/drivers/char/drm/r300_cmdbuf.c
index 291dbf4c818..c08fa5076f0 100644
--- a/drivers/char/drm/r300_cmdbuf.c
+++ b/drivers/char/drm/r300_cmdbuf.c
@@ -161,6 +161,7 @@ void r300_init_reg_flags(void)
ADD_RANGE(R300_VAP_PVS_CNTL_1, 3);
ADD_RANGE(R300_GB_ENABLE, 1);
ADD_RANGE(R300_GB_MSPOS0, 5);
+ ADD_RANGE(R300_TX_CNTL, 1);
ADD_RANGE(R300_TX_ENABLE, 1);
ADD_RANGE(0x4200, 4);
ADD_RANGE(0x4214, 1);
@@ -489,6 +490,50 @@ static __inline__ int r300_emit_3d_load_vbpntr(drm_radeon_private_t *dev_priv,
return 0;
}
+static __inline__ int r300_emit_bitblt_multi(drm_radeon_private_t *dev_priv,
+ drm_radeon_kcmd_buffer_t *cmdbuf)
+{
+ u32 *cmd = (u32 *) cmdbuf->buf;
+ int count, ret;
+ RING_LOCALS;
+
+ count=(cmd[0]>>16) & 0x3fff;
+
+ if (cmd[0] & 0x8000) {
+ u32 offset;
+
+ if (cmd[1] & (RADEON_GMC_SRC_PITCH_OFFSET_CNTL
+ | RADEON_GMC_DST_PITCH_OFFSET_CNTL)) {
+ offset = cmd[2] << 10;
+ ret = r300_check_offset(dev_priv, offset);
+ if (ret) {
+ DRM_ERROR("Invalid bitblt first offset is %08X\n", offset);
+ return DRM_ERR(EINVAL);
+ }
+ }
+
+ if ((cmd[1] & RADEON_GMC_SRC_PITCH_OFFSET_CNTL) &&
+ (cmd[1] & RADEON_GMC_DST_PITCH_OFFSET_CNTL)) {
+ offset = cmd[3] << 10;
+ ret = r300_check_offset(dev_priv, offset);
+ if (ret) {
+ DRM_ERROR("Invalid bitblt second offset is %08X\n", offset);
+ return DRM_ERR(EINVAL);
+ }
+
+ }
+ }
+
+ BEGIN_RING(count+2);
+ OUT_RING(cmd[0]);
+ OUT_RING_TABLE((int *)(cmdbuf->buf + 4), count + 1);
+ ADVANCE_RING();
+
+ cmdbuf->buf += (count+2)*4;
+ cmdbuf->bufsz -= (count+2)*4;
+
+ return 0;
+}
static __inline__ int r300_emit_raw_packet3(drm_radeon_private_t *dev_priv,
drm_radeon_kcmd_buffer_t *cmdbuf)
@@ -527,6 +572,9 @@ static __inline__ int r300_emit_raw_packet3(drm_radeon_private_t *dev_priv,
case RADEON_3D_LOAD_VBPNTR: /* load vertex array pointers */
return r300_emit_3d_load_vbpntr(dev_priv, cmdbuf, header);
+ case RADEON_CNTL_BITBLT_MULTI:
+ return r300_emit_bitblt_multi(dev_priv, cmdbuf);
+
case RADEON_CP_3D_DRAW_IMMD_2: /* triggers drawing using in-packet vertex data */
case RADEON_CP_3D_DRAW_VBUF_2: /* triggers drawing of vertex buffers setup elsewhere */
case RADEON_CP_3D_DRAW_INDX_2: /* triggers drawing using indices to vertex buffer */
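For the size bookkeeping in r300_emit_bitblt_multi() above: the first dword of the packet carries the payload length in bits 16..29 (the (cmd[0] >> 16) & 0x3fff mask), the ring receives the header plus count+1 payload dwords, and the command buffer pointer advances by the same amount in bytes. A tiny standalone check of that arithmetic (the header value is made up for illustration):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t cmd0  = (5u << 16) | 0x1234;      /* hypothetical header, count field = 5 */
	uint32_t count = (cmd0 >> 16) & 0x3fff;    /* payload length field */

	printf("ring dwords emitted : %u\n", count + 2);   /* header + count+1 payload */
	printf("cmdbuf advance      : %u bytes\n", (count + 2) * 4);
	return 0;
}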
diff --git a/drivers/char/drm/r300_reg.h b/drivers/char/drm/r300_reg.h
index a0ed20e2522..d1e19954406 100644
--- a/drivers/char/drm/r300_reg.h
+++ b/drivers/char/drm/r300_reg.h
@@ -451,6 +451,9 @@ I am fairly certain that they are correct unless stated otherwise in comments.
/* END */
/* gap */
+/* Zero to flush caches. */
+#define R300_TX_CNTL 0x4100
+
/* The upper enable bits are guessed, based on fglrx reported limits. */
#define R300_TX_ENABLE 0x4104
# define R300_TX_ENABLE_0 (1 << 0)
diff --git a/drivers/char/drm/radeon_drv.h b/drivers/char/drm/radeon_drv.h
index 498b19b1d64..1f7d2ab8c4f 100644
--- a/drivers/char/drm/radeon_drv.h
+++ b/drivers/char/drm/radeon_drv.h
@@ -90,9 +90,10 @@
* 1.19- Add support for gart table in FB memory and PCIE r300
* 1.20- Add support for r300 texrect
* 1.21- Add support for card type getparam
+ * 1.22- Add support for texture cache flushes (R300_TX_CNTL)
*/
#define DRIVER_MAJOR 1
-#define DRIVER_MINOR 21
+#define DRIVER_MINOR 22
#define DRIVER_PATCHLEVEL 0
/*
diff --git a/drivers/char/sysrq.c b/drivers/char/sysrq.c
index 5765f672e85..d58f8231885 100644
--- a/drivers/char/sysrq.c
+++ b/drivers/char/sysrq.c
@@ -243,7 +243,7 @@ static struct sysrq_key_op sysrq_term_op = {
static void moom_callback(void *ignored)
{
- out_of_memory(GFP_KERNEL, 0);
+ out_of_memory(&NODE_DATA(0)->node_zonelists[ZONE_NORMAL], GFP_KERNEL, 0);
}
static DECLARE_WORK(moom_work, moom_callback, NULL);
diff --git a/drivers/crypto/padlock-aes.c b/drivers/crypto/padlock-aes.c
index 64819aa7cac..0c08c58252b 100644
--- a/drivers/crypto/padlock-aes.c
+++ b/drivers/crypto/padlock-aes.c
@@ -348,10 +348,10 @@ aes_set_key(void *ctx_arg, const uint8_t *in_key, unsigned int key_len, uint32_t
break;
case 32:
- E_KEY[4] = le32_to_cpu(in_key[4]);
- E_KEY[5] = le32_to_cpu(in_key[5]);
- E_KEY[6] = le32_to_cpu(in_key[6]);
- t = E_KEY[7] = le32_to_cpu(in_key[7]);
+ E_KEY[4] = le32_to_cpu(key[4]);
+ E_KEY[5] = le32_to_cpu(key[5]);
+ E_KEY[6] = le32_to_cpu(key[6]);
+ t = E_KEY[7] = le32_to_cpu(key[7]);
for (i = 0; i < 7; ++i)
loop8 (i);
break;
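The padlock-aes fix swaps in_key (declared const uint8_t * in the function signature above) for key in the 256-bit branch; key is presumably a 32-bit-word view of the same buffer. Indexing the byte pointer with 4..7 reads bytes 4..7 rather than words 4..7, so 256-bit keys were expanded from the wrong material. A tiny standalone illustration of the difference:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint8_t in_key[32];
	for (int i = 0; i < 32; i++)
		in_key[i] = (uint8_t)i;

	const uint32_t *key = (const uint32_t *)in_key;  /* word-sized view of the key */

	/* in_key[4] is a single byte (0x04); key[4] covers bytes 16..19 of the key. */
	printf("in_key[4] = 0x%02x\n", in_key[4]);
	printf("key[4]    = 0x%08x\n", key[4]);
	return 0;
}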
diff --git a/drivers/fc4/fc.c b/drivers/fc4/fc.c
index 5c8943509cc..66d03f242d3 100644
--- a/drivers/fc4/fc.c
+++ b/drivers/fc4/fc.c
@@ -1053,7 +1053,7 @@ static int fc_do_els(fc_channel *fc, unsigned int alpa, void *data, int len)
int i;
fcmd = &_fcmd;
- memset(fcmd, 0, sizeof(fcmd));
+ memset(fcmd, 0, sizeof(fcp_cmnd));
FCD(("PLOGI SID %d DID %d\n", fc->sid, alpa))
fch = &fcmd->fch;
FILL_FCHDR_RCTL_DID(fch, R_CTL_ELS_REQ, alpa);
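The fc.c change is the classic sizeof-a-pointer bug: fcmd is a pointer, so sizeof(fcmd) zeroes only 4 or 8 bytes instead of the whole command structure; the patch clears sizeof(fcp_cmnd) instead. A standalone illustration with a stand-in struct (not the real fcp_cmnd layout):

#include <stdio.h>
#include <string.h>

struct cmnd_like { char header[24]; int payload[8]; };   /* stand-in layout */

int main(void)
{
	struct cmnd_like _fcmd, *fcmd = &_fcmd;

	memset(fcmd, 0, sizeof(fcmd));    /* wrong: clears sizeof(pointer) bytes */
	memset(fcmd, 0, sizeof(*fcmd));   /* right: clears the whole struct, which is
	                                     what memset(..., sizeof(fcp_cmnd)) achieves */
	printf("%zu vs %zu bytes cleared\n", sizeof(fcmd), sizeof(*fcmd));
	return 0;
}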
diff --git a/drivers/ieee1394/sbp2.c b/drivers/ieee1394/sbp2.c
index 18d7eda3885..eca92eb475a 100644
--- a/drivers/ieee1394/sbp2.c
+++ b/drivers/ieee1394/sbp2.c
@@ -137,15 +137,15 @@ MODULE_PARM_DESC(exclusive_login, "Exclusive login to sbp2 device (default = 1)"
/*
* SCSI inquiry hack for really badly behaved sbp2 devices. Turn this on
* if your sbp2 device is not properly handling the SCSI inquiry command.
- * This hack makes the inquiry look more like a typical MS Windows
- * inquiry.
+ * This hack makes the inquiry look more like a typical MS Windows inquiry
+ * by enforcing 36 byte inquiry and avoiding access to mode_sense page 8.
*
* If force_inquiry_hack=1 is required for your device to work,
* please submit the logged sbp2_firmware_revision value of this device to
* the linux1394-devel mailing list.
*/
static int force_inquiry_hack;
-module_param(force_inquiry_hack, int, 0444);
+module_param(force_inquiry_hack, int, 0644);
MODULE_PARM_DESC(force_inquiry_hack, "Force SCSI inquiry hack (default = 0)");
/*
@@ -264,18 +264,17 @@ static struct hpsb_protocol_driver sbp2_driver = {
},
};
-
-/* List of device firmware's that require a forced 36 byte inquiry. */
+/*
+ * List of device firmwares that require the inquiry hack.
+ * Yields a few false positives but did not break other devices so far.
+ */
static u32 sbp2_broken_inquiry_list[] = {
- 0x00002800, /* Stefan Richter <richtest@bauwesen.tu-cottbus.de> */
+ 0x00002800, /* Stefan Richter <stefanr@s5r6.in-berlin.de> */
/* DViCO Momobay CX-1 */
0x00000200 /* Andreas Plesch <plesch@fas.harvard.edu> */
/* QPS Fire DVDBurner */
};
-#define NUM_BROKEN_INQUIRY_DEVS \
- (sizeof(sbp2_broken_inquiry_list)/sizeof(*sbp2_broken_inquiry_list))
-
/**************************************
* General utility functions
**************************************/
@@ -643,9 +642,15 @@ static int sbp2_remove(struct device *dev)
if (!scsi_id)
return 0;
- /* Trigger shutdown functions in scsi's highlevel. */
- if (scsi_id->scsi_host)
+ if (scsi_id->scsi_host) {
+ /* Get rid of enqueued commands if there is no chance to
+ * send them. */
+ if (!sbp2util_node_is_available(scsi_id))
+ sbp2scsi_complete_all_commands(scsi_id, DID_NO_CONNECT);
+ /* scsi_remove_device() will trigger shutdown functions of SCSI
+ * highlevel drivers which would deadlock if blocked. */
scsi_unblock_requests(scsi_id->scsi_host);
+ }
sdev = scsi_id->sdev;
if (sdev) {
scsi_id->sdev = NULL;
@@ -742,11 +747,6 @@ static struct scsi_id_instance_data *sbp2_alloc_device(struct unit_directory *ud
hi->host = ud->ne->host;
INIT_LIST_HEAD(&hi->scsi_ids);
- /* Register our sbp2 status address space... */
- hpsb_register_addrspace(&sbp2_highlevel, ud->ne->host, &sbp2_ops,
- SBP2_STATUS_FIFO_ADDRESS,
- SBP2_STATUS_FIFO_ADDRESS +
- SBP2_STATUS_FIFO_ENTRY_TO_OFFSET(SBP2_MAX_UDS_PER_NODE+1));
#ifdef CONFIG_IEEE1394_SBP2_PHYS_DMA
/* Handle data movement if physical dma is not
* enabled/supported on host controller */
@@ -759,6 +759,18 @@ static struct scsi_id_instance_data *sbp2_alloc_device(struct unit_directory *ud
list_add_tail(&scsi_id->scsi_list, &hi->scsi_ids);
+ /* Register the status FIFO address range. We could use the same FIFO
+ * for targets at different nodes. However we need different FIFOs per
+ * target in order to support multi-unit devices. */
+ scsi_id->status_fifo_addr = hpsb_allocate_and_register_addrspace(
+ &sbp2_highlevel, ud->ne->host, &sbp2_ops,
+ sizeof(struct sbp2_status_block), sizeof(quadlet_t),
+ ~0ULL, ~0ULL);
+ if (!scsi_id->status_fifo_addr) {
+ SBP2_ERR("failed to allocate status FIFO address range");
+ goto failed_alloc;
+ }
+
/* Register our host with the SCSI stack. */
scsi_host = scsi_host_alloc(&scsi_driver_template,
sizeof(unsigned long));
@@ -997,6 +1009,10 @@ static void sbp2_remove_device(struct scsi_id_instance_data *scsi_id)
SBP2_DMA_FREE("single query logins data");
}
+ if (scsi_id->status_fifo_addr)
+ hpsb_unregister_addrspace(&sbp2_highlevel, hi->host,
+ scsi_id->status_fifo_addr);
+
scsi_id->ud->device.driver_data = NULL;
SBP2_DEBUG("SBP-2 device removed, SCSI ID = %d", scsi_id->ud->id);
@@ -1075,11 +1091,10 @@ static int sbp2_query_logins(struct scsi_id_instance_data *scsi_id)
ORB_SET_QUERY_LOGINS_RESP_LENGTH(sizeof(struct sbp2_query_logins_response));
SBP2_DEBUG("sbp2_query_logins: reserved_resp_length initialized");
- scsi_id->query_logins_orb->status_FIFO_lo = SBP2_STATUS_FIFO_ADDRESS_LO +
- SBP2_STATUS_FIFO_ENTRY_TO_OFFSET(scsi_id->ud->id);
- scsi_id->query_logins_orb->status_FIFO_hi = (ORB_SET_NODE_ID(hi->host->node_id) |
- SBP2_STATUS_FIFO_ADDRESS_HI);
- SBP2_DEBUG("sbp2_query_logins: status FIFO initialized");
+ scsi_id->query_logins_orb->status_fifo_hi =
+ ORB_SET_STATUS_FIFO_HI(scsi_id->status_fifo_addr, hi->host->node_id);
+ scsi_id->query_logins_orb->status_fifo_lo =
+ ORB_SET_STATUS_FIFO_LO(scsi_id->status_fifo_addr);
sbp2util_cpu_to_be32_buffer(scsi_id->query_logins_orb, sizeof(struct sbp2_query_logins_orb));
@@ -1184,11 +1199,10 @@ static int sbp2_login_device(struct scsi_id_instance_data *scsi_id)
ORB_SET_LOGIN_RESP_LENGTH(sizeof(struct sbp2_login_response));
SBP2_DEBUG("sbp2_login_device: passwd_resp_lengths initialized");
- scsi_id->login_orb->status_FIFO_lo = SBP2_STATUS_FIFO_ADDRESS_LO +
- SBP2_STATUS_FIFO_ENTRY_TO_OFFSET(scsi_id->ud->id);
- scsi_id->login_orb->status_FIFO_hi = (ORB_SET_NODE_ID(hi->host->node_id) |
- SBP2_STATUS_FIFO_ADDRESS_HI);
- SBP2_DEBUG("sbp2_login_device: status FIFO initialized");
+ scsi_id->login_orb->status_fifo_hi =
+ ORB_SET_STATUS_FIFO_HI(scsi_id->status_fifo_addr, hi->host->node_id);
+ scsi_id->login_orb->status_fifo_lo =
+ ORB_SET_STATUS_FIFO_LO(scsi_id->status_fifo_addr);
/*
* Byte swap ORB if necessary
@@ -1301,10 +1315,10 @@ static int sbp2_logout_device(struct scsi_id_instance_data *scsi_id)
scsi_id->logout_orb->login_ID_misc |= ORB_SET_NOTIFY(1);
scsi_id->logout_orb->reserved5 = 0x0;
- scsi_id->logout_orb->status_FIFO_lo = SBP2_STATUS_FIFO_ADDRESS_LO +
- SBP2_STATUS_FIFO_ENTRY_TO_OFFSET(scsi_id->ud->id);
- scsi_id->logout_orb->status_FIFO_hi = (ORB_SET_NODE_ID(hi->host->node_id) |
- SBP2_STATUS_FIFO_ADDRESS_HI);
+ scsi_id->logout_orb->status_fifo_hi =
+ ORB_SET_STATUS_FIFO_HI(scsi_id->status_fifo_addr, hi->host->node_id);
+ scsi_id->logout_orb->status_fifo_lo =
+ ORB_SET_STATUS_FIFO_LO(scsi_id->status_fifo_addr);
/*
* Byte swap ORB if necessary
@@ -1366,10 +1380,10 @@ static int sbp2_reconnect_device(struct scsi_id_instance_data *scsi_id)
scsi_id->reconnect_orb->login_ID_misc |= ORB_SET_NOTIFY(1);
scsi_id->reconnect_orb->reserved5 = 0x0;
- scsi_id->reconnect_orb->status_FIFO_lo = SBP2_STATUS_FIFO_ADDRESS_LO +
- SBP2_STATUS_FIFO_ENTRY_TO_OFFSET(scsi_id->ud->id);
- scsi_id->reconnect_orb->status_FIFO_hi =
- (ORB_SET_NODE_ID(hi->host->node_id) | SBP2_STATUS_FIFO_ADDRESS_HI);
+ scsi_id->reconnect_orb->status_fifo_hi =
+ ORB_SET_STATUS_FIFO_HI(scsi_id->status_fifo_addr, hi->host->node_id);
+ scsi_id->reconnect_orb->status_fifo_lo =
+ ORB_SET_STATUS_FIFO_LO(scsi_id->status_fifo_addr);
/*
* Byte swap ORB if necessary
@@ -1560,7 +1574,7 @@ static void sbp2_parse_unit_directory(struct scsi_id_instance_data *scsi_id,
/* Check for a blacklisted set of devices that require us to force
* a 36 byte host inquiry. This can be overridden as a module param
* (to force all hosts). */
- for (i = 0; i < NUM_BROKEN_INQUIRY_DEVS; i++) {
+ for (i = 0; i < ARRAY_SIZE(sbp2_broken_inquiry_list); i++) {
if ((firmware_revision & 0xffff00) ==
sbp2_broken_inquiry_list[i]) {
SBP2_WARN("Node " NODE_BUS_FMT ": Using 36byte inquiry workaround",
@@ -2007,18 +2021,6 @@ static int sbp2_send_command(struct scsi_id_instance_data *scsi_id,
}
/*
- * The scsi stack sends down a request_bufflen which does not match the
- * length field in the scsi cdb. This causes some sbp2 devices to
- * reject this inquiry command. Fix the request_bufflen.
- */
- if (*cmd == INQUIRY) {
- if (force_inquiry_hack || scsi_id->workarounds & SBP2_BREAKAGE_INQUIRY_HACK)
- request_bufflen = cmd[4] = 0x24;
- else
- request_bufflen = cmd[4];
- }
-
- /*
* Now actually fill in the command orb and sbp2 s/g list
*/
sbp2_create_command_orb(scsi_id, command, cmd, SCpnt->use_sg,
@@ -2082,9 +2084,7 @@ static void sbp2_check_sbp2_response(struct scsi_id_instance_data *scsi_id,
SBP2_DEBUG("sbp2_check_sbp2_response");
- switch (SCpnt->cmnd[0]) {
-
- case INQUIRY:
+ if (SCpnt->cmnd[0] == INQUIRY && (SCpnt->cmnd[1] & 3) == 0) {
/*
* Make sure data length is ok. Minimum length is 36 bytes
*/
@@ -2097,13 +2097,7 @@ static void sbp2_check_sbp2_response(struct scsi_id_instance_data *scsi_id,
*/
scsi_buf[2] |= 2;
scsi_buf[3] = (scsi_buf[3] & 0xf0) | 2;
-
- break;
-
- default:
- break;
}
- return;
}
/*
@@ -2114,7 +2108,6 @@ static int sbp2_handle_status_write(struct hpsb_host *host, int nodeid, int dest
{
struct sbp2scsi_host_info *hi;
struct scsi_id_instance_data *scsi_id = NULL, *scsi_id_tmp;
- u32 id;
struct scsi_cmnd *SCpnt = NULL;
u32 scsi_status = SBP2_SCSI_STATUS_GOOD;
struct sbp2_command_info *command;
@@ -2137,12 +2130,12 @@ static int sbp2_handle_status_write(struct hpsb_host *host, int nodeid, int dest
}
/*
- * Find our scsi_id structure by looking at the status fifo address written to by
- * the sbp2 device.
+ * Find our scsi_id structure by looking at the status fifo address
+ * written to by the sbp2 device.
*/
- id = SBP2_STATUS_FIFO_OFFSET_TO_ENTRY((u32)(addr - SBP2_STATUS_FIFO_ADDRESS));
list_for_each_entry(scsi_id_tmp, &hi->scsi_ids, scsi_list) {
- if (scsi_id_tmp->ne->nodeid == nodeid && scsi_id_tmp->ud->id == id) {
+ if (scsi_id_tmp->ne->nodeid == nodeid &&
+ scsi_id_tmp->status_fifo_addr == addr) {
scsi_id = scsi_id_tmp;
break;
}
@@ -2483,7 +2476,16 @@ static void sbp2scsi_complete_command(struct scsi_id_instance_data *scsi_id,
static int sbp2scsi_slave_alloc(struct scsi_device *sdev)
{
- ((struct scsi_id_instance_data *)sdev->host->hostdata[0])->sdev = sdev;
+ struct scsi_id_instance_data *scsi_id =
+ (struct scsi_id_instance_data *)sdev->host->hostdata[0];
+
+ scsi_id->sdev = sdev;
+
+ if (force_inquiry_hack ||
+ scsi_id->workarounds & SBP2_BREAKAGE_INQUIRY_HACK) {
+ sdev->inquiry_len = 36;
+ sdev->skip_ms_page_8 = 1;
+ }
return 0;
}
diff --git a/drivers/ieee1394/sbp2.h b/drivers/ieee1394/sbp2.h
index 900ea1d25e7..e2d357a9ea3 100644
--- a/drivers/ieee1394/sbp2.h
+++ b/drivers/ieee1394/sbp2.h
@@ -33,15 +33,17 @@
#define ORB_DIRECTION_NO_DATA_TRANSFER 0x2
#define ORB_SET_NULL_PTR(value) ((value & 0x1) << 31)
-#define ORB_SET_NOTIFY(value) ((value & 0x1) << 31)
-#define ORB_SET_RQ_FMT(value) ((value & 0x3) << 29) /* unused ? */
+#define ORB_SET_NOTIFY(value) ((value & 0x1) << 31)
+#define ORB_SET_RQ_FMT(value) ((value & 0x3) << 29) /* unused ? */
#define ORB_SET_NODE_ID(value) ((value & 0xffff) << 16)
-#define ORB_SET_DATA_SIZE(value) (value & 0xffff)
-#define ORB_SET_PAGE_SIZE(value) ((value & 0x7) << 16)
-#define ORB_SET_PAGE_TABLE_PRESENT(value) ((value & 0x1) << 19)
-#define ORB_SET_MAX_PAYLOAD(value) ((value & 0xf) << 20)
-#define ORB_SET_SPEED(value) ((value & 0x7) << 24)
-#define ORB_SET_DIRECTION(value) ((value & 0x1) << 27)
+#define ORB_SET_STATUS_FIFO_HI(value, id) (value >> 32 | ORB_SET_NODE_ID(id))
+#define ORB_SET_STATUS_FIFO_LO(value) (value & 0xffffffff)
+#define ORB_SET_DATA_SIZE(value) (value & 0xffff)
+#define ORB_SET_PAGE_SIZE(value) ((value & 0x7) << 16)
+#define ORB_SET_PAGE_TABLE_PRESENT(value) ((value & 0x1) << 19)
+#define ORB_SET_MAX_PAYLOAD(value) ((value & 0xf) << 20)
+#define ORB_SET_SPEED(value) ((value & 0x7) << 24)
+#define ORB_SET_DIRECTION(value) ((value & 0x1) << 27)
struct sbp2_command_orb {
volatile u32 next_ORB_hi;
@@ -76,8 +78,8 @@ struct sbp2_login_orb {
u32 login_response_lo;
u32 lun_misc;
u32 passwd_resp_lengths;
- u32 status_FIFO_hi;
- u32 status_FIFO_lo;
+ u32 status_fifo_hi;
+ u32 status_fifo_lo;
};
#define RESPONSE_GET_LOGIN_ID(value) (value & 0xffff)
@@ -102,8 +104,8 @@ struct sbp2_query_logins_orb {
u32 query_response_lo;
u32 lun_misc;
u32 reserved_resp_length;
- u32 status_FIFO_hi;
- u32 status_FIFO_lo;
+ u32 status_fifo_hi;
+ u32 status_fifo_lo;
};
#define RESPONSE_GET_MAX_LOGINS(value) (value & 0xffff)
@@ -123,8 +125,8 @@ struct sbp2_reconnect_orb {
u32 reserved4;
u32 login_ID_misc;
u32 reserved5;
- u32 status_FIFO_hi;
- u32 status_FIFO_lo;
+ u32 status_fifo_hi;
+ u32 status_fifo_lo;
};
struct sbp2_logout_orb {
@@ -134,8 +136,8 @@ struct sbp2_logout_orb {
u32 reserved4;
u32 login_ID_misc;
u32 reserved5;
- u32 status_FIFO_hi;
- u32 status_FIFO_lo;
+ u32 status_fifo_hi;
+ u32 status_fifo_lo;
};
#define PAGE_TABLE_SET_SEGMENT_BASE_HI(value) (value & 0xffff)
@@ -195,30 +197,6 @@ struct sbp2_status_block {
* Miscellaneous SBP2 related config rom defines
*/
-/* The status fifo address definition below is used as a base for each
- * node, which a chunk seperately assigned to each unit directory in the
- * node. For example, 0xfffe00000000ULL is used for the first sbp2 device
- * detected on node 0, 0xfffe00000020ULL for the next sbp2 device on node
- * 0, and so on.
- *
- * Note: We could use a single status fifo address for all sbp2 devices,
- * and figure out which sbp2 device the status belongs to by looking at
- * the source node id of the status write... but, using separate addresses
- * for each sbp2 unit directory allows for better code and the ability to
- * support multiple luns within a single 1394 node.
- *
- * Also note that we choose the address range below as it is a region
- * specified for write posting, where the ohci controller will
- * automatically send an ack_complete when the status is written by the
- * sbp2 device... saving a split transaction. =)
- */
-#define SBP2_STATUS_FIFO_ADDRESS 0xfffe00000000ULL
-#define SBP2_STATUS_FIFO_ADDRESS_HI 0xfffe
-#define SBP2_STATUS_FIFO_ADDRESS_LO 0x0
-
-#define SBP2_STATUS_FIFO_ENTRY_TO_OFFSET(entry) ((entry) << 5)
-#define SBP2_STATUS_FIFO_OFFSET_TO_ENTRY(offset) ((offset) >> 5)
-
#define SBP2_UNIT_DIRECTORY_OFFSET_KEY 0xd1
#define SBP2_CSR_OFFSET_KEY 0x54
#define SBP2_UNIT_SPEC_ID_KEY 0x12
@@ -258,7 +236,6 @@ struct sbp2_status_block {
*/
#define SBP2_MAX_SG_ELEMENT_LENGTH 0xf000
-#define SBP2_MAX_UDS_PER_NODE 16 /* Maximum scsi devices per node */
#define SBP2_MAX_SECTORS 255 /* Max sectors supported */
#define SBP2_MAX_CMDS 8 /* This should be safe */
@@ -338,6 +315,11 @@ struct scsi_id_instance_data {
u32 sbp2_firmware_revision;
/*
+ * Address for the device to write status blocks to
+ */
+ u64 status_fifo_addr;
+
+ /*
* Variable used for logins, reconnects, logouts, query logins
*/
atomic_t sbp2_login_complete;
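The new ORB_SET_STATUS_FIFO_HI/LO macros split the 48-bit CSR address returned by hpsb_allocate_and_register_addrspace() across the two 32-bit ORB fields, folding the target's node ID into the upper half just as the old ORB_SET_NODE_ID() | SBP2_STATUS_FIFO_ADDRESS_HI combination did. A standalone sketch of the split (address and node ID are made-up example values):

#include <stdint.h>
#include <stdio.h>

#define ORB_SET_NODE_ID(value)            (((uint32_t)(value) & 0xffff) << 16)
#define ORB_SET_STATUS_FIFO_HI(value, id) ((uint32_t)((value) >> 32) | ORB_SET_NODE_ID(id))
#define ORB_SET_STATUS_FIFO_LO(value)     ((uint32_t)((value) & 0xffffffff))

int main(void)
{
	uint64_t status_fifo_addr = 0xfffff0010000ULL;  /* hypothetical allocated FIFO address */
	uint32_t node_id = 0xffc1;                      /* hypothetical 1394 node ID */

	printf("status_fifo_hi = 0x%08x\n", ORB_SET_STATUS_FIFO_HI(status_fifo_addr, node_id));
	printf("status_fifo_lo = 0x%08x\n", ORB_SET_STATUS_FIFO_LO(status_fifo_addr));
	return 0;
}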
diff --git a/drivers/mtd/chips/cfi_cmdset_0001.c b/drivers/mtd/chips/cfi_cmdset_0001.c
index 69c04945591..ded2c33f5b8 100644
--- a/drivers/mtd/chips/cfi_cmdset_0001.c
+++ b/drivers/mtd/chips/cfi_cmdset_0001.c
@@ -1019,8 +1019,8 @@ static void __xipram xip_udelay(struct map_info *map, struct flchip *chip,
#define XIP_INVAL_CACHED_RANGE(map, from, size) \
INVALIDATE_CACHED_RANGE(map, from, size)
-#define INVALIDATE_CACHE_UDELAY(map, chip, adr, len, usec) \
- UDELAY(map, chip, adr, usec)
+#define INVALIDATE_CACHE_UDELAY(map, chip, cmd_adr, adr, len, usec) \
+ UDELAY(map, chip, cmd_adr, usec)
/*
* Extra notes:
@@ -1052,7 +1052,7 @@ do { \
spin_lock(chip->mutex); \
} while (0)
-#define INVALIDATE_CACHE_UDELAY(map, chip, adr, len, usec) \
+#define INVALIDATE_CACHE_UDELAY(map, chip, cmd_adr, adr, len, usec) \
do { \
spin_unlock(chip->mutex); \
INVALIDATE_CACHED_RANGE(map, adr, len); \
@@ -1284,7 +1284,7 @@ static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
map_write(map, datum, adr);
chip->state = mode;
- INVALIDATE_CACHE_UDELAY(map, chip,
+ INVALIDATE_CACHE_UDELAY(map, chip, adr,
adr, map_bankwidth(map),
chip->word_write_time);
@@ -1572,8 +1572,8 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
map_write(map, CMD(0xd0), cmd_adr);
chip->state = FL_WRITING;
- INVALIDATE_CACHE_UDELAY(map, chip,
- cmd_adr, len,
+ INVALIDATE_CACHE_UDELAY(map, chip, cmd_adr,
+ adr, len,
chip->buffer_write_time);
timeo = jiffies + (HZ/2);
@@ -1744,7 +1744,7 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
chip->state = FL_ERASING;
chip->erase_suspended = 0;
- INVALIDATE_CACHE_UDELAY(map, chip,
+ INVALIDATE_CACHE_UDELAY(map, chip, adr,
adr, len,
chip->erase_time*1000/2);
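The INVALIDATE_CACHE_UDELAY() change above separates the address that is polled/delayed on (cmd_adr, where the status command was issued) from the range whose cached contents must be invalidated (adr/len, the data just programmed). The practical effect shows up in the buffered-write path, where the old macro was handed cmd_adr in place of the data address. A side-by-side sketch of the do_write_buffer() call, mirroring the hunk:

	/* before: invalidates the cache around cmd_adr, not around the data */
	INVALIDATE_CACHE_UDELAY(map, chip,
				cmd_adr, len,
				chip->buffer_write_time);

	/* after: poll/delay on cmd_adr, invalidate the real data range adr..adr+len */
	INVALIDATE_CACHE_UDELAY(map, chip, cmd_adr,
				adr, len,
				chip->buffer_write_time);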
diff --git a/drivers/net/irda/irda-usb.c b/drivers/net/irda/irda-usb.c
index fa176ffb4ad..8936058a3cc 100644
--- a/drivers/net/irda/irda-usb.c
+++ b/drivers/net/irda/irda-usb.c
@@ -108,6 +108,7 @@ static void irda_usb_close(struct irda_usb_cb *self);
static void speed_bulk_callback(struct urb *urb, struct pt_regs *regs);
static void write_bulk_callback(struct urb *urb, struct pt_regs *regs);
static void irda_usb_receive(struct urb *urb, struct pt_regs *regs);
+static void irda_usb_rx_defer_expired(unsigned long data);
static int irda_usb_net_open(struct net_device *dev);
static int irda_usb_net_close(struct net_device *dev);
static int irda_usb_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
@@ -677,6 +678,12 @@ static void irda_usb_net_timeout(struct net_device *netdev)
* on the interrupt pipe and hang the Rx URB only when an interrupt is
* received.
* Jean II
+ *
+ * Note : don't read the above as what we are currently doing, but as
+ * something we could do with KC dongle. Also don't forget that the
+ * interrupt pipe is not part of the original standard, so this would
+ * need to be optional...
+ * Jean II
*/
/*------------------------------------------------------------------*/
@@ -704,10 +711,8 @@ static void irda_usb_submit(struct irda_usb_cb *self, struct sk_buff *skb, struc
/* Reinitialize URB */
usb_fill_bulk_urb(urb, self->usbdev,
usb_rcvbulkpipe(self->usbdev, self->bulk_in_ep),
- skb->data, skb->truesize,
+ skb->data, IRDA_SKB_MAX_MTU,
irda_usb_receive, skb);
- /* Note : unlink *must* be synchronous because of the code in
- * irda_usb_net_close() -> free the skb - Jean II */
urb->status = 0;
/* Can be called from irda_usb_receive (irq handler) -> GFP_ATOMIC */
@@ -734,6 +739,7 @@ static void irda_usb_receive(struct urb *urb, struct pt_regs *regs)
struct irda_skb_cb *cb;
struct sk_buff *newskb;
struct sk_buff *dataskb;
+ struct urb *next_urb;
int docopy;
IRDA_DEBUG(2, "%s(), len=%d\n", __FUNCTION__, urb->actual_length);
@@ -755,20 +761,37 @@ static void irda_usb_receive(struct urb *urb, struct pt_regs *regs)
if (urb->status != 0) {
switch (urb->status) {
case -EILSEQ:
- self->stats.rx_errors++;
self->stats.rx_crc_errors++;
- break;
+ /* Also precursor to a hot-unplug on UHCI. */
+ /* Fallthrough... */
case -ECONNRESET: /* -104 */
- IRDA_DEBUG(0, "%s(), Connection Reset (-104), transfer_flags 0x%04X \n", __FUNCTION__, urb->transfer_flags);
+ /* Random error, if I remember correctly */
/* uhci_cleanup_unlink() is going to kill the Rx
* URB just after we return. No problem, at this
* point the URB will be idle ;-) - Jean II */
- break;
+ case -ESHUTDOWN: /* -108 */
+ /* That's usually a hot-unplug. Submit will fail... */
+ case -ETIMEDOUT: /* -110 */
+ /* Usually precursor to a hot-unplug on OHCI. */
default:
- IRDA_DEBUG(0, "%s(), RX status %d,transfer_flags 0x%04X \n", __FUNCTION__, urb->status, urb->transfer_flags);
+ self->stats.rx_errors++;
+ IRDA_DEBUG(0, "%s(), RX status %d, transfer_flags 0x%04X \n", __FUNCTION__, urb->status, urb->transfer_flags);
break;
}
- goto done;
+ /* If we received an error, we don't want to resubmit the
+ * Rx URB straight away but to give the USB layer a little
+ * bit of breathing room.
+ * We are in the USB thread context, therefore there is a
+ * danger of recursion (new URB we submit fails, we come
+ * back here).
+ * With recent USB stack (2.6.15+), I'm seeing that on
+ * hot unplug of the dongle...
+ * Lowest effective timer is 10ms...
+ * Jean II */
+ self->rx_defer_timer.function = &irda_usb_rx_defer_expired;
+ self->rx_defer_timer.data = (unsigned long) urb;
+ mod_timer(&self->rx_defer_timer, jiffies + (10 * HZ / 1000));
+ return;
}
/* Check for empty frames */
@@ -845,13 +868,45 @@ done:
* idle slot....
* Jean II */
/* Note : with this scheme, we could submit the idle URB before
- * processing the Rx URB. Another time... Jean II */
+ * processing the Rx URB. I don't think it would buy us anything as
+ * we are running in the USB thread context. Jean II */
+ next_urb = self->idle_rx_urb;
- /* Submit the idle URB to replace the URB we've just received */
- irda_usb_submit(self, skb, self->idle_rx_urb);
/* Recycle Rx URB : Now, the idle URB is the present one */
urb->context = NULL;
self->idle_rx_urb = urb;
+
+ /* Submit the idle URB to replace the URB we've just received.
+ * Do it last to avoid race conditions... Jean II */
+ irda_usb_submit(self, skb, next_urb);
+}
+
+/*------------------------------------------------------------------*/
+/*
+ * In case of errors, we want the USB layer to have time to recover.
+ * Now, it is time to resubmit our Rx URB...
+ */
+static void irda_usb_rx_defer_expired(unsigned long data)
+{
+ struct urb *urb = (struct urb *) data;
+ struct sk_buff *skb = (struct sk_buff *) urb->context;
+ struct irda_usb_cb *self;
+ struct irda_skb_cb *cb;
+ struct urb *next_urb;
+
+ IRDA_DEBUG(2, "%s()\n", __FUNCTION__);
+
+ /* Find ourselves */
+ cb = (struct irda_skb_cb *) skb->cb;
+ IRDA_ASSERT(cb != NULL, return;);
+ self = (struct irda_usb_cb *) cb->context;
+ IRDA_ASSERT(self != NULL, return;);
+
+ /* Same stuff as when Rx is done, see above... */
+ next_urb = self->idle_rx_urb;
+ urb->context = NULL;
+ self->idle_rx_urb = urb;
+ irda_usb_submit(self, skb, next_urb);
}
/*------------------------------------------------------------------*/
@@ -990,6 +1045,9 @@ static int irda_usb_net_close(struct net_device *netdev)
/* Stop network Tx queue */
netif_stop_queue(netdev);
+ /* Kill deferred Rx URB */
+ del_timer(&self->rx_defer_timer);
+
/* Deallocate all the Rx path buffers (URBs and skb) */
for (i = 0; i < IU_MAX_RX_URBS; i++) {
struct urb *urb = self->rx_urb[i];
@@ -1365,6 +1423,7 @@ static int irda_usb_probe(struct usb_interface *intf,
self = net->priv;
self->netdev = net;
spin_lock_init(&self->lock);
+ init_timer(&self->rx_defer_timer);
/* Create all of the needed urbs */
for (i = 0; i < IU_MAX_RX_URBS; i++) {
@@ -1498,6 +1557,9 @@ static void irda_usb_disconnect(struct usb_interface *intf)
* This will stop/deactivate the Tx path. - Jean II */
self->present = 0;
+ /* Kill deferred Rx URB */
+ del_timer(&self->rx_defer_timer);
+
/* We need to have irq enabled to unlink the URBs. That's OK,
* at this point the Tx path is gone - Jean II */
spin_unlock_irqrestore(&self->lock, flags);
@@ -1507,11 +1569,11 @@ static void irda_usb_disconnect(struct usb_interface *intf)
/* Accept no more transmissions */
/*netif_device_detach(self->netdev);*/
netif_stop_queue(self->netdev);
- /* Stop all the receive URBs */
+ /* Stop all the receive URBs. Must be synchronous. */
for (i = 0; i < IU_MAX_RX_URBS; i++)
usb_kill_urb(self->rx_urb[i]);
/* Cancel Tx and speed URB.
- * Toggle flags to make sure it's synchronous. */
+ * Make sure it's synchronous to avoid races. */
usb_kill_urb(self->tx_urb);
usb_kill_urb(self->speed_urb);
}
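The teardown side of the new deferred-resubmit machinery matters as much as the arming side: both irda_usb_net_close() and irda_usb_disconnect() now del_timer() the pending resubmit before the Rx URBs and skbs go away, otherwise the timer could fire and hand a freed skb back to irda_usb_submit(). A sketch of the required ordering, mirroring the hunks above:

	/* stop any deferred resubmit first: nothing new gets queued after this */
	del_timer(&self->rx_defer_timer);

	/* then kill the Rx URBs synchronously; they are guaranteed idle afterwards */
	for (i = 0; i < IU_MAX_RX_URBS; i++)
		usb_kill_urb(self->rx_urb[i]);

	/* only now is it safe to free the skbs parked in urb->context */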
diff --git a/drivers/net/irda/irda-usb.h b/drivers/net/irda/irda-usb.h
index bd8f6654232..4026af42dd4 100644
--- a/drivers/net/irda/irda-usb.h
+++ b/drivers/net/irda/irda-usb.h
@@ -136,8 +136,6 @@ struct irda_usb_cb {
__u16 bulk_out_mtu; /* Max Tx packet size in bytes */
__u8 bulk_int_ep; /* Interrupt Endpoint assignments */
- wait_queue_head_t wait_q; /* for timeouts */
-
struct urb *rx_urb[IU_MAX_RX_URBS]; /* URBs used to receive data frames */
struct urb *idle_rx_urb; /* Pointer to idle URB in Rx path */
struct urb *tx_urb; /* URB used to send data frames */
@@ -147,17 +145,18 @@ struct irda_usb_cb {
struct net_device_stats stats;
struct irlap_cb *irlap; /* The link layer we are bound to */
struct qos_info qos;
- hashbin_t *tx_list; /* Queued transmit skb's */
char *speed_buff; /* Buffer for speed changes */
struct timeval stamp;
struct timeval now;
- spinlock_t lock; /* For serializing operations */
+ spinlock_t lock; /* For serializing Tx operations */
__u16 xbofs; /* Current xbofs setting */
__s16 new_xbofs; /* xbofs we need to set */
__u32 speed; /* Current speed */
__s32 new_speed; /* speed we need to set */
+
+ struct timer_list rx_defer_timer; /* Wait for Rx error to clear */
};
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
index 6e1018448ee..8cc0d0bbdf5 100644
--- a/drivers/net/r8169.c
+++ b/drivers/net/r8169.c
@@ -287,6 +287,20 @@ enum RTL8169_register_content {
TxInterFrameGapShift = 24,
TxDMAShift = 8, /* DMA burst value (0-7) is shift this many bits */
+ /* Config1 register p.24 */
+ PMEnable = (1 << 0), /* Power Management Enable */
+
+ /* Config3 register p.25 */
+ MagicPacket = (1 << 5), /* Wake up when receives a Magic Packet */
+ LinkUp = (1 << 4), /* Wake up when the cable connection is re-established */
+
+ /* Config5 register p.27 */
+ BWF = (1 << 6), /* Accept Broadcast wakeup frame */
+ MWF = (1 << 5), /* Accept Multicast wakeup frame */
+ UWF = (1 << 4), /* Accept Unicast wakeup frame */
+ LanWake = (1 << 1), /* LanWake enable/disable */
+ PMEStatus = (1 << 0), /* PME status can be reset by PCI RST# */
+
/* TBICSR p.28 */
TBIReset = 0x80000000,
TBILoopback = 0x40000000,
@@ -433,6 +447,7 @@ struct rtl8169_private {
unsigned int (*phy_reset_pending)(void __iomem *);
unsigned int (*link_ok)(void __iomem *);
struct work_struct task;
+ unsigned wol_enabled : 1;
};
MODULE_AUTHOR("Realtek and the Linux r8169 crew <netdev@vger.kernel.org>");
@@ -607,6 +622,80 @@ static void rtl8169_link_option(int idx, u8 *autoneg, u16 *speed, u8 *duplex)
*duplex = p->duplex;
}
+static void rtl8169_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+{
+ struct rtl8169_private *tp = netdev_priv(dev);
+ void __iomem *ioaddr = tp->mmio_addr;
+ u8 options;
+
+ wol->wolopts = 0;
+
+#define WAKE_ANY (WAKE_PHY | WAKE_MAGIC | WAKE_UCAST | WAKE_BCAST | WAKE_MCAST)
+ wol->supported = WAKE_ANY;
+
+ spin_lock_irq(&tp->lock);
+
+ options = RTL_R8(Config1);
+ if (!(options & PMEnable))
+ goto out_unlock;
+
+ options = RTL_R8(Config3);
+ if (options & LinkUp)
+ wol->wolopts |= WAKE_PHY;
+ if (options & MagicPacket)
+ wol->wolopts |= WAKE_MAGIC;
+
+ options = RTL_R8(Config5);
+ if (options & UWF)
+ wol->wolopts |= WAKE_UCAST;
+ if (options & BWF)
+ wol->wolopts |= WAKE_BCAST;
+ if (options & MWF)
+ wol->wolopts |= WAKE_MCAST;
+
+out_unlock:
+ spin_unlock_irq(&tp->lock);
+}
+
+static int rtl8169_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+{
+ struct rtl8169_private *tp = netdev_priv(dev);
+ void __iomem *ioaddr = tp->mmio_addr;
+ int i;
+ static struct {
+ u32 opt;
+ u16 reg;
+ u8 mask;
+ } cfg[] = {
+ { WAKE_ANY, Config1, PMEnable },
+ { WAKE_PHY, Config3, LinkUp },
+ { WAKE_MAGIC, Config3, MagicPacket },
+ { WAKE_UCAST, Config5, UWF },
+ { WAKE_BCAST, Config5, BWF },
+ { WAKE_MCAST, Config5, MWF },
+ { WAKE_ANY, Config5, LanWake }
+ };
+
+ spin_lock_irq(&tp->lock);
+
+ RTL_W8(Cfg9346, Cfg9346_Unlock);
+
+ for (i = 0; i < ARRAY_SIZE(cfg); i++) {
+ u8 options = RTL_R8(cfg[i].reg) & ~cfg[i].mask;
+ if (wol->wolopts & cfg[i].opt)
+ options |= cfg[i].mask;
+ RTL_W8(cfg[i].reg, options);
+ }
+
+ RTL_W8(Cfg9346, Cfg9346_Lock);
+
+ tp->wol_enabled = (wol->wolopts) ? 1 : 0;
+
+ spin_unlock_irq(&tp->lock);
+
+ return 0;
+}
+
static void rtl8169_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
@@ -1025,6 +1114,8 @@ static struct ethtool_ops rtl8169_ethtool_ops = {
.get_tso = ethtool_op_get_tso,
.set_tso = ethtool_op_set_tso,
.get_regs = rtl8169_get_regs,
+ .get_wol = rtl8169_get_wol,
+ .set_wol = rtl8169_set_wol,
.get_strings = rtl8169_get_strings,
.get_stats_count = rtl8169_get_stats_count,
.get_ethtool_stats = rtl8169_get_ethtool_stats,
@@ -1442,6 +1533,11 @@ rtl8169_init_board(struct pci_dev *pdev, struct net_device **dev_out,
}
tp->chipset = i;
+ RTL_W8(Cfg9346, Cfg9346_Unlock);
+ RTL_W8(Config1, RTL_R8(Config1) | PMEnable);
+ RTL_W8(Config5, RTL_R8(Config5) & PMEStatus);
+ RTL_W8(Cfg9346, Cfg9346_Lock);
+
*ioaddr_out = ioaddr;
*dev_out = dev;
out:
@@ -1612,49 +1708,6 @@ rtl8169_remove_one(struct pci_dev *pdev)
pci_set_drvdata(pdev, NULL);
}
-#ifdef CONFIG_PM
-
-static int rtl8169_suspend(struct pci_dev *pdev, pm_message_t state)
-{
- struct net_device *dev = pci_get_drvdata(pdev);
- struct rtl8169_private *tp = netdev_priv(dev);
- void __iomem *ioaddr = tp->mmio_addr;
- unsigned long flags;
-
- if (!netif_running(dev))
- return 0;
-
- netif_device_detach(dev);
- netif_stop_queue(dev);
- spin_lock_irqsave(&tp->lock, flags);
-
- /* Disable interrupts, stop Rx and Tx */
- RTL_W16(IntrMask, 0);
- RTL_W8(ChipCmd, 0);
-
- /* Update the error counts. */
- tp->stats.rx_missed_errors += RTL_R32(RxMissed);
- RTL_W32(RxMissed, 0);
- spin_unlock_irqrestore(&tp->lock, flags);
-
- return 0;
-}
-
-static int rtl8169_resume(struct pci_dev *pdev)
-{
- struct net_device *dev = pci_get_drvdata(pdev);
-
- if (!netif_running(dev))
- return 0;
-
- netif_device_attach(dev);
- rtl8169_hw_start(dev);
-
- return 0;
-}
-
-#endif /* CONFIG_PM */
-
static void rtl8169_set_rxbufsize(struct rtl8169_private *tp,
struct net_device *dev)
{
@@ -2700,6 +2753,56 @@ static struct net_device_stats *rtl8169_get_stats(struct net_device *dev)
return &tp->stats;
}
+#ifdef CONFIG_PM
+
+static int rtl8169_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+ struct rtl8169_private *tp = netdev_priv(dev);
+ void __iomem *ioaddr = tp->mmio_addr;
+
+ if (!netif_running(dev))
+ goto out;
+
+ netif_device_detach(dev);
+ netif_stop_queue(dev);
+
+ spin_lock_irq(&tp->lock);
+
+ rtl8169_asic_down(ioaddr);
+
+ tp->stats.rx_missed_errors += RTL_R32(RxMissed);
+ RTL_W32(RxMissed, 0);
+
+ spin_unlock_irq(&tp->lock);
+
+ pci_save_state(pdev);
+ pci_enable_wake(pdev, pci_choose_state(pdev, state), tp->wol_enabled);
+ pci_set_power_state(pdev, pci_choose_state(pdev, state));
+out:
+ return 0;
+}
+
+static int rtl8169_resume(struct pci_dev *pdev)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+
+ if (!netif_running(dev))
+ goto out;
+
+ netif_device_attach(dev);
+
+ pci_set_power_state(pdev, PCI_D0);
+ pci_restore_state(pdev);
+ pci_enable_wake(pdev, PCI_D0, 0);
+
+ rtl8169_schedule_work(dev, rtl8169_reset_task);
+out:
+ return 0;
+}
+
+#endif /* CONFIG_PM */
+
static struct pci_driver rtl8169_pci_driver = {
.name = MODULENAME,
.id_table = rtl8169_pci_tbl,
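The new get_wol/set_wol hooks above are what `ethtool eth0` and `ethtool -s eth0 wol g` talk to. A minimal user-space counterpart for exercising the hooks directly over the SIOCETHTOOL ioctl (the interface name is an example; setting WoL needs CAP_NET_ADMIN):

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
	struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);   /* example interface */
	ifr.ifr_data = (char *)&wol;

	if (fd < 0 || ioctl(fd, SIOCETHTOOL, &ifr) < 0) {
		perror("ETHTOOL_GWOL");
		return 1;
	}
	printf("supported 0x%x, enabled 0x%x\n", wol.supported, wol.wolopts);

	wol.cmd = ETHTOOL_SWOL;
	wol.wolopts = WAKE_MAGIC;                      /* arm magic-packet wake-up */
	if (ioctl(fd, SIOCETHTOOL, &ifr) < 0)
		perror("ETHTOOL_SWOL");

	close(fd);
	return 0;
}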
diff --git a/drivers/net/skge.c b/drivers/net/skge.c
index 67fb19b8fde..25e028b7ce4 100644
--- a/drivers/net/skge.c
+++ b/drivers/net/skge.c
@@ -879,13 +879,12 @@ static int __xm_phy_read(struct skge_hw *hw, int port, u16 reg, u16 *val)
int i;
xm_write16(hw, port, XM_PHY_ADDR, reg | hw->phy_addr);
- xm_read16(hw, port, XM_PHY_DATA);
+ *val = xm_read16(hw, port, XM_PHY_DATA);
- /* Need to wait for external PHY */
for (i = 0; i < PHY_RETRIES; i++) {
- udelay(1);
if (xm_read16(hw, port, XM_MMU_CMD) & XM_MMU_PHY_RDY)
goto ready;
+ udelay(1);
}
return -ETIMEDOUT;
@@ -918,7 +917,12 @@ static int xm_phy_write(struct skge_hw *hw, int port, u16 reg, u16 val)
ready:
xm_write16(hw, port, XM_PHY_DATA, val);
- return 0;
+ for (i = 0; i < PHY_RETRIES; i++) {
+ if (!(xm_read16(hw, port, XM_MMU_CMD) & XM_MMU_PHY_BUSY))
+ return 0;
+ udelay(1);
+ }
+ return -ETIMEDOUT;
}
static void genesis_init(struct skge_hw *hw)
@@ -1168,13 +1172,17 @@ static void genesis_mac_init(struct skge_hw *hw, int port)
u32 r;
const u8 zero[6] = { 0 };
- /* Clear MIB counters */
- xm_write16(hw, port, XM_STAT_CMD,
- XM_SC_CLR_RXC | XM_SC_CLR_TXC);
- /* Clear two times according to Errata #3 */
- xm_write16(hw, port, XM_STAT_CMD,
- XM_SC_CLR_RXC | XM_SC_CLR_TXC);
+ for (i = 0; i < 10; i++) {
+ skge_write16(hw, SK_REG(port, TX_MFF_CTRL1),
+ MFF_SET_MAC_RST);
+ if (skge_read16(hw, SK_REG(port, TX_MFF_CTRL1)) & MFF_SET_MAC_RST)
+ goto reset_ok;
+ udelay(1);
+ }
+
+ printk(KERN_WARNING PFX "%s: genesis reset failed\n", dev->name);
+ reset_ok:
/* Unreset the XMAC. */
skge_write16(hw, SK_REG(port, TX_MFF_CTRL1), MFF_CLR_MAC_RST);
@@ -1191,7 +1199,7 @@ static void genesis_mac_init(struct skge_hw *hw, int port)
r |= GP_DIR_2|GP_IO_2;
skge_write32(hw, B2_GP_IO, r);
- skge_read32(hw, B2_GP_IO);
+
/* Enable GMII interface */
xm_write16(hw, port, XM_HW_CFG, XM_HW_GMII_MD);
@@ -1205,6 +1213,13 @@ static void genesis_mac_init(struct skge_hw *hw, int port)
for (i = 1; i < 16; i++)
xm_outaddr(hw, port, XM_EXM(i), zero);
+ /* Clear MIB counters */
+ xm_write16(hw, port, XM_STAT_CMD,
+ XM_SC_CLR_RXC | XM_SC_CLR_TXC);
+ /* Clear two times according to Errata #3 */
+ xm_write16(hw, port, XM_STAT_CMD,
+ XM_SC_CLR_RXC | XM_SC_CLR_TXC);
+
/* configure Rx High Water Mark (XM_RX_HI_WM) */
xm_write16(hw, port, XM_RX_HI_WM, 1450);
@@ -2170,8 +2185,10 @@ static int skge_up(struct net_device *dev)
skge->tx_avail = skge->tx_ring.count - 1;
/* Enable IRQ from port */
+ spin_lock_irq(&hw->hw_lock);
hw->intr_mask |= portirqmask[port];
skge_write32(hw, B0_IMSK, hw->intr_mask);
+ spin_unlock_irq(&hw->hw_lock);
/* Initialize MAC */
spin_lock_bh(&hw->phy_lock);
@@ -2229,8 +2246,10 @@ static int skge_down(struct net_device *dev)
else
yukon_stop(skge);
+ spin_lock_irq(&hw->hw_lock);
hw->intr_mask &= ~portirqmask[skge->port];
skge_write32(hw, B0_IMSK, hw->intr_mask);
+ spin_unlock_irq(&hw->hw_lock);
/* Stop transmitter */
skge_write8(hw, Q_ADDR(txqaddr[port], Q_CSR), CSR_STOP);
@@ -2678,8 +2697,7 @@ static int skge_poll(struct net_device *dev, int *budget)
/* restart receiver */
wmb();
- skge_write8(hw, Q_ADDR(rxqaddr[skge->port], Q_CSR),
- CSR_START | CSR_IRQ_CL_F);
+ skge_write8(hw, Q_ADDR(rxqaddr[skge->port], Q_CSR), CSR_START);
*budget -= work_done;
dev->quota -= work_done;
@@ -2687,10 +2705,11 @@ static int skge_poll(struct net_device *dev, int *budget)
if (work_done >= to_do)
return 1; /* not done */
- netif_rx_complete(dev);
- hw->intr_mask |= portirqmask[skge->port];
- skge_write32(hw, B0_IMSK, hw->intr_mask);
- skge_read32(hw, B0_IMSK);
+ spin_lock_irq(&hw->hw_lock);
+ __netif_rx_complete(dev);
+ hw->intr_mask |= portirqmask[skge->port];
+ skge_write32(hw, B0_IMSK, hw->intr_mask);
+ spin_unlock_irq(&hw->hw_lock);
return 0;
}
@@ -2850,18 +2869,10 @@ static void skge_extirq(unsigned long data)
}
spin_unlock(&hw->phy_lock);
- local_irq_disable();
+ spin_lock_irq(&hw->hw_lock);
hw->intr_mask |= IS_EXT_REG;
skge_write32(hw, B0_IMSK, hw->intr_mask);
- local_irq_enable();
-}
-
-static inline void skge_wakeup(struct net_device *dev)
-{
- struct skge_port *skge = netdev_priv(dev);
-
- prefetch(skge->rx_ring.to_clean);
- netif_rx_schedule(dev);
+ spin_unlock_irq(&hw->hw_lock);
}
static irqreturn_t skge_intr(int irq, void *dev_id, struct pt_regs *regs)
@@ -2872,15 +2883,17 @@ static irqreturn_t skge_intr(int irq, void *dev_id, struct pt_regs *regs)
if (status == 0 || status == ~0) /* hotplug or shared irq */
return IRQ_NONE;
- status &= hw->intr_mask;
+ spin_lock(&hw->hw_lock);
if (status & IS_R1_F) {
+ skge_write8(hw, Q_ADDR(Q_R1, Q_CSR), CSR_IRQ_CL_F);
hw->intr_mask &= ~IS_R1_F;
- skge_wakeup(hw->dev[0]);
+ netif_rx_schedule(hw->dev[0]);
}
if (status & IS_R2_F) {
+ skge_write8(hw, Q_ADDR(Q_R2, Q_CSR), CSR_IRQ_CL_F);
hw->intr_mask &= ~IS_R2_F;
- skge_wakeup(hw->dev[1]);
+ netif_rx_schedule(hw->dev[1]);
}
if (status & IS_XA1_F)
@@ -2922,6 +2935,7 @@ static irqreturn_t skge_intr(int irq, void *dev_id, struct pt_regs *regs)
}
skge_write32(hw, B0_IMSK, hw->intr_mask);
+ spin_unlock(&hw->hw_lock);
return IRQ_HANDLED;
}
@@ -3290,6 +3304,7 @@ static int __devinit skge_probe(struct pci_dev *pdev,
hw->pdev = pdev;
spin_lock_init(&hw->phy_lock);
+ spin_lock_init(&hw->hw_lock);
tasklet_init(&hw->ext_tasklet, skge_extirq, (unsigned long) hw);
hw->regs = ioremap_nocache(pci_resource_start(pdev, 0), 0x4000);
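A common thread in the skge hunks above: hw->intr_mask is read-modify-written from process context (skge_up/skge_down, the NAPI poll completion, the PHY tasklet) and from the hard interrupt handler, and the new hw->hw_lock now serializes every update together with the B0_IMSK write; plain local_irq_disable() was not enough on SMP. The pattern, as a sketch of the two contexts:

	/* process context: lock and disable local IRQs, because skge_intr()
	 * takes the same lock from hard-interrupt context */
	spin_lock_irq(&hw->hw_lock);
	hw->intr_mask |= portirqmask[port];          /* or &= ~... on the down path */
	skge_write32(hw, B0_IMSK, hw->intr_mask);
	spin_unlock_irq(&hw->hw_lock);

	/* interrupt handler: plain spin_lock() suffices, interrupts are already off */
	spin_lock(&hw->hw_lock);
	/* ... ack the source, adjust hw->intr_mask, netif_rx_schedule() ... */
	skge_write32(hw, B0_IMSK, hw->intr_mask);
	spin_unlock(&hw->hw_lock);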
diff --git a/drivers/net/skge.h b/drivers/net/skge.h
index 2efdacc290e..941f12a333b 100644
--- a/drivers/net/skge.h
+++ b/drivers/net/skge.h
@@ -2402,6 +2402,7 @@ struct skge_hw {
struct tasklet_struct ext_tasklet;
spinlock_t phy_lock;
+ spinlock_t hw_lock;
};
enum {
diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c
index bfeba5b9cd7..ca8160d6822 100644
--- a/drivers/net/sky2.c
+++ b/drivers/net/sky2.c
@@ -195,11 +195,11 @@ static int sky2_set_power_state(struct sky2_hw *hw, pci_power_t state)
pr_debug("sky2_set_power_state %d\n", state);
sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
- pci_read_config_word(hw->pdev, hw->pm_cap + PCI_PM_PMC, &power_control);
+ power_control = sky2_pci_read16(hw, hw->pm_cap + PCI_PM_PMC);
vaux = (sky2_read16(hw, B0_CTST) & Y2_VAUX_AVAIL) &&
(power_control & PCI_PM_CAP_PME_D3cold);
- pci_read_config_word(hw->pdev, hw->pm_cap + PCI_PM_CTRL, &power_control);
+ power_control = sky2_pci_read16(hw, hw->pm_cap + PCI_PM_CTRL);
power_control |= PCI_PM_CTRL_PME_STATUS;
power_control &= ~(PCI_PM_CTRL_STATE_MASK);
@@ -223,7 +223,7 @@ static int sky2_set_power_state(struct sky2_hw *hw, pci_power_t state)
sky2_write8(hw, B2_Y2_CLK_GATE, 0);
/* Turn off phy power saving */
- pci_read_config_dword(hw->pdev, PCI_DEV_REG1, &reg1);
+ reg1 = sky2_pci_read32(hw, PCI_DEV_REG1);
reg1 &= ~(PCI_Y2_PHY1_POWD | PCI_Y2_PHY2_POWD);
/* looks like this XL is back asswards .. */
@@ -232,18 +232,28 @@ static int sky2_set_power_state(struct sky2_hw *hw, pci_power_t state)
if (hw->ports > 1)
reg1 |= PCI_Y2_PHY2_COMA;
}
- pci_write_config_dword(hw->pdev, PCI_DEV_REG1, reg1);
+
+ if (hw->chip_id == CHIP_ID_YUKON_EC_U) {
+ sky2_pci_write32(hw, PCI_DEV_REG3, 0);
+ reg1 = sky2_pci_read32(hw, PCI_DEV_REG4);
+ reg1 &= P_ASPM_CONTROL_MSK;
+ sky2_pci_write32(hw, PCI_DEV_REG4, reg1);
+ sky2_pci_write32(hw, PCI_DEV_REG5, 0);
+ }
+
+ sky2_pci_write32(hw, PCI_DEV_REG1, reg1);
+
break;
case PCI_D3hot:
case PCI_D3cold:
/* Turn on phy power saving */
- pci_read_config_dword(hw->pdev, PCI_DEV_REG1, &reg1);
+ reg1 = sky2_pci_read32(hw, PCI_DEV_REG1);
if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev > 1)
reg1 &= ~(PCI_Y2_PHY1_POWD | PCI_Y2_PHY2_POWD);
else
reg1 |= (PCI_Y2_PHY1_POWD | PCI_Y2_PHY2_POWD);
- pci_write_config_dword(hw->pdev, PCI_DEV_REG1, reg1);
+ sky2_pci_write32(hw, PCI_DEV_REG1, reg1);
if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev > 1)
sky2_write8(hw, B2_Y2_CLK_GATE, 0);
@@ -265,7 +275,7 @@ static int sky2_set_power_state(struct sky2_hw *hw, pci_power_t state)
ret = -1;
}
- pci_write_config_byte(hw->pdev, hw->pm_cap + PCI_PM_CTRL, power_control);
+ sky2_pci_write16(hw, hw->pm_cap + PCI_PM_CTRL, power_control);
sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
return ret;
}
@@ -463,16 +473,31 @@ static void sky2_phy_init(struct sky2_hw *hw, unsigned port)
ledover |= PHY_M_LED_MO_RX(MO_LED_OFF);
}
- gm_phy_write(hw, port, PHY_MARV_LED_CTRL, ledctrl);
+ if (hw->chip_id == CHIP_ID_YUKON_EC_U && hw->chip_rev >= 2) {
+ /* apply fixes in PHY AFE */
+ gm_phy_write(hw, port, 22, 255);
+ /* increase differential signal amplitude in 10BASE-T */
+ gm_phy_write(hw, port, 24, 0xaa99);
+ gm_phy_write(hw, port, 23, 0x2011);
- if (sky2->autoneg == AUTONEG_DISABLE || sky2->speed == SPEED_100) {
- /* turn on 100 Mbps LED (LED_LINK100) */
- ledover |= PHY_M_LED_MO_100(MO_LED_ON);
- }
+ /* fix for IEEE A/B Symmetry failure in 1000BASE-T */
+ gm_phy_write(hw, port, 24, 0xa204);
+ gm_phy_write(hw, port, 23, 0x2002);
- if (ledover)
- gm_phy_write(hw, port, PHY_MARV_LED_OVER, ledover);
+ /* set page register to 0 */
+ gm_phy_write(hw, port, 22, 0);
+ } else {
+ gm_phy_write(hw, port, PHY_MARV_LED_CTRL, ledctrl);
+ if (sky2->autoneg == AUTONEG_DISABLE || sky2->speed == SPEED_100) {
+ /* turn on 100 Mbps LED (LED_LINK100) */
+ ledover |= PHY_M_LED_MO_100(MO_LED_ON);
+ }
+
+ if (ledover)
+ gm_phy_write(hw, port, PHY_MARV_LED_OVER, ledover);
+
+ }
/* Enable phy interrupt on auto-negotiation complete (or link up) */
if (sky2->autoneg == AUTONEG_ENABLE)
gm_phy_write(hw, port, PHY_MARV_INT_MASK, PHY_M_IS_AN_COMPL);
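The EC-U rev 2 workaround above uses Marvell's paged PHY access: register 22 selects the page, the data goes through registers 23 and 24, and the page is switched back to 0 afterwards. A hedged sketch of that access pattern, with the MDIO write stubbed out as a print:

#include <stdint.h>
#include <stdio.h>

/* Stand-in for the driver's gm_phy_write(); here it only logs the access. */
static void phy_write(unsigned int reg, uint16_t val)
{
	printf("phy write reg %2u <- 0x%04x\n", reg, val);
}

/* Write one value to a register that lives on a non-zero PHY page:
 * select the page, perform the write, then restore page 0. */
static void phy_write_paged(uint16_t page, unsigned int reg, uint16_t val)
{
	phy_write(22, page);   /* page select register */
	phy_write(reg, val);
	phy_write(22, 0);      /* back to the default page */
}

int main(void)
{
	/* mirrors the shape of the AFE fix above: data in 24, address in 23 */
	phy_write_paged(255, 24, 0xaa99);
	phy_write_paged(255, 23, 0x2011);
	return 0;
}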
@@ -953,6 +978,12 @@ static int sky2_rx_start(struct sky2_port *sky2)
sky2->rx_put = sky2->rx_next = 0;
sky2_qset(hw, rxq);
+
+ if (hw->chip_id == CHIP_ID_YUKON_EC_U && hw->chip_rev >= 2) {
+ /* MAC Rx RAM Read is controlled by hardware */
+ sky2_write32(hw, Q_ADDR(rxq, Q_F), F_M_RX_RAM_DIS);
+ }
+
sky2_prefetch_init(hw, rxq, sky2->rx_le_map, RX_LE_SIZE - 1);
rx_set_checksum(sky2);
@@ -1035,9 +1066,10 @@ static int sky2_up(struct net_device *dev)
RB_RST_SET);
sky2_qset(hw, txqaddr[port]);
- if (hw->chip_id == CHIP_ID_YUKON_EC_U)
- sky2_write16(hw, Q_ADDR(txqaddr[port], Q_AL), 0x1a0);
+ /* Set almost empty threshold */
+ if (hw->chip_id == CHIP_ID_YUKON_EC_U && hw->chip_rev == 1)
+ sky2_write16(hw, Q_ADDR(txqaddr[port], Q_AL), 0x1a0);
sky2_prefetch_init(hw, txqaddr[port], sky2->tx_le_map,
TX_RING_SIZE - 1);
@@ -1047,8 +1079,10 @@ static int sky2_up(struct net_device *dev)
goto err_out;
/* Enable interrupts from phy/mac for port */
+ spin_lock_irq(&hw->hw_lock);
hw->intr_mask |= (port == 0) ? Y2_IS_PORT_1 : Y2_IS_PORT_2;
sky2_write32(hw, B0_IMSK, hw->intr_mask);
+ spin_unlock_irq(&hw->hw_lock);
return 0;
err_out:
@@ -1348,10 +1382,10 @@ static int sky2_down(struct net_device *dev)
netif_stop_queue(dev);
/* Disable port IRQ */
- local_irq_disable();
+ spin_lock_irq(&hw->hw_lock);
hw->intr_mask &= ~((sky2->port == 0) ? Y2_IS_IRQ_PHY1 : Y2_IS_IRQ_PHY2);
sky2_write32(hw, B0_IMSK, hw->intr_mask);
- local_irq_enable();
+ spin_unlock_irq(&hw->hw_lock);
flush_scheduled_work();
@@ -1633,10 +1667,10 @@ static void sky2_phy_task(void *arg)
out:
up(&sky2->phy_sema);
- local_irq_disable();
+ spin_lock_irq(&hw->hw_lock);
hw->intr_mask |= (sky2->port == 0) ? Y2_IS_IRQ_PHY1 : Y2_IS_IRQ_PHY2;
sky2_write32(hw, B0_IMSK, hw->intr_mask);
- local_irq_enable();
+ spin_unlock_irq(&hw->hw_lock);
}
@@ -1863,6 +1897,17 @@ static int sky2_poll(struct net_device *dev0, int *budget)
sky2_write32(hw, STAT_CTRL, SC_STAT_CLR_IRQ);
+ /*
+ * Kick the STAT_LEV_TIMER_CTRL timer.
+ * This fixes my hangs on Yukon-EC (0xb6) rev 1.
+ * The if clause is there to start the timer only if it has been
+ * configured correctly and not been disabled via ethtool.
+ */
+ if (sky2_read8(hw, STAT_LEV_TIMER_CTRL) == TIM_START) {
+ sky2_write8(hw, STAT_LEV_TIMER_CTRL, TIM_STOP);
+ sky2_write8(hw, STAT_LEV_TIMER_CTRL, TIM_START);
+ }
+
hwidx = sky2_read16(hw, STAT_PUT_IDX);
BUG_ON(hwidx >= STATUS_RING_SIZE);
rmb();
@@ -1945,16 +1990,19 @@ exit_loop:
sky2_tx_check(hw, 0, tx_done[0]);
sky2_tx_check(hw, 1, tx_done[1]);
+ if (sky2_read8(hw, STAT_TX_TIMER_CTRL) == TIM_START) {
+ sky2_write8(hw, STAT_TX_TIMER_CTRL, TIM_STOP);
+ sky2_write8(hw, STAT_TX_TIMER_CTRL, TIM_START);
+ }
+
if (likely(work_done < to_do)) {
- /* need to restart TX timer */
- if (is_ec_a1(hw)) {
- sky2_write8(hw, STAT_TX_TIMER_CTRL, TIM_STOP);
- sky2_write8(hw, STAT_TX_TIMER_CTRL, TIM_START);
- }
+ spin_lock_irq(&hw->hw_lock);
+ __netif_rx_complete(dev0);
- netif_rx_complete(dev0);
hw->intr_mask |= Y2_IS_STAT_BMU;
sky2_write32(hw, B0_IMSK, hw->intr_mask);
+ spin_unlock_irq(&hw->hw_lock);
+
return 0;
} else {
*budget -= work_done;
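With this change sky2_poll() only re-enables the status interrupt, under hw_lock, once the ring has been drained within the budget; otherwise it reports the work done and stays scheduled. The general shape of such a budgeted poll loop, sketched over an in-memory ring:

#include <stdbool.h>
#include <stdio.h>

#define RING_SIZE 16

static int ring[RING_SIZE];
static unsigned int head, tail;   /* producer / consumer indices */

/* Consume at most 'budget' entries; return how many were handled and
 * report through *done whether the ring is now empty, so the caller
 * knows it may re-enable its "interrupt" and stop polling. */
static int poll_ring(int budget, bool *done)
{
	int work = 0;

	while (work < budget && tail != head) {
		ring[tail % RING_SIZE] = 0;   /* "process" one entry */
		tail++;
		work++;
	}
	*done = (tail == head);
	return work;
}

int main(void)
{
	bool done;

	head = 10;                            /* pretend 10 entries are pending */
	int n = poll_ring(4, &done);
	printf("handled %d, done=%d\n", n, done);
	return 0;
}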
@@ -2017,13 +2065,13 @@ static void sky2_hw_intr(struct sky2_hw *hw)
if (status & (Y2_IS_MST_ERR | Y2_IS_IRQ_STAT)) {
u16 pci_err;
- pci_read_config_word(hw->pdev, PCI_STATUS, &pci_err);
+ pci_err = sky2_pci_read16(hw, PCI_STATUS);
if (net_ratelimit())
printk(KERN_ERR PFX "%s: pci hw error (0x%x)\n",
pci_name(hw->pdev), pci_err);
sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
- pci_write_config_word(hw->pdev, PCI_STATUS,
+ sky2_pci_write16(hw, PCI_STATUS,
pci_err | PCI_STATUS_ERROR_BITS);
sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
}
@@ -2032,7 +2080,7 @@ static void sky2_hw_intr(struct sky2_hw *hw)
/* PCI-Express uncorrectable Error occurred */
u32 pex_err;
- pci_read_config_dword(hw->pdev, PEX_UNC_ERR_STAT, &pex_err);
+ pex_err = sky2_pci_read32(hw, PEX_UNC_ERR_STAT);
if (net_ratelimit())
printk(KERN_ERR PFX "%s: pci express error (0x%x)\n",
@@ -2040,7 +2088,7 @@ static void sky2_hw_intr(struct sky2_hw *hw)
/* clear the interrupt */
sky2_write32(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
- pci_write_config_dword(hw->pdev, PEX_UNC_ERR_STAT,
+ sky2_pci_write32(hw, PEX_UNC_ERR_STAT,
0xffffffffUL);
sky2_write32(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
@@ -2086,6 +2134,7 @@ static void sky2_phy_intr(struct sky2_hw *hw, unsigned port)
hw->intr_mask &= ~(port == 0 ? Y2_IS_IRQ_PHY1 : Y2_IS_IRQ_PHY2);
sky2_write32(hw, B0_IMSK, hw->intr_mask);
+
schedule_work(&sky2->phy_task);
}
@@ -2099,6 +2148,7 @@ static irqreturn_t sky2_intr(int irq, void *dev_id, struct pt_regs *regs)
if (status == 0 || status == ~0)
return IRQ_NONE;
+ spin_lock(&hw->hw_lock);
if (status & Y2_IS_HW_ERR)
sky2_hw_intr(hw);
@@ -2127,7 +2177,7 @@ static irqreturn_t sky2_intr(int irq, void *dev_id, struct pt_regs *regs)
sky2_write32(hw, B0_Y2_SP_ICR, 2);
- sky2_read32(hw, B0_IMSK);
+ spin_unlock(&hw->hw_lock);
return IRQ_HANDLED;
}
@@ -2170,7 +2220,7 @@ static int sky2_reset(struct sky2_hw *hw)
{
u16 status;
u8 t8, pmd_type;
- int i, err;
+ int i;
sky2_write8(hw, B0_CTST, CS_RST_CLR);
@@ -2192,25 +2242,18 @@ static int sky2_reset(struct sky2_hw *hw)
sky2_write8(hw, B0_CTST, CS_RST_CLR);
/* clear PCI errors, if any */
- err = pci_read_config_word(hw->pdev, PCI_STATUS, &status);
- if (err)
- goto pci_err;
+ status = sky2_pci_read16(hw, PCI_STATUS);
sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
- err = pci_write_config_word(hw->pdev, PCI_STATUS,
- status | PCI_STATUS_ERROR_BITS);
- if (err)
- goto pci_err;
+ sky2_pci_write16(hw, PCI_STATUS, status | PCI_STATUS_ERROR_BITS);
+
sky2_write8(hw, B0_CTST, CS_MRST_CLR);
/* clear any PEX errors */
- if (pci_find_capability(hw->pdev, PCI_CAP_ID_EXP)) {
- err = pci_write_config_dword(hw->pdev, PEX_UNC_ERR_STAT,
- 0xffffffffUL);
- if (err)
- goto pci_err;
- }
+ if (pci_find_capability(hw->pdev, PCI_CAP_ID_EXP))
+ sky2_pci_write32(hw, PEX_UNC_ERR_STAT, 0xffffffffUL);
+
pmd_type = sky2_read8(hw, B2_PMD_TYP);
hw->copper = !(pmd_type == 'L' || pmd_type == 'S');
@@ -2309,8 +2352,7 @@ static int sky2_reset(struct sky2_hw *hw)
sky2_write8(hw, STAT_FIFO_ISR_WM, 16);
sky2_write32(hw, STAT_TX_TIMER_INI, sky2_us2clk(hw, 1000));
- sky2_write32(hw, STAT_LEV_TIMER_INI, sky2_us2clk(hw, 100));
- sky2_write32(hw, STAT_ISR_TIMER_INI, sky2_us2clk(hw, 20));
+ sky2_write32(hw, STAT_ISR_TIMER_INI, sky2_us2clk(hw, 7));
}
/* enable status unit */
@@ -2321,14 +2363,6 @@ static int sky2_reset(struct sky2_hw *hw)
sky2_write8(hw, STAT_ISR_TIMER_CTRL, TIM_START);
return 0;
-
-pci_err:
- /* This is to catch a BIOS bug workaround where
- * mmconfig table doesn't have other buses.
- */
- printk(KERN_ERR PFX "%s: can't access PCI config space\n",
- pci_name(hw->pdev));
- return err;
}
static u32 sky2_supported_modes(const struct sky2_hw *hw)
@@ -2852,11 +2886,11 @@ static int sky2_set_coalesce(struct net_device *dev,
(ecmd->rx_coalesce_usecs_irq < tmin || ecmd->rx_coalesce_usecs_irq > tmax))
return -EINVAL;
- if (ecmd->tx_max_coalesced_frames > 0xffff)
+ if (ecmd->tx_max_coalesced_frames >= TX_RING_SIZE-1)
return -EINVAL;
- if (ecmd->rx_max_coalesced_frames > 0xff)
+ if (ecmd->rx_max_coalesced_frames > RX_MAX_PENDING)
return -EINVAL;
- if (ecmd->rx_max_coalesced_frames_irq > 0xff)
+ if (ecmd->rx_max_coalesced_frames_irq > RX_MAX_PENDING)

return -EINVAL;
if (ecmd->tx_coalesce_usecs == 0)
@@ -3198,17 +3232,6 @@ static int __devinit sky2_probe(struct pci_dev *pdev,
}
}
-#ifdef __BIG_ENDIAN
- /* byte swap descriptors in hardware */
- {
- u32 reg;
-
- pci_read_config_dword(pdev, PCI_DEV_REG2, &reg);
- reg |= PCI_REV_DESC;
- pci_write_config_dword(pdev, PCI_DEV_REG2, reg);
- }
-#endif
-
err = -ENOMEM;
hw = kzalloc(sizeof(*hw), GFP_KERNEL);
if (!hw) {
@@ -3226,6 +3249,18 @@ static int __devinit sky2_probe(struct pci_dev *pdev,
goto err_out_free_hw;
}
hw->pm_cap = pm_cap;
+ spin_lock_init(&hw->hw_lock);
+
+#ifdef __BIG_ENDIAN
+ /* byte swap descriptors in hardware */
+ {
+ u32 reg;
+
+ reg = sky2_pci_read32(hw, PCI_DEV_REG2);
+ reg |= PCI_REV_DESC;
+ sky2_pci_write32(hw, PCI_DEV_REG2, reg);
+ }
+#endif
/* ring for status responses */
hw->st_le = pci_alloc_consistent(hw->pdev, STATUS_LE_BYTES,
diff --git a/drivers/net/sky2.h b/drivers/net/sky2.h
index fd12c289a23..3edb98075e0 100644
--- a/drivers/net/sky2.h
+++ b/drivers/net/sky2.h
@@ -5,14 +5,22 @@
#define _SKY2_H
/* PCI config registers */
-#define PCI_DEV_REG1 0x40
-#define PCI_DEV_REG2 0x44
-#define PCI_DEV_STATUS 0x7c
-#define PCI_OS_PCI_X (1<<26)
+enum {
+ PCI_DEV_REG1 = 0x40,
+ PCI_DEV_REG2 = 0x44,
+ PCI_DEV_STATUS = 0x7c,
+ PCI_DEV_REG3 = 0x80,
+ PCI_DEV_REG4 = 0x84,
+ PCI_DEV_REG5 = 0x88,
+};
-#define PEX_LNK_STAT 0xf2
-#define PEX_UNC_ERR_STAT 0x104
-#define PEX_DEV_CTRL 0xe8
+enum {
+ PEX_DEV_CAP = 0xe4,
+ PEX_DEV_CTRL = 0xe8,
+ PEX_DEV_STA = 0xea,
+ PEX_LNK_STAT = 0xf2,
+ PEX_UNC_ERR_STAT= 0x104,
+};
/* Yukon-2 */
enum pci_dev_reg_1 {
@@ -37,6 +45,25 @@ enum pci_dev_reg_2 {
PCI_USEDATA64 = 1<<0, /* Use 64Bit Data bus ext */
};
+/* PCI_OUR_REG_4 32 bit Our Register 4 (Yukon-ECU only) */
+enum pci_dev_reg_4 {
+ /* (Link Training & Status State Machine) */
+ P_TIMER_VALUE_MSK = 0xffL<<16, /* Bit 23..16: Timer Value Mask */
+ /* (Active State Power Management) */
+ P_FORCE_ASPM_REQUEST = 1<<15, /* Force ASPM Request (A1 only) */
+ P_ASPM_GPHY_LINK_DOWN = 1<<14, /* GPHY Link Down (A1 only) */
+ P_ASPM_INT_FIFO_EMPTY = 1<<13, /* Internal FIFO Empty (A1 only) */
+ P_ASPM_CLKRUN_REQUEST = 1<<12, /* CLKRUN Request (A1 only) */
+
+ P_ASPM_FORCE_CLKREQ_ENA = 1<<4, /* Force CLKREQ Enable (A1b only) */
+ P_ASPM_CLKREQ_PAD_CTL = 1<<3, /* CLKREQ PAD Control (A1 only) */
+ P_ASPM_A1_MODE_SELECT = 1<<2, /* A1 Mode Select (A1 only) */
+ P_CLK_GATE_PEX_UNIT_ENA = 1<<1, /* Enable Gate PEX Unit Clock */
+ P_CLK_GATE_ROOT_COR_ENA = 1<<0, /* Enable Gate Root Core Clock */
+ P_ASPM_CONTROL_MSK = P_FORCE_ASPM_REQUEST | P_ASPM_GPHY_LINK_DOWN
+ | P_ASPM_CLKRUN_REQUEST | P_ASPM_INT_FIFO_EMPTY,
+};
+
#define PCI_STATUS_ERROR_BITS (PCI_STATUS_DETECTED_PARITY | \
PCI_STATUS_SIG_SYSTEM_ERROR | \
@@ -507,6 +534,16 @@ enum {
};
#define Q_ADDR(reg, offs) (B8_Q_REGS + (reg) + (offs))
+/* Q_F 32 bit Flag Register */
+enum {
+ F_ALM_FULL = 1<<27, /* Rx FIFO: almost full */
+ F_EMPTY = 1<<27, /* Tx FIFO: empty flag */
+ F_FIFO_EOF = 1<<26, /* Tag (EOF Flag) bit in FIFO */
+ F_WM_REACHED = 1<<25, /* Watermark reached */
+ F_M_RX_RAM_DIS = 1<<24, /* MAC Rx RAM Read Port disable */
+ F_FIFO_LEVEL = 0x1fL<<16, /* Bit 23..16: # of Qwords in FIFO */
+ F_WATER_MARK = 0x0007ffL, /* Bit 10.. 0: Watermark */
+};
/* Queue Prefetch Unit Offsets, use Y2_QADDR() to address (Yukon-2 only)*/
enum {
@@ -909,10 +946,12 @@ enum {
PHY_BCOM_ID1_C0 = 0x6044,
PHY_BCOM_ID1_C5 = 0x6047,
- PHY_MARV_ID1_B0 = 0x0C23, /* Yukon (PHY 88E1011) */
+ PHY_MARV_ID1_B0 = 0x0C23, /* Yukon (PHY 88E1011) */
PHY_MARV_ID1_B2 = 0x0C25, /* Yukon-Plus (PHY 88E1011) */
- PHY_MARV_ID1_C2 = 0x0CC2, /* Yukon-EC (PHY 88E1111) */
- PHY_MARV_ID1_Y2 = 0x0C91, /* Yukon-2 (PHY 88E1112) */
+ PHY_MARV_ID1_C2 = 0x0CC2, /* Yukon-EC (PHY 88E1111) */
+ PHY_MARV_ID1_Y2 = 0x0C91, /* Yukon-2 (PHY 88E1112) */
+ PHY_MARV_ID1_FE = 0x0C83, /* Yukon-FE (PHY 88E3082 Rev.A1) */
+ PHY_MARV_ID1_ECU= 0x0CB0, /* Yukon-ECU (PHY 88E1149 Rev.B2?) */
};
/* Advertisement register bits */
@@ -1837,8 +1876,9 @@ struct sky2_port {
struct sky2_hw {
void __iomem *regs;
struct pci_dev *pdev;
- u32 intr_mask;
struct net_device *dev[2];
+ spinlock_t hw_lock;
+ u32 intr_mask;
int pm_cap;
int msi;
@@ -1912,4 +1952,25 @@ static inline void gma_set_addr(struct sky2_hw *hw, unsigned port, unsigned reg,
gma_write16(hw, port, reg+4,(u16) addr[2] | ((u16) addr[3] << 8));
gma_write16(hw, port, reg+8,(u16) addr[4] | ((u16) addr[5] << 8));
}
+
+/* PCI config space access */
+static inline u32 sky2_pci_read32(const struct sky2_hw *hw, unsigned reg)
+{
+ return sky2_read32(hw, Y2_CFG_SPC + reg);
+}
+
+static inline u16 sky2_pci_read16(const struct sky2_hw *hw, unsigned reg)
+{
+ return sky2_read16(hw, Y2_CFG_SPC + reg);
+}
+
+static inline void sky2_pci_write32(struct sky2_hw *hw, unsigned reg, u32 val)
+{
+ sky2_write32(hw, Y2_CFG_SPC + reg, val);
+}
+
+static inline void sky2_pci_write16(struct sky2_hw *hw, unsigned reg, u16 val)
+{
+ sky2_write16(hw, Y2_CFG_SPC + reg, val);
+}
#endif
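The new sky2_pci_read*/write* inlines above reach PCI configuration registers through the chip's mapped copy of config space (the Y2_CFG_SPC window) rather than through pci_read_config_*. A self-contained sketch of that window idea, with the register BAR modelled as a plain array and an illustrative window offset:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define CFG_WINDOW 0x1c00          /* illustrative offset of the config window */
#define DEV_REG1   0x40            /* a config register reached through it */

static uint8_t bar[0x4000];        /* stand-in for the ioremapped register file */

static uint32_t mmio_read32(unsigned int off)
{
	uint32_t v;
	memcpy(&v, bar + off, sizeof(v));
	return v;
}

static void mmio_write32(unsigned int off, uint32_t v)
{
	memcpy(bar + off, &v, sizeof(v));
}

/* Config-space accessors become plain MMIO accesses at window + register offset. */
static uint32_t cfg_read32(unsigned int reg)
{
	return mmio_read32(CFG_WINDOW + reg);
}

static void cfg_write32(unsigned int reg, uint32_t v)
{
	mmio_write32(CFG_WINDOW + reg, v);
}

int main(void)
{
	cfg_write32(DEV_REG1, 0x12345678);
	printf("REG1 = 0x%08x\n", cfg_read32(DEV_REG1));
	return 0;
}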
diff --git a/drivers/net/wireless/ipw2200.c b/drivers/net/wireless/ipw2200.c
index 0702f0eeb78..44024c76d18 100644
--- a/drivers/net/wireless/ipw2200.c
+++ b/drivers/net/wireless/ipw2200.c
@@ -4636,9 +4636,9 @@ static void ipw_rx_notification(struct ipw_priv *priv,
}
default:
- IPW_ERROR("Unknown notification: "
- "subtype=%d,flags=0x%2x,size=%d\n",
- notif->subtype, notif->flags, notif->size);
+ IPW_DEBUG_NOTIF("Unknown notification: "
+ "subtype=%d,flags=0x%2x,size=%d\n",
+ notif->subtype, notif->flags, notif->size);
}
}
diff --git a/drivers/s390/block/Kconfig b/drivers/s390/block/Kconfig
index 6912399d093..6f50cc9323d 100644
--- a/drivers/s390/block/Kconfig
+++ b/drivers/s390/block/Kconfig
@@ -55,21 +55,13 @@ config DASD_DIAG
Disks under VM. If you are not running under VM or unsure what it is,
say "N".
-config DASD_EER
- tristate "Extended error reporting (EER)"
- depends on DASD
- help
- This driver provides a character device interface to the
- DASD extended error reporting. This is only needed if you want to
- use applications written for the EER facility.
-
config DASD_CMB
tristate "Compatibility interface for DASD channel measurement blocks"
depends on DASD
help
- This driver provides an additional interface to the channel
- measurement facility, which is normally accessed though sysfs, with
- a set of ioctl functions specific to the dasd driver.
+ This driver provides an additional interface to the channel measurement
+ facility, which is normally accessed through sysfs, with a set of
+ ioctl functions specific to the dasd driver.
This is only needed if you want to use applications written for
linux-2.4 dasd channel measurement facility interface.
diff --git a/drivers/s390/block/Makefile b/drivers/s390/block/Makefile
index 0c0d871e8f5..58c6780134f 100644
--- a/drivers/s390/block/Makefile
+++ b/drivers/s390/block/Makefile
@@ -5,7 +5,6 @@
dasd_eckd_mod-objs := dasd_eckd.o dasd_3990_erp.o dasd_9343_erp.o
dasd_fba_mod-objs := dasd_fba.o dasd_3370_erp.o dasd_9336_erp.o
dasd_diag_mod-objs := dasd_diag.o
-dasd_eer_mod-objs := dasd_eer.o
dasd_mod-objs := dasd.o dasd_ioctl.o dasd_proc.o dasd_devmap.o \
dasd_genhd.o dasd_erp.o
@@ -14,6 +13,5 @@ obj-$(CONFIG_DASD_DIAG) += dasd_diag_mod.o
obj-$(CONFIG_DASD_ECKD) += dasd_eckd_mod.o
obj-$(CONFIG_DASD_FBA) += dasd_fba_mod.o
obj-$(CONFIG_DASD_CMB) += dasd_cmb.o
-obj-$(CONFIG_DASD_EER) += dasd_eer.o
obj-$(CONFIG_BLK_DEV_XPRAM) += xpram.o
obj-$(CONFIG_DCSSBLK) += dcssblk.o
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index 08c88fcd896..af1d5b404ce 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -18,7 +18,6 @@
#include <linux/slab.h>
#include <linux/buffer_head.h>
#include <linux/hdreg.h>
-#include <linux/notifier.h>
#include <asm/ccwdev.h>
#include <asm/ebcdic.h>
@@ -58,7 +57,6 @@ static void dasd_int_handler(struct ccw_device *, unsigned long, struct irb *);
static void dasd_flush_ccw_queue(struct dasd_device *, int);
static void dasd_tasklet(struct dasd_device *);
static void do_kick_device(void *data);
-static void dasd_disable_eer(struct dasd_device *device);
/*
* SECTION: Operations on the device structure.
@@ -153,10 +151,13 @@ dasd_state_new_to_known(struct dasd_device *device)
static inline void
dasd_state_known_to_new(struct dasd_device * device)
{
- /* disable extended error reporting for this device */
- dasd_disable_eer(device);
/* Forget the discipline information. */
+ if (device->discipline)
+ module_put(device->discipline->owner);
device->discipline = NULL;
+ if (device->base_discipline)
+ module_put(device->base_discipline->owner);
+ device->base_discipline = NULL;
device->state = DASD_STATE_NEW;
dasd_free_queue(device);
@@ -871,9 +872,6 @@ dasd_handle_state_change_pending(struct dasd_device *device)
struct dasd_ccw_req *cqr;
struct list_head *l, *n;
- /* first of all call extended error reporting */
- dasd_write_eer_trigger(DASD_EER_STATECHANGE, device, NULL);
-
device->stopped &= ~DASD_STOPPED_PENDING;
/* restart all 'running' IO on queue */
@@ -1093,19 +1091,6 @@ restart:
}
goto restart;
}
-
- /* first of all call extended error reporting */
- if (device->eer && cqr->status == DASD_CQR_FAILED) {
- dasd_write_eer_trigger(DASD_EER_FATALERROR,
- device, cqr);
-
- /* restart request */
- cqr->status = DASD_CQR_QUEUED;
- cqr->retries = 255;
- device->stopped |= DASD_STOPPED_QUIESCE;
- goto restart;
- }
-
/* Process finished ERP request. */
if (cqr->refers) {
__dasd_process_erp(device, cqr);
@@ -1243,8 +1228,7 @@ __dasd_start_head(struct dasd_device * device)
cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, list);
/* check FAILFAST */
if (device->stopped & ~DASD_STOPPED_PENDING &&
- test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags) &&
- (!device->eer)) {
+ test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags)) {
cqr->status = DASD_CQR_FAILED;
dasd_schedule_bh(device);
}
@@ -1880,9 +1864,10 @@ dasd_generic_remove (struct ccw_device *cdev)
*/
int
dasd_generic_set_online (struct ccw_device *cdev,
- struct dasd_discipline *discipline)
+ struct dasd_discipline *base_discipline)
{
+ struct dasd_discipline *discipline;
struct dasd_device *device;
int rc;
@@ -1890,6 +1875,7 @@ dasd_generic_set_online (struct ccw_device *cdev,
if (IS_ERR(device))
return PTR_ERR(device);
+ discipline = base_discipline;
if (device->features & DASD_FEATURE_USEDIAG) {
if (!dasd_diag_discipline_pointer) {
printk (KERN_WARNING
@@ -1901,6 +1887,16 @@ dasd_generic_set_online (struct ccw_device *cdev,
}
discipline = dasd_diag_discipline_pointer;
}
+ if (!try_module_get(base_discipline->owner)) {
+ dasd_delete_device(device);
+ return -EINVAL;
+ }
+ if (!try_module_get(discipline->owner)) {
+ module_put(base_discipline->owner);
+ dasd_delete_device(device);
+ return -EINVAL;
+ }
+ device->base_discipline = base_discipline;
device->discipline = discipline;
rc = discipline->check_device(device);
@@ -1909,6 +1905,8 @@ dasd_generic_set_online (struct ccw_device *cdev,
"dasd_generic couldn't online device %s "
"with discipline %s rc=%i\n",
cdev->dev.bus_id, discipline->name, rc);
+ module_put(discipline->owner);
+ module_put(base_discipline->owner);
dasd_delete_device(device);
return rc;
}
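dasd_generic_set_online() now pins both the base discipline's module and the discipline actually used, and releases them in reverse order on every failure path. The acquire-with-rollback shape of that, sketched with a toy reference count instead of try_module_get()/module_put():

#include <stdbool.h>
#include <stdio.h>

struct owner { const char *name; int refs; };

/* A real try_module_get() can fail; this toy version always succeeds. */
static bool try_get(struct owner *o) { o->refs++; return true; }
static void put(struct owner *o)     { o->refs--; }

/* Take a reference on both owners, or on neither. */
static int get_both(struct owner *base, struct owner *used)
{
	if (!try_get(base))
		return -1;
	if (!try_get(used)) {
		put(base);              /* roll back the first reference */
		return -1;
	}
	return 0;
}

int main(void)
{
	struct owner base = { "base", 0 }, diag = { "diag", 0 };

	if (get_both(&base, &diag) == 0)
		printf("refs: base=%d diag=%d\n", base.refs, diag.refs);
	return 0;
}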
@@ -1986,9 +1984,6 @@ dasd_generic_notify(struct ccw_device *cdev, int event)
switch (event) {
case CIO_GONE:
case CIO_NO_PATH:
- /* first of all call extended error reporting */
- dasd_write_eer_trigger(DASD_EER_NOPATH, device, NULL);
-
if (device->state < DASD_STATE_BASIC)
break;
/* Device is active. We want to keep it. */
@@ -2046,51 +2041,6 @@ dasd_generic_auto_online (struct ccw_driver *dasd_discipline_driver)
put_driver(drv);
}
-/*
- * notifications for extended error reports
- */
-static struct notifier_block *dasd_eer_chain;
-
-int
-dasd_register_eer_notifier(struct notifier_block *nb)
-{
- return notifier_chain_register(&dasd_eer_chain, nb);
-}
-
-int
-dasd_unregister_eer_notifier(struct notifier_block *nb)
-{
- return notifier_chain_unregister(&dasd_eer_chain, nb);
-}
-
-/*
- * Notify the registered error reporting module of a problem
- */
-void
-dasd_write_eer_trigger(unsigned int id, struct dasd_device *device,
- struct dasd_ccw_req *cqr)
-{
- if (device->eer) {
- struct dasd_eer_trigger temp;
- temp.id = id;
- temp.device = device;
- temp.cqr = cqr;
- notifier_call_chain(&dasd_eer_chain, DASD_EER_TRIGGER,
- (void *)&temp);
- }
-}
-
-/*
- * Tell the registered error reporting module to disable error reporting for
- * a given device and to cleanup any private data structures on that device.
- */
-static void
-dasd_disable_eer(struct dasd_device *device)
-{
- notifier_call_chain(&dasd_eer_chain, DASD_EER_DISABLE, (void *)device);
-}
-
-
static int __init
dasd_init(void)
{
@@ -2172,11 +2122,6 @@ EXPORT_SYMBOL_GPL(dasd_generic_set_online);
EXPORT_SYMBOL_GPL(dasd_generic_set_offline);
EXPORT_SYMBOL_GPL(dasd_generic_auto_online);
-EXPORT_SYMBOL(dasd_register_eer_notifier);
-EXPORT_SYMBOL(dasd_unregister_eer_notifier);
-EXPORT_SYMBOL(dasd_write_eer_trigger);
-
-
/*
* Overrides for Emacs so that we follow Linus's tabbing style.
* Emacs will notice this stuff at the end of the file and automatically
diff --git a/drivers/s390/block/dasd_3990_erp.c b/drivers/s390/block/dasd_3990_erp.c
index c811380b907..4ee0f934e32 100644
--- a/drivers/s390/block/dasd_3990_erp.c
+++ b/drivers/s390/block/dasd_3990_erp.c
@@ -1108,9 +1108,6 @@ dasd_3990_handle_env_data(struct dasd_ccw_req * erp, char *sense)
case 0x0B:
DEV_MESSAGE(KERN_WARNING, device, "%s",
"FORMAT F - Volume is suspended duplex");
- /* call extended error reporting (EER) */
- dasd_write_eer_trigger(DASD_EER_PPRCSUSPEND, device,
- erp->refers);
break;
case 0x0C:
DEV_MESSAGE(KERN_WARNING, device, "%s",
diff --git a/drivers/s390/block/dasd_eckd.h b/drivers/s390/block/dasd_eckd.h
index e15dd797805..bc3823d3522 100644
--- a/drivers/s390/block/dasd_eckd.h
+++ b/drivers/s390/block/dasd_eckd.h
@@ -29,7 +29,6 @@
#define DASD_ECKD_CCW_PSF 0x27
#define DASD_ECKD_CCW_RSSD 0x3e
#define DASD_ECKD_CCW_LOCATE_RECORD 0x47
-#define DASD_ECKD_CCW_SNSS 0x54
#define DASD_ECKD_CCW_DEFINE_EXTENT 0x63
#define DASD_ECKD_CCW_WRITE_MT 0x85
#define DASD_ECKD_CCW_READ_MT 0x86
diff --git a/drivers/s390/block/dasd_eer.c b/drivers/s390/block/dasd_eer.c
deleted file mode 100644
index f70cd7716b2..00000000000
--- a/drivers/s390/block/dasd_eer.c
+++ /dev/null
@@ -1,1090 +0,0 @@
-/*
- * character device driver for extended error reporting
- *
- *
- * Copyright (C) 2005 IBM Corporation
- * extended error reporting for DASD ECKD devices
- * Author(s): Stefan Weinhuber <wein@de.ibm.com>
- *
- */
-
-#include <linux/init.h>
-#include <linux/fs.h>
-#include <linux/kernel.h>
-#include <linux/miscdevice.h>
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-#include <linux/device.h>
-#include <linux/workqueue.h>
-#include <linux/poll.h>
-#include <linux/notifier.h>
-
-#include <asm/uaccess.h>
-#include <asm/semaphore.h>
-#include <asm/atomic.h>
-#include <asm/ebcdic.h>
-
-#include "dasd_int.h"
-#include "dasd_eckd.h"
-
-
-MODULE_LICENSE("GPL");
-
-MODULE_AUTHOR("Stefan Weinhuber <wein@de.ibm.com>");
-MODULE_DESCRIPTION("DASD extended error reporting module");
-
-
-#ifdef PRINTK_HEADER
-#undef PRINTK_HEADER
-#endif /* PRINTK_HEADER */
-#define PRINTK_HEADER "dasd(eer):"
-
-
-
-
-
-/*****************************************************************************/
-/* the internal buffer */
-/*****************************************************************************/
-
-/*
- * The internal buffer is meant to store opaque blobs of data, so it doesn't
- * know of higher level concepts like triggers.
- * It consists of a number of pages that are used as a ringbuffer. Each data
- * blob is stored in a simple record that consists of an integer, which
- * contains the size of the following data, and the data bytes themselves.
- *
- * To allow for multiple independent readers we create one internal buffer
- * each time the device is opened and destroy the buffer when the file is
- * closed again.
- *
- * One record can be written to a buffer by using the functions
- * - dasd_eer_start_record (one time per record to write the size to the buffer
- * and reserve the space for the data)
- * - dasd_eer_write_buffer (one or more times per record to write the data)
- * The data can be written in several steps but you will have to compute
- * the total size up front for the invocation of dasd_eer_start_record.
- * If the ringbuffer is full, dasd_eer_start_record will remove the required
- * number of old records.
- *
- * A record is typically read in two steps, first read the integer that
- * specifies the size of the following data, then read the data.
- * Both can be done by
- * - dasd_eer_read_buffer
- *
- * For all mentioned functions you need to get the bufferlock first and keep it
- * until a complete record is written or read.
- */
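The comment above describes the (now removed) eer ring buffer: every record is a length word followed by that many bytes, writers reserve space first, and the oldest records are dropped when the buffer is full. A much reduced, single-buffer user-space sketch of that record format, without the locking and page list of the original:

#include <stdio.h>
#include <string.h>

#define BUFSZ 64

static char buf[BUFSZ];
static int head, tail;                 /* head: write index, tail: read index */

static int used(void)  { return (head - tail + BUFSZ) % BUFSZ; }
static int avail(void) { return BUFSZ - used() - 1; }

static void put_bytes(const char *p, int n)
{
	for (int i = 0; i < n; i++) {
		buf[head] = p[i];
		head = (head + 1) % BUFSZ;
	}
}

static void get_bytes(char *p, int n)
{
	for (int i = 0; i < n; i++) {
		p[i] = buf[tail];
		tail = (tail + 1) % BUFSZ;
	}
}

/* Write one record: drop old records until the length word plus data fit. */
static void write_record(const char *data, int len)
{
	while (avail() < len + (int)sizeof(int)) {
		int old;
		get_bytes((char *)&old, sizeof(old));   /* length of the oldest record */
		tail = (tail + old) % BUFSZ;            /* skip its payload */
	}
	put_bytes((const char *)&len, sizeof(len));
	put_bytes(data, len);
}

/* Read one record into out (must be large enough); returns its length. */
static int read_record(char *out)
{
	int len;

	if (!used())
		return 0;
	get_bytes((char *)&len, sizeof(len));
	get_bytes(out, len);
	return len;
}

int main(void)
{
	char out[BUFSZ];

	for (int i = 0; i < 10; i++)
		write_record("some-error-record", 18);   /* 17 chars plus NUL */
	int n = read_record(out);
	printf("got %d bytes: %s\n", n, out);
	return 0;
}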
-
-
-/*
- * All information necessary to keep track of an internal buffer is kept in
- * a struct eerbuffer. The buffer specific to a file pointer is stored in
- * the private_data field of that file. To be able to write data to all
- * existing buffers, each buffer is also added to the bufferlist.
- * If the user doesn't want to read a complete record in one go, we have to
- * keep track of the rest of the record. residual stores the number of bytes
- * that are still to deliver. If the rest of the record is invalidated between
- * two reads then residual will be set to -1 so that the next read will fail.
- * All entries in the eerbuffer structure are protected with the bufferlock.
- * To avoid races between writing to a buffer on the one side and creating
- * and destroying buffers on the other side, the bufferlock must also be used
- * to protect the bufferlist.
- */
-
-struct eerbuffer {
- struct list_head list;
- char **buffer;
- int buffersize;
- int buffer_page_count;
- int head;
- int tail;
- int residual;
-};
-
-LIST_HEAD(bufferlist);
-
-static spinlock_t bufferlock = SPIN_LOCK_UNLOCKED;
-
-DECLARE_WAIT_QUEUE_HEAD(dasd_eer_read_wait_queue);
-
-/*
- * How many free bytes are available on the buffer.
- * needs to be called with bufferlock held
- */
-static int
-dasd_eer_get_free_bytes(struct eerbuffer *eerb)
-{
- if (eerb->head < eerb->tail) {
- return eerb->tail - eerb->head - 1;
- } else
- return eerb->buffersize - eerb->head + eerb->tail -1;
-}
-
-/*
- * How many bytes of buffer space are used.
- * needs to be called with bufferlock held
- */
-static int
-dasd_eer_get_filled_bytes(struct eerbuffer *eerb)
-{
-
- if (eerb->head >= eerb->tail) {
- return eerb->head - eerb->tail;
- } else
- return eerb->buffersize - eerb->tail + eerb->head;
-}
-
-/*
- * The dasd_eer_write_buffer function just copies count bytes of data
- * to the buffer. Make sure to call dasd_eer_start_record first, to
- * make sure that enough free space is available.
- * needs to be called with bufferlock held
- */
-static void
-dasd_eer_write_buffer(struct eerbuffer *eerb, int count, char *data)
-{
-
- unsigned long headindex,localhead;
- unsigned long rest, len;
- char *nextdata;
-
- nextdata = data;
- rest = count;
- while (rest > 0) {
- headindex = eerb->head / PAGE_SIZE;
- localhead = eerb->head % PAGE_SIZE;
- len = min(rest, (PAGE_SIZE - localhead));
- memcpy(eerb->buffer[headindex]+localhead, nextdata, len);
- nextdata += len;
- rest -= len;
- eerb->head += len;
- if ( eerb->head == eerb->buffersize )
- eerb->head = 0; /* wrap around */
- if (eerb->head > eerb->buffersize) {
- MESSAGE(KERN_ERR, "%s", "runaway buffer head.");
- BUG();
- }
- }
-}
-
-/*
- * needs to be called with bufferlock held
- */
-static int
-dasd_eer_read_buffer(struct eerbuffer *eerb, int count, char *data)
-{
-
- unsigned long tailindex,localtail;
- unsigned long rest, len, finalcount;
- char *nextdata;
-
- finalcount = min(count, dasd_eer_get_filled_bytes(eerb));
- nextdata = data;
- rest = finalcount;
- while (rest > 0) {
- tailindex = eerb->tail / PAGE_SIZE;
- localtail = eerb->tail % PAGE_SIZE;
- len = min(rest, (PAGE_SIZE - localtail));
- memcpy(nextdata, eerb->buffer[tailindex]+localtail, len);
- nextdata += len;
- rest -= len;
- eerb->tail += len;
- if ( eerb->tail == eerb->buffersize )
- eerb->tail = 0; /* wrap around */
- if (eerb->tail > eerb->buffersize) {
- MESSAGE(KERN_ERR, "%s", "runaway buffer tail.");
- BUG();
- }
- }
- return finalcount;
-}
-
-/*
- * Whenever you want to write a blob of data to the internal buffer you
- * have to start by using this function first. It will write the number
- * of bytes that will be written to the buffer. If necessary it will remove
- * old records to make room for the new one.
- * needs to be called with bufferlock held
- */
-static int
-dasd_eer_start_record(struct eerbuffer *eerb, int count)
-{
- int tailcount;
- if (count + sizeof(count) > eerb->buffersize)
- return -ENOMEM;
- while (dasd_eer_get_free_bytes(eerb) < count + sizeof(count)) {
- if (eerb->residual > 0) {
- eerb->tail += eerb->residual;
- if (eerb->tail >= eerb->buffersize)
- eerb->tail -= eerb->buffersize;
- eerb->residual = -1;
- }
- dasd_eer_read_buffer(eerb, sizeof(tailcount),
- (char*)(&tailcount));
- eerb->tail += tailcount;
- if (eerb->tail >= eerb->buffersize)
- eerb->tail -= eerb->buffersize;
- }
- dasd_eer_write_buffer(eerb, sizeof(count), (char*)(&count));
-
- return 0;
-};
-
-/*
- * release pages that are not used anymore
- */
-static void
-dasd_eer_free_buffer_pages(char **buf, int no_pages)
-{
- int i;
-
- for (i = 0; i < no_pages; ++i) {
- free_page((unsigned long)buf[i]);
- }
-}
-
-/*
- * allocate a new set of memory pages
- */
-static int
-dasd_eer_allocate_buffer_pages(char **buf, int no_pages)
-{
- int i;
-
- for (i = 0; i < no_pages; ++i) {
- buf[i] = (char *) get_zeroed_page(GFP_KERNEL);
- if (!buf[i]) {
- dasd_eer_free_buffer_pages(buf, i);
- return -ENOMEM;
- }
- }
- return 0;
-}
-
-/*
- * empty the buffer by resetting head and tail
- * In case there is a half read data blob in the buffer, we set residual
- * to -1 to indicate that the remainder of the blob is lost.
- */
-static void
-dasd_eer_purge_buffer(struct eerbuffer *eerb)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&bufferlock, flags);
- if (eerb->residual > 0)
- eerb->residual = -1;
- eerb->tail=0;
- eerb->head=0;
- spin_unlock_irqrestore(&bufferlock, flags);
-}
-
-/*
- * set the size of the buffer, newsize is the new number of pages to be used
- * we don't try to copy any data back and forth, so any resize will also purge
- * the buffer
- */
-static int
-dasd_eer_resize_buffer(struct eerbuffer *eerb, int newsize)
-{
- int i, oldcount, reuse;
- char **new;
- char **old;
- unsigned long flags;
-
- if (newsize < 1)
- return -EINVAL;
- if (eerb->buffer_page_count == newsize) {
- /* documented behaviour is that any successful invocation
- * will purge all records */
- dasd_eer_purge_buffer(eerb);
- return 0;
- }
- new = kmalloc(newsize*sizeof(char*), GFP_KERNEL);
- if (!new)
- return -ENOMEM;
-
- reuse=min(eerb->buffer_page_count, newsize);
- for (i = 0; i < reuse; ++i) {
- new[i] = eerb->buffer[i];
- }
- if (eerb->buffer_page_count < newsize) {
- if (dasd_eer_allocate_buffer_pages(
- &new[eerb->buffer_page_count],
- newsize - eerb->buffer_page_count)) {
- kfree(new);
- return -ENOMEM;
- }
- }
-
- spin_lock_irqsave(&bufferlock, flags);
- old = eerb->buffer;
- eerb->buffer = new;
- if (eerb->residual > 0)
- eerb->residual = -1;
- eerb->tail = 0;
- eerb->head = 0;
- oldcount = eerb->buffer_page_count;
- eerb->buffer_page_count = newsize;
- spin_unlock_irqrestore(&bufferlock, flags);
-
- if (oldcount > newsize) {
- for (i = newsize; i < oldcount; ++i) {
- free_page((unsigned long)old[i]);
- }
- }
- kfree(old);
-
- return 0;
-}
-
-
-/*****************************************************************************/
-/* The extended error reporting functionality */
-/*****************************************************************************/
-
-/*
- * When a DASD device driver wants to report an error, it calls the
- * function dasd_eer_write_trigger (via a notifier mechanism) and gives the
- * respective trigger ID as parameter.
- * Currently there are four kinds of triggers:
- *
- * DASD_EER_FATALERROR: all kinds of unrecoverable I/O problems
- * DASD_EER_PPRCSUSPEND: PPRC was suspended
- * DASD_EER_NOPATH: There is no path to the device left.
- * DASD_EER_STATECHANGE: The state of the device has changed.
- *
- * For the first three triggers all required information can be supplied by
- * the caller. For these triggers a record is written by the function
- * dasd_eer_write_standard_trigger.
- *
- * When dasd_eer_write_trigger is called to write a DASD_EER_STATECHANGE
- * trigger, we have to gather the necessary sense data first. We cannot queue
- * the necessary SNSS (sense subsystem status) request immediately, since we
- * are likely to run in a deadlock situation. Instead, we schedule a
- * work_struct that calls the function dasd_eer_sense_subsystem_status to
- * create and start an SNSS request asynchronously.
- *
- * To avoid memory allocations at runtime, the necessary memory is allocated
- * when the extended error reporting is enabled for a device (by
- * dasd_eer_probe). There is one private eer data structure for each eer
- * enabled DASD device. It contains memory for the work_struct, one SNSS cqr
- * and a flags field that is used to coordinate the use of the cqr. The call
- * to write a state change trigger can come in at any time, so we have one flag
- * CQR_IN_USE that protects the cqr itself. When this flag indicates that the
- * cqr is currently in use, dasd_eer_sense_subsystem_status cannot start a
- * second request but sets the SNSS_REQUESTED flag instead.
- *
- * When the request is finished, the callback function dasd_eer_SNSS_cb
- * is called. This function will invoke the function
- * dasd_eer_write_SNSS_trigger to finally write the trigger. It will also
- * check the SNSS_REQUESTED flag and if it is set it will call
- * dasd_eer_sense_subsystem_status again.
- *
- * To avoid race conditions during the handling of the lock, the flags must
- * be protected by the snsslock.
- */
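The comment above explains how the single SNSS request is shared: a state-change trigger either claims it (CQR_IN_USE) or, if it is already running, only marks SNSS_REQUESTED so that the completion callback restarts it exactly once. The same two-flag handshake in a small user-space sketch, with plain ints under a mutex instead of kernel bitops and the snsslock:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int in_use;       /* the single request is currently running */
static int requested;    /* another run was asked for while it was busy */

static void start_request(void);

/* Called whenever a new trigger arrives. */
static void kick(void)
{
	pthread_mutex_lock(&lock);
	if (in_use) {
		requested = 1;           /* coalesce: run once more when done */
		pthread_mutex_unlock(&lock);
		return;
	}
	in_use = 1;
	pthread_mutex_unlock(&lock);
	start_request();
}

/* Called when the request finishes. */
static void complete(void)
{
	int again;

	pthread_mutex_lock(&lock);
	again = requested;
	requested = 0;
	in_use = 0;
	pthread_mutex_unlock(&lock);
	if (again)
		kick();                  /* restart exactly once */
}

static void start_request(void)
{
	printf("request started\n");
}

int main(void)
{
	kick();        /* starts the request */
	kick();        /* request busy: only sets 'requested' */
	complete();    /* finishes and immediately restarts once */
	complete();
	return 0;
}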
-
-struct dasd_eer_private {
- struct dasd_ccw_req *cqr;
- unsigned long flags;
- struct work_struct worker;
-};
-
-static void dasd_eer_destroy(struct dasd_device *device,
- struct dasd_eer_private *eer);
-static int
-dasd_eer_write_trigger(struct dasd_eer_trigger *trigger);
-static void dasd_eer_sense_subsystem_status(void *data);
-static int dasd_eer_notify(struct notifier_block *self,
- unsigned long action, void *data);
-
-struct workqueue_struct *dasd_eer_workqueue;
-
-#define SNSS_DATA_SIZE 44
-static spinlock_t snsslock = SPIN_LOCK_UNLOCKED;
-
-#define DASD_EER_BUSID_SIZE 10
-struct dasd_eer_header {
- __u32 total_size;
- __u32 trigger;
- __u64 tv_sec;
- __u64 tv_usec;
- char busid[DASD_EER_BUSID_SIZE];
-} __attribute__ ((packed));
-
-static struct notifier_block dasd_eer_nb = {
- .notifier_call = dasd_eer_notify,
-};
-
-/*
- * flags for use with dasd_eer_private
- */
-#define CQR_IN_USE 0
-#define SNSS_REQUESTED 1
-
-/*
- * This function checks if extended error reporting is available for a given
- * dasd_device. If yes, then it creates and returns a struct dasd_eer,
- * otherwise it returns an -EPERM error pointer.
- */
-struct dasd_eer_private *
-dasd_eer_probe(struct dasd_device *device)
-{
- struct dasd_eer_private *private;
-
- if (!(device && device->discipline
- && !strcmp(device->discipline->name, "ECKD"))) {
- return ERR_PTR(-EPERM);
- }
- /* allocate the private data structure */
- private = (struct dasd_eer_private *)kmalloc(
- sizeof(struct dasd_eer_private), GFP_KERNEL);
- if (!private) {
- return ERR_PTR(-ENOMEM);
- }
- INIT_WORK(&private->worker, dasd_eer_sense_subsystem_status,
- (void *)device);
- private->cqr = dasd_kmalloc_request("ECKD",
- 1 /* SNSS */ ,
- SNSS_DATA_SIZE ,
- device);
- if (!private->cqr) {
- kfree(private);
- return ERR_PTR(-ENOMEM);
- }
- private->flags = 0;
- return private;
-};
-
-/*
- * If our private SNSS request is queued, remove it from the
- * dasd ccw queue so we can free the requests memory.
- */
-static void
-dasd_eer_dequeue_SNSS_request(struct dasd_device *device,
- struct dasd_eer_private *eer)
-{
- struct list_head *lst, *nxt;
- struct dasd_ccw_req *cqr, *erpcqr;
- dasd_erp_fn_t erp_fn;
-
- spin_lock_irq(get_ccwdev_lock(device->cdev));
- list_for_each_safe(lst, nxt, &device->ccw_queue) {
- cqr = list_entry(lst, struct dasd_ccw_req, list);
- /* we are looking for two kinds of requests */
- /* first kind: our SNSS request: */
- if (cqr == eer->cqr) {
- if (cqr->status == DASD_CQR_IN_IO)
- device->discipline->term_IO(cqr);
- list_del(&cqr->list);
- break;
- }
- /* second kind: ERP requests for our SNSS request */
- if (cqr->refers) {
- /* If this erp request chain ends in our cqr, then */
- /* call the erp_postaction to clean it up */
- erpcqr = cqr;
- while (erpcqr->refers) {
- erpcqr = erpcqr->refers;
- }
- if (erpcqr == eer->cqr) {
- erp_fn = device->discipline->erp_postaction(
- cqr);
- erp_fn(cqr);
- }
- continue;
- }
- }
- spin_unlock_irq(get_ccwdev_lock(device->cdev));
-}
-
-/*
- * This function dismantles a struct dasd_eer that was created by
- * dasd_eer_probe. Since we want to free our private data structure,
- * we must make sure that the memory is not in use anymore.
- * We have to flush the work queue and remove a possible SNSS request
- * from the dasd queue.
- */
-static void
-dasd_eer_destroy(struct dasd_device *device, struct dasd_eer_private *eer)
-{
- flush_workqueue(dasd_eer_workqueue);
- dasd_eer_dequeue_SNSS_request(device, eer);
- dasd_kfree_request(eer->cqr, device);
- kfree(eer);
-};
-
-/*
- * enable the extended error reporting for a particular device
- */
-static int
-dasd_eer_enable_on_device(struct dasd_device *device)
-{
- void *eer;
- if (!device)
- return -ENODEV;
- if (device->eer)
- return 0;
- if (!try_module_get(THIS_MODULE)) {
- return -EINVAL;
- }
- eer = (void *)dasd_eer_probe(device);
- if (IS_ERR(eer)) {
- module_put(THIS_MODULE);
- return PTR_ERR(eer);
- }
- device->eer = eer;
- return 0;
-}
-
-/*
- * disable the extended error reporting for a particular device
- */
-static int
-dasd_eer_disable_on_device(struct dasd_device *device)
-{
- struct dasd_eer_private *eer = device->eer;
-
- if (!device)
- return -ENODEV;
- if (!device->eer)
- return 0;
- device->eer = NULL;
- dasd_eer_destroy(device,eer);
- module_put(THIS_MODULE);
-
- return 0;
-}
-
-/*
- * Set extended error reporting (eer)
- * Note: This will be registered as a DASD ioctl, to be called on DASD devices.
- */
-static int
-dasd_ioctl_set_eer(struct block_device *bdev, int no, long args)
-{
- struct dasd_device *device;
- int intval;
-
- if (!capable(CAP_SYS_ADMIN))
- return -EACCES;
- if (bdev != bdev->bd_contains)
- /* Error-reporting is not allowed for partitions */
- return -EINVAL;
- if (get_user(intval, (int __user *) args))
- return -EFAULT;
- device = bdev->bd_disk->private_data;
- if (device == NULL)
- return -ENODEV;
-
- intval = (intval != 0);
- DEV_MESSAGE (KERN_DEBUG, device,
- "set eer on device to %d", intval);
- if (intval)
- return dasd_eer_enable_on_device(device);
- else
- return dasd_eer_disable_on_device(device);
-}
-
-/*
- * Get value of extended error reporting.
- * Note: This will be registered as a DASD ioctl, to be called on DASD devices.
- */
-static int
-dasd_ioctl_get_eer(struct block_device *bdev, int no, long args)
-{
- struct dasd_device *device;
-
- device = bdev->bd_disk->private_data;
- if (device == NULL)
- return -ENODEV;
- return put_user((device->eer != NULL), (int __user *) args);
-}
-
-/*
- * The following function can be used for those triggers that have
- * all necessary data available when the function is called.
- * If the parameter cqr is not NULL, the chain of requests will be searched
- * for valid sense data, and all valid sense data sets will be added to
- * the triggers data.
- */
-static int
-dasd_eer_write_standard_trigger(int trigger, struct dasd_device *device,
- struct dasd_ccw_req *cqr)
-{
- struct dasd_ccw_req *temp_cqr;
- int data_size;
- struct timeval tv;
- struct dasd_eer_header header;
- unsigned long flags;
- struct eerbuffer *eerb;
-
- /* go through cqr chain and count the valid sense data sets */
- temp_cqr = cqr;
- data_size = 0;
- while (temp_cqr) {
- if (temp_cqr->irb.esw.esw0.erw.cons)
- data_size += 32;
- temp_cqr = temp_cqr->refers;
- }
-
- header.total_size = sizeof(header) + data_size + 4; /* "EOR" */
- header.trigger = trigger;
- do_gettimeofday(&tv);
- header.tv_sec = tv.tv_sec;
- header.tv_usec = tv.tv_usec;
- strncpy(header.busid, device->cdev->dev.bus_id, DASD_EER_BUSID_SIZE);
-
- spin_lock_irqsave(&bufferlock, flags);
- list_for_each_entry(eerb, &bufferlist, list) {
- dasd_eer_start_record(eerb, header.total_size);
- dasd_eer_write_buffer(eerb, sizeof(header), (char*)(&header));
- temp_cqr = cqr;
- while (temp_cqr) {
- if (temp_cqr->irb.esw.esw0.erw.cons)
- dasd_eer_write_buffer(eerb, 32, cqr->irb.ecw);
- temp_cqr = temp_cqr->refers;
- }
- dasd_eer_write_buffer(eerb, 4,"EOR");
- }
- spin_unlock_irqrestore(&bufferlock, flags);
-
- wake_up_interruptible(&dasd_eer_read_wait_queue);
-
- return 0;
-}
-
-/*
- * This function writes a DASD_EER_STATECHANGE trigger.
- */
-static void
-dasd_eer_write_SNSS_trigger(struct dasd_device *device,
- struct dasd_ccw_req *cqr)
-{
- int data_size;
- int snss_rc;
- struct timeval tv;
- struct dasd_eer_header header;
- unsigned long flags;
- struct eerbuffer *eerb;
-
- snss_rc = (cqr->status == DASD_CQR_FAILED) ? -EIO : 0;
- if (snss_rc)
- data_size = 0;
- else
- data_size = SNSS_DATA_SIZE;
-
- header.total_size = sizeof(header) + data_size + 4; /* "EOR" */
- header.trigger = DASD_EER_STATECHANGE;
- do_gettimeofday(&tv);
- header.tv_sec = tv.tv_sec;
- header.tv_usec = tv.tv_usec;
- strncpy(header.busid, device->cdev->dev.bus_id, DASD_EER_BUSID_SIZE);
-
- spin_lock_irqsave(&bufferlock, flags);
- list_for_each_entry(eerb, &bufferlist, list) {
- dasd_eer_start_record(eerb, header.total_size);
- dasd_eer_write_buffer(eerb, sizeof(header),(char*)(&header));
- if (!snss_rc)
- dasd_eer_write_buffer(eerb, SNSS_DATA_SIZE, cqr->data);
- dasd_eer_write_buffer(eerb, 4,"EOR");
- }
- spin_unlock_irqrestore(&bufferlock, flags);
-
- wake_up_interruptible(&dasd_eer_read_wait_queue);
-}
-
-/*
- * callback function for use with SNSS request
- */
-static void
-dasd_eer_SNSS_cb(struct dasd_ccw_req *cqr, void *data)
-{
- struct dasd_device *device;
- struct dasd_eer_private *private;
- unsigned long irqflags;
-
- device = (struct dasd_device *)data;
- private = (struct dasd_eer_private *)device->eer;
- dasd_eer_write_SNSS_trigger(device, cqr);
- spin_lock_irqsave(&snsslock, irqflags);
- if(!test_and_clear_bit(SNSS_REQUESTED, &private->flags)) {
- clear_bit(CQR_IN_USE, &private->flags);
- spin_unlock_irqrestore(&snsslock, irqflags);
- return;
- };
- clear_bit(CQR_IN_USE, &private->flags);
- spin_unlock_irqrestore(&snsslock, irqflags);
- dasd_eer_sense_subsystem_status(device);
- return;
-}
-
-/*
- * clean a used cqr before using it again
- */
-static void
-dasd_eer_clean_SNSS_request(struct dasd_ccw_req *cqr)
-{
- struct ccw1 *cpaddr = cqr->cpaddr;
- void *data = cqr->data;
-
- memset(cqr, 0, sizeof(struct dasd_ccw_req));
- memset(cpaddr, 0, sizeof(struct ccw1));
- memset(data, 0, SNSS_DATA_SIZE);
- cqr->cpaddr = cpaddr;
- cqr->data = data;
- strncpy((char *) &cqr->magic, "ECKD", 4);
- ASCEBC((char *) &cqr->magic, 4);
- set_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
-}
-
-/*
- * build and start an SNSS request
- * This function is called from a work queue so we have to
- * pass the dasd_device pointer as a void pointer.
- */
-static void
-dasd_eer_sense_subsystem_status(void *data)
-{
- struct dasd_device *device;
- struct dasd_eer_private *private;
- struct dasd_ccw_req *cqr;
- struct ccw1 *ccw;
- unsigned long irqflags;
-
- device = (struct dasd_device *)data;
- private = (struct dasd_eer_private *)device->eer;
- if (!private) /* device not eer enabled any more */
- return;
- cqr = private->cqr;
- spin_lock_irqsave(&snsslock, irqflags);
- if(test_and_set_bit(CQR_IN_USE, &private->flags)) {
- set_bit(SNSS_REQUESTED, &private->flags);
- spin_unlock_irqrestore(&snsslock, irqflags);
- return;
- };
- spin_unlock_irqrestore(&snsslock, irqflags);
- dasd_eer_clean_SNSS_request(cqr);
- cqr->device = device;
- cqr->retries = 255;
- cqr->expires = 10 * HZ;
-
- ccw = cqr->cpaddr;
- ccw->cmd_code = DASD_ECKD_CCW_SNSS;
- ccw->count = SNSS_DATA_SIZE;
- ccw->flags = 0;
- ccw->cda = (__u32)(addr_t)cqr->data;
-
- cqr->buildclk = get_clock();
- cqr->status = DASD_CQR_FILLED;
- cqr->callback = dasd_eer_SNSS_cb;
- cqr->callback_data = (void *)device;
- dasd_add_request_head(cqr);
-
- return;
-}
-
-/*
- * This function is called for all triggers. It calls the appropriate
- * function that writes the actual trigger records.
- */
-static int
-dasd_eer_write_trigger(struct dasd_eer_trigger *trigger)
-{
- int rc;
- struct dasd_eer_private *private = trigger->device->eer;
-
- switch (trigger->id) {
- case DASD_EER_FATALERROR:
- case DASD_EER_PPRCSUSPEND:
- rc = dasd_eer_write_standard_trigger(
- trigger->id, trigger->device, trigger->cqr);
- break;
- case DASD_EER_NOPATH:
- rc = dasd_eer_write_standard_trigger(
- trigger->id, trigger->device, NULL);
- break;
- case DASD_EER_STATECHANGE:
- if (queue_work(dasd_eer_workqueue, &private->worker)) {
- rc=0;
- } else {
- /* If the work_struct was already queued, it can't
- * be queued again. But this is OK since we don't
- * need to have it queued twice.
- */
- rc = -EBUSY;
- }
- break;
- default: /* unknown trigger, so we write it without any sense data */
- rc = dasd_eer_write_standard_trigger(
- trigger->id, trigger->device, NULL);
- break;
- }
- return rc;
-}
-
-/*
- * This function is registered with the dasd device driver and gets called
- * for all dasd eer notifications.
- */
-static int dasd_eer_notify(struct notifier_block *self,
- unsigned long action, void *data)
-{
- switch (action) {
- case DASD_EER_DISABLE:
- dasd_eer_disable_on_device((struct dasd_device *)data);
- break;
- case DASD_EER_TRIGGER:
- dasd_eer_write_trigger((struct dasd_eer_trigger *)data);
- break;
- }
- return NOTIFY_OK;
-}
-
-
-/*****************************************************************************/
-/* the device operations */
-/*****************************************************************************/
-
-/*
- * On the one side we need a lock to access our internal buffer, on the
- * other side a copy_to_user can sleep. So we need to copy the data we have
- * to transfer in a readbuffer, which is protected by the readbuffer_mutex.
- */
-static char readbuffer[PAGE_SIZE];
-DECLARE_MUTEX(readbuffer_mutex);
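As the comment above notes, copy_to_user() may sleep, so the read path copies a record out of the ring under the spinlock into the static readbuffer first and only then hands it to user space, with readbuffer_mutex serialising readers. A user-space sketch of that two-stage copy, using mutexes for both roles:

#include <pthread.h>
#include <stdio.h>
#include <string.h>

#define RECSZ 32

static char ring[RECSZ] = "record pulled from the ring";
static char bounce[RECSZ];                    /* the "readbuffer" */
static pthread_mutex_t reader_lock = PTHREAD_MUTEX_INITIALIZER;  /* readbuffer_mutex role */
static pthread_mutex_t ring_lock   = PTHREAD_MUTEX_INITIALIZER;  /* bufferlock role */

/* Copy a record to 'user' without holding the ring lock across the
 * (potentially sleeping) copy to user space. */
static int read_one(char *user, size_t len)
{
	pthread_mutex_lock(&reader_lock);    /* one reader owns the bounce buffer */

	pthread_mutex_lock(&ring_lock);      /* short critical section: ring -> bounce */
	memcpy(bounce, ring, RECSZ);
	pthread_mutex_unlock(&ring_lock);

	/* stand-in for copy_to_user(), done outside the ring lock */
	memcpy(user, bounce, len < sizeof(bounce) ? len : sizeof(bounce));
	pthread_mutex_unlock(&reader_lock);
	return 0;
}

int main(void)
{
	char out[RECSZ];

	read_one(out, sizeof(out));
	printf("%s\n", out);
	return 0;
}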
-
-
-static int
-dasd_eer_open(struct inode *inp, struct file *filp)
-{
- struct eerbuffer *eerb;
- unsigned long flags;
-
- eerb = kmalloc(sizeof(struct eerbuffer), GFP_KERNEL);
- eerb->head = 0;
- eerb->tail = 0;
- eerb->residual = 0;
- eerb->buffer_page_count = 1;
- eerb->buffersize = eerb->buffer_page_count * PAGE_SIZE;
- eerb->buffer = kmalloc(eerb->buffer_page_count*sizeof(char*),
- GFP_KERNEL);
- if (!eerb->buffer)
- return -ENOMEM;
- if (dasd_eer_allocate_buffer_pages(eerb->buffer,
- eerb->buffer_page_count)) {
- kfree(eerb->buffer);
- return -ENOMEM;
- }
- filp->private_data = eerb;
- spin_lock_irqsave(&bufferlock, flags);
- list_add(&eerb->list, &bufferlist);
- spin_unlock_irqrestore(&bufferlock, flags);
-
- return nonseekable_open(inp,filp);
-}
-
-static int
-dasd_eer_close(struct inode *inp, struct file *filp)
-{
- struct eerbuffer *eerb;
- unsigned long flags;
-
- eerb = (struct eerbuffer *)filp->private_data;
- spin_lock_irqsave(&bufferlock, flags);
- list_del(&eerb->list);
- spin_unlock_irqrestore(&bufferlock, flags);
- dasd_eer_free_buffer_pages(eerb->buffer, eerb->buffer_page_count);
- kfree(eerb->buffer);
- kfree(eerb);
-
- return 0;
-}
-
-static long
-dasd_eer_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
-{
- int intval;
- struct eerbuffer *eerb;
-
- eerb = (struct eerbuffer *)filp->private_data;
- switch (cmd) {
- case DASD_EER_PURGE:
- dasd_eer_purge_buffer(eerb);
- return 0;
- case DASD_EER_SETBUFSIZE:
- if (get_user(intval, (int __user *)arg))
- return -EFAULT;
- return dasd_eer_resize_buffer(eerb, intval);
- default:
- return -ENOIOCTLCMD;
- }
-}
-
-static ssize_t
-dasd_eer_read(struct file *filp, char __user *buf, size_t count, loff_t *ppos)
-{
- int tc,rc;
- int tailcount,effective_count;
- unsigned long flags;
- struct eerbuffer *eerb;
-
- eerb = (struct eerbuffer *)filp->private_data;
- if(down_interruptible(&readbuffer_mutex))
- return -ERESTARTSYS;
-
- spin_lock_irqsave(&bufferlock, flags);
-
- if (eerb->residual < 0) { /* the remainder of this record */
- /* has been deleted */
- eerb->residual = 0;
- spin_unlock_irqrestore(&bufferlock, flags);
- up(&readbuffer_mutex);
- return -EIO;
- } else if (eerb->residual > 0) {
- /* OK we still have a second half of a record to deliver */
- effective_count = min(eerb->residual, (int)count);
- eerb->residual -= effective_count;
- } else {
- tc = 0;
- while (!tc) {
- tc = dasd_eer_read_buffer(eerb,
- sizeof(tailcount), (char*)(&tailcount));
- if (!tc) {
- /* no data available */
- spin_unlock_irqrestore(&bufferlock, flags);
- up(&readbuffer_mutex);
- if (filp->f_flags & O_NONBLOCK)
- return -EAGAIN;
- rc = wait_event_interruptible(
- dasd_eer_read_wait_queue,
- eerb->head != eerb->tail);
- if (rc) {
- return rc;
- }
- if(down_interruptible(&readbuffer_mutex))
- return -ERESTARTSYS;
- spin_lock_irqsave(&bufferlock, flags);
- }
- }
- WARN_ON(tc != sizeof(tailcount));
- effective_count = min(tailcount,(int)count);
- eerb->residual = tailcount - effective_count;
- }
-
- tc = dasd_eer_read_buffer(eerb, effective_count, readbuffer);
- WARN_ON(tc != effective_count);
-
- spin_unlock_irqrestore(&bufferlock, flags);
-
- if (copy_to_user(buf, readbuffer, effective_count)) {
- up(&readbuffer_mutex);
- return -EFAULT;
- }
-
- up(&readbuffer_mutex);
- return effective_count;
-}
-
-static unsigned int
-dasd_eer_poll (struct file *filp, poll_table *ptable)
-{
- unsigned int mask;
- unsigned long flags;
- struct eerbuffer *eerb;
-
- eerb = (struct eerbuffer *)filp->private_data;
- poll_wait(filp, &dasd_eer_read_wait_queue, ptable);
- spin_lock_irqsave(&bufferlock, flags);
- if (eerb->head != eerb->tail)
- mask = POLLIN | POLLRDNORM ;
- else
- mask = 0;
- spin_unlock_irqrestore(&bufferlock, flags);
- return mask;
-}
-
-static struct file_operations dasd_eer_fops = {
- .open = &dasd_eer_open,
- .release = &dasd_eer_close,
- .unlocked_ioctl = &dasd_eer_ioctl,
- .compat_ioctl = &dasd_eer_ioctl,
- .read = &dasd_eer_read,
- .poll = &dasd_eer_poll,
- .owner = THIS_MODULE,
-};
-
-static struct miscdevice dasd_eer_dev = {
- .minor = MISC_DYNAMIC_MINOR,
- .name = "dasd_eer",
- .fops = &dasd_eer_fops,
-};
-
-
-/*****************************************************************************/
-/* Init and exit */
-/*****************************************************************************/
-
-static int
-__init dasd_eer_init(void)
-{
- int rc;
-
- dasd_eer_workqueue = create_singlethread_workqueue("dasd_eer");
- if (!dasd_eer_workqueue) {
- MESSAGE(KERN_ERR , "%s", "dasd_eer_init could not "
- "create workqueue \n");
- rc = -ENOMEM;
- goto out;
- }
-
- rc = dasd_register_eer_notifier(&dasd_eer_nb);
- if (rc) {
- MESSAGE(KERN_ERR, "%s", "dasd_eer_init could not "
- "register error reporting");
- goto queue;
- }
-
- dasd_ioctl_no_register(THIS_MODULE, BIODASDEERSET, dasd_ioctl_set_eer);
- dasd_ioctl_no_register(THIS_MODULE, BIODASDEERGET, dasd_ioctl_get_eer);
-
- /* we don't need our own character device,
- * so we just register as misc device */
- rc = misc_register(&dasd_eer_dev);
- if (rc) {
- MESSAGE(KERN_ERR, "%s", "dasd_eer_init could not "
- "register misc device");
- goto unregister;
- }
-
- return 0;
-
-unregister:
- dasd_unregister_eer_notifier(&dasd_eer_nb);
- dasd_ioctl_no_unregister(THIS_MODULE, BIODASDEERSET,
- dasd_ioctl_set_eer);
- dasd_ioctl_no_unregister(THIS_MODULE, BIODASDEERGET,
- dasd_ioctl_get_eer);
-queue:
- destroy_workqueue(dasd_eer_workqueue);
-out:
- return rc;
-
-}
-module_init(dasd_eer_init);
-
-static void
-__exit dasd_eer_exit(void)
-{
- dasd_unregister_eer_notifier(&dasd_eer_nb);
- dasd_ioctl_no_unregister(THIS_MODULE, BIODASDEERSET,
- dasd_ioctl_set_eer);
- dasd_ioctl_no_unregister(THIS_MODULE, BIODASDEERGET,
- dasd_ioctl_get_eer);
- destroy_workqueue(dasd_eer_workqueue);
-
- WARN_ON(misc_deregister(&dasd_eer_dev) != 0);
-}
-module_exit(dasd_eer_exit);
diff --git a/drivers/s390/block/dasd_int.h b/drivers/s390/block/dasd_int.h
index d1b08fa13fd..0592354cc60 100644
--- a/drivers/s390/block/dasd_int.h
+++ b/drivers/s390/block/dasd_int.h
@@ -275,34 +275,6 @@ struct dasd_discipline {
extern struct dasd_discipline *dasd_diag_discipline_pointer;
-
-/*
- * Notification numbers for extended error reporting notifications:
- * The DASD_EER_DISABLE notification is sent before a dasd_device (and its
- * eer pointer) is freed. The error reporting module needs to do all necessary
- * cleanup steps.
- * The DASD_EER_TRIGGER notification sends the actual error reports (triggers).
- */
-#define DASD_EER_DISABLE 0
-#define DASD_EER_TRIGGER 1
-
-/* Trigger IDs for extended error reporting DASD_EER_TRIGGER notification */
-#define DASD_EER_FATALERROR 1
-#define DASD_EER_NOPATH 2
-#define DASD_EER_STATECHANGE 3
-#define DASD_EER_PPRCSUSPEND 4
-
-/*
- * The dasd_eer_trigger structure contains all data that we need to send
- * along with an DASD_EER_TRIGGER notification.
- */
-struct dasd_eer_trigger {
- unsigned int id;
- struct dasd_device *device;
- struct dasd_ccw_req *cqr;
-};
-
-
struct dasd_device {
/* Block device stuff. */
struct gendisk *gdp;
@@ -316,11 +288,9 @@ struct dasd_device {
unsigned long flags; /* per device flags */
unsigned short features; /* copy of devmap-features (read-only!) */
- /* extended error reporting stuff (eer) */
- void *eer;
-
/* Device discipline stuff. */
struct dasd_discipline *discipline;
+ struct dasd_discipline *base_discipline;
char *private;
/* Device state and target state. */
@@ -519,12 +489,6 @@ int dasd_generic_set_online(struct ccw_device *, struct dasd_discipline *);
int dasd_generic_set_offline (struct ccw_device *cdev);
int dasd_generic_notify(struct ccw_device *, int);
void dasd_generic_auto_online (struct ccw_driver *);
-int dasd_register_eer_notifier(struct notifier_block *);
-int dasd_unregister_eer_notifier(struct notifier_block *);
-void dasd_write_eer_trigger(unsigned int , struct dasd_device *,
- struct dasd_ccw_req *);
-
-
/* externals in dasd_devmap.c */
extern int dasd_max_devindex;
diff --git a/drivers/s390/cio/qdio.c b/drivers/s390/cio/qdio.c
index 45ce032772f..9ed37dc9a1b 100644
--- a/drivers/s390/cio/qdio.c
+++ b/drivers/s390/cio/qdio.c
@@ -165,8 +165,13 @@ qdio_do_eqbs(struct qdio_q *q, unsigned char *state,
q_no = q->q_no;
if(!q->is_input_q)
q_no += irq->no_input_qs;
+again:
ccq = do_eqbs(irq->sch_token, state, q_no, start, cnt);
rc = qdio_check_ccq(q, ccq);
+ if (rc == 1) {
+ QDIO_DBF_TEXT5(1,trace,"eqAGAIN");
+ goto again;
+ }
if (rc < 0) {
QDIO_DBF_TEXT2(1,trace,"eqberr");
sprintf(dbf_text,"%2x,%2x,%d,%d",tmp_cnt, *cnt, ccq, q_no);
@@ -195,8 +200,13 @@ qdio_do_sqbs(struct qdio_q *q, unsigned char state,
q_no = q->q_no;
if(!q->is_input_q)
q_no += irq->no_input_qs;
+again:
ccq = do_sqbs(irq->sch_token, state, q_no, start, cnt);
rc = qdio_check_ccq(q, ccq);
+ if (rc == 1) {
+ QDIO_DBF_TEXT5(1,trace,"sqAGAIN");
+ goto again;
+ }
if (rc < 0) {
QDIO_DBF_TEXT3(1,trace,"sqberr");
sprintf(dbf_text,"%2x,%2x,%d,%d",tmp_cnt,*cnt,ccq,q_no);
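Both qdio hunks above add the same pattern: when qdio_check_ccq() reports a transient condition (rc == 1), the buffer-state operation is simply issued again. The same retry loop written with do/while instead of the goto, with the operation stubbed so it succeeds on the second attempt:

#include <stdio.h>

/* Stub operation: returns 1 ("busy, try again") once, then 0 (done). */
static int do_op(void)
{
	static int calls;
	return ++calls == 1 ? 1 : 0;
}

/* Re-issue the operation as long as it reports the transient status,
 * and hand anything else (success or a real error) back to the caller. */
static int op_with_retry(void)
{
	int rc;

	do {
		rc = do_op();
	} while (rc == 1);
	return rc;
}

int main(void)
{
	printf("rc = %d\n", op_with_retry());
	return 0;
}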
@@ -1187,8 +1197,7 @@ tiqdio_is_inbound_q_done(struct qdio_q *q)
if (!no_used)
return 1;
-
- if (!q->siga_sync)
+ if (!q->siga_sync && !irq->is_qebsm)
/* we'll check for more primed buffers in qeth_stop_polling */
return 0;
if (irq->is_qebsm) {
diff --git a/drivers/scsi/esp.c b/drivers/scsi/esp.c
index f6900538be9..87a8c3d2072 100644
--- a/drivers/scsi/esp.c
+++ b/drivers/scsi/esp.c
@@ -2068,14 +2068,12 @@ static int esp_reset(struct scsi_cmnd *SCptr)
{
struct esp *esp = (struct esp *) SCptr->device->host->hostdata;
+ spin_lock_irq(esp->ehost->host_lock);
(void) esp_do_resetbus(esp);
-
spin_unlock_irq(esp->ehost->host_lock);
wait_event(esp->reset_queue, (esp->resetting_bus == 0));
- spin_lock_irq(esp->ehost->host_lock);
-
return SUCCESS;
}
diff --git a/drivers/scsi/libata-core.c b/drivers/scsi/libata-core.c
index 7ddd5a69352..5f1d7580218 100644
--- a/drivers/scsi/libata-core.c
+++ b/drivers/scsi/libata-core.c
@@ -2514,7 +2514,7 @@ static void ata_sg_clean(struct ata_queued_cmd *qc)
assert(sg != NULL);
if (qc->flags & ATA_QCFLAG_SINGLE)
- assert(qc->n_elem == 1);
+ assert(qc->n_elem <= 1);
VPRINTK("unmapping %u sg elements\n", qc->n_elem);
@@ -2537,7 +2537,7 @@ static void ata_sg_clean(struct ata_queued_cmd *qc)
kunmap_atomic(addr, KM_IRQ0);
}
} else {
- if (sg_dma_len(&sg[0]) > 0)
+ if (qc->n_elem)
dma_unmap_single(ap->host_set->dev,
sg_dma_address(&sg[0]), sg_dma_len(&sg[0]),
dir);
@@ -2570,7 +2570,7 @@ static void ata_fill_sg(struct ata_queued_cmd *qc)
unsigned int idx;
assert(qc->__sg != NULL);
- assert(qc->n_elem > 0);
+ assert(qc->n_elem > 0 || qc->pad_len > 0);
idx = 0;
ata_for_each_sg(sg, qc) {
@@ -2715,6 +2715,7 @@ static int ata_sg_setup_one(struct ata_queued_cmd *qc)
int dir = qc->dma_dir;
struct scatterlist *sg = qc->__sg;
dma_addr_t dma_address;
+ int trim_sg = 0;
/* we must lengthen transfers to end on a 32-bit boundary */
qc->pad_len = sg->length & 3;
@@ -2734,13 +2735,15 @@ static int ata_sg_setup_one(struct ata_queued_cmd *qc)
sg_dma_len(psg) = ATA_DMA_PAD_SZ;
/* trim sg */
sg->length -= qc->pad_len;
+ if (sg->length == 0)
+ trim_sg = 1;
DPRINTK("padding done, sg->length=%u pad_len=%u\n",
sg->length, qc->pad_len);
}
- if (!sg->length) {
- sg_dma_address(sg) = 0;
+ if (trim_sg) {
+ qc->n_elem--;
goto skip_map;
}
@@ -2753,9 +2756,9 @@ static int ata_sg_setup_one(struct ata_queued_cmd *qc)
}
sg_dma_address(sg) = dma_address;
-skip_map:
sg_dma_len(sg) = sg->length;
+skip_map:
DPRINTK("mapped buffer of %d bytes for %s\n", sg_dma_len(sg),
qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
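
The ata_sg_setup_one() changes above refine the 32-bit padding logic: the low two bits of the final scatterlist length are carried in a separate pad buffer, the original entry is shortened by that amount, and if the shortened entry becomes empty it is dropped from qc->n_elem and never DMA-mapped, with sg_dma_len() now set only on the path that actually maps the buffer. The arithmetic itself is a split at a 4-byte boundary, as in this sketch (helper name is hypothetical):

/* Sketch of the padding split (not the libata code): a transfer of 'len'
 * bytes is divided into a main part that is a multiple of 4 and a pad part
 * of 0..3 bytes that is handled through a separate pad buffer. */
static unsigned int split_for_padding(unsigned int len,
                                      unsigned int *main_len,
                                      unsigned int *pad_len)
{
        *pad_len  = len & 3;            /* bytes past the last 32-bit word */
        *main_len = len - *pad_len;     /* what stays in the original entry */

        /* returns 1 when the original entry becomes empty and can be
         * dropped from the element count instead of being DMA-mapped */
        return *main_len == 0;
}
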
diff --git a/drivers/scsi/sata_qstor.c b/drivers/scsi/sata_qstor.c
index de05e2883f9..80480f0fb2b 100644
--- a/drivers/scsi/sata_qstor.c
+++ b/drivers/scsi/sata_qstor.c
@@ -277,7 +277,7 @@ static unsigned int qs_fill_sg(struct ata_queued_cmd *qc)
u8 *prd = pp->pkt + QS_CPB_BYTES;
assert(qc->__sg != NULL);
- assert(qc->n_elem > 0);
+ assert(qc->n_elem > 0 || qc->pad_len > 0);
nelem = 0;
ata_for_each_sg(sg, qc) {
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index 791c4dc550a..94f5e8ed83a 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -90,7 +90,7 @@ static int spi_suspend(struct device *dev, pm_message_t message)
int value;
struct spi_driver *drv = to_spi_driver(dev->driver);
- if (!drv->suspend)
+ if (!drv || !drv->suspend)
return 0;
/* suspend will stop irqs and dma; no more i/o */
@@ -105,7 +105,7 @@ static int spi_resume(struct device *dev)
int value;
struct spi_driver *drv = to_spi_driver(dev->driver);
- if (!drv->resume)
+ if (!drv || !drv->resume)
return 0;
/* resume may restart the i/o queue */
@@ -449,7 +449,6 @@ void spi_unregister_master(struct spi_master *master)
{
(void) device_for_each_child(master->cdev.dev, NULL, __unregister);
class_device_unregister(&master->cdev);
- master->cdev.dev = NULL;
}
EXPORT_SYMBOL_GPL(spi_unregister_master);
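
The spi.c hunks above guard the suspend and resume methods against devices that have no driver bound at the time of the power transition; without the extra check, the driver's suspend/resume hook would be looked up through an invalid pointer. A sketch of the same defensive idea for a hypothetical bus, here expressed by testing the raw driver pointer before the container_of conversion:

#include <linux/device.h>

struct my_device {                      /* hypothetical bus-specific types */
        struct device dev;
};

struct my_driver {
        int (*suspend)(struct my_device *, pm_message_t);
        struct device_driver driver;
};

#define to_my_device(d)   container_of(d, struct my_device, dev)
#define to_my_driver(d)   container_of(d, struct my_driver, driver)

static int my_bus_suspend(struct device *dev, pm_message_t message)
{
        struct my_driver *drv;

        if (!dev->driver)               /* no driver bound: nothing to do */
                return 0;

        drv = to_my_driver(dev->driver);
        if (!drv->suspend)              /* driver has no suspend method */
                return 0;

        return drv->suspend(to_my_device(dev), message);
}
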
diff --git a/drivers/video/aty/radeon_pm.c b/drivers/video/aty/radeon_pm.c
index 556895e9964..1f8d805c61e 100644
--- a/drivers/video/aty/radeon_pm.c
+++ b/drivers/video/aty/radeon_pm.c
@@ -1321,8 +1321,6 @@ static void radeon_pm_full_reset_sdram(struct radeonfb_info *rinfo)
mdelay( 15);
}
-#ifdef CONFIG_PPC_OF
-
static void radeon_pm_reset_pad_ctlr_strength(struct radeonfb_info *rinfo)
{
u32 tmp, tmp2;
@@ -1836,6 +1834,8 @@ static void radeon_reinitialize_M10(struct radeonfb_info *rinfo)
radeon_pm_m10_enable_lvds_spread_spectrum(rinfo);
}
+#ifdef CONFIG_PPC_OF
+
static void radeon_pm_m9p_reconfigure_mc(struct radeonfb_info *rinfo)
{
OUTREG(MC_CNTL, rinfo->save_regs[46]);
@@ -2728,13 +2728,23 @@ void radeonfb_pm_init(struct radeonfb_info *rinfo, int dynclk)
printk("radeonfb: Dynamic Clock Power Management disabled\n");
}
+#if defined(CONFIG_PM)
/* Check if we can power manage on suspend/resume. We can do
* D2 on M6, M7 and M9, and we can resume from D3 cold a few other
* "Mac" cards, but that's all. We need more infos about what the
* BIOS does tho. Right now, all this PM stuff is pmac-only for that
* reason. --BenH
*/
-#if defined(CONFIG_PM) && defined(CONFIG_PPC_PMAC)
+ /* Special case for Samsung P35 laptops
+ */
+ if ((rinfo->pdev->vendor == PCI_VENDOR_ID_ATI) &&
+ (rinfo->pdev->device == PCI_CHIP_RV350_NP) &&
+ (rinfo->pdev->subsystem_vendor == PCI_VENDOR_ID_SAMSUNG) &&
+ (rinfo->pdev->subsystem_device == 0xc00c)) {
+ rinfo->reinit_func = radeon_reinitialize_M10;
+ rinfo->pm_mode |= radeon_pm_off;
+ }
+#if defined(CONFIG_PPC_PMAC)
if (_machine == _MACH_Pmac && rinfo->of_node) {
if (rinfo->is_mobility && rinfo->pm_reg &&
rinfo->family <= CHIP_FAMILY_RV250)
@@ -2778,7 +2788,8 @@ void radeonfb_pm_init(struct radeonfb_info *rinfo, int dynclk)
OUTREG(TV_DAC_CNTL, INREG(TV_DAC_CNTL) | 0x07000000);
#endif
}
-#endif /* defined(CONFIG_PM) && defined(CONFIG_PPC_PMAC) */
+#endif /* defined(CONFIG_PPC_PMAC) */
+#endif /* defined(CONFIG_PM) */
}
void radeonfb_pm_exit(struct radeonfb_info *rinfo)