Diffstat (limited to 'drivers')
-rw-r--r--  drivers/acpi/pci_irq.c             |  56
-rw-r--r--  drivers/block/cciss.c              |   5
-rw-r--r--  drivers/dma/dmaengine.c            |   3
-rw-r--r--  drivers/dma/ioat_dma.c             |   5
-rw-r--r--  drivers/dma/iop-adma.c             |  16
-rw-r--r--  drivers/dma/mv_xor.c               |  15
-rw-r--r--  drivers/firmware/dmi_scan.c        |  11
-rw-r--r--  drivers/gpu/drm/i915/i915_dma.c    |  10
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h    |   2
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c    |   9
-rw-r--r--  drivers/md/bitmap.c                |  22
-rw-r--r--  drivers/message/fusion/mptscsih.c  |   3
-rw-r--r--  drivers/misc/sgi-gru/gruprocfs.c   |   1
-rw-r--r--  drivers/misc/sgi-xp/xp.h           |   7
-rw-r--r--  drivers/misc/sgi-xp/xp_main.c      |   7
-rw-r--r--  drivers/misc/sgi-xp/xp_sn2.c       |  34
-rw-r--r--  drivers/misc/sgi-xp/xp_uv.c        |  70
-rw-r--r--  drivers/misc/sgi-xp/xpc.h          |  12
-rw-r--r--  drivers/misc/sgi-xp/xpc_sn2.c      |  15
-rw-r--r--  drivers/misc/sgi-xp/xpc_uv.c       | 290
-rw-r--r--  drivers/net/bnx2.c                 |   6
-rw-r--r--  drivers/net/enc28j60.c             |   2
-rw-r--r--  drivers/net/jme.h                  |   2
-rw-r--r--  drivers/net/phy/mdio_bus.c         |   5
-rw-r--r--  drivers/net/starfire.c             |   5
-rw-r--r--  drivers/net/tlan.c                 |  23
-rw-r--r--  drivers/pci/hotplug/acpiphp.h      |   2
-rw-r--r--  drivers/pci/hotplug/acpiphp_core.c |   2
-rw-r--r--  drivers/pci/hotplug/acpiphp_glue.c |   4
-rw-r--r--  drivers/pci/hotplug/ibmphp_core.c  |   5
-rw-r--r--  drivers/pci/hotplug/pciehp_core.c  |  23
-rw-r--r--  drivers/pci/pcie/aer/aerdrv_core.c |   2
-rw-r--r--  drivers/pci/quirks.c               | 170
-rw-r--r--  drivers/scsi/aacraid/linit.c       |   4
-rw-r--r--  drivers/scsi/ibmvscsi/ibmvstgt.c   |  16
-rw-r--r--  drivers/scsi/libiscsi.c            |  11
-rw-r--r--  drivers/scsi/scsi_lib.c            |   2
-rw-r--r--  drivers/xen/balloon.c              |   4
-rw-r--r--  drivers/xen/features.c             |   6
-rw-r--r--  drivers/xen/grant-table.c          |   1
40 files changed, 728 insertions, 160 deletions
diff --git a/drivers/acpi/pci_irq.c b/drivers/acpi/pci_irq.c
index 11acaee14d6..bf79d83bdfb 100644
--- a/drivers/acpi/pci_irq.c
+++ b/drivers/acpi/pci_irq.c
@@ -384,6 +384,27 @@ acpi_pci_free_irq(struct acpi_prt_entry *entry,
return irq;
}
+#ifdef CONFIG_X86_IO_APIC
+extern int noioapicquirk;
+
+static int bridge_has_boot_interrupt_variant(struct pci_bus *bus)
+{
+ struct pci_bus *bus_it;
+
+ for (bus_it = bus ; bus_it ; bus_it = bus_it->parent) {
+ if (!bus_it->self)
+ return 0;
+
+ printk(KERN_INFO "vendor=%04x device=%04x\n", bus_it->self->vendor,
+ bus_it->self->device);
+
+ if (bus_it->self->irq_reroute_variant)
+ return bus_it->self->irq_reroute_variant;
+ }
+ return 0;
+}
+#endif /* CONFIG_X86_IO_APIC */
+
/*
* acpi_pci_irq_lookup
* success: return IRQ >= 0
@@ -413,6 +434,41 @@ acpi_pci_irq_lookup(struct pci_bus *bus,
}
ret = func(entry, triggering, polarity, link);
+
+#ifdef CONFIG_X86_IO_APIC
+ /*
+ * Some chipsets (e.g. intel 6700PXH) generate a legacy INTx when the
+ * IRQ entry in the chipset's IO-APIC is masked (as, e.g. the RT kernel
+ * does during interrupt handling). When this INTx generation cannot be
+ * disabled, we reroute these interrupts to their legacy equivalent to
+ * get rid of spurious interrupts.
+ */
+ if (!noioapicquirk) {
+ switch (bridge_has_boot_interrupt_variant(bus)) {
+ case 0:
+ /* no rerouting necessary */
+ break;
+
+ case INTEL_IRQ_REROUTE_VARIANT:
+ /*
+ * Remap according to INTx routing table in 6700PXH
+ * specs, intel order number 302628-002, section
+ * 2.15.2. Other chipsets (80332, ...) have the same
+ * mapping and are handled here as well.
+ */
+ printk(KERN_INFO "pci irq %d -> rerouted to legacy "
+ "irq %d\n", ret, (ret % 4) + 16);
+ ret = (ret % 4) + 16;
+ break;
+
+ default:
+ printk(KERN_INFO "not rerouting irq %d to legacy irq: "
+ "unknown mapping\n", ret);
+ break;
+ }
+ }
+#endif /* CONFIG_X86_IO_APIC */
+
return ret;
}
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
index 9364dc55425..9f7c543cc04 100644
--- a/drivers/block/cciss.c
+++ b/drivers/block/cciss.c
@@ -1693,6 +1693,11 @@ static int rebuild_lun_table(ctlr_info_t *h, int first_time)
for (i = 0; i <= h->highest_lun; i++) {
int j;
drv_found = 0;
+
+ /* skip holes in the array from already deleted drives */
+ if (h->drv[i].raid_level == -1)
+ continue;
+
for (j = 0; j < num_luns; j++) {
memcpy(&lunid, &ld_buff->LUN[j][0], 4);
lunid = le32_to_cpu(lunid);
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index 5317e08221e..65799651737 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -388,7 +388,10 @@ int dma_async_device_register(struct dma_device *device)
init_completion(&device->done);
kref_init(&device->refcount);
+
+ mutex_lock(&dma_list_mutex);
device->dev_id = id++;
+ mutex_unlock(&dma_list_mutex);
/* represent channels in sysfs. Probably want devs too */
list_for_each_entry(chan, &device->channels, device_node) {
diff --git a/drivers/dma/ioat_dma.c b/drivers/dma/ioat_dma.c
index ecd743f7cc6..6607fdd00b1 100644
--- a/drivers/dma/ioat_dma.c
+++ b/drivers/dma/ioat_dma.c
@@ -1341,10 +1341,12 @@ static void ioat_dma_start_null_desc(struct ioat_dma_chan *ioat_chan)
*/
#define IOAT_TEST_SIZE 2000
+DECLARE_COMPLETION(test_completion);
static void ioat_dma_test_callback(void *dma_async_param)
{
printk(KERN_ERR "ioatdma: ioat_dma_test_callback(%p)\n",
dma_async_param);
+ complete(&test_completion);
}
/**
@@ -1410,7 +1412,8 @@ static int ioat_dma_self_test(struct ioatdma_device *device)
goto free_resources;
}
device->common.device_issue_pending(dma_chan);
- msleep(1);
+
+ wait_for_completion_timeout(&test_completion, msecs_to_jiffies(3000));
if (device->common.device_is_tx_complete(dma_chan, cookie, NULL, NULL)
!= DMA_SUCCESS) {
diff --git a/drivers/dma/iop-adma.c b/drivers/dma/iop-adma.c
index c7a9306d951..6be31726220 100644
--- a/drivers/dma/iop-adma.c
+++ b/drivers/dma/iop-adma.c
@@ -85,18 +85,28 @@ iop_adma_run_tx_complete_actions(struct iop_adma_desc_slot *desc,
enum dma_ctrl_flags flags = desc->async_tx.flags;
u32 src_cnt;
dma_addr_t addr;
+ dma_addr_t dest;
+ src_cnt = unmap->unmap_src_cnt;
+ dest = iop_desc_get_dest_addr(unmap, iop_chan);
if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
- addr = iop_desc_get_dest_addr(unmap, iop_chan);
- dma_unmap_page(dev, addr, len, DMA_FROM_DEVICE);
+ enum dma_data_direction dir;
+
+ if (src_cnt > 1) /* is xor? */
+ dir = DMA_BIDIRECTIONAL;
+ else
+ dir = DMA_FROM_DEVICE;
+
+ dma_unmap_page(dev, dest, len, dir);
}
if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
- src_cnt = unmap->unmap_src_cnt;
while (src_cnt--) {
addr = iop_desc_get_src_addr(unmap,
iop_chan,
src_cnt);
+ if (addr == dest)
+ continue;
dma_unmap_page(dev, addr, len,
DMA_TO_DEVICE);
}
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c
index 0328da020a1..bcda1742641 100644
--- a/drivers/dma/mv_xor.c
+++ b/drivers/dma/mv_xor.c
@@ -311,17 +311,26 @@ mv_xor_run_tx_complete_actions(struct mv_xor_desc_slot *desc,
enum dma_ctrl_flags flags = desc->async_tx.flags;
u32 src_cnt;
dma_addr_t addr;
+ dma_addr_t dest;
+ src_cnt = unmap->unmap_src_cnt;
+ dest = mv_desc_get_dest_addr(unmap);
if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
- addr = mv_desc_get_dest_addr(unmap);
- dma_unmap_page(dev, addr, len, DMA_FROM_DEVICE);
+ enum dma_data_direction dir;
+
+ if (src_cnt > 1) /* is xor ? */
+ dir = DMA_BIDIRECTIONAL;
+ else
+ dir = DMA_FROM_DEVICE;
+ dma_unmap_page(dev, dest, len, dir);
}
if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
- src_cnt = unmap->unmap_src_cnt;
while (src_cnt--) {
addr = mv_desc_get_src_addr(unmap,
src_cnt);
+ if (addr == dest)
+ continue;
dma_unmap_page(dev, addr, len,
DMA_TO_DEVICE);
}
diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
index 8daf4793ac3..4a597d8c2f7 100644
--- a/drivers/firmware/dmi_scan.c
+++ b/drivers/firmware/dmi_scan.c
@@ -467,6 +467,17 @@ const char *dmi_get_system_info(int field)
}
EXPORT_SYMBOL(dmi_get_system_info);
+/**
+ * dmi_name_in_serial - Check if string is in the DMI product serial
+ * information.
+ */
+int dmi_name_in_serial(const char *str)
+{
+ int f = DMI_PRODUCT_SERIAL;
+ if (dmi_ident[f] && strstr(dmi_ident[f], str))
+ return 1;
+ return 0;
+}
/**
* dmi_name_in_vendors - Check if string is anywhere in the DMI vendor information.
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 553dd4bc307..afa8a12cd00 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -717,7 +717,7 @@ static int i915_getparam(struct drm_device *dev, void *data,
value = dev->pci_device;
break;
case I915_PARAM_HAS_GEM:
- value = 1;
+ value = dev_priv->has_gem;
break;
default:
DRM_ERROR("Unknown parameter %d\n", param->param);
@@ -830,6 +830,14 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
dev_priv->regs = ioremap(base, size);
+#ifdef CONFIG_HIGHMEM64G
+ /* don't enable GEM on PAE - needs agp + set_memory_* interface fixes */
+ dev_priv->has_gem = 0;
+#else
+ /* enable GEM by default */
+ dev_priv->has_gem = 1;
+#endif
+
i915_gem_load(dev);
/* Init HWS */
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index adc972cc6bf..b3cc4731aa7 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -106,6 +106,8 @@ struct intel_opregion {
typedef struct drm_i915_private {
struct drm_device *dev;
+ int has_gem;
+
void __iomem *regs;
drm_local_map_t *sarea;
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index ad672d85482..24fe8c10b4b 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2309,7 +2309,14 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
}
obj_priv = obj->driver_private;
- args->busy = obj_priv->active;
+ /* Don't count being on the flushing list against the object being
+ * done. Otherwise, a buffer left on the flushing list but not getting
+ * flushed (because nobody's flushing that domain) won't ever return
+ * unbusy and get reused by libdrm's bo cache. The other expected
+ * consumer of this interface, OpenGL's occlusion queries, also specs
+ * that the objects get unbusy "eventually" without any interference.
+ */
+ args->busy = obj_priv->active && obj_priv->last_rendering_seqno != 0;
drm_gem_object_unreference(obj);
mutex_unlock(&dev->struct_mutex);
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
index ac89a5deaca..ab7c8e4a61f 100644
--- a/drivers/md/bitmap.c
+++ b/drivers/md/bitmap.c
@@ -208,16 +208,19 @@ static void bitmap_checkfree(struct bitmap *bitmap, unsigned long page)
*/
/* IO operations when bitmap is stored near all superblocks */
-static struct page *read_sb_page(mddev_t *mddev, long offset, unsigned long index)
+static struct page *read_sb_page(mddev_t *mddev, long offset,
+ struct page *page,
+ unsigned long index, int size)
{
/* choose a good rdev and read the page from there */
mdk_rdev_t *rdev;
struct list_head *tmp;
- struct page *page = alloc_page(GFP_KERNEL);
sector_t target;
if (!page)
+ page = alloc_page(GFP_KERNEL);
+ if (!page)
return ERR_PTR(-ENOMEM);
rdev_for_each(rdev, tmp, mddev) {
@@ -227,7 +230,9 @@ static struct page *read_sb_page(mddev_t *mddev, long offset, unsigned long inde
target = rdev->sb_start + offset + index * (PAGE_SIZE/512);
- if (sync_page_io(rdev->bdev, target, PAGE_SIZE, page, READ)) {
+ if (sync_page_io(rdev->bdev, target,
+ roundup(size, bdev_hardsect_size(rdev->bdev)),
+ page, READ)) {
page->index = index;
attach_page_buffers(page, NULL); /* so that free_buffer will
* quietly no-op */
@@ -544,7 +549,9 @@ static int bitmap_read_sb(struct bitmap *bitmap)
bitmap->sb_page = read_page(bitmap->file, 0, bitmap, bytes);
} else {
- bitmap->sb_page = read_sb_page(bitmap->mddev, bitmap->offset, 0);
+ bitmap->sb_page = read_sb_page(bitmap->mddev, bitmap->offset,
+ NULL,
+ 0, sizeof(bitmap_super_t));
}
if (IS_ERR(bitmap->sb_page)) {
err = PTR_ERR(bitmap->sb_page);
@@ -957,11 +964,16 @@ static int bitmap_init_from_disk(struct bitmap *bitmap, sector_t start)
*/
page = bitmap->sb_page;
offset = sizeof(bitmap_super_t);
+ read_sb_page(bitmap->mddev, bitmap->offset,
+ page,
+ index, count);
} else if (file) {
page = read_page(file, index, bitmap, count);
offset = 0;
} else {
- page = read_sb_page(bitmap->mddev, bitmap->offset, index);
+ page = read_sb_page(bitmap->mddev, bitmap->offset,
+ NULL,
+ index, count);
offset = 0;
}
if (IS_ERR(page)) { /* read error */
diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c
index d62fd4f6b52..ee090413e59 100644
--- a/drivers/message/fusion/mptscsih.c
+++ b/drivers/message/fusion/mptscsih.c
@@ -2008,6 +2008,9 @@ mptscsih_host_reset(struct scsi_cmnd *SCpnt)
return FAILED;
}
+ /* make sure we have no outstanding commands at this stage */
+ mptscsih_flush_running_cmds(hd);
+
ioc = hd->ioc;
printk(MYIOC_s_INFO_FMT "attempting host reset! (sc=%p)\n",
ioc->name, SCpnt);
diff --git a/drivers/misc/sgi-gru/gruprocfs.c b/drivers/misc/sgi-gru/gruprocfs.c
index 533923f83f1..73b0ca061bb 100644
--- a/drivers/misc/sgi-gru/gruprocfs.c
+++ b/drivers/misc/sgi-gru/gruprocfs.c
@@ -317,7 +317,6 @@ int gru_proc_init(void)
{
struct proc_entry *p;
- proc_mkdir("sgi_uv", NULL);
proc_gru = proc_mkdir("sgi_uv/gru", NULL);
for (p = proc_files; p->name; p++)
diff --git a/drivers/misc/sgi-xp/xp.h b/drivers/misc/sgi-xp/xp.h
index ed1722e5004..7b4cbd5e03e 100644
--- a/drivers/misc/sgi-xp/xp.h
+++ b/drivers/misc/sgi-xp/xp.h
@@ -194,9 +194,10 @@ enum xp_retval {
xpGruSendMqError, /* 59: gru send message queue related error */
xpBadChannelNumber, /* 60: invalid channel number */
- xpBadMsgType, /* 60: invalid message type */
+ xpBadMsgType, /* 61: invalid message type */
+ xpBiosError, /* 62: BIOS error */
- xpUnknownReason /* 61: unknown reason - must be last in enum */
+ xpUnknownReason /* 63: unknown reason - must be last in enum */
};
/*
@@ -345,6 +346,8 @@ extern unsigned long (*xp_pa) (void *);
extern enum xp_retval (*xp_remote_memcpy) (unsigned long, const unsigned long,
size_t);
extern int (*xp_cpu_to_nasid) (int);
+extern enum xp_retval (*xp_expand_memprotect) (unsigned long, unsigned long);
+extern enum xp_retval (*xp_restrict_memprotect) (unsigned long, unsigned long);
extern u64 xp_nofault_PIOR_target;
extern int xp_nofault_PIOR(void *);
diff --git a/drivers/misc/sgi-xp/xp_main.c b/drivers/misc/sgi-xp/xp_main.c
index 66a1d19e08a..9a2e77172d9 100644
--- a/drivers/misc/sgi-xp/xp_main.c
+++ b/drivers/misc/sgi-xp/xp_main.c
@@ -51,6 +51,13 @@ EXPORT_SYMBOL_GPL(xp_remote_memcpy);
int (*xp_cpu_to_nasid) (int cpuid);
EXPORT_SYMBOL_GPL(xp_cpu_to_nasid);
+enum xp_retval (*xp_expand_memprotect) (unsigned long phys_addr,
+ unsigned long size);
+EXPORT_SYMBOL_GPL(xp_expand_memprotect);
+enum xp_retval (*xp_restrict_memprotect) (unsigned long phys_addr,
+ unsigned long size);
+EXPORT_SYMBOL_GPL(xp_restrict_memprotect);
+
/*
* xpc_registrations[] keeps track of xpc_connect()'s done by the kernel-level
* users of XPC.
diff --git a/drivers/misc/sgi-xp/xp_sn2.c b/drivers/misc/sgi-xp/xp_sn2.c
index 1440134caf3..fb3ec9d735a 100644
--- a/drivers/misc/sgi-xp/xp_sn2.c
+++ b/drivers/misc/sgi-xp/xp_sn2.c
@@ -120,6 +120,38 @@ xp_cpu_to_nasid_sn2(int cpuid)
return cpuid_to_nasid(cpuid);
}
+static enum xp_retval
+xp_expand_memprotect_sn2(unsigned long phys_addr, unsigned long size)
+{
+ u64 nasid_array = 0;
+ int ret;
+
+ ret = sn_change_memprotect(phys_addr, size, SN_MEMPROT_ACCESS_CLASS_1,
+ &nasid_array);
+ if (ret != 0) {
+ dev_err(xp, "sn_change_memprotect(,, "
+ "SN_MEMPROT_ACCESS_CLASS_1,) failed ret=%d\n", ret);
+ return xpSalError;
+ }
+ return xpSuccess;
+}
+
+static enum xp_retval
+xp_restrict_memprotect_sn2(unsigned long phys_addr, unsigned long size)
+{
+ u64 nasid_array = 0;
+ int ret;
+
+ ret = sn_change_memprotect(phys_addr, size, SN_MEMPROT_ACCESS_CLASS_0,
+ &nasid_array);
+ if (ret != 0) {
+ dev_err(xp, "sn_change_memprotect(,, "
+ "SN_MEMPROT_ACCESS_CLASS_0,) failed ret=%d\n", ret);
+ return xpSalError;
+ }
+ return xpSuccess;
+}
+
enum xp_retval
xp_init_sn2(void)
{
@@ -132,6 +164,8 @@ xp_init_sn2(void)
xp_pa = xp_pa_sn2;
xp_remote_memcpy = xp_remote_memcpy_sn2;
xp_cpu_to_nasid = xp_cpu_to_nasid_sn2;
+ xp_expand_memprotect = xp_expand_memprotect_sn2;
+ xp_restrict_memprotect = xp_restrict_memprotect_sn2;
return xp_register_nofault_code_sn2();
}
diff --git a/drivers/misc/sgi-xp/xp_uv.c b/drivers/misc/sgi-xp/xp_uv.c
index d9f7ce2510b..d238576b26f 100644
--- a/drivers/misc/sgi-xp/xp_uv.c
+++ b/drivers/misc/sgi-xp/xp_uv.c
@@ -15,6 +15,11 @@
#include <linux/device.h>
#include <asm/uv/uv_hub.h>
+#if defined CONFIG_X86_64
+#include <asm/uv/bios.h>
+#elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
+#include <asm/sn/sn_sal.h>
+#endif
#include "../sgi-gru/grukservices.h"
#include "xp.h"
@@ -49,18 +54,79 @@ xp_cpu_to_nasid_uv(int cpuid)
return UV_PNODE_TO_NASID(uv_cpu_to_pnode(cpuid));
}
+static enum xp_retval
+xp_expand_memprotect_uv(unsigned long phys_addr, unsigned long size)
+{
+ int ret;
+
+#if defined CONFIG_X86_64
+ ret = uv_bios_change_memprotect(phys_addr, size, UV_MEMPROT_ALLOW_RW);
+ if (ret != BIOS_STATUS_SUCCESS) {
+ dev_err(xp, "uv_bios_change_memprotect(,, "
+ "UV_MEMPROT_ALLOW_RW) failed, ret=%d\n", ret);
+ return xpBiosError;
+ }
+
+#elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
+ u64 nasid_array;
+
+ ret = sn_change_memprotect(phys_addr, size, SN_MEMPROT_ACCESS_CLASS_1,
+ &nasid_array);
+ if (ret != 0) {
+ dev_err(xp, "sn_change_memprotect(,, "
+ "SN_MEMPROT_ACCESS_CLASS_1,) failed ret=%d\n", ret);
+ return xpSalError;
+ }
+#else
+ #error not a supported configuration
+#endif
+ return xpSuccess;
+}
+
+static enum xp_retval
+xp_restrict_memprotect_uv(unsigned long phys_addr, unsigned long size)
+{
+ int ret;
+
+#if defined CONFIG_X86_64
+ ret = uv_bios_change_memprotect(phys_addr, size,
+ UV_MEMPROT_RESTRICT_ACCESS);
+ if (ret != BIOS_STATUS_SUCCESS) {
+ dev_err(xp, "uv_bios_change_memprotect(,, "
+ "UV_MEMPROT_RESTRICT_ACCESS) failed, ret=%d\n", ret);
+ return xpBiosError;
+ }
+
+#elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
+ u64 nasid_array;
+
+ ret = sn_change_memprotect(phys_addr, size, SN_MEMPROT_ACCESS_CLASS_0,
+ &nasid_array);
+ if (ret != 0) {
+ dev_err(xp, "sn_change_memprotect(,, "
+ "SN_MEMPROT_ACCESS_CLASS_0,) failed ret=%d\n", ret);
+ return xpSalError;
+ }
+#else
+ #error not a supported configuration
+#endif
+ return xpSuccess;
+}
+
enum xp_retval
xp_init_uv(void)
{
BUG_ON(!is_uv());
xp_max_npartitions = XP_MAX_NPARTITIONS_UV;
- xp_partition_id = 0; /* !!! not correct value */
- xp_region_size = 0; /* !!! not correct value */
+ xp_partition_id = sn_partition_id;
+ xp_region_size = sn_region_size;
xp_pa = xp_pa_uv;
xp_remote_memcpy = xp_remote_memcpy_uv;
xp_cpu_to_nasid = xp_cpu_to_nasid_uv;
+ xp_expand_memprotect = xp_expand_memprotect_uv;
+ xp_restrict_memprotect = xp_restrict_memprotect_uv;
return xpSuccess;
}
diff --git a/drivers/misc/sgi-xp/xpc.h b/drivers/misc/sgi-xp/xpc.h
index 619208d6186..a5bd658c2e8 100644
--- a/drivers/misc/sgi-xp/xpc.h
+++ b/drivers/misc/sgi-xp/xpc.h
@@ -181,6 +181,18 @@ struct xpc_vars_part_sn2 {
xpc_nasid_mask_nlongs))
/*
+ * Info pertinent to a GRU message queue using a watch list for irq generation.
+ */
+struct xpc_gru_mq_uv {
+ void *address; /* address of GRU message queue */
+ unsigned int order; /* size of GRU message queue as a power of 2 */
+ int irq; /* irq raised when message is received in mq */
+ int mmr_blade; /* blade where watchlist was allocated from */
+ unsigned long mmr_offset; /* offset of irq mmr located on mmr_blade */
+ int watchlist_num; /* number of watchlist allocated by BIOS */
+};
+
+/*
* The activate_mq is used to send/receive GRU messages that affect XPC's
* heartbeat, partition active state, and channel state. This is UV only.
*/
diff --git a/drivers/misc/sgi-xp/xpc_sn2.c b/drivers/misc/sgi-xp/xpc_sn2.c
index b4882ccf634..73b7fb8de47 100644
--- a/drivers/misc/sgi-xp/xpc_sn2.c
+++ b/drivers/misc/sgi-xp/xpc_sn2.c
@@ -553,22 +553,17 @@ static u64 xpc_prot_vec_sn2[MAX_NUMNODES];
static enum xp_retval
xpc_allow_amo_ops_sn2(struct amo *amos_page)
{
- u64 nasid_array = 0;
- int ret;
+ enum xp_retval ret = xpSuccess;
/*
* On SHUB 1.1, we cannot call sn_change_memprotect() since the BIST
* collides with memory operations. On those systems we call
* xpc_allow_amo_ops_shub_wars_1_1_sn2() instead.
*/
- if (!enable_shub_wars_1_1()) {
- ret = sn_change_memprotect(ia64_tpa((u64)amos_page), PAGE_SIZE,
- SN_MEMPROT_ACCESS_CLASS_1,
- &nasid_array);
- if (ret != 0)
- return xpSalError;
- }
- return xpSuccess;
+ if (!enable_shub_wars_1_1())
+ ret = xp_expand_memprotect(ia64_tpa((u64)amos_page), PAGE_SIZE);
+
+ return ret;
}
/*
diff --git a/drivers/misc/sgi-xp/xpc_uv.c b/drivers/misc/sgi-xp/xpc_uv.c
index 1ac694c0162..91a55b1b103 100644
--- a/drivers/misc/sgi-xp/xpc_uv.c
+++ b/drivers/misc/sgi-xp/xpc_uv.c
@@ -18,7 +18,15 @@
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/device.h>
+#include <linux/err.h>
#include <asm/uv/uv_hub.h>
+#if defined CONFIG_X86_64
+#include <asm/uv/bios.h>
+#include <asm/uv/uv_irq.h>
+#elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
+#include <asm/sn/intr.h>
+#include <asm/sn/sn_sal.h>
+#endif
#include "../sgi-gru/gru.h"
#include "../sgi-gru/grukservices.h"
#include "xpc.h"
@@ -27,15 +35,17 @@ static atomic64_t xpc_heartbeat_uv;
static DECLARE_BITMAP(xpc_heartbeating_to_mask_uv, XP_MAX_NPARTITIONS_UV);
#define XPC_ACTIVATE_MSG_SIZE_UV (1 * GRU_CACHE_LINE_BYTES)
-#define XPC_NOTIFY_MSG_SIZE_UV (2 * GRU_CACHE_LINE_BYTES)
+#define XPC_ACTIVATE_MQ_SIZE_UV (4 * XP_MAX_NPARTITIONS_UV * \
+ XPC_ACTIVATE_MSG_SIZE_UV)
+#define XPC_ACTIVATE_IRQ_NAME "xpc_activate"
-#define XPC_ACTIVATE_MQ_SIZE_UV (4 * XP_MAX_NPARTITIONS_UV * \
- XPC_ACTIVATE_MSG_SIZE_UV)
-#define XPC_NOTIFY_MQ_SIZE_UV (4 * XP_MAX_NPARTITIONS_UV * \
- XPC_NOTIFY_MSG_SIZE_UV)
+#define XPC_NOTIFY_MSG_SIZE_UV (2 * GRU_CACHE_LINE_BYTES)
+#define XPC_NOTIFY_MQ_SIZE_UV (4 * XP_MAX_NPARTITIONS_UV * \
+ XPC_NOTIFY_MSG_SIZE_UV)
+#define XPC_NOTIFY_IRQ_NAME "xpc_notify"
-static void *xpc_activate_mq_uv;
-static void *xpc_notify_mq_uv;
+static struct xpc_gru_mq_uv *xpc_activate_mq_uv;
+static struct xpc_gru_mq_uv *xpc_notify_mq_uv;
static int
xpc_setup_partitions_sn_uv(void)
@@ -52,62 +62,209 @@ xpc_setup_partitions_sn_uv(void)
return 0;
}
-static void *
-xpc_create_gru_mq_uv(unsigned int mq_size, int cpuid, unsigned int irq,
+static int
+xpc_get_gru_mq_irq_uv(struct xpc_gru_mq_uv *mq, int cpu, char *irq_name)
+{
+#if defined CONFIG_X86_64
+ mq->irq = uv_setup_irq(irq_name, cpu, mq->mmr_blade, mq->mmr_offset);
+ if (mq->irq < 0) {
+ dev_err(xpc_part, "uv_setup_irq() returned error=%d\n",
+ mq->irq);
+ }
+
+#elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
+ int mmr_pnode;
+ unsigned long mmr_value;
+
+ if (strcmp(irq_name, XPC_ACTIVATE_IRQ_NAME) == 0)
+ mq->irq = SGI_XPC_ACTIVATE;
+ else if (strcmp(irq_name, XPC_NOTIFY_IRQ_NAME) == 0)
+ mq->irq = SGI_XPC_NOTIFY;
+ else
+ return -EINVAL;
+
+ mmr_pnode = uv_blade_to_pnode(mq->mmr_blade);
+ mmr_value = (unsigned long)cpu_physical_id(cpu) << 32 | mq->irq;
+
+ uv_write_global_mmr64(mmr_pnode, mq->mmr_offset, mmr_value);
+#else
+ #error not a supported configuration
+#endif
+
+ return 0;
+}
+
+static void
+xpc_release_gru_mq_irq_uv(struct xpc_gru_mq_uv *mq)
+{
+#if defined CONFIG_X86_64
+ uv_teardown_irq(mq->irq, mq->mmr_blade, mq->mmr_offset);
+
+#elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
+ int mmr_pnode;
+ unsigned long mmr_value;
+
+ mmr_pnode = uv_blade_to_pnode(mq->mmr_blade);
+ mmr_value = 1UL << 16;
+
+ uv_write_global_mmr64(mmr_pnode, mq->mmr_offset, mmr_value);
+#else
+ #error not a supported configuration
+#endif
+}
+
+static int
+xpc_gru_mq_watchlist_alloc_uv(struct xpc_gru_mq_uv *mq)
+{
+ int ret;
+
+#if defined CONFIG_X86_64
+ ret = uv_bios_mq_watchlist_alloc(mq->mmr_blade, uv_gpa(mq->address),
+ mq->order, &mq->mmr_offset);
+ if (ret < 0) {
+ dev_err(xpc_part, "uv_bios_mq_watchlist_alloc() failed, "
+ "ret=%d\n", ret);
+ return ret;
+ }
+#elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
+ ret = sn_mq_watchlist_alloc(mq->mmr_blade, uv_gpa(mq->address),
+ mq->order, &mq->mmr_offset);
+ if (ret < 0) {
+ dev_err(xpc_part, "sn_mq_watchlist_alloc() failed, ret=%d\n",
+ ret);
+ return -EBUSY;
+ }
+#else
+ #error not a supported configuration
+#endif
+
+ mq->watchlist_num = ret;
+ return 0;
+}
+
+static void
+xpc_gru_mq_watchlist_free_uv(struct xpc_gru_mq_uv *mq)
+{
+ int ret;
+
+#if defined CONFIG_X86_64
+ ret = uv_bios_mq_watchlist_free(mq->mmr_blade, mq->watchlist_num);
+ BUG_ON(ret != BIOS_STATUS_SUCCESS);
+#elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
+ ret = sn_mq_watchlist_free(mq->mmr_blade, mq->watchlist_num);
+ BUG_ON(ret != SALRET_OK);
+#else
+ #error not a supported configuration
+#endif
+}
+
+static struct xpc_gru_mq_uv *
+xpc_create_gru_mq_uv(unsigned int mq_size, int cpu, char *irq_name,
irq_handler_t irq_handler)
{
+ enum xp_retval xp_ret;
int ret;
int nid;
- int mq_order;
+ int pg_order;
struct page *page;
- void *mq;
+ struct xpc_gru_mq_uv *mq;
+
+ mq = kmalloc(sizeof(struct xpc_gru_mq_uv), GFP_KERNEL);
+ if (mq == NULL) {
+ dev_err(xpc_part, "xpc_create_gru_mq_uv() failed to kmalloc() "
+ "a xpc_gru_mq_uv structure\n");
+ ret = -ENOMEM;
+ goto out_1;
+ }
+
+ pg_order = get_order(mq_size);
+ mq->order = pg_order + PAGE_SHIFT;
+ mq_size = 1UL << mq->order;
+
+ mq->mmr_blade = uv_cpu_to_blade_id(cpu);
- nid = cpu_to_node(cpuid);
- mq_order = get_order(mq_size);
+ nid = cpu_to_node(cpu);
page = alloc_pages_node(nid, GFP_KERNEL | __GFP_ZERO | GFP_THISNODE,
- mq_order);
+ pg_order);
if (page == NULL) {
dev_err(xpc_part, "xpc_create_gru_mq_uv() failed to alloc %d "
"bytes of memory on nid=%d for GRU mq\n", mq_size, nid);
- return NULL;
+ ret = -ENOMEM;
+ goto out_2;
}
+ mq->address = page_address(page);
- mq = page_address(page);
- ret = gru_create_message_queue(mq, mq_size);
+ ret = gru_create_message_queue(mq->address, mq_size);
if (ret != 0) {
dev_err(xpc_part, "gru_create_message_queue() returned "
"error=%d\n", ret);
- free_pages((unsigned long)mq, mq_order);
- return NULL;
+ ret = -EINVAL;
+ goto out_3;
}
- /* !!! Need to do some other things to set up IRQ */
+ /* enable generation of irq when GRU mq operation occurs to this mq */
+ ret = xpc_gru_mq_watchlist_alloc_uv(mq);
+ if (ret != 0)
+ goto out_3;
- ret = request_irq(irq, irq_handler, 0, "xpc", NULL);
+ ret = xpc_get_gru_mq_irq_uv(mq, cpu, irq_name);
+ if (ret != 0)
+ goto out_4;
+
+ ret = request_irq(mq->irq, irq_handler, 0, irq_name, NULL);
if (ret != 0) {
dev_err(xpc_part, "request_irq(irq=%d) returned error=%d\n",
- irq, ret);
- free_pages((unsigned long)mq, mq_order);
- return NULL;
+ mq->irq, ret);
+ goto out_5;
}
- /* !!! enable generation of irq when GRU mq op occurs to this mq */
-
- /* ??? allow other partitions to access GRU mq? */
+ /* allow other partitions to access this GRU mq */
+ xp_ret = xp_expand_memprotect(xp_pa(mq->address), mq_size);
+ if (xp_ret != xpSuccess) {
+ ret = -EACCES;
+ goto out_6;
+ }
return mq;
+
+ /* something went wrong */
+out_6:
+ free_irq(mq->irq, NULL);
+out_5:
+ xpc_release_gru_mq_irq_uv(mq);
+out_4:
+ xpc_gru_mq_watchlist_free_uv(mq);
+out_3:
+ free_pages((unsigned long)mq->address, pg_order);
+out_2:
+ kfree(mq);
+out_1:
+ return ERR_PTR(ret);
}
static void
-xpc_destroy_gru_mq_uv(void *mq, unsigned int mq_size, unsigned int irq)
+xpc_destroy_gru_mq_uv(struct xpc_gru_mq_uv *mq)
{
- /* ??? disallow other partitions to access GRU mq? */
+ unsigned int mq_size;
+ int pg_order;
+ int ret;
+
+ /* disallow other partitions to access GRU mq */
+ mq_size = 1UL << mq->order;
+ ret = xp_restrict_memprotect(xp_pa(mq->address), mq_size);
+ BUG_ON(ret != xpSuccess);
- /* !!! disable generation of irq when GRU mq op occurs to this mq */
+ /* unregister irq handler and release mq irq/vector mapping */
+ free_irq(mq->irq, NULL);
+ xpc_release_gru_mq_irq_uv(mq);
- free_irq(irq, NULL);
+ /* disable generation of irq when GRU mq op occurs to this mq */
+ xpc_gru_mq_watchlist_free_uv(mq);
- free_pages((unsigned long)mq, get_order(mq_size));
+ pg_order = mq->order - PAGE_SHIFT;
+ free_pages((unsigned long)mq->address, pg_order);
+
+ kfree(mq);
}
static enum xp_retval
@@ -402,7 +559,10 @@ xpc_handle_activate_IRQ_uv(int irq, void *dev_id)
struct xpc_partition *part;
int wakeup_hb_checker = 0;
- while ((msg_hdr = gru_get_next_message(xpc_activate_mq_uv)) != NULL) {
+ while (1) {
+ msg_hdr = gru_get_next_message(xpc_activate_mq_uv->address);
+ if (msg_hdr == NULL)
+ break;
partid = msg_hdr->partid;
if (partid < 0 || partid >= XP_MAX_NPARTITIONS_UV) {
@@ -418,7 +578,7 @@ xpc_handle_activate_IRQ_uv(int irq, void *dev_id)
}
}
- gru_free_message(xpc_activate_mq_uv, msg_hdr);
+ gru_free_message(xpc_activate_mq_uv->address, msg_hdr);
}
if (wakeup_hb_checker)
@@ -482,7 +642,7 @@ xpc_send_local_activate_IRQ_uv(struct xpc_partition *part, int act_state_req)
struct xpc_partition_uv *part_uv = &part->sn.uv;
/*
- * !!! Make our side think that the remote parition sent an activate
+ * !!! Make our side think that the remote partition sent an activate
* !!! message our way by doing what the activate IRQ handler would
* !!! do had one really been sent.
*/
@@ -500,14 +660,39 @@ static enum xp_retval
xpc_get_partition_rsvd_page_pa_uv(void *buf, u64 *cookie, unsigned long *rp_pa,
size_t *len)
{
- /* !!! call the UV version of sn_partition_reserved_page_pa() */
- return xpUnsupported;
+ s64 status;
+ enum xp_retval ret;
+
+#if defined CONFIG_X86_64
+ status = uv_bios_reserved_page_pa((u64)buf, cookie, (u64 *)rp_pa,
+ (u64 *)len);
+ if (status == BIOS_STATUS_SUCCESS)
+ ret = xpSuccess;
+ else if (status == BIOS_STATUS_MORE_PASSES)
+ ret = xpNeedMoreInfo;
+ else
+ ret = xpBiosError;
+
+#elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
+ status = sn_partition_reserved_page_pa((u64)buf, cookie, rp_pa, len);
+ if (status == SALRET_OK)
+ ret = xpSuccess;
+ else if (status == SALRET_MORE_PASSES)
+ ret = xpNeedMoreInfo;
+ else
+ ret = xpSalError;
+
+#else
+ #error not a supported configuration
+#endif
+
+ return ret;
}
static int
xpc_setup_rsvd_page_sn_uv(struct xpc_rsvd_page *rp)
{
- rp->sn.activate_mq_gpa = uv_gpa(xpc_activate_mq_uv);
+ rp->sn.activate_mq_gpa = uv_gpa(xpc_activate_mq_uv->address);
return 0;
}
@@ -1411,22 +1596,18 @@ xpc_init_uv(void)
return -E2BIG;
}
- /* ??? The cpuid argument's value is 0, is that what we want? */
- /* !!! The irq argument's value isn't correct. */
- xpc_activate_mq_uv = xpc_create_gru_mq_uv(XPC_ACTIVATE_MQ_SIZE_UV, 0, 0,
+ xpc_activate_mq_uv = xpc_create_gru_mq_uv(XPC_ACTIVATE_MQ_SIZE_UV, 0,
+ XPC_ACTIVATE_IRQ_NAME,
xpc_handle_activate_IRQ_uv);
- if (xpc_activate_mq_uv == NULL)
- return -ENOMEM;
+ if (IS_ERR(xpc_activate_mq_uv))
+ return PTR_ERR(xpc_activate_mq_uv);
- /* ??? The cpuid argument's value is 0, is that what we want? */
- /* !!! The irq argument's value isn't correct. */
- xpc_notify_mq_uv = xpc_create_gru_mq_uv(XPC_NOTIFY_MQ_SIZE_UV, 0, 0,
+ xpc_notify_mq_uv = xpc_create_gru_mq_uv(XPC_NOTIFY_MQ_SIZE_UV, 0,
+ XPC_NOTIFY_IRQ_NAME,
xpc_handle_notify_IRQ_uv);
- if (xpc_notify_mq_uv == NULL) {
- /* !!! The irq argument's value isn't correct. */
- xpc_destroy_gru_mq_uv(xpc_activate_mq_uv,
- XPC_ACTIVATE_MQ_SIZE_UV, 0);
- return -ENOMEM;
+ if (IS_ERR(xpc_notify_mq_uv)) {
+ xpc_destroy_gru_mq_uv(xpc_activate_mq_uv);
+ return PTR_ERR(xpc_notify_mq_uv);
}
return 0;
@@ -1435,9 +1616,6 @@ xpc_init_uv(void)
void
xpc_exit_uv(void)
{
- /* !!! The irq argument's value isn't correct. */
- xpc_destroy_gru_mq_uv(xpc_notify_mq_uv, XPC_NOTIFY_MQ_SIZE_UV, 0);
-
- /* !!! The irq argument's value isn't correct. */
- xpc_destroy_gru_mq_uv(xpc_activate_mq_uv, XPC_ACTIVATE_MQ_SIZE_UV, 0);
+ xpc_destroy_gru_mq_uv(xpc_notify_mq_uv);
+ xpc_destroy_gru_mq_uv(xpc_activate_mq_uv);
}
diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
index a1a3d0e5d2b..9e8222f9e90 100644
--- a/drivers/net/bnx2.c
+++ b/drivers/net/bnx2.c
@@ -543,9 +543,9 @@ bnx2_free_rx_mem(struct bnx2 *bp)
for (j = 0; j < bp->rx_max_pg_ring; j++) {
if (rxr->rx_pg_desc_ring[j])
pci_free_consistent(bp->pdev, RXBD_RING_SIZE,
- rxr->rx_pg_desc_ring[i],
- rxr->rx_pg_desc_mapping[i]);
- rxr->rx_pg_desc_ring[i] = NULL;
+ rxr->rx_pg_desc_ring[j],
+ rxr->rx_pg_desc_mapping[j]);
+ rxr->rx_pg_desc_ring[j] = NULL;
}
if (rxr->rx_pg_ring)
vfree(rxr->rx_pg_ring);
diff --git a/drivers/net/enc28j60.c b/drivers/net/enc28j60.c
index c414554ac32..36cb6e95b46 100644
--- a/drivers/net/enc28j60.c
+++ b/drivers/net/enc28j60.c
@@ -959,7 +959,7 @@ static void enc28j60_hw_rx(struct net_device *ndev)
ndev->stats.rx_packets++;
ndev->stats.rx_bytes += len;
ndev->last_rx = jiffies;
- netif_rx(skb);
+ netif_rx_ni(skb);
}
}
/*
diff --git a/drivers/net/jme.h b/drivers/net/jme.h
index f863aee6648..3f5d9154324 100644
--- a/drivers/net/jme.h
+++ b/drivers/net/jme.h
@@ -22,7 +22,7 @@
*/
#ifndef __JME_H_INCLUDED__
-#define __JME_H_INCLUDEE__
+#define __JME_H_INCLUDED__
#define DRV_NAME "jme"
#define DRV_VERSION "1.0.3"
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
index 536bda1f428..289fc267edf 100644
--- a/drivers/net/phy/mdio_bus.c
+++ b/drivers/net/phy/mdio_bus.c
@@ -105,8 +105,6 @@ int mdiobus_register(struct mii_bus *bus)
return -EINVAL;
}
- bus->state = MDIOBUS_REGISTERED;
-
mutex_init(&bus->mdio_lock);
if (bus->reset)
@@ -123,6 +121,9 @@ int mdiobus_register(struct mii_bus *bus)
}
}
+ if (!err)
+ bus->state = MDIOBUS_REGISTERED;
+
pr_info("%s: probed\n", bus->name);
return err;
diff --git a/drivers/net/starfire.c b/drivers/net/starfire.c
index 1d2ef8f4778..5a40f2d78be 100644
--- a/drivers/net/starfire.c
+++ b/drivers/net/starfire.c
@@ -1509,6 +1509,11 @@ static int __netdev_rx(struct net_device *dev, int *quota)
desc->status = 0;
np->rx_done = (np->rx_done + 1) % DONE_Q_SIZE;
}
+
+ if (*quota == 0) { /* out of rx quota */
+ retcode = 1;
+ goto out;
+ }
writew(np->rx_done, np->base + CompletionQConsumerIdx);
out:
diff --git a/drivers/net/tlan.c b/drivers/net/tlan.c
index c41d6876136..e60498232b9 100644
--- a/drivers/net/tlan.c
+++ b/drivers/net/tlan.c
@@ -1098,6 +1098,7 @@ static int TLan_StartTx( struct sk_buff *skb, struct net_device *dev )
dma_addr_t tail_list_phys;
u8 *tail_buffer;
unsigned long flags;
+ unsigned int txlen;
if ( ! priv->phyOnline ) {
TLAN_DBG( TLAN_DEBUG_TX, "TRANSMIT: %s PHY is not ready\n",
@@ -1108,6 +1109,7 @@ static int TLan_StartTx( struct sk_buff *skb, struct net_device *dev )
if (skb_padto(skb, TLAN_MIN_FRAME_SIZE))
return 0;
+ txlen = max(skb->len, (unsigned int)TLAN_MIN_FRAME_SIZE);
tail_list = priv->txList + priv->txTail;
tail_list_phys = priv->txListDMA + sizeof(TLanList) * priv->txTail;
@@ -1125,16 +1127,16 @@ static int TLan_StartTx( struct sk_buff *skb, struct net_device *dev )
if ( bbuf ) {
tail_buffer = priv->txBuffer + ( priv->txTail * TLAN_MAX_FRAME_SIZE );
- skb_copy_from_linear_data(skb, tail_buffer, skb->len);
+ skb_copy_from_linear_data(skb, tail_buffer, txlen);
} else {
tail_list->buffer[0].address = pci_map_single(priv->pciDev,
- skb->data, skb->len,
+ skb->data, txlen,
PCI_DMA_TODEVICE);
TLan_StoreSKB(tail_list, skb);
}
- tail_list->frameSize = (u16) skb->len;
- tail_list->buffer[0].count = TLAN_LAST_BUFFER | (u32) skb->len;
+ tail_list->frameSize = (u16) txlen;
+ tail_list->buffer[0].count = TLAN_LAST_BUFFER | (u32) txlen;
tail_list->buffer[1].count = 0;
tail_list->buffer[1].address = 0;
@@ -1431,7 +1433,9 @@ static u32 TLan_HandleTxEOF( struct net_device *dev, u16 host_int )
if ( ! bbuf ) {
struct sk_buff *skb = TLan_GetSKB(head_list);
pci_unmap_single(priv->pciDev, head_list->buffer[0].address,
- skb->len, PCI_DMA_TODEVICE);
+ max(skb->len,
+ (unsigned int)TLAN_MIN_FRAME_SIZE),
+ PCI_DMA_TODEVICE);
dev_kfree_skb_any(skb);
head_list->buffer[8].address = 0;
head_list->buffer[9].address = 0;
@@ -2055,9 +2059,12 @@ static void TLan_FreeLists( struct net_device *dev )
list = priv->txList + i;
skb = TLan_GetSKB(list);
if ( skb ) {
- pci_unmap_single(priv->pciDev,
- list->buffer[0].address, skb->len,
- PCI_DMA_TODEVICE);
+ pci_unmap_single(
+ priv->pciDev,
+ list->buffer[0].address,
+ max(skb->len,
+ (unsigned int)TLAN_MIN_FRAME_SIZE),
+ PCI_DMA_TODEVICE);
dev_kfree_skb_any( skb );
list->buffer[8].address = 0;
list->buffer[9].address = 0;
diff --git a/drivers/pci/hotplug/acpiphp.h b/drivers/pci/hotplug/acpiphp.h
index f9e244da30a..9bcb6cbd5aa 100644
--- a/drivers/pci/hotplug/acpiphp.h
+++ b/drivers/pci/hotplug/acpiphp.h
@@ -113,7 +113,7 @@ struct acpiphp_slot {
u8 device; /* pci device# */
- u32 sun; /* ACPI _SUN (slot unique number) */
+ unsigned long long sun; /* ACPI _SUN (slot unique number) */
u32 flags; /* see below */
};
diff --git a/drivers/pci/hotplug/acpiphp_core.c b/drivers/pci/hotplug/acpiphp_core.c
index 95b536a23d2..43c10bd261b 100644
--- a/drivers/pci/hotplug/acpiphp_core.c
+++ b/drivers/pci/hotplug/acpiphp_core.c
@@ -337,7 +337,7 @@ int acpiphp_register_hotplug_slot(struct acpiphp_slot *acpiphp_slot)
slot->hotplug_slot->info->cur_bus_speed = PCI_SPEED_UNKNOWN;
acpiphp_slot->slot = slot;
- snprintf(name, SLOT_NAME_SIZE, "%u", slot->acpi_slot->sun);
+ snprintf(name, SLOT_NAME_SIZE, "%llu", slot->acpi_slot->sun);
retval = pci_hp_register(slot->hotplug_slot,
acpiphp_slot->bridge->pci_bus,
diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c
index 955aae4071f..3affc6472e6 100644
--- a/drivers/pci/hotplug/acpiphp_glue.c
+++ b/drivers/pci/hotplug/acpiphp_glue.c
@@ -255,13 +255,13 @@ register_slot(acpi_handle handle, u32 lvl, void *context, void **rv)
bridge->nr_slots++;
- dbg("found ACPI PCI Hotplug slot %d at PCI %04x:%02x:%02x\n",
+ dbg("found ACPI PCI Hotplug slot %llu at PCI %04x:%02x:%02x\n",
slot->sun, pci_domain_nr(bridge->pci_bus),
bridge->pci_bus->number, slot->device);
retval = acpiphp_register_hotplug_slot(slot);
if (retval) {
if (retval == -EBUSY)
- warn("Slot %d already registered by another "
+ warn("Slot %llu already registered by another "
"hotplug driver\n", slot->sun);
else
warn("acpiphp_register_hotplug_slot failed "
diff --git a/drivers/pci/hotplug/ibmphp_core.c b/drivers/pci/hotplug/ibmphp_core.c
index c892daae74d..633e743442a 100644
--- a/drivers/pci/hotplug/ibmphp_core.c
+++ b/drivers/pci/hotplug/ibmphp_core.c
@@ -1402,10 +1402,6 @@ static int __init ibmphp_init(void)
goto error;
}
- /* lock ourselves into memory with a module
- * count of -1 so that no one can unload us. */
- module_put(THIS_MODULE);
-
exit:
return rc;
@@ -1423,4 +1419,3 @@ static void __exit ibmphp_exit(void)
}
module_init(ibmphp_init);
-module_exit(ibmphp_exit);
diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c
index 4b23bc39b11..39cf248d24e 100644
--- a/drivers/pci/hotplug/pciehp_core.c
+++ b/drivers/pci/hotplug/pciehp_core.c
@@ -432,18 +432,19 @@ static int pciehp_probe(struct pcie_device *dev, const struct pcie_port_service_
goto err_out_release_ctlr;
}
+ /* Check if slot is occupied */
t_slot = pciehp_find_slot(ctrl, ctrl->slot_device_offset);
-
- t_slot->hpc_ops->get_adapter_status(t_slot, &value); /* Check if slot is occupied */
- if (value && pciehp_force) {
- rc = pciehp_enable_slot(t_slot);
- if (rc) /* -ENODEV: shouldn't happen, but deal with it */
- value = 0;
- }
- if ((POWER_CTRL(ctrl)) && !value) {
- rc = t_slot->hpc_ops->power_off_slot(t_slot); /* Power off slot if not occupied*/
- if (rc)
- goto err_out_free_ctrl_slot;
+ t_slot->hpc_ops->get_adapter_status(t_slot, &value);
+ if (value) {
+ if (pciehp_force)
+ pciehp_enable_slot(t_slot);
+ } else {
+ /* Power off slot if not occupied */
+ if (POWER_CTRL(ctrl)) {
+ rc = t_slot->hpc_ops->power_off_slot(t_slot);
+ if (rc)
+ goto err_out_free_ctrl_slot;
+ }
}
return 0;
diff --git a/drivers/pci/pcie/aer/aerdrv_core.c b/drivers/pci/pcie/aer/aerdrv_core.c
index dfc63d01f20..aac7006949f 100644
--- a/drivers/pci/pcie/aer/aerdrv_core.c
+++ b/drivers/pci/pcie/aer/aerdrv_core.c
@@ -252,7 +252,7 @@ static void report_resume(struct pci_dev *dev, void *data)
if (!dev->driver ||
!dev->driver->err_handler ||
- !dev->driver->err_handler->slot_reset)
+ !dev->driver->err_handler->resume)
return;
err_handler = dev->driver->err_handler;
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index 5f4f85f56cb..ce098561513 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -606,27 +606,6 @@ static void __init quirk_ioapic_rmw(struct pci_dev *dev)
sis_apic_bug = 1;
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_SI, PCI_ANY_ID, quirk_ioapic_rmw);
-
-#define AMD8131_revA0 0x01
-#define AMD8131_revB0 0x11
-#define AMD8131_MISC 0x40
-#define AMD8131_NIOAMODE_BIT 0
-static void quirk_amd_8131_ioapic(struct pci_dev *dev)
-{
- unsigned char tmp;
-
- if (nr_ioapics == 0)
- return;
-
- if (dev->revision == AMD8131_revA0 || dev->revision == AMD8131_revB0) {
- dev_info(&dev->dev, "Fixing up AMD8131 IOAPIC mode\n");
- pci_read_config_byte( dev, AMD8131_MISC, &tmp);
- tmp &= ~(1 << AMD8131_NIOAMODE_BIT);
- pci_write_config_byte( dev, AMD8131_MISC, tmp);
- }
-}
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE, quirk_amd_8131_ioapic);
-DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE, quirk_amd_8131_ioapic);
#endif /* CONFIG_X86_IO_APIC */
/*
@@ -1423,6 +1402,155 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x2609, quirk_intel_pcie_pm);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x260a, quirk_intel_pcie_pm);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x260b, quirk_intel_pcie_pm);
+#ifdef CONFIG_X86_IO_APIC
+/*
+ * Boot interrupts on some chipsets cannot be turned off. For these chipsets,
+ * remap the original interrupt in the linux kernel to the boot interrupt, so
+ * that a PCI device's interrupt handler is installed on the boot interrupt
+ * line instead.
+ */
+static void quirk_reroute_to_boot_interrupts_intel(struct pci_dev *dev)
+{
+ if (noioapicquirk || noioapicreroute)
+ return;
+
+ dev->irq_reroute_variant = INTEL_IRQ_REROUTE_VARIANT;
+
+ printk(KERN_INFO "PCI quirk: reroute interrupts for 0x%04x:0x%04x\n",
+ dev->vendor, dev->device);
+ return;
+}
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_80333_0, quirk_reroute_to_boot_interrupts_intel);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_80333_1, quirk_reroute_to_boot_interrupts_intel);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB2_0, quirk_reroute_to_boot_interrupts_intel);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0, quirk_reroute_to_boot_interrupts_intel);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1, quirk_reroute_to_boot_interrupts_intel);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXHV, quirk_reroute_to_boot_interrupts_intel);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_80332_0, quirk_reroute_to_boot_interrupts_intel);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_80332_1, quirk_reroute_to_boot_interrupts_intel);
+DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_80333_0, quirk_reroute_to_boot_interrupts_intel);
+DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_80333_1, quirk_reroute_to_boot_interrupts_intel);
+DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB2_0, quirk_reroute_to_boot_interrupts_intel);
+DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0, quirk_reroute_to_boot_interrupts_intel);
+DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1, quirk_reroute_to_boot_interrupts_intel);
+DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXHV, quirk_reroute_to_boot_interrupts_intel);
+DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_80332_0, quirk_reroute_to_boot_interrupts_intel);
+DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_80332_1, quirk_reroute_to_boot_interrupts_intel);
+
+/*
+ * On some chipsets we can disable the generation of legacy INTx boot
+ * interrupts.
+ */
+
+/*
+ * IO-APIC1 on 6300ESB generates boot interrupts, see intel order no
+ * 300641-004US, section 5.7.3.
+ */
+#define INTEL_6300_IOAPIC_ABAR 0x40
+#define INTEL_6300_DISABLE_BOOT_IRQ (1<<14)
+
+static void quirk_disable_intel_boot_interrupt(struct pci_dev *dev)
+{
+ u16 pci_config_word;
+
+ if (noioapicquirk)
+ return;
+
+ pci_read_config_word(dev, INTEL_6300_IOAPIC_ABAR, &pci_config_word);
+ pci_config_word |= INTEL_6300_DISABLE_BOOT_IRQ;
+ pci_write_config_word(dev, INTEL_6300_IOAPIC_ABAR, pci_config_word);
+
+ printk(KERN_INFO "disabled boot interrupt on device 0x%04x:0x%04x\n",
+ dev->vendor, dev->device);
+}
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB_10, quirk_disable_intel_boot_interrupt);
+DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB_10, quirk_disable_intel_boot_interrupt);
+
+/*
+ * disable boot interrupts on HT-1000
+ */
+#define BC_HT1000_FEATURE_REG 0x64
+#define BC_HT1000_PIC_REGS_ENABLE (1<<0)
+#define BC_HT1000_MAP_IDX 0xC00
+#define BC_HT1000_MAP_DATA 0xC01
+
+static void quirk_disable_broadcom_boot_interrupt(struct pci_dev *dev)
+{
+ u32 pci_config_dword;
+ u8 irq;
+
+ if (noioapicquirk)
+ return;
+
+ pci_read_config_dword(dev, BC_HT1000_FEATURE_REG, &pci_config_dword);
+ pci_write_config_dword(dev, BC_HT1000_FEATURE_REG, pci_config_dword |
+ BC_HT1000_PIC_REGS_ENABLE);
+
+ for (irq = 0x10; irq < 0x10 + 32; irq++) {
+ outb(irq, BC_HT1000_MAP_IDX);
+ outb(0x00, BC_HT1000_MAP_DATA);
+ }
+
+ pci_write_config_dword(dev, BC_HT1000_FEATURE_REG, pci_config_dword);
+
+ printk(KERN_INFO "disabled boot interrupts on PCI device"
+ "0x%04x:0x%04x\n", dev->vendor, dev->device);
+}
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_HT1000SB, quirk_disable_broadcom_boot_interrupt);
+DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_HT1000SB, quirk_disable_broadcom_boot_interrupt);
+
+/*
+ * disable boot interrupts on AMD and ATI chipsets
+ */
+/*
+ * NOIOAMODE needs to be disabled to disable "boot interrupts". For AMD 8131
+ * rev. A0 and B0, NOIOAMODE needs to be disabled anyway to fix IO-APIC mode
+ * (due to an erratum).
+ */
+#define AMD_813X_MISC 0x40
+#define AMD_813X_NOIOAMODE (1<<0)
+
+static void quirk_disable_amd_813x_boot_interrupt(struct pci_dev *dev)
+{
+ u32 pci_config_dword;
+
+ if (noioapicquirk)
+ return;
+
+ pci_read_config_dword(dev, AMD_813X_MISC, &pci_config_dword);
+ pci_config_dword &= ~AMD_813X_NOIOAMODE;
+ pci_write_config_dword(dev, AMD_813X_MISC, pci_config_dword);
+
+ printk(KERN_INFO "disabled boot interrupts on PCI device "
+ "0x%04x:0x%04x\n", dev->vendor, dev->device);
+}
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE, quirk_disable_amd_813x_boot_interrupt);
+DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8132_BRIDGE, quirk_disable_amd_813x_boot_interrupt);
+
+#define AMD_8111_PCI_IRQ_ROUTING 0x56
+
+static void quirk_disable_amd_8111_boot_interrupt(struct pci_dev *dev)
+{
+ u16 pci_config_word;
+
+ if (noioapicquirk)
+ return;
+
+ pci_read_config_word(dev, AMD_8111_PCI_IRQ_ROUTING, &pci_config_word);
+ if (!pci_config_word) {
+ printk(KERN_INFO "boot interrupts on PCI device 0x%04x:0x%04x "
+ "already disabled\n",
+ dev->vendor, dev->device);
+ return;
+ }
+ pci_write_config_word(dev, AMD_8111_PCI_IRQ_ROUTING, 0);
+ printk(KERN_INFO "disabled boot interrupts on PCI device "
+ "0x%04x:0x%04x\n", dev->vendor, dev->device);
+}
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8111_SMBUS, quirk_disable_amd_8111_boot_interrupt);
+DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8111_SMBUS, quirk_disable_amd_8111_boot_interrupt);
+#endif /* CONFIG_X86_IO_APIC */
+
/*
* Toshiba TC86C001 IDE controller reports the standard 8-byte BAR0 size
* but the PIO transfers won't work if BAR0 falls at the odd 8 bytes.
diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
index 162cd927d94..94acbeed4e7 100644
--- a/drivers/scsi/aacraid/linit.c
+++ b/drivers/scsi/aacraid/linit.c
@@ -175,8 +175,8 @@ static struct aac_driver_ident aac_drivers[] = {
{ aac_rx_init, "percraid", "DELL ", "PERCRAID ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* PERC 3/Di (Boxster/PERC3DiB) */
{ aac_rx_init, "aacraid", "ADAPTEC ", "catapult ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* catapult */
{ aac_rx_init, "aacraid", "ADAPTEC ", "tomcat ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* tomcat */
- { aac_rx_init, "aacraid", "ADAPTEC ", "Adaptec 2120S ", 1, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* Adaptec 2120S (Crusader) */
- { aac_rx_init, "aacraid", "ADAPTEC ", "Adaptec 2200S ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* Adaptec 2200S (Vulcan) */
+ { aac_rx_init, "aacraid", "ADAPTEC ", "Adaptec 2120S ", 1, AAC_QUIRK_31BIT | AAC_QUIRK_34SG }, /* Adaptec 2120S (Crusader) */
+ { aac_rx_init, "aacraid", "ADAPTEC ", "Adaptec 2200S ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG }, /* Adaptec 2200S (Vulcan) */
{ aac_rx_init, "aacraid", "ADAPTEC ", "Adaptec 2200S ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* Adaptec 2200S (Vulcan-2m) */
{ aac_rx_init, "aacraid", "Legend ", "Legend S220 ", 1, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* Legend S220 (Legend Crusader) */
{ aac_rx_init, "aacraid", "Legend ", "Legend S230 ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* Legend S230 (Legend Vulcan) */
diff --git a/drivers/scsi/ibmvscsi/ibmvstgt.c b/drivers/scsi/ibmvscsi/ibmvstgt.c
index 2a5b29d1217..e2dd6a45924 100644
--- a/drivers/scsi/ibmvscsi/ibmvstgt.c
+++ b/drivers/scsi/ibmvscsi/ibmvstgt.c
@@ -864,21 +864,23 @@ static int ibmvstgt_probe(struct vio_dev *dev, const struct vio_device_id *id)
INIT_WORK(&vport->crq_work, handle_crq);
- err = crq_queue_create(&vport->crq_queue, target);
+ err = scsi_add_host(shost, target->dev);
if (err)
goto free_srp_target;
- err = scsi_add_host(shost, target->dev);
+ err = scsi_tgt_alloc_queue(shost);
if (err)
- goto destroy_queue;
+ goto remove_host;
- err = scsi_tgt_alloc_queue(shost);
+ err = crq_queue_create(&vport->crq_queue, target);
if (err)
- goto destroy_queue;
+ goto free_queue;
return 0;
-destroy_queue:
- crq_queue_destroy(target);
+free_queue:
+ scsi_tgt_free_queue(shost);
+remove_host:
+ scsi_remove_host(shost);
free_srp_target:
srp_target_free(target);
put_host:
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index 801c7cf54d2..3fdee7370cc 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -489,12 +489,6 @@ __iscsi_conn_send_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
if (!__kfifo_get(session->cmdpool.queue,
(void*)&task, sizeof(void*)))
return NULL;
-
- if ((hdr->opcode == (ISCSI_OP_NOOP_OUT | ISCSI_OP_IMMEDIATE)) &&
- hdr->ttt == RESERVED_ITT) {
- conn->ping_task = task;
- conn->last_ping = jiffies;
- }
}
/*
* released in complete pdu for task we expect a response for, and
@@ -703,6 +697,11 @@ static void iscsi_send_nopout(struct iscsi_conn *conn, struct iscsi_nopin *rhdr)
task = __iscsi_conn_send_pdu(conn, (struct iscsi_hdr *)&hdr, NULL, 0);
if (!task)
iscsi_conn_printk(KERN_ERR, conn, "Could not send nopout\n");
+ else if (!rhdr) {
+ /* only track our nops */
+ conn->ping_task = task;
+ conn->last_ping = jiffies;
+ }
}
static int iscsi_handle_reject(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index fa45a1a6686..148d3af92ae 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -648,8 +648,8 @@ static void scsi_requeue_command(struct request_queue *q, struct scsi_cmnd *cmd)
struct request *req = cmd->request;
unsigned long flags;
- scsi_unprep_request(req);
spin_lock_irqsave(q->queue_lock, flags);
+ scsi_unprep_request(req);
blk_requeue_request(q, req);
spin_unlock_irqrestore(q->queue_lock, flags);
diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c
index 526c191e84e..8dc7109d61b 100644
--- a/drivers/xen/balloon.c
+++ b/drivers/xen/balloon.c
@@ -44,13 +44,15 @@
#include <linux/list.h>
#include <linux/sysdev.h>
-#include <asm/xen/hypervisor.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/uaccess.h>
#include <asm/tlb.h>
+#include <asm/xen/hypervisor.h>
+#include <asm/xen/hypercall.h>
+#include <xen/interface/xen.h>
#include <xen/interface/memory.h>
#include <xen/xenbus.h>
#include <xen/features.h>
diff --git a/drivers/xen/features.c b/drivers/xen/features.c
index 0707714e40d..99eda169c77 100644
--- a/drivers/xen/features.c
+++ b/drivers/xen/features.c
@@ -8,7 +8,11 @@
#include <linux/types.h>
#include <linux/cache.h>
#include <linux/module.h>
-#include <asm/xen/hypervisor.h>
+
+#include <asm/xen/hypercall.h>
+
+#include <xen/interface/xen.h>
+#include <xen/interface/version.h>
#include <xen/features.h>
u8 xen_features[XENFEAT_NR_SUBMAPS * 32] __read_mostly;
diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c
index 06592b9da83..7d8f531fb8e 100644
--- a/drivers/xen/grant-table.c
+++ b/drivers/xen/grant-table.c
@@ -40,6 +40,7 @@
#include <xen/interface/xen.h>
#include <xen/page.h>
#include <xen/grant_table.h>
+#include <asm/xen/hypercall.h>
#include <asm/pgtable.h>
#include <asm/sync_bitops.h>