Diffstat (limited to 'drivers')
-rw-r--r--  drivers/acpi/ac.c | 20
-rw-r--r--  drivers/acpi/battery.c | 34
-rw-r--r--  drivers/acpi/blacklist.c | 16
-rw-r--r--  drivers/acpi/bus.c | 91
-rw-r--r--  drivers/acpi/glue.c | 40
-rw-r--r--  drivers/acpi/osl.c | 25
-rw-r--r--  drivers/acpi/pci_bind.c | 313
-rw-r--r--  drivers/acpi/pci_irq.c | 17
-rw-r--r--  drivers/acpi/pci_root.c | 297
-rw-r--r--  drivers/acpi/power.c | 28
-rw-r--r--  drivers/acpi/processor_core.c | 45
-rw-r--r--  drivers/acpi/processor_idle.c | 47
-rw-r--r--  drivers/acpi/scan.c | 69
-rw-r--r--  drivers/acpi/video.c | 61
-rw-r--r--  drivers/acpi/video_detect.c | 9
-rw-r--r--  drivers/block/floppy.c | 5
-rw-r--r--  drivers/char/bsr.c | 42
-rw-r--r--  drivers/char/mxser.c | 2
-rw-r--r--  drivers/char/nozomi.c | 2
-rw-r--r--  drivers/char/synclink_gt.c | 72
-rw-r--r--  drivers/char/tty_ldisc.c | 15
-rw-r--r--  drivers/char/tty_port.c | 2
-rw-r--r--  drivers/dma/txx9dmac.c | 20
-rw-r--r--  drivers/edac/amd64_edac.c | 25
-rw-r--r--  drivers/edac/amd64_edac.h | 2
-rw-r--r--  drivers/edac/edac_core.h | 4
-rw-r--r--  drivers/edac/edac_mc_sysfs.c | 4
-rw-r--r--  drivers/edac/mpc85xx_edac.c | 6
-rw-r--r--  drivers/edac/mpc85xx_edac.h | 1
-rw-r--r--  drivers/gpio/pl061.c | 20
-rw-r--r--  drivers/gpu/drm/Kconfig | 1
-rw-r--r--  drivers/gpu/drm/Makefile | 2
-rw-r--r--  drivers/gpu/drm/drm_edid.c | 12
-rw-r--r--  drivers/gpu/drm/i915/Makefile | 2
-rw-r--r--  drivers/gpu/drm/i915/dvo.h | 4
-rw-r--r--  drivers/gpu/drm/i915/dvo_ch7017.c | 20
-rw-r--r--  drivers/gpu/drm/i915/dvo_ch7xxx.c | 25
-rw-r--r--  drivers/gpu/drm/i915/dvo_ivch.c | 21
-rw-r--r--  drivers/gpu/drm/i915/dvo_sil164.c | 25
-rw-r--r--  drivers/gpu/drm/i915/dvo_tfp410.c | 25
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c | 4
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h | 12
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c | 19
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_debug.c | 6
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_tiling.c | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c | 12
-rw-r--r--  drivers/gpu/drm/i915/i915_opregion.c | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h | 29
-rw-r--r--  drivers/gpu/drm/i915/i915_suspend.c | 34
-rw-r--r--  drivers/gpu/drm/i915/intel_bios.c | 12
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c | 199
-rw-r--r--  drivers/gpu/drm/i915/intel_dp.c | 1153
-rw-r--r--  drivers/gpu/drm/i915/intel_dp.h | 144
-rw-r--r--  drivers/gpu/drm/i915/intel_dp_i2c.c | 272
-rw-r--r--  drivers/gpu/drm/i915/intel_drv.h | 17
-rw-r--r--  drivers/gpu/drm/i915/intel_dvo.c | 16
-rw-r--r--  drivers/gpu/drm/i915/intel_hdmi.c | 35
-rw-r--r--  drivers/gpu/drm/i915/intel_i2c.c | 16
-rw-r--r--  drivers/gpu/drm/i915/intel_lvds.c | 344
-rw-r--r--  drivers/gpu/drm/i915/intel_modes.c | 14
-rw-r--r--  drivers/gpu/drm/i915/intel_sdvo.c | 72
-rw-r--r--  drivers/gpu/drm/i915/intel_tv.c | 53
-rw-r--r--  drivers/gpu/drm/radeon/radeon_device.c | 22
-rw-r--r--  drivers/gpu/drm/radeon/radeon_drv.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_fb.c | 33
-rw-r--r--  drivers/gpu/drm/radeon/radeon_object.c | 30
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo_util.c | 1
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo_vm.c | 1
-rw-r--r--  drivers/gpu/drm/ttm/ttm_tt.c | 1
-rw-r--r--  drivers/i2c/busses/Kconfig | 10
-rw-r--r--  drivers/i2c/busses/Makefile | 1
-rw-r--r--  drivers/i2c/busses/i2c-designware.c | 624
-rw-r--r--  drivers/ide/cs5520.c | 1
-rw-r--r--  drivers/ide/ide-acpi.c | 37
-rw-r--r--  drivers/ide/ide-cd.c | 24
-rw-r--r--  drivers/ide/ide-devsets.c | 2
-rw-r--r--  drivers/ide/ide-dma.c | 21
-rw-r--r--  drivers/ide/ide-eh.c | 2
-rw-r--r--  drivers/ide/ide-floppy.c | 2
-rw-r--r--  drivers/ide/ide-io.c | 68
-rw-r--r--  drivers/ide/ide-ioctls.c | 3
-rw-r--r--  drivers/ide/ide-iops.c | 4
-rw-r--r--  drivers/ide/ide-pm.c | 30
-rw-r--r--  drivers/ide/ide-probe.c | 23
-rw-r--r--  drivers/leds/Kconfig | 14
-rw-r--r--  drivers/leds/Makefile | 1
-rw-r--r--  drivers/leds/leds-alix2.c | 7
-rw-r--r--  drivers/leds/leds-bd2802.c | 96
-rw-r--r--  drivers/leds/leds-gpio.c | 22
-rw-r--r--  drivers/leds/leds-lp3944.c | 466
-rw-r--r--  drivers/leds/leds-pca9532.c | 58
-rw-r--r--  drivers/lguest/lg.h | 2
-rw-r--r--  drivers/lguest/lguest_user.c | 4
-rw-r--r--  drivers/macintosh/macio_asic.c | 11
-rw-r--r--  drivers/md/Kconfig | 30
-rw-r--r--  drivers/md/Makefile | 5
-rw-r--r--  drivers/md/dm-crypt.c | 19
-rw-r--r--  drivers/md/dm-delay.c | 26
-rw-r--r--  drivers/md/dm-exception-store.c | 9
-rw-r--r--  drivers/md/dm-exception-store.h | 2
-rw-r--r--  drivers/md/dm-io.c | 14
-rw-r--r--  drivers/md/dm-ioctl.c | 27
-rw-r--r--  drivers/md/dm-linear.c | 15
-rw-r--r--  drivers/md/dm-log-userspace-base.c | 696
-rw-r--r--  drivers/md/dm-log-userspace-transfer.c | 276
-rw-r--r--  drivers/md/dm-log-userspace-transfer.h | 18
-rw-r--r--  drivers/md/dm-log.c | 9
-rw-r--r--  drivers/md/dm-mpath.c | 334
-rw-r--r--  drivers/md/dm-path-selector.h | 8
-rw-r--r--  drivers/md/dm-queue-length.c | 263
-rw-r--r--  drivers/md/dm-raid1.c | 17
-rw-r--r--  drivers/md/dm-region-hash.c | 2
-rw-r--r--  drivers/md/dm-round-robin.c | 2
-rw-r--r--  drivers/md/dm-service-time.c | 339
-rw-r--r--  drivers/md/dm-snap-persistent.c | 2
-rw-r--r--  drivers/md/dm-snap.c | 11
-rw-r--r--  drivers/md/dm-stripe.c | 33
-rw-r--r--  drivers/md/dm-sysfs.c | 9
-rw-r--r--  drivers/md/dm-table.c | 465
-rw-r--r--  drivers/md/dm.c | 1137
-rw-r--r--  drivers/md/dm.h | 35
-rw-r--r--  drivers/md/linear.c | 4
-rw-r--r--  drivers/md/md.c | 56
-rw-r--r--  drivers/md/multipath.c | 7
-rw-r--r--  drivers/md/raid0.c | 9
-rw-r--r--  drivers/md/raid1.c | 9
-rw-r--r--  drivers/md/raid10.c | 19
-rw-r--r--  drivers/md/raid5.c | 28
-rw-r--r--  drivers/mmc/host/mmc_spi.c | 6
-rw-r--r--  drivers/mtd/cmdlinepart.c | 2
-rw-r--r--  drivers/mtd/devices/m25p80.c | 2
-rw-r--r--  drivers/mtd/inftlcore.c | 11
-rw-r--r--  drivers/mtd/maps/integrator-flash.c | 22
-rw-r--r--  drivers/mtd/nand/atmel_nand.c | 2
-rw-r--r--  drivers/mtd/nand/omap2.c | 7
-rw-r--r--  drivers/mtd/nftlcore.c | 16
-rw-r--r--  drivers/net/Kconfig | 1
-rw-r--r--  drivers/net/atl1c/atl1c_ethtool.c | 2
-rw-r--r--  drivers/net/atl1e/atl1e_ethtool.c | 2
-rw-r--r--  drivers/net/benet/be.h | 2
-rw-r--r--  drivers/net/benet/be_ethtool.c | 4
-rw-r--r--  drivers/net/benet/be_main.c | 45
-rw-r--r--  drivers/net/bnx2.c | 10
-rw-r--r--  drivers/net/bnx2x_main.c | 10
-rw-r--r--  drivers/net/can/Kconfig | 2
-rw-r--r--  drivers/net/cpmac.c | 2
-rw-r--r--  drivers/net/e1000/e1000_main.c | 11
-rw-r--r--  drivers/net/e1000e/netdev.c | 3
-rw-r--r--  drivers/net/fsl_pq_mdio.c | 8
-rw-r--r--  drivers/net/igb/igb_main.c | 14
-rw-r--r--  drivers/net/irda/bfin_sir.c | 16
-rw-r--r--  drivers/net/ixgbe/ixgbe_ethtool.c | 5
-rw-r--r--  drivers/net/ixgbe/ixgbe_main.c | 45
-rw-r--r--  drivers/net/mdio.c | 4
-rw-r--r--  drivers/net/netxen/netxen_nic_init.c | 37
-rw-r--r--  drivers/net/netxen/netxen_nic_main.c | 7
-rw-r--r--  drivers/net/qla3xxx.c | 6
-rw-r--r--  drivers/net/sh_eth.c | 9
-rw-r--r--  drivers/net/sky2.c | 2
-rw-r--r--  drivers/net/usb/cdc_eem.c | 2
-rw-r--r--  drivers/net/usb/dm9601.c | 10
-rw-r--r--  drivers/net/usb/net1080.c | 12
-rw-r--r--  drivers/net/usb/rndis_host.c | 2
-rw-r--r--  drivers/net/usb/smsc95xx.c | 10
-rw-r--r--  drivers/net/usb/usbnet.c | 30
-rw-r--r--  drivers/net/veth.c | 41
-rw-r--r--  drivers/parport/parport_pc.c | 5
-rw-r--r--  drivers/pci/hotplug/acpi_pcihp.c | 40
-rw-r--r--  drivers/pci/hotplug/acpiphp_glue.c | 27
-rw-r--r--  drivers/pci/intel-iommu.c | 307
-rw-r--r--  drivers/pci/intr_remapping.c | 160
-rw-r--r--  drivers/pci/intr_remapping.h | 2
-rw-r--r--  drivers/pci/iova.c | 26
-rw-r--r--  drivers/platform/x86/Kconfig | 35
-rw-r--r--  drivers/platform/x86/Makefile | 1
-rw-r--r--  drivers/platform/x86/acerhdf.c | 602
-rw-r--r--  drivers/platform/x86/asus-laptop.c | 111
-rw-r--r--  drivers/platform/x86/asus_acpi.c | 30
-rw-r--r--  drivers/platform/x86/dell-wmi.c | 56
-rw-r--r--  drivers/platform/x86/eeepc-laptop.c | 468
-rw-r--r--  drivers/platform/x86/hp-wmi.c | 87
-rw-r--r--  drivers/platform/x86/thinkpad_acpi.c | 411
-rw-r--r--  drivers/pnp/pnpacpi/rsparser.c | 46
-rw-r--r--  drivers/power/Kconfig | 8
-rw-r--r--  drivers/power/Makefile | 3
-rw-r--r--  drivers/power/da9030_battery.c | 19
-rw-r--r--  drivers/power/ds2760_battery.c | 42
-rw-r--r--  drivers/power/max17040_battery.c | 309
-rw-r--r--  drivers/rtc/rtc-bfin.c | 30
-rw-r--r--  drivers/scsi/sg.c | 4
-rw-r--r--  drivers/serial/8250_pci.c | 12
-rw-r--r--  drivers/serial/icom.c | 3
-rw-r--r--  drivers/serial/jsm/jsm_tty.c | 4
-rw-r--r--  drivers/serial/serial_txx9.c | 113
-rw-r--r--  drivers/spi/omap_uwire.c | 2
-rw-r--r--  drivers/spi/spi_bitbang.c | 24
-rw-r--r--  drivers/spi/spidev.c | 17
-rw-r--r--  drivers/staging/octeon/Makefile | 1
-rw-r--r--  drivers/staging/octeon/ethernet-common.c | 328
-rw-r--r--  drivers/staging/octeon/ethernet-common.h | 29
-rw-r--r--  drivers/staging/octeon/ethernet-defines.h | 2
-rw-r--r--  drivers/staging/octeon/ethernet-rgmii.c | 9
-rw-r--r--  drivers/staging/octeon/ethernet-sgmii.c | 9
-rw-r--r--  drivers/staging/octeon/ethernet-spi.c | 1
-rw-r--r--  drivers/staging/octeon/ethernet-tx.c | 62
-rw-r--r--  drivers/staging/octeon/ethernet-tx.h | 25
-rw-r--r--  drivers/staging/octeon/ethernet-xaui.c | 9
-rw-r--r--  drivers/staging/octeon/ethernet.c | 470
-rw-r--r--  drivers/staging/octeon/octeon-ethernet.h | 11
-rw-r--r--  drivers/usb/class/cdc-acm.c | 4
-rw-r--r--  drivers/usb/serial/usb-serial.c | 3
-rw-r--r--  drivers/uwb/hwa-rc.c | 2
-rw-r--r--  drivers/uwb/wlp/txrx.c | 2
-rw-r--r--  drivers/video/atafb.c | 7
-rw-r--r--  drivers/video/atmel_lcdfb.c | 2
-rw-r--r--  drivers/video/aty/atyfb.h | 3
-rw-r--r--  drivers/video/aty/atyfb_base.c | 141
-rw-r--r--  drivers/video/aty/mach64_accel.c | 7
-rw-r--r--  drivers/video/backlight/tdo24m.c | 2
-rw-r--r--  drivers/video/fbmem.c | 13
-rw-r--r--  drivers/video/fsl-diu-fb.c | 14
-rw-r--r--  drivers/video/i810/i810_main.c | 2
-rw-r--r--  drivers/video/matrox/matroxfb_base.c | 3
-rw-r--r--  drivers/video/matrox/matroxfb_crtc2.c | 5
-rw-r--r--  drivers/video/mx3fb.c | 17
-rw-r--r--  drivers/video/omap/omapfb_main.c | 4
-rw-r--r--  drivers/video/platinumfb.c | 2
-rw-r--r--  drivers/video/pxafb.c | 2
-rw-r--r--  drivers/video/sh7760fb.c | 19
-rw-r--r--  drivers/video/sis/sis_main.c | 2
-rw-r--r--  drivers/video/sm501fb.c | 21
-rw-r--r--  drivers/video/w100fb.c | 2
-rw-r--r--  drivers/w1/slaves/w1_ds2760.c | 30
-rw-r--r--  drivers/w1/slaves/w1_ds2760.h | 7
-rw-r--r--  drivers/watchdog/wdrtas.c | 8
235 files changed, 11250 insertions, 2952 deletions
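
Note on the ACPI hunks that follow: the per-driver acpi_install_notify_handler()/acpi_remove_notify_handler() calls are dropped, and drivers instead set ACPI_DRIVER_ALL_NOTIFY_EVENTS plus a .notify callback in struct acpi_driver, which acpi_bus_notify() dispatches. A minimal sketch of the resulting driver shape, for orientation only (not part of the patch; the foo_* names are hypothetical, the fields mirror the ac.c and battery.c hunks below):

static void foo_notify(struct acpi_device *device, u32 event)
{
	struct foo *foo = acpi_driver_data(device);	/* driver_data set in .add */

	if (!foo)
		return;
	/* handle the event; no separate handle/context arguments are needed */
}

static struct acpi_driver foo_driver = {
	.name  = "foo",
	.ids   = foo_device_ids,
	.flags = ACPI_DRIVER_ALL_NOTIFY_EVENTS,	/* receive all notify events */
	.ops   = {
		.add    = foo_add,	/* no acpi_install_notify_handler() */
		.remove = foo_remove,	/* no acpi_remove_notify_handler() */
		.notify = foo_notify,
	},
};
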
diff --git a/drivers/acpi/ac.c b/drivers/acpi/ac.c
index 88e42abf5d8..0df8fcb687d 100644
--- a/drivers/acpi/ac.c
+++ b/drivers/acpi/ac.c
@@ -61,6 +61,7 @@ static int acpi_ac_open_fs(struct inode *inode, struct file *file);
static int acpi_ac_add(struct acpi_device *device);
static int acpi_ac_remove(struct acpi_device *device, int type);
static int acpi_ac_resume(struct acpi_device *device);
+static void acpi_ac_notify(struct acpi_device *device, u32 event);
static const struct acpi_device_id ac_device_ids[] = {
{"ACPI0003", 0},
@@ -72,10 +73,12 @@ static struct acpi_driver acpi_ac_driver = {
.name = "ac",
.class = ACPI_AC_CLASS,
.ids = ac_device_ids,
+ .flags = ACPI_DRIVER_ALL_NOTIFY_EVENTS,
.ops = {
.add = acpi_ac_add,
.remove = acpi_ac_remove,
.resume = acpi_ac_resume,
+ .notify = acpi_ac_notify,
},
};
@@ -220,16 +223,14 @@ static int acpi_ac_remove_fs(struct acpi_device *device)
Driver Model
-------------------------------------------------------------------------- */
-static void acpi_ac_notify(acpi_handle handle, u32 event, void *data)
+static void acpi_ac_notify(struct acpi_device *device, u32 event)
{
- struct acpi_ac *ac = data;
- struct acpi_device *device = NULL;
+ struct acpi_ac *ac = acpi_driver_data(device);
if (!ac)
return;
- device = ac->device;
switch (event) {
default:
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
@@ -253,7 +254,6 @@ static void acpi_ac_notify(acpi_handle handle, u32 event, void *data)
static int acpi_ac_add(struct acpi_device *device)
{
int result = 0;
- acpi_status status = AE_OK;
struct acpi_ac *ac = NULL;
@@ -286,13 +286,6 @@ static int acpi_ac_add(struct acpi_device *device)
ac->charger.get_property = get_ac_property;
power_supply_register(&ac->device->dev, &ac->charger);
#endif
- status = acpi_install_notify_handler(device->handle,
- ACPI_ALL_NOTIFY, acpi_ac_notify,
- ac);
- if (ACPI_FAILURE(status)) {
- result = -ENODEV;
- goto end;
- }
printk(KERN_INFO PREFIX "%s [%s] (%s)\n",
acpi_device_name(device), acpi_device_bid(device),
@@ -328,7 +321,6 @@ static int acpi_ac_resume(struct acpi_device *device)
static int acpi_ac_remove(struct acpi_device *device, int type)
{
- acpi_status status = AE_OK;
struct acpi_ac *ac = NULL;
@@ -337,8 +329,6 @@ static int acpi_ac_remove(struct acpi_device *device, int type)
ac = acpi_driver_data(device);
- status = acpi_remove_notify_handler(device->handle,
- ACPI_ALL_NOTIFY, acpi_ac_notify);
#ifdef CONFIG_ACPI_SYSFS_POWER
if (ac->charger.dev)
power_supply_unregister(&ac->charger);
diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
index b0de6312919..58b4517ce71 100644
--- a/drivers/acpi/battery.c
+++ b/drivers/acpi/battery.c
@@ -796,13 +796,12 @@ static void acpi_battery_remove_fs(struct acpi_device *device)
Driver Interface
-------------------------------------------------------------------------- */
-static void acpi_battery_notify(acpi_handle handle, u32 event, void *data)
+static void acpi_battery_notify(struct acpi_device *device, u32 event)
{
- struct acpi_battery *battery = data;
- struct acpi_device *device;
+ struct acpi_battery *battery = acpi_driver_data(device);
+
if (!battery)
return;
- device = battery->device;
acpi_battery_update(battery);
acpi_bus_generate_proc_event(device, event,
acpi_battery_present(battery));
@@ -819,7 +818,6 @@ static void acpi_battery_notify(acpi_handle handle, u32 event, void *data)
static int acpi_battery_add(struct acpi_device *device)
{
int result = 0;
- acpi_status status = 0;
struct acpi_battery *battery = NULL;
if (!device)
return -EINVAL;
@@ -834,22 +832,12 @@ static int acpi_battery_add(struct acpi_device *device)
acpi_battery_update(battery);
#ifdef CONFIG_ACPI_PROCFS_POWER
result = acpi_battery_add_fs(device);
- if (result)
- goto end;
#endif
- status = acpi_install_notify_handler(device->handle,
- ACPI_ALL_NOTIFY,
- acpi_battery_notify, battery);
- if (ACPI_FAILURE(status)) {
- ACPI_EXCEPTION((AE_INFO, status, "Installing notify handler"));
- result = -ENODEV;
- goto end;
- }
- printk(KERN_INFO PREFIX "%s Slot [%s] (battery %s)\n",
- ACPI_BATTERY_DEVICE_NAME, acpi_device_bid(device),
- device->status.battery_present ? "present" : "absent");
- end:
- if (result) {
+ if (!result) {
+ printk(KERN_INFO PREFIX "%s Slot [%s] (battery %s)\n",
+ ACPI_BATTERY_DEVICE_NAME, acpi_device_bid(device),
+ device->status.battery_present ? "present" : "absent");
+ } else {
#ifdef CONFIG_ACPI_PROCFS_POWER
acpi_battery_remove_fs(device);
#endif
@@ -860,15 +848,11 @@ static int acpi_battery_add(struct acpi_device *device)
static int acpi_battery_remove(struct acpi_device *device, int type)
{
- acpi_status status = 0;
struct acpi_battery *battery = NULL;
if (!device || !acpi_driver_data(device))
return -EINVAL;
battery = acpi_driver_data(device);
- status = acpi_remove_notify_handler(device->handle,
- ACPI_ALL_NOTIFY,
- acpi_battery_notify);
#ifdef CONFIG_ACPI_PROCFS_POWER
acpi_battery_remove_fs(device);
#endif
@@ -896,10 +880,12 @@ static struct acpi_driver acpi_battery_driver = {
.name = "battery",
.class = ACPI_BATTERY_CLASS,
.ids = battery_device_ids,
+ .flags = ACPI_DRIVER_ALL_NOTIFY_EVENTS,
.ops = {
.add = acpi_battery_add,
.resume = acpi_battery_resume,
.remove = acpi_battery_remove,
+ .notify = acpi_battery_notify,
},
};
diff --git a/drivers/acpi/blacklist.c b/drivers/acpi/blacklist.c
index 09c69806c1f..f6baa77deef 100644
--- a/drivers/acpi/blacklist.c
+++ b/drivers/acpi/blacklist.c
@@ -192,6 +192,22 @@ static struct dmi_system_id acpi_osi_dmi_table[] __initdata = {
DMI_MATCH(DMI_PRODUCT_NAME, "ESPRIMO Mobile V5505"),
},
},
+ {
+ .callback = dmi_disable_osi_vista,
+ .ident = "Sony VGN-NS10J_S",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "VGN-NS10J_S"),
+ },
+ },
+ {
+ .callback = dmi_disable_osi_vista,
+ .ident = "Sony VGN-SR290J",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Sony VGN-SR290J"),
+ },
+ },
/*
* BIOS invocation of _OSI(Linux) is almost always a BIOS bug.
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index ae862f1798d..2876fc70c3a 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -450,18 +450,16 @@ int acpi_bus_receive_event(struct acpi_bus_event *event)
Notification Handling
-------------------------------------------------------------------------- */
-static int
-acpi_bus_check_device(struct acpi_device *device, int *status_changed)
+static void acpi_bus_check_device(acpi_handle handle)
{
- acpi_status status = 0;
+ struct acpi_device *device;
+ acpi_status status;
struct acpi_device_status old_status;
-
+ if (acpi_bus_get_device(handle, &device))
+ return;
if (!device)
- return -EINVAL;
-
- if (status_changed)
- *status_changed = 0;
+ return;
old_status = device->status;
@@ -471,22 +469,15 @@ acpi_bus_check_device(struct acpi_device *device, int *status_changed)
*/
if (device->parent && !device->parent->status.present) {
device->status = device->parent->status;
- if (STRUCT_TO_INT(old_status) != STRUCT_TO_INT(device->status)) {
- if (status_changed)
- *status_changed = 1;
- }
- return 0;
+ return;
}
status = acpi_bus_get_status(device);
if (ACPI_FAILURE(status))
- return -ENODEV;
+ return;
if (STRUCT_TO_INT(old_status) == STRUCT_TO_INT(device->status))
- return 0;
-
- if (status_changed)
- *status_changed = 1;
+ return;
/*
* Device Insertion/Removal
@@ -498,33 +489,17 @@ acpi_bus_check_device(struct acpi_device *device, int *status_changed)
ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device removal detected\n"));
/* TBD: Handle device removal */
}
-
- return 0;
}
-static int acpi_bus_check_scope(struct acpi_device *device)
+static void acpi_bus_check_scope(acpi_handle handle)
{
- int result = 0;
- int status_changed = 0;
-
-
- if (!device)
- return -EINVAL;
-
/* Status Change? */
- result = acpi_bus_check_device(device, &status_changed);
- if (result)
- return result;
-
- if (!status_changed)
- return 0;
+ acpi_bus_check_device(handle);
/*
* TBD: Enumerate child devices within this device's scope and
* run acpi_bus_check_device()'s on them.
*/
-
- return 0;
}
static BLOCKING_NOTIFIER_HEAD(acpi_bus_notify_list);
@@ -547,22 +522,19 @@ EXPORT_SYMBOL_GPL(unregister_acpi_bus_notifier);
*/
static void acpi_bus_notify(acpi_handle handle, u32 type, void *data)
{
- int result = 0;
struct acpi_device *device = NULL;
+ struct acpi_driver *driver;
+
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Notification %#02x to handle %p\n",
+ type, handle));
blocking_notifier_call_chain(&acpi_bus_notify_list,
type, (void *)handle);
- if (acpi_bus_get_device(handle, &device))
- return;
-
switch (type) {
case ACPI_NOTIFY_BUS_CHECK:
- ACPI_DEBUG_PRINT((ACPI_DB_INFO,
- "Received BUS CHECK notification for device [%s]\n",
- device->pnp.bus_id));
- result = acpi_bus_check_scope(device);
+ acpi_bus_check_scope(handle);
/*
* TBD: We'll need to outsource certain events to non-ACPI
* drivers via the device manager (device.c).
@@ -570,10 +542,7 @@ static void acpi_bus_notify(acpi_handle handle, u32 type, void *data)
break;
case ACPI_NOTIFY_DEVICE_CHECK:
- ACPI_DEBUG_PRINT((ACPI_DB_INFO,
- "Received DEVICE CHECK notification for device [%s]\n",
- device->pnp.bus_id));
- result = acpi_bus_check_device(device, NULL);
+ acpi_bus_check_device(handle);
/*
* TBD: We'll need to outsource certain events to non-ACPI
* drivers via the device manager (device.c).
@@ -581,44 +550,26 @@ static void acpi_bus_notify(acpi_handle handle, u32 type, void *data)
break;
case ACPI_NOTIFY_DEVICE_WAKE:
- ACPI_DEBUG_PRINT((ACPI_DB_INFO,
- "Received DEVICE WAKE notification for device [%s]\n",
- device->pnp.bus_id));
/* TBD */
break;
case ACPI_NOTIFY_EJECT_REQUEST:
- ACPI_DEBUG_PRINT((ACPI_DB_INFO,
- "Received EJECT REQUEST notification for device [%s]\n",
- device->pnp.bus_id));
/* TBD */
break;
case ACPI_NOTIFY_DEVICE_CHECK_LIGHT:
- ACPI_DEBUG_PRINT((ACPI_DB_INFO,
- "Received DEVICE CHECK LIGHT notification for device [%s]\n",
- device->pnp.bus_id));
/* TBD: Exactly what does 'light' mean? */
break;
case ACPI_NOTIFY_FREQUENCY_MISMATCH:
- ACPI_DEBUG_PRINT((ACPI_DB_INFO,
- "Received FREQUENCY MISMATCH notification for device [%s]\n",
- device->pnp.bus_id));
/* TBD */
break;
case ACPI_NOTIFY_BUS_MODE_MISMATCH:
- ACPI_DEBUG_PRINT((ACPI_DB_INFO,
- "Received BUS MODE MISMATCH notification for device [%s]\n",
- device->pnp.bus_id));
/* TBD */
break;
case ACPI_NOTIFY_POWER_FAULT:
- ACPI_DEBUG_PRINT((ACPI_DB_INFO,
- "Received POWER FAULT notification for device [%s]\n",
- device->pnp.bus_id));
/* TBD */
break;
@@ -629,7 +580,13 @@ static void acpi_bus_notify(acpi_handle handle, u32 type, void *data)
break;
}
- return;
+ acpi_bus_get_device(handle, &device);
+ if (device) {
+ driver = device->driver;
+ if (driver && driver->ops.notify &&
+ (driver->flags & ACPI_DRIVER_ALL_NOTIFY_EVENTS))
+ driver->ops.notify(device, type);
+ }
}
/* --------------------------------------------------------------------------
diff --git a/drivers/acpi/glue.c b/drivers/acpi/glue.c
index 8bd2c2a6884..a8a5c29958c 100644
--- a/drivers/acpi/glue.c
+++ b/drivers/acpi/glue.c
@@ -140,46 +140,6 @@ struct device *acpi_get_physical_device(acpi_handle handle)
EXPORT_SYMBOL(acpi_get_physical_device);
-/* ToDo: When a PCI bridge is found, return the PCI device behind the bridge
- * This should work in general, but did not on a Lenovo T61 for the
- * graphics card. But this must be fixed when the PCI device is
- * bound and the kernel device struct is attached to the acpi device
- * Note: A success call will increase reference count by one
- * Do call put_device(dev) on the returned device then
- */
-struct device *acpi_get_physical_pci_device(acpi_handle handle)
-{
- struct device *dev;
- long long device_id;
- acpi_status status;
-
- status =
- acpi_evaluate_integer(handle, "_ADR", NULL, &device_id);
-
- if (ACPI_FAILURE(status))
- return NULL;
-
- /* We need to attempt to determine whether the _ADR refers to a
- PCI device or not. There's no terribly good way to do this,
- so the best we can hope for is to assume that there'll never
- be a device in the host bridge */
- if (device_id >= 0x10000) {
- /* It looks like a PCI device. Does it exist? */
- dev = acpi_get_physical_device(handle);
- } else {
- /* It doesn't look like a PCI device. Does its parent
- exist? */
- acpi_handle phandle;
- if (acpi_get_parent(handle, &phandle))
- return NULL;
- dev = acpi_get_physical_device(phandle);
- }
- if (!dev)
- return NULL;
- return dev;
-}
-EXPORT_SYMBOL(acpi_get_physical_pci_device);
-
static int acpi_bind_one(struct device *dev, acpi_handle handle)
{
struct acpi_device *acpi_dev;
diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
index d916bea729f..71670719d61 100644
--- a/drivers/acpi/osl.c
+++ b/drivers/acpi/osl.c
@@ -79,6 +79,7 @@ static acpi_osd_handler acpi_irq_handler;
static void *acpi_irq_context;
static struct workqueue_struct *kacpid_wq;
static struct workqueue_struct *kacpi_notify_wq;
+static struct workqueue_struct *kacpi_hotplug_wq;
struct acpi_res_list {
resource_size_t start;
@@ -192,8 +193,10 @@ acpi_status acpi_os_initialize1(void)
{
kacpid_wq = create_singlethread_workqueue("kacpid");
kacpi_notify_wq = create_singlethread_workqueue("kacpi_notify");
+ kacpi_hotplug_wq = create_singlethread_workqueue("kacpi_hotplug");
BUG_ON(!kacpid_wq);
BUG_ON(!kacpi_notify_wq);
+ BUG_ON(!kacpi_hotplug_wq);
return AE_OK;
}
@@ -206,6 +209,7 @@ acpi_status acpi_os_terminate(void)
destroy_workqueue(kacpid_wq);
destroy_workqueue(kacpi_notify_wq);
+ destroy_workqueue(kacpi_hotplug_wq);
return AE_OK;
}
@@ -716,6 +720,7 @@ static acpi_status __acpi_os_execute(acpi_execute_type type,
acpi_status status = AE_OK;
struct acpi_os_dpc *dpc;
struct workqueue_struct *queue;
+ work_func_t func;
int ret;
ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
"Scheduling function [%p(%p)] for deferred execution.\n",
@@ -740,15 +745,17 @@ static acpi_status __acpi_os_execute(acpi_execute_type type,
dpc->function = function;
dpc->context = context;
- if (!hp) {
- INIT_WORK(&dpc->work, acpi_os_execute_deferred);
- queue = (type == OSL_NOTIFY_HANDLER) ?
- kacpi_notify_wq : kacpid_wq;
- ret = queue_work(queue, &dpc->work);
- } else {
- INIT_WORK(&dpc->work, acpi_os_execute_hp_deferred);
- ret = schedule_work(&dpc->work);
- }
+ /*
+ * We can't run hotplug code in keventd_wq/kacpid_wq/kacpid_notify_wq
+ * because the hotplug code may call driver .remove() functions,
+ * which invoke flush_scheduled_work/acpi_os_wait_events_complete
+ * to flush these workqueues.
+ */
+ queue = hp ? kacpi_hotplug_wq :
+ (type == OSL_NOTIFY_HANDLER ? kacpi_notify_wq : kacpid_wq);
+ func = hp ? acpi_os_execute_hp_deferred : acpi_os_execute_deferred;
+ INIT_WORK(&dpc->work, func);
+ ret = queue_work(queue, &dpc->work);
if (!ret) {
printk(KERN_ERR PREFIX
diff --git a/drivers/acpi/pci_bind.c b/drivers/acpi/pci_bind.c
index bc46de3d967..a5a77b78a72 100644
--- a/drivers/acpi/pci_bind.c
+++ b/drivers/acpi/pci_bind.c
@@ -24,12 +24,7 @@
*/
#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/init.h>
#include <linux/types.h>
-#include <linux/proc_fs.h>
-#include <linux/spinlock.h>
-#include <linux/pm.h>
#include <linux/pci.h>
#include <linux/acpi.h>
#include <acpi/acpi_bus.h>
@@ -38,310 +33,76 @@
#define _COMPONENT ACPI_PCI_COMPONENT
ACPI_MODULE_NAME("pci_bind");
-struct acpi_pci_data {
- struct acpi_pci_id id;
- struct pci_bus *bus;
- struct pci_dev *dev;
-};
-
-static int acpi_pci_unbind(struct acpi_device *device);
-
-static void acpi_pci_data_handler(acpi_handle handle, u32 function,
- void *context)
-{
-
- /* TBD: Anything we need to do here? */
-
- return;
-}
-
-/**
- * acpi_get_pci_id
- * ------------------
- * This function is used by the ACPI Interpreter (a.k.a. Core Subsystem)
- * to resolve PCI information for ACPI-PCI devices defined in the namespace.
- * This typically occurs when resolving PCI operation region information.
- */
-acpi_status acpi_get_pci_id(acpi_handle handle, struct acpi_pci_id *id)
+static int acpi_pci_unbind(struct acpi_device *device)
{
- int result = 0;
- acpi_status status = AE_OK;
- struct acpi_device *device = NULL;
- struct acpi_pci_data *data = NULL;
-
-
- if (!id)
- return AE_BAD_PARAMETER;
-
- result = acpi_bus_get_device(handle, &device);
- if (result) {
- printk(KERN_ERR PREFIX
- "Invalid ACPI Bus context for device %s\n",
- acpi_device_bid(device));
- return AE_NOT_EXIST;
- }
-
- status = acpi_get_data(handle, acpi_pci_data_handler, (void **)&data);
- if (ACPI_FAILURE(status) || !data) {
- ACPI_EXCEPTION((AE_INFO, status,
- "Invalid ACPI-PCI context for device %s",
- acpi_device_bid(device)));
- return status;
- }
+ struct pci_dev *dev;
- *id = data->id;
+ dev = acpi_get_pci_dev(device->handle);
+ if (!dev || !dev->subordinate)
+ goto out;
- /*
- id->segment = data->id.segment;
- id->bus = data->id.bus;
- id->device = data->id.device;
- id->function = data->id.function;
- */
+ acpi_pci_irq_del_prt(dev->subordinate);
- ACPI_DEBUG_PRINT((ACPI_DB_INFO,
- "Device %s has PCI address %04x:%02x:%02x.%d\n",
- acpi_device_bid(device), id->segment, id->bus,
- id->device, id->function));
+ device->ops.bind = NULL;
+ device->ops.unbind = NULL;
- return AE_OK;
+out:
+ pci_dev_put(dev);
+ return 0;
}
-EXPORT_SYMBOL(acpi_get_pci_id);
-
-int acpi_pci_bind(struct acpi_device *device)
+static int acpi_pci_bind(struct acpi_device *device)
{
- int result = 0;
acpi_status status;
- struct acpi_pci_data *data;
- struct acpi_pci_data *pdata;
- struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
acpi_handle handle;
+ struct pci_bus *bus;
+ struct pci_dev *dev;
- if (!device || !device->parent)
- return -EINVAL;
-
- data = kzalloc(sizeof(struct acpi_pci_data), GFP_KERNEL);
- if (!data)
- return -ENOMEM;
-
- status = acpi_get_name(device->handle, ACPI_FULL_PATHNAME, &buffer);
- if (ACPI_FAILURE(status)) {
- kfree(data);
- return -ENODEV;
- }
-
- ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Binding PCI device [%s]...\n",
- (char *)buffer.pointer));
-
- /*
- * Segment & Bus
- * -------------
- * These are obtained via the parent device's ACPI-PCI context.
- */
- status = acpi_get_data(device->parent->handle, acpi_pci_data_handler,
- (void **)&pdata);
- if (ACPI_FAILURE(status) || !pdata || !pdata->bus) {
- ACPI_EXCEPTION((AE_INFO, status,
- "Invalid ACPI-PCI context for parent device %s",
- acpi_device_bid(device->parent)));
- result = -ENODEV;
- goto end;
- }
- data->id.segment = pdata->id.segment;
- data->id.bus = pdata->bus->number;
-
- /*
- * Device & Function
- * -----------------
- * These are simply obtained from the device's _ADR method. Note
- * that a value of zero is valid.
- */
- data->id.device = device->pnp.bus_address >> 16;
- data->id.function = device->pnp.bus_address & 0xFFFF;
-
- ACPI_DEBUG_PRINT((ACPI_DB_INFO, "...to %04x:%02x:%02x.%d\n",
- data->id.segment, data->id.bus, data->id.device,
- data->id.function));
-
- /*
- * TBD: Support slot devices (e.g. function=0xFFFF).
- */
-
- /*
- * Locate PCI Device
- * -----------------
- * Locate matching device in PCI namespace. If it doesn't exist
- * this typically means that the device isn't currently inserted
- * (e.g. docking station, port replicator, etc.).
- */
- data->dev = pci_get_slot(pdata->bus,
- PCI_DEVFN(data->id.device, data->id.function));
- if (!data->dev) {
- ACPI_DEBUG_PRINT((ACPI_DB_INFO,
- "Device %04x:%02x:%02x.%d not present in PCI namespace\n",
- data->id.segment, data->id.bus,
- data->id.device, data->id.function));
- result = -ENODEV;
- goto end;
- }
- if (!data->dev->bus) {
- printk(KERN_ERR PREFIX
- "Device %04x:%02x:%02x.%d has invalid 'bus' field\n",
- data->id.segment, data->id.bus,
- data->id.device, data->id.function);
- result = -ENODEV;
- goto end;
- }
+ dev = acpi_get_pci_dev(device->handle);
+ if (!dev)
+ return 0;
/*
- * PCI Bridge?
- * -----------
- * If so, set the 'bus' field and install the 'bind' function to
- * facilitate callbacks for all of its children.
+ * Install the 'bind' function to facilitate callbacks for
+ * children of the P2P bridge.
*/
- if (data->dev->subordinate) {
+ if (dev->subordinate) {
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
"Device %04x:%02x:%02x.%d is a PCI bridge\n",
- data->id.segment, data->id.bus,
- data->id.device, data->id.function));
- data->bus = data->dev->subordinate;
+ pci_domain_nr(dev->bus), dev->bus->number,
+ PCI_SLOT(dev->devfn), PCI_FUNC(dev->devfn)));
device->ops.bind = acpi_pci_bind;
device->ops.unbind = acpi_pci_unbind;
}
/*
- * Attach ACPI-PCI Context
- * -----------------------
- * Thus binding the ACPI and PCI devices.
- */
- status = acpi_attach_data(device->handle, acpi_pci_data_handler, data);
- if (ACPI_FAILURE(status)) {
- ACPI_EXCEPTION((AE_INFO, status,
- "Unable to attach ACPI-PCI context to device %s",
- acpi_device_bid(device)));
- result = -ENODEV;
- goto end;
- }
-
- /*
- * PCI Routing Table
- * -----------------
- * Evaluate and parse _PRT, if exists. This code is independent of
- * PCI bridges (above) to allow parsing of _PRT objects within the
- * scope of non-bridge devices. Note that _PRTs within the scope of
- * a PCI bridge assume the bridge's subordinate bus number.
+ * Evaluate and parse _PRT, if exists. This code allows parsing of
+ * _PRT objects within the scope of non-bridge devices. Note that
+ * _PRTs within the scope of a PCI bridge assume the bridge's
+ * subordinate bus number.
*
* TBD: Can _PRTs exist within the scope of non-bridge PCI devices?
*/
status = acpi_get_handle(device->handle, METHOD_NAME__PRT, &handle);
- if (ACPI_SUCCESS(status)) {
- if (data->bus) /* PCI-PCI bridge */
- acpi_pci_irq_add_prt(device->handle, data->id.segment,
- data->bus->number);
- else /* non-bridge PCI device */
- acpi_pci_irq_add_prt(device->handle, data->id.segment,
- data->id.bus);
- }
-
- end:
- kfree(buffer.pointer);
- if (result) {
- pci_dev_put(data->dev);
- kfree(data);
- }
- return result;
-}
-
-static int acpi_pci_unbind(struct acpi_device *device)
-{
- int result = 0;
- acpi_status status;
- struct acpi_pci_data *data;
- struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
-
-
- if (!device || !device->parent)
- return -EINVAL;
-
- status = acpi_get_name(device->handle, ACPI_FULL_PATHNAME, &buffer);
if (ACPI_FAILURE(status))
- return -ENODEV;
+ goto out;
- ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Unbinding PCI device [%s]...\n",
- (char *) buffer.pointer));
- kfree(buffer.pointer);
+ if (dev->subordinate)
+ bus = dev->subordinate;
+ else
+ bus = dev->bus;
- status =
- acpi_get_data(device->handle, acpi_pci_data_handler,
- (void **)&data);
- if (ACPI_FAILURE(status)) {
- result = -ENODEV;
- goto end;
- }
+ acpi_pci_irq_add_prt(device->handle, bus);
- status = acpi_detach_data(device->handle, acpi_pci_data_handler);
- if (ACPI_FAILURE(status)) {
- ACPI_EXCEPTION((AE_INFO, status,
- "Unable to detach data from device %s",
- acpi_device_bid(device)));
- result = -ENODEV;
- goto end;
- }
- if (data->dev->subordinate) {
- acpi_pci_irq_del_prt(data->id.segment, data->bus->number);
- }
- pci_dev_put(data->dev);
- kfree(data);
-
- end:
- return result;
+out:
+ pci_dev_put(dev);
+ return 0;
}
-int
-acpi_pci_bind_root(struct acpi_device *device,
- struct acpi_pci_id *id, struct pci_bus *bus)
+int acpi_pci_bind_root(struct acpi_device *device)
{
- int result = 0;
- acpi_status status;
- struct acpi_pci_data *data = NULL;
- struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
-
- if (!device || !id || !bus) {
- return -EINVAL;
- }
-
- data = kzalloc(sizeof(struct acpi_pci_data), GFP_KERNEL);
- if (!data)
- return -ENOMEM;
-
- data->id = *id;
- data->bus = bus;
device->ops.bind = acpi_pci_bind;
device->ops.unbind = acpi_pci_unbind;
- status = acpi_get_name(device->handle, ACPI_FULL_PATHNAME, &buffer);
- if (ACPI_FAILURE(status)) {
- kfree (data);
- return -ENODEV;
- }
-
- ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Binding PCI root bridge [%s] to "
- "%04x:%02x\n", (char *)buffer.pointer,
- id->segment, id->bus));
-
- status = acpi_attach_data(device->handle, acpi_pci_data_handler, data);
- if (ACPI_FAILURE(status)) {
- ACPI_EXCEPTION((AE_INFO, status,
- "Unable to attach ACPI-PCI context to device %s",
- (char *)buffer.pointer));
- result = -ENODEV;
- goto end;
- }
-
- end:
- kfree(buffer.pointer);
- if (result != 0)
- kfree(data);
-
- return result;
+ return 0;
}
diff --git a/drivers/acpi/pci_irq.c b/drivers/acpi/pci_irq.c
index 2faa9e2ac89..b794eb88ab9 100644
--- a/drivers/acpi/pci_irq.c
+++ b/drivers/acpi/pci_irq.c
@@ -182,7 +182,7 @@ static void do_prt_fixups(struct acpi_prt_entry *entry,
}
}
-static int acpi_pci_irq_add_entry(acpi_handle handle, int segment, int bus,
+static int acpi_pci_irq_add_entry(acpi_handle handle, struct pci_bus *bus,
struct acpi_pci_routing_table *prt)
{
struct acpi_prt_entry *entry;
@@ -196,8 +196,8 @@ static int acpi_pci_irq_add_entry(acpi_handle handle, int segment, int bus,
* 1=INTA, 2=INTB. We use the PCI encoding throughout, so convert
* it here.
*/
- entry->id.segment = segment;
- entry->id.bus = bus;
+ entry->id.segment = pci_domain_nr(bus);
+ entry->id.bus = bus->number;
entry->id.device = (prt->address >> 16) & 0xFFFF;
entry->pin = prt->pin + 1;
@@ -242,7 +242,7 @@ static int acpi_pci_irq_add_entry(acpi_handle handle, int segment, int bus,
return 0;
}
-int acpi_pci_irq_add_prt(acpi_handle handle, int segment, int bus)
+int acpi_pci_irq_add_prt(acpi_handle handle, struct pci_bus *bus)
{
acpi_status status;
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
@@ -271,7 +271,7 @@ int acpi_pci_irq_add_prt(acpi_handle handle, int segment, int bus)
entry = buffer.pointer;
while (entry && (entry->length > 0)) {
- acpi_pci_irq_add_entry(handle, segment, bus, entry);
+ acpi_pci_irq_add_entry(handle, bus, entry);
entry = (struct acpi_pci_routing_table *)
((unsigned long)entry + entry->length);
}
@@ -280,16 +280,17 @@ int acpi_pci_irq_add_prt(acpi_handle handle, int segment, int bus)
return 0;
}
-void acpi_pci_irq_del_prt(int segment, int bus)
+void acpi_pci_irq_del_prt(struct pci_bus *bus)
{
struct acpi_prt_entry *entry, *tmp;
printk(KERN_DEBUG
"ACPI: Delete PCI Interrupt Routing Table for %04x:%02x\n",
- segment, bus);
+ pci_domain_nr(bus), bus->number);
spin_lock(&acpi_prt_lock);
list_for_each_entry_safe(entry, tmp, &acpi_prt_list, list) {
- if (segment == entry->id.segment && bus == entry->id.bus) {
+ if (pci_domain_nr(bus) == entry->id.segment
+ && bus->number == entry->id.bus) {
list_del(&entry->list);
kfree(entry);
}
diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c
index 196f97d0095..55b5b90c2a4 100644
--- a/drivers/acpi/pci_root.c
+++ b/drivers/acpi/pci_root.c
@@ -63,9 +63,10 @@ static struct acpi_driver acpi_pci_root_driver = {
struct acpi_pci_root {
struct list_head node;
- struct acpi_device * device;
- struct acpi_pci_id id;
+ struct acpi_device *device;
struct pci_bus *bus;
+ u16 segment;
+ u8 bus_nr;
u32 osc_support_set; /* _OSC state of support bits */
u32 osc_control_set; /* _OSC state of control bits */
@@ -82,7 +83,7 @@ static DEFINE_MUTEX(osc_lock);
int acpi_pci_register_driver(struct acpi_pci_driver *driver)
{
int n = 0;
- struct list_head *entry;
+ struct acpi_pci_root *root;
struct acpi_pci_driver **pptr = &sub_driver;
while (*pptr)
@@ -92,9 +93,7 @@ int acpi_pci_register_driver(struct acpi_pci_driver *driver)
if (!driver->add)
return 0;
- list_for_each(entry, &acpi_pci_roots) {
- struct acpi_pci_root *root;
- root = list_entry(entry, struct acpi_pci_root, node);
+ list_for_each_entry(root, &acpi_pci_roots, node) {
driver->add(root->device->handle);
n++;
}
@@ -106,7 +105,7 @@ EXPORT_SYMBOL(acpi_pci_register_driver);
void acpi_pci_unregister_driver(struct acpi_pci_driver *driver)
{
- struct list_head *entry;
+ struct acpi_pci_root *root;
struct acpi_pci_driver **pptr = &sub_driver;
while (*pptr) {
@@ -120,28 +119,48 @@ void acpi_pci_unregister_driver(struct acpi_pci_driver *driver)
if (!driver->remove)
return;
- list_for_each(entry, &acpi_pci_roots) {
- struct acpi_pci_root *root;
- root = list_entry(entry, struct acpi_pci_root, node);
+ list_for_each_entry(root, &acpi_pci_roots, node)
driver->remove(root->device->handle);
- }
}
EXPORT_SYMBOL(acpi_pci_unregister_driver);
acpi_handle acpi_get_pci_rootbridge_handle(unsigned int seg, unsigned int bus)
{
- struct acpi_pci_root *tmp;
+ struct acpi_pci_root *root;
- list_for_each_entry(tmp, &acpi_pci_roots, node) {
- if ((tmp->id.segment == (u16) seg) && (tmp->id.bus == (u16) bus))
- return tmp->device->handle;
- }
+ list_for_each_entry(root, &acpi_pci_roots, node)
+ if ((root->segment == (u16) seg) && (root->bus_nr == (u16) bus))
+ return root->device->handle;
return NULL;
}
EXPORT_SYMBOL_GPL(acpi_get_pci_rootbridge_handle);
+/**
+ * acpi_is_root_bridge - determine whether an ACPI CA node is a PCI root bridge
+ * @handle - the ACPI CA node in question.
+ *
+ * Note: we could make this API take a struct acpi_device * instead, but
+ * for now, it's more convenient to operate on an acpi_handle.
+ */
+int acpi_is_root_bridge(acpi_handle handle)
+{
+ int ret;
+ struct acpi_device *device;
+
+ ret = acpi_bus_get_device(handle, &device);
+ if (ret)
+ return 0;
+
+ ret = acpi_match_device_ids(device, root_device_ids);
+ if (ret)
+ return 0;
+ else
+ return 1;
+}
+EXPORT_SYMBOL_GPL(acpi_is_root_bridge);
+
static acpi_status
get_root_bridge_busnr_callback(struct acpi_resource *resource, void *data)
{
@@ -161,19 +180,22 @@ get_root_bridge_busnr_callback(struct acpi_resource *resource, void *data)
return AE_OK;
}
-static acpi_status try_get_root_bridge_busnr(acpi_handle handle, int *busnum)
+static acpi_status try_get_root_bridge_busnr(acpi_handle handle,
+ unsigned long long *bus)
{
acpi_status status;
+ int busnum;
- *busnum = -1;
+ busnum = -1;
status =
acpi_walk_resources(handle, METHOD_NAME__CRS,
- get_root_bridge_busnr_callback, busnum);
+ get_root_bridge_busnr_callback, &busnum);
if (ACPI_FAILURE(status))
return status;
/* Check if we really get a bus number from _CRS */
- if (*busnum == -1)
+ if (busnum == -1)
return AE_ERROR;
+ *bus = busnum;
return AE_OK;
}
@@ -298,6 +320,7 @@ static acpi_status acpi_pci_osc_support(struct acpi_pci_root *root, u32 flags)
static struct acpi_pci_root *acpi_pci_find_root(acpi_handle handle)
{
struct acpi_pci_root *root;
+
list_for_each_entry(root, &acpi_pci_roots, node) {
if (root->device->handle == handle)
return root;
@@ -305,6 +328,87 @@ static struct acpi_pci_root *acpi_pci_find_root(acpi_handle handle)
return NULL;
}
+struct acpi_handle_node {
+ struct list_head node;
+ acpi_handle handle;
+};
+
+/**
+ * acpi_get_pci_dev - convert ACPI CA handle to struct pci_dev
+ * @handle: the handle in question
+ *
+ * Given an ACPI CA handle, the desired PCI device is located in the
+ * list of PCI devices.
+ *
+ * If the device is found, its reference count is increased and this
+ * function returns a pointer to its data structure. The caller must
+ * decrement the reference count by calling pci_dev_put().
+ * If no device is found, %NULL is returned.
+ */
+struct pci_dev *acpi_get_pci_dev(acpi_handle handle)
+{
+ int dev, fn;
+ unsigned long long adr;
+ acpi_status status;
+ acpi_handle phandle;
+ struct pci_bus *pbus;
+ struct pci_dev *pdev = NULL;
+ struct acpi_handle_node *node, *tmp;
+ struct acpi_pci_root *root;
+ LIST_HEAD(device_list);
+
+ /*
+ * Walk up the ACPI CA namespace until we reach a PCI root bridge.
+ */
+ phandle = handle;
+ while (!acpi_is_root_bridge(phandle)) {
+ node = kzalloc(sizeof(struct acpi_handle_node), GFP_KERNEL);
+ if (!node)
+ goto out;
+
+ INIT_LIST_HEAD(&node->node);
+ node->handle = phandle;
+ list_add(&node->node, &device_list);
+
+ status = acpi_get_parent(phandle, &phandle);
+ if (ACPI_FAILURE(status))
+ goto out;
+ }
+
+ root = acpi_pci_find_root(phandle);
+ if (!root)
+ goto out;
+
+ pbus = root->bus;
+
+ /*
+ * Now, walk back down the PCI device tree until we return to our
+ * original handle. Assumes that everything between the PCI root
+ * bridge and the device we're looking for must be a P2P bridge.
+ */
+ list_for_each_entry(node, &device_list, node) {
+ acpi_handle hnd = node->handle;
+ status = acpi_evaluate_integer(hnd, "_ADR", NULL, &adr);
+ if (ACPI_FAILURE(status))
+ goto out;
+ dev = (adr >> 16) & 0xffff;
+ fn = adr & 0xffff;
+
+ pdev = pci_get_slot(pbus, PCI_DEVFN(dev, fn));
+ if (!pdev || hnd == handle)
+ break;
+
+ pbus = pdev->subordinate;
+ pci_dev_put(pdev);
+ }
+out:
+ list_for_each_entry_safe(node, tmp, &device_list, node)
+ kfree(node);
+
+ return pdev;
+}
+EXPORT_SYMBOL_GPL(acpi_get_pci_dev);
+
/**
* acpi_pci_osc_control_set - commit requested control to Firmware
* @handle: acpi_handle for the target ACPI object
@@ -363,31 +467,46 @@ EXPORT_SYMBOL(acpi_pci_osc_control_set);
static int __devinit acpi_pci_root_add(struct acpi_device *device)
{
- int result = 0;
- struct acpi_pci_root *root = NULL;
- struct acpi_pci_root *tmp;
- acpi_status status = AE_OK;
- unsigned long long value = 0;
- acpi_handle handle = NULL;
+ unsigned long long segment, bus;
+ acpi_status status;
+ int result;
+ struct acpi_pci_root *root;
+ acpi_handle handle;
struct acpi_device *child;
u32 flags, base_flags;
+ segment = 0;
+ status = acpi_evaluate_integer(device->handle, METHOD_NAME__SEG, NULL,
+ &segment);
+ if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
+ printk(KERN_ERR PREFIX "can't evaluate _SEG\n");
+ return -ENODEV;
+ }
- if (!device)
- return -EINVAL;
+ /* Check _CRS first, then _BBN. If no _BBN, default to zero. */
+ bus = 0;
+ status = try_get_root_bridge_busnr(device->handle, &bus);
+ if (ACPI_FAILURE(status)) {
+ status = acpi_evaluate_integer(device->handle, METHOD_NAME__BBN, NULL, &bus);
+ if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
+ printk(KERN_ERR PREFIX
+ "no bus number in _CRS and can't evaluate _BBN\n");
+ return -ENODEV;
+ }
+ }
root = kzalloc(sizeof(struct acpi_pci_root), GFP_KERNEL);
if (!root)
return -ENOMEM;
- INIT_LIST_HEAD(&root->node);
+ INIT_LIST_HEAD(&root->node);
root->device = device;
+ root->segment = segment & 0xFFFF;
+ root->bus_nr = bus & 0xFF;
strcpy(acpi_device_name(device), ACPI_PCI_ROOT_DEVICE_NAME);
strcpy(acpi_device_class(device), ACPI_PCI_ROOT_CLASS);
device->driver_data = root;
- device->ops.bind = acpi_pci_bind;
-
/*
* All supported architectures that use ACPI have support for
* PCI domains, so we indicate this in _OSC support capabilities.
@@ -395,79 +514,6 @@ static int __devinit acpi_pci_root_add(struct acpi_device *device)
flags = base_flags = OSC_PCI_SEGMENT_GROUPS_SUPPORT;
acpi_pci_osc_support(root, flags);
- /*
- * Segment
- * -------
- * Obtained via _SEG, if exists, otherwise assumed to be zero (0).
- */
- status = acpi_evaluate_integer(device->handle, METHOD_NAME__SEG, NULL,
- &value);
- switch (status) {
- case AE_OK:
- root->id.segment = (u16) value;
- break;
- case AE_NOT_FOUND:
- ACPI_DEBUG_PRINT((ACPI_DB_INFO,
- "Assuming segment 0 (no _SEG)\n"));
- root->id.segment = 0;
- break;
- default:
- ACPI_EXCEPTION((AE_INFO, status, "Evaluating _SEG"));
- result = -ENODEV;
- goto end;
- }
-
- /*
- * Bus
- * ---
- * Obtained via _BBN, if exists, otherwise assumed to be zero (0).
- */
- status = acpi_evaluate_integer(device->handle, METHOD_NAME__BBN, NULL,
- &value);
- switch (status) {
- case AE_OK:
- root->id.bus = (u16) value;
- break;
- case AE_NOT_FOUND:
- ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Assuming bus 0 (no _BBN)\n"));
- root->id.bus = 0;
- break;
- default:
- ACPI_EXCEPTION((AE_INFO, status, "Evaluating _BBN"));
- result = -ENODEV;
- goto end;
- }
-
- /* Some systems have wrong _BBN */
- list_for_each_entry(tmp, &acpi_pci_roots, node) {
- if ((tmp->id.segment == root->id.segment)
- && (tmp->id.bus == root->id.bus)) {
- int bus = 0;
- acpi_status status;
-
- printk(KERN_ERR PREFIX
- "Wrong _BBN value, reboot"
- " and use option 'pci=noacpi'\n");
-
- status = try_get_root_bridge_busnr(device->handle, &bus);
- if (ACPI_FAILURE(status))
- break;
- if (bus != root->id.bus) {
- printk(KERN_INFO PREFIX
- "PCI _CRS %d overrides _BBN 0\n", bus);
- root->id.bus = bus;
- }
- break;
- }
- }
- /*
- * Device & Function
- * -----------------
- * Obtained from _ADR (which has already been evaluated for us).
- */
- root->id.device = device->pnp.bus_address >> 16;
- root->id.function = device->pnp.bus_address & 0xFFFF;
-
/*
* TBD: Need PCI interface for enumeration/configuration of roots.
*/
@@ -477,7 +523,7 @@ static int __devinit acpi_pci_root_add(struct acpi_device *device)
printk(KERN_INFO PREFIX "%s [%s] (%04x:%02x)\n",
acpi_device_name(device), acpi_device_bid(device),
- root->id.segment, root->id.bus);
+ root->segment, root->bus_nr);
/*
* Scan the Root Bridge
@@ -486,11 +532,11 @@ static int __devinit acpi_pci_root_add(struct acpi_device *device)
* PCI namespace does not get created until this call is made (and
* thus the root bridge's pci_dev does not exist).
*/
- root->bus = pci_acpi_scan_root(device, root->id.segment, root->id.bus);
+ root->bus = pci_acpi_scan_root(device, segment, bus);
if (!root->bus) {
printk(KERN_ERR PREFIX
"Bus %04x:%02x not present in PCI namespace\n",
- root->id.segment, root->id.bus);
+ root->segment, root->bus_nr);
result = -ENODEV;
goto end;
}
@@ -500,7 +546,7 @@ static int __devinit acpi_pci_root_add(struct acpi_device *device)
* -----------------------
* Thus binding the ACPI and PCI devices.
*/
- result = acpi_pci_bind_root(device, &root->id, root->bus);
+ result = acpi_pci_bind_root(device);
if (result)
goto end;
@@ -511,8 +557,7 @@ static int __devinit acpi_pci_root_add(struct acpi_device *device)
*/
status = acpi_get_handle(device->handle, METHOD_NAME__PRT, &handle);
if (ACPI_SUCCESS(status))
- result = acpi_pci_irq_add_prt(device->handle, root->id.segment,
- root->id.bus);
+ result = acpi_pci_irq_add_prt(device->handle, root->bus);
/*
* Scan and bind all _ADR-Based Devices
@@ -531,42 +576,28 @@ static int __devinit acpi_pci_root_add(struct acpi_device *device)
if (flags != base_flags)
acpi_pci_osc_support(root, flags);
- end:
- if (result) {
- if (!list_empty(&root->node))
- list_del(&root->node);
- kfree(root);
- }
+ return 0;
+end:
+ if (!list_empty(&root->node))
+ list_del(&root->node);
+ kfree(root);
return result;
}
static int acpi_pci_root_start(struct acpi_device *device)
{
- struct acpi_pci_root *root;
+ struct acpi_pci_root *root = acpi_driver_data(device);
-
- list_for_each_entry(root, &acpi_pci_roots, node) {
- if (root->device == device) {
- pci_bus_add_devices(root->bus);
- return 0;
- }
- }
- return -ENODEV;
+ pci_bus_add_devices(root->bus);
+ return 0;
}
static int acpi_pci_root_remove(struct acpi_device *device, int type)
{
- struct acpi_pci_root *root = NULL;
-
-
- if (!device || !acpi_driver_data(device))
- return -EINVAL;
-
- root = acpi_driver_data(device);
+ struct acpi_pci_root *root = acpi_driver_data(device);
kfree(root);
-
return 0;
}
diff --git a/drivers/acpi/power.c b/drivers/acpi/power.c
index 56665a63bf1..d74365d4a6e 100644
--- a/drivers/acpi/power.c
+++ b/drivers/acpi/power.c
@@ -194,7 +194,7 @@ static int acpi_power_get_list_state(struct acpi_handle_list *list, int *state)
static int acpi_power_on(acpi_handle handle, struct acpi_device *dev)
{
- int result = 0, state;
+ int result = 0;
int found = 0;
acpi_status status = AE_OK;
struct acpi_power_resource *resource = NULL;
@@ -236,18 +236,6 @@ static int acpi_power_on(acpi_handle handle, struct acpi_device *dev)
if (ACPI_FAILURE(status))
return -ENODEV;
- if (!acpi_power_nocheck) {
- /*
- * If acpi_power_nocheck is set, it is unnecessary to check
- * the power state after power transition.
- */
- result = acpi_power_get_state(resource->device->handle,
- &state);
- if (result)
- return result;
- if (state != ACPI_POWER_RESOURCE_STATE_ON)
- return -ENOEXEC;
- }
/* Update the power resource's _device_ power state */
resource->device->power.state = ACPI_STATE_D0;
@@ -258,7 +246,7 @@ static int acpi_power_on(acpi_handle handle, struct acpi_device *dev)
static int acpi_power_off_device(acpi_handle handle, struct acpi_device *dev)
{
- int result = 0, state;
+ int result = 0;
acpi_status status = AE_OK;
struct acpi_power_resource *resource = NULL;
struct list_head *node, *next;
@@ -293,18 +281,6 @@ static int acpi_power_off_device(acpi_handle handle, struct acpi_device *dev)
if (ACPI_FAILURE(status))
return -ENODEV;
- if (!acpi_power_nocheck) {
- /*
- * If acpi_power_nocheck is set, it is unnecessary to check
- * the power state after power transition.
- */
- result = acpi_power_get_state(handle, &state);
- if (result)
- return result;
- if (state != ACPI_POWER_RESOURCE_STATE_OFF)
- return -ENOEXEC;
- }
-
/* Update the power resource's _device_ power state */
resource->device->power.state = ACPI_STATE_D3;
diff --git a/drivers/acpi/processor_core.c b/drivers/acpi/processor_core.c
index 23f0fb84f1c..84e0f3c0744 100644
--- a/drivers/acpi/processor_core.c
+++ b/drivers/acpi/processor_core.c
@@ -89,7 +89,7 @@ static int acpi_processor_handle_eject(struct acpi_processor *pr);
static const struct acpi_device_id processor_device_ids[] = {
{ACPI_PROCESSOR_OBJECT_HID, 0},
- {ACPI_PROCESSOR_HID, 0},
+ {"ACPI0007", 0},
{"", 0},
};
MODULE_DEVICE_TABLE(acpi, processor_device_ids);
@@ -596,7 +596,21 @@ static int acpi_processor_get_info(struct acpi_device *device)
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
"No bus mastering arbitration control\n"));
- if (!strcmp(acpi_device_hid(device), ACPI_PROCESSOR_HID)) {
+ if (!strcmp(acpi_device_hid(device), ACPI_PROCESSOR_OBJECT_HID)) {
+ /* Declared with "Processor" statement; match ProcessorID */
+ status = acpi_evaluate_object(pr->handle, NULL, NULL, &buffer);
+ if (ACPI_FAILURE(status)) {
+ printk(KERN_ERR PREFIX "Evaluating processor object\n");
+ return -ENODEV;
+ }
+
+ /*
+ * TBD: Synch processor ID (via LAPIC/LSAPIC structures) on SMP.
+ * >>> 'acpi_get_processor_id(acpi_id, &id)' in
+ * arch/xxx/acpi.c
+ */
+ pr->acpi_id = object.processor.proc_id;
+ } else {
/*
* Declared with "Device" statement; match _UID.
* Note that we don't handle string _UIDs yet.
@@ -611,20 +625,6 @@ static int acpi_processor_get_info(struct acpi_device *device)
}
device_declaration = 1;
pr->acpi_id = value;
- } else {
- /* Declared with "Processor" statement; match ProcessorID */
- status = acpi_evaluate_object(pr->handle, NULL, NULL, &buffer);
- if (ACPI_FAILURE(status)) {
- printk(KERN_ERR PREFIX "Evaluating processor object\n");
- return -ENODEV;
- }
-
- /*
- * TBD: Synch processor ID (via LAPIC/LSAPIC structures) on SMP.
- * >>> 'acpi_get_processor_id(acpi_id, &id)' in
- * arch/xxx/acpi.c
- */
- pr->acpi_id = object.processor.proc_id;
}
cpu_index = get_cpu_id(pr->handle, device_declaration, pr->acpi_id);
@@ -649,7 +649,16 @@ static int acpi_processor_get_info(struct acpi_device *device)
return -ENODEV;
}
}
-
+ /*
+ * On some boxes several processors use the same processor bus id.
+ * But they are located in different scope. For example:
+ * \_SB.SCK0.CPU0
+ * \_SB.SCK1.CPU0
+ * Rename the processor device bus id. And the new bus id will be
+ * generated as the following format:
+ * CPU+CPU ID.
+ */
+ sprintf(acpi_device_bid(device), "CPU%X", pr->id);
ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Processor [%d:%d]\n", pr->id,
pr->acpi_id));
@@ -731,6 +740,8 @@ static int __cpuinit acpi_processor_start(struct acpi_device *device)
/* _PDC call should be done before doing anything else (if reqd.). */
arch_acpi_processor_init_pdc(pr);
acpi_processor_set_pdc(pr);
+ arch_acpi_processor_cleanup_pdc(pr);
+
#ifdef CONFIG_CPU_FREQ
acpi_processor_ppc_has_changed(pr);
#endif
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index 10a2d913635..0efa59e7e3a 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -139,7 +139,7 @@ static void acpi_safe_halt(void)
* are affected too. We pick the most conservative approach: we assume
* that the local APIC stops in both C2 and C3.
*/
-static void acpi_timer_check_state(int state, struct acpi_processor *pr,
+static void lapic_timer_check_state(int state, struct acpi_processor *pr,
struct acpi_processor_cx *cx)
{
struct acpi_processor_power *pwr = &pr->power;
@@ -162,7 +162,7 @@ static void acpi_timer_check_state(int state, struct acpi_processor *pr,
pr->power.timer_broadcast_on_state = state;
}
-static void acpi_propagate_timer_broadcast(struct acpi_processor *pr)
+static void lapic_timer_propagate_broadcast(struct acpi_processor *pr)
{
unsigned long reason;
@@ -173,7 +173,7 @@ static void acpi_propagate_timer_broadcast(struct acpi_processor *pr)
}
/* Power(C) State timer broadcast control */
-static void acpi_state_timer_broadcast(struct acpi_processor *pr,
+static void lapic_timer_state_broadcast(struct acpi_processor *pr,
struct acpi_processor_cx *cx,
int broadcast)
{
@@ -190,10 +190,10 @@ static void acpi_state_timer_broadcast(struct acpi_processor *pr,
#else
-static void acpi_timer_check_state(int state, struct acpi_processor *pr,
+static void lapic_timer_check_state(int state, struct acpi_processor *pr,
struct acpi_processor_cx *cstate) { }
-static void acpi_propagate_timer_broadcast(struct acpi_processor *pr) { }
-static void acpi_state_timer_broadcast(struct acpi_processor *pr,
+static void lapic_timer_propagate_broadcast(struct acpi_processor *pr) { }
+static void lapic_timer_state_broadcast(struct acpi_processor *pr,
struct acpi_processor_cx *cx,
int broadcast)
{
@@ -515,7 +515,8 @@ static void acpi_processor_power_verify_c2(struct acpi_processor_cx *cx)
static void acpi_processor_power_verify_c3(struct acpi_processor *pr,
struct acpi_processor_cx *cx)
{
- static int bm_check_flag;
+ static int bm_check_flag = -1;
+ static int bm_control_flag = -1;
if (!cx->address)
@@ -545,12 +546,14 @@ static void acpi_processor_power_verify_c3(struct acpi_processor *pr,
}
/* All the logic here assumes flags.bm_check is same across all CPUs */
- if (!bm_check_flag) {
+ if (bm_check_flag == -1) {
/* Determine whether bm_check is needed based on CPU */
acpi_processor_power_init_bm_check(&(pr->flags), pr->id);
bm_check_flag = pr->flags.bm_check;
+ bm_control_flag = pr->flags.bm_control;
} else {
pr->flags.bm_check = bm_check_flag;
+ pr->flags.bm_control = bm_control_flag;
}
if (pr->flags.bm_check) {
@@ -614,29 +617,25 @@ static int acpi_processor_power_verify(struct acpi_processor *pr)
switch (cx->type) {
case ACPI_STATE_C1:
cx->valid = 1;
- acpi_timer_check_state(i, pr, cx);
break;
case ACPI_STATE_C2:
acpi_processor_power_verify_c2(cx);
- if (cx->valid)
- acpi_timer_check_state(i, pr, cx);
break;
case ACPI_STATE_C3:
acpi_processor_power_verify_c3(pr, cx);
- if (cx->valid)
- acpi_timer_check_state(i, pr, cx);
break;
}
- if (cx->valid)
- tsc_check_state(cx->type);
+ if (!cx->valid)
+ continue;
- if (cx->valid)
- working++;
+ lapic_timer_check_state(i, pr, cx);
+ tsc_check_state(cx->type);
+ working++;
}
- acpi_propagate_timer_broadcast(pr);
+ lapic_timer_propagate_broadcast(pr);
return (working);
}
@@ -839,7 +838,7 @@ static int acpi_idle_enter_c1(struct cpuidle_device *dev,
return 0;
}
- acpi_state_timer_broadcast(pr, cx, 1);
+ lapic_timer_state_broadcast(pr, cx, 1);
kt1 = ktime_get_real();
acpi_idle_do_entry(cx);
kt2 = ktime_get_real();
@@ -847,7 +846,7 @@ static int acpi_idle_enter_c1(struct cpuidle_device *dev,
local_irq_enable();
cx->usage++;
- acpi_state_timer_broadcast(pr, cx, 0);
+ lapic_timer_state_broadcast(pr, cx, 0);
return idle_time;
}
@@ -892,7 +891,7 @@ static int acpi_idle_enter_simple(struct cpuidle_device *dev,
* Must be done before busmaster disable as we might need to
* access HPET !
*/
- acpi_state_timer_broadcast(pr, cx, 1);
+ lapic_timer_state_broadcast(pr, cx, 1);
if (cx->type == ACPI_STATE_C3)
ACPI_FLUSH_CPU_CACHE();
@@ -914,7 +913,7 @@ static int acpi_idle_enter_simple(struct cpuidle_device *dev,
cx->usage++;
- acpi_state_timer_broadcast(pr, cx, 0);
+ lapic_timer_state_broadcast(pr, cx, 0);
cx->time += sleep_ticks;
return idle_time;
}
@@ -981,7 +980,7 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev,
* Must be done before busmaster disable as we might need to
* access HPET !
*/
- acpi_state_timer_broadcast(pr, cx, 1);
+ lapic_timer_state_broadcast(pr, cx, 1);
kt1 = ktime_get_real();
/*
@@ -1026,7 +1025,7 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev,
cx->usage++;
- acpi_state_timer_broadcast(pr, cx, 0);
+ lapic_timer_state_broadcast(pr, cx, 0);
cx->time += sleep_ticks;
return idle_time;
}
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index 8ff510b91d8..781435d7e36 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -95,7 +95,7 @@ acpi_device_modalias_show(struct device *dev, struct device_attribute *attr, cha
}
static DEVICE_ATTR(modalias, 0444, acpi_device_modalias_show, NULL);
-static int acpi_bus_hot_remove_device(void *context)
+static void acpi_bus_hot_remove_device(void *context)
{
struct acpi_device *device;
acpi_handle handle = context;
@@ -104,10 +104,10 @@ static int acpi_bus_hot_remove_device(void *context)
acpi_status status = AE_OK;
if (acpi_bus_get_device(handle, &device))
- return 0;
+ return;
if (!device)
- return 0;
+ return;
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
"Hot-removing device %s...\n", dev_name(&device->dev)));
@@ -115,7 +115,7 @@ static int acpi_bus_hot_remove_device(void *context)
if (acpi_bus_trim(device, 1)) {
printk(KERN_ERR PREFIX
"Removing device failed\n");
- return -1;
+ return;
}
/* power off device */
@@ -142,9 +142,10 @@ static int acpi_bus_hot_remove_device(void *context)
*/
status = acpi_evaluate_object(handle, "_EJ0", &arg_list, NULL);
if (ACPI_FAILURE(status))
- return -ENODEV;
+ printk(KERN_WARNING PREFIX
+ "Eject device failed\n");
- return 0;
+ return;
}
static ssize_t
@@ -155,7 +156,6 @@ acpi_eject_store(struct device *d, struct device_attribute *attr,
acpi_status status;
acpi_object_type type = 0;
struct acpi_device *acpi_device = to_acpi_device(d);
- struct task_struct *task;
if ((!count) || (buf[0] != '1')) {
return -EINVAL;
@@ -172,11 +172,7 @@ acpi_eject_store(struct device *d, struct device_attribute *attr,
goto err;
}
- /* remove the device in another thread to fix the deadlock issue */
- task = kthread_run(acpi_bus_hot_remove_device,
- acpi_device->handle, "acpi_hot_remove_device");
- if (IS_ERR(task))
- ret = PTR_ERR(task);
+ acpi_os_hotplug_execute(acpi_bus_hot_remove_device, acpi_device->handle);
err:
return ret;
}
@@ -198,12 +194,12 @@ acpi_device_path_show(struct device *dev, struct device_attribute *attr, char *b
int result;
result = acpi_get_name(acpi_dev->handle, ACPI_FULL_PATHNAME, &path);
- if(result)
+ if (result)
goto end;
result = sprintf(buf, "%s\n", (char*)path.pointer);
kfree(path.pointer);
- end:
+end:
return result;
}
static DEVICE_ATTR(path, 0444, acpi_device_path_show, NULL);
@@ -217,21 +213,21 @@ static int acpi_device_setup_files(struct acpi_device *dev)
/*
* Devices gotten from FADT don't have a "path" attribute
*/
- if(dev->handle) {
+ if (dev->handle) {
result = device_create_file(&dev->dev, &dev_attr_path);
- if(result)
+ if (result)
goto end;
}
- if(dev->flags.hardware_id) {
+ if (dev->flags.hardware_id) {
result = device_create_file(&dev->dev, &dev_attr_hid);
- if(result)
+ if (result)
goto end;
}
- if (dev->flags.hardware_id || dev->flags.compatible_ids){
+ if (dev->flags.hardware_id || dev->flags.compatible_ids) {
result = device_create_file(&dev->dev, &dev_attr_modalias);
- if(result)
+ if (result)
goto end;
}
@@ -242,7 +238,7 @@ static int acpi_device_setup_files(struct acpi_device *dev)
status = acpi_get_handle(dev->handle, "_EJ0", &temp);
if (ACPI_SUCCESS(status))
result = device_create_file(&dev->dev, &dev_attr_eject);
- end:
+end:
return result;
}
@@ -262,9 +258,9 @@ static void acpi_device_remove_files(struct acpi_device *dev)
if (dev->flags.hardware_id || dev->flags.compatible_ids)
device_remove_file(&dev->dev, &dev_attr_modalias);
- if(dev->flags.hardware_id)
+ if (dev->flags.hardware_id)
device_remove_file(&dev->dev, &dev_attr_hid);
- if(dev->handle)
+ if (dev->handle)
device_remove_file(&dev->dev, &dev_attr_path);
}
/* --------------------------------------------------------------------------
@@ -512,7 +508,7 @@ static int acpi_device_register(struct acpi_device *device,
break;
}
}
- if(!found) {
+ if (!found) {
acpi_device_bus_id = new_bus_id;
strcpy(acpi_device_bus_id->bus_id, device->flags.hardware_id ? device->pnp.hardware_id : "device");
acpi_device_bus_id->instance_no = 0;
@@ -530,22 +526,21 @@ static int acpi_device_register(struct acpi_device *device,
if (device->parent)
device->dev.parent = &parent->dev;
device->dev.bus = &acpi_bus_type;
- device_initialize(&device->dev);
device->dev.release = &acpi_device_release;
- result = device_add(&device->dev);
- if(result) {
- dev_err(&device->dev, "Error adding device\n");
+ result = device_register(&device->dev);
+ if (result) {
+ dev_err(&device->dev, "Error registering device\n");
goto end;
}
result = acpi_device_setup_files(device);
- if(result)
+ if (result)
printk(KERN_ERR PREFIX "Error creating sysfs interface for device %s\n",
dev_name(&device->dev));
device->removal_type = ACPI_BUS_REMOVAL_NORMAL;
return 0;
- end:
+end:
mutex_lock(&acpi_device_lock);
if (device->parent)
list_del(&device->node);
@@ -577,7 +572,7 @@ static void acpi_device_unregister(struct acpi_device *device, int type)
* @device: the device to add and initialize
* @driver: driver for the device
*
- * Used to initialize a device via its device driver. Called whenever a
+ * Used to initialize a device via its device driver. Called whenever a
* driver is bound to a device. Invokes the driver's add() ops.
*/
static int
@@ -585,7 +580,6 @@ acpi_bus_driver_init(struct acpi_device *device, struct acpi_driver *driver)
{
int result = 0;
-
if (!device || !driver)
return -EINVAL;
@@ -802,7 +796,7 @@ static int acpi_bus_get_wakeup_device_flags(struct acpi_device *device)
if (!acpi_match_device_ids(device, button_device_ids))
device->wakeup.flags.run_wake = 1;
- end:
+end:
if (ACPI_FAILURE(status))
device->flags.wake_capable = 0;
return 0;
@@ -1070,7 +1064,7 @@ static void acpi_device_set_id(struct acpi_device *device,
break;
}
- /*
+ /*
* \_SB
* ----
* Fix for the system root bus device -- the only root-level device.
@@ -1320,7 +1314,7 @@ acpi_add_single_object(struct acpi_device **child,
device->parent->ops.bind(device);
}
- end:
+end:
if (!result)
*child = device;
else {
@@ -1464,7 +1458,6 @@ acpi_bus_add(struct acpi_device **child,
return result;
}
-
EXPORT_SYMBOL(acpi_bus_add);
int acpi_bus_start(struct acpi_device *device)
@@ -1484,7 +1477,6 @@ int acpi_bus_start(struct acpi_device *device)
}
return result;
}
-
EXPORT_SYMBOL(acpi_bus_start);
int acpi_bus_trim(struct acpi_device *start, int rmdevice)
@@ -1542,7 +1534,6 @@ int acpi_bus_trim(struct acpi_device *start, int rmdevice)
}
EXPORT_SYMBOL_GPL(acpi_bus_trim);
-
static int acpi_bus_scan_fixed(struct acpi_device *root)
{
int result = 0;
@@ -1610,6 +1601,6 @@ int __init acpi_scan_init(void)
if (result)
acpi_device_unregister(acpi_root, ACPI_BUS_REMOVAL_NORMAL);
- Done:
+Done:
return result;
}
diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
index 1bdfb37377e..8851315ce85 100644
--- a/drivers/acpi/video.c
+++ b/drivers/acpi/video.c
@@ -76,6 +76,7 @@ MODULE_LICENSE("GPL");
static int brightness_switch_enabled = 1;
module_param(brightness_switch_enabled, bool, 0644);
+static int register_count = 0;
static int acpi_video_bus_add(struct acpi_device *device);
static int acpi_video_bus_remove(struct acpi_device *device, int type);
static int acpi_video_resume(struct acpi_device *device);
@@ -586,6 +587,14 @@ static struct dmi_system_id video_dmi_table[] __initdata = {
DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5315"),
},
},
+ {
+ .callback = video_set_bqc_offset,
+ .ident = "Acer Aspire 7720",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 7720"),
+ },
+ },
{}
};
@@ -976,6 +985,11 @@ static void acpi_video_device_find_cap(struct acpi_video_device *device)
device->backlight->props.max_brightness = device->brightness->count-3;
kfree(name);
+ result = sysfs_create_link(&device->backlight->dev.kobj,
+ &device->dev->dev.kobj, "device");
+ if (result)
+ printk(KERN_ERR PREFIX "Create sysfs link\n");
+
device->cdev = thermal_cooling_device_register("LCD",
device->dev, &video_cooling_ops);
if (IS_ERR(device->cdev))
@@ -1054,15 +1068,15 @@ static void acpi_video_bus_find_cap(struct acpi_video_bus *video)
static int acpi_video_bus_check(struct acpi_video_bus *video)
{
acpi_status status = -ENOENT;
- struct device *dev;
+ struct pci_dev *dev;
if (!video)
return -EINVAL;
- dev = acpi_get_physical_pci_device(video->device->handle);
+ dev = acpi_get_pci_dev(video->device->handle);
if (!dev)
return -ENODEV;
- put_device(dev);
+ pci_dev_put(dev);
/* Since there is no HID, CID and so on for VGA driver, we have
* to check well known required nodes.
@@ -1990,6 +2004,7 @@ static int acpi_video_bus_put_one_device(struct acpi_video_device *device)
status = acpi_remove_notify_handler(device->dev->handle,
ACPI_DEVICE_NOTIFY,
acpi_video_device_notify);
+ sysfs_remove_link(&device->backlight->dev.kobj, "device");
backlight_device_unregister(device->backlight);
if (device->cdev) {
sysfs_remove_link(&device->dev->dev.kobj,
@@ -2318,6 +2333,13 @@ static int __init intel_opregion_present(void)
int acpi_video_register(void)
{
int result = 0;
+ if (register_count) {
+ /*
+ * If acpi_video_register() has already been called, don't
+ * register the acpi_video_bus driver again; just return no error.
+ */
+ return 0;
+ }
acpi_video_dir = proc_mkdir(ACPI_VIDEO_CLASS, acpi_root_dir);
if (!acpi_video_dir)
@@ -2329,10 +2351,35 @@ int acpi_video_register(void)
return -ENODEV;
}
+ /*
+ * When the acpi_video_bus driver has been registered successfully,
+ * record it so that repeated register calls become no-ops.
+ */
+ register_count = 1;
+
return 0;
}
EXPORT_SYMBOL(acpi_video_register);
+void acpi_video_unregister(void)
+{
+ if (!register_count) {
+ /*
+ * If the acpi_video_bus driver is not registered, there is
+ * nothing to unload; return directly.
+ */
+ return;
+ }
+ acpi_bus_unregister_driver(&acpi_video_bus);
+
+ remove_proc_entry(ACPI_VIDEO_CLASS, acpi_root_dir);
+
+ register_count = 0;
+
+ return;
+}
+EXPORT_SYMBOL(acpi_video_unregister);
+
/*
* This is kind of nasty. Hardware using Intel chipsets may require
* the video opregion code to be run first in order to initialise
@@ -2350,16 +2397,12 @@ static int __init acpi_video_init(void)
return acpi_video_register();
}
-void acpi_video_exit(void)
+static void __exit acpi_video_exit(void)
{
-
- acpi_bus_unregister_driver(&acpi_video_bus);
-
- remove_proc_entry(ACPI_VIDEO_CLASS, acpi_root_dir);
+ acpi_video_unregister();
return;
}
-EXPORT_SYMBOL(acpi_video_exit);
module_init(acpi_video_init);
module_exit(acpi_video_exit);
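The register_count changes above make acpi_video_register() and the new acpi_video_unregister() safe to call more than once, which matters now that the i915 opregion code calls the unregister path. A minimal sketch of the guard pattern, illustrative only and not part of the patch:

	/* illustrative: idempotent register/unregister guard */
	static int registered;			/* 0 == not registered */

	int example_register(void)
	{
		if (registered)			/* already done: succeed silently */
			return 0;
		/* ... real registration work ... */
		registered = 1;
		return 0;
	}

	void example_unregister(void)
	{
		if (!registered)		/* nothing to tear down */
			return;
		/* ... real teardown work ... */
		registered = 0;
	}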
diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c
index 09737275e25..7cd2b63435e 100644
--- a/drivers/acpi/video_detect.c
+++ b/drivers/acpi/video_detect.c
@@ -10,7 +10,7 @@
* assinged
*
* After PCI devices are glued with ACPI devices
- * acpi_get_physical_pci_device() can be called to identify ACPI graphics
+ * acpi_get_pci_dev() can be called to identify ACPI graphics
* devices for which a real graphics card is plugged in
*
* Now acpi_video_get_capabilities() can be called to check which
@@ -36,6 +36,7 @@
#include <linux/acpi.h>
#include <linux/dmi.h>
+#include <linux/pci.h>
ACPI_MODULE_NAME("video");
#define _COMPONENT ACPI_VIDEO_COMPONENT
@@ -109,7 +110,7 @@ static acpi_status
find_video(acpi_handle handle, u32 lvl, void *context, void **rv)
{
long *cap = context;
- struct device *dev;
+ struct pci_dev *dev;
struct acpi_device *acpi_dev;
const struct acpi_device_id video_ids[] = {
@@ -120,10 +121,10 @@ find_video(acpi_handle handle, u32 lvl, void *context, void **rv)
return AE_OK;
if (!acpi_match_device_ids(acpi_dev, video_ids)) {
- dev = acpi_get_physical_pci_device(handle);
+ dev = acpi_get_pci_dev(handle);
if (!dev)
return AE_OK;
- put_device(dev);
+ pci_dev_put(dev);
*cap |= acpi_is_video_device(acpi_dev);
}
return AE_OK;
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index 862b40c9018..91b75301378 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -3327,7 +3327,10 @@ static inline int set_geometry(unsigned int cmd, struct floppy_struct *g,
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
mutex_lock(&open_lock);
- LOCK_FDC(drive, 1);
+ if (lock_fdc(drive, 1)) {
+ mutex_unlock(&open_lock);
+ return -EINTR;
+ }
floppy_type[type] = *g;
floppy_type[type].name = "user format";
for (cnt = type << 2; cnt < (type << 2) + 4; cnt++)
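set_geometry() now checks lock_fdc() directly instead of going through the LOCK_FDC() macro, so an interrupted lock releases open_lock and returns -EINTR rather than bailing out with the mutex still held. A sketch of the pattern with stand-in names, illustrative only:

	/* illustrative: outer mutex plus an interruptible inner lock */
	static DEFINE_MUTEX(outer_lock);
	static int take_inner_lock(void);	/* stands in for lock_fdc(); non-zero if interrupted */

	static int do_set_geometry(void)
	{
		mutex_lock(&outer_lock);
		if (take_inner_lock()) {
			mutex_unlock(&outer_lock);	/* never return with outer_lock held */
			return -EINTR;
		}
		/* ... update the geometry ... */
		mutex_unlock(&outer_lock);
		return 0;
	}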
diff --git a/drivers/char/bsr.c b/drivers/char/bsr.c
index 140ea10ecb8..c02db01f736 100644
--- a/drivers/char/bsr.c
+++ b/drivers/char/bsr.c
@@ -27,6 +27,7 @@
#include <linux/cdev.h>
#include <linux/list.h>
#include <linux/mm.h>
+#include <asm/pgtable.h>
#include <asm/io.h>
/*
@@ -75,12 +76,13 @@ static struct class *bsr_class;
static int bsr_major;
enum {
- BSR_8 = 0,
- BSR_16 = 1,
- BSR_64 = 2,
- BSR_128 = 3,
- BSR_UNKNOWN = 4,
- BSR_MAX = 5,
+ BSR_8 = 0,
+ BSR_16 = 1,
+ BSR_64 = 2,
+ BSR_128 = 3,
+ BSR_4096 = 4,
+ BSR_UNKNOWN = 5,
+ BSR_MAX = 6,
};
static unsigned bsr_types[BSR_MAX];
@@ -117,15 +119,22 @@ static int bsr_mmap(struct file *filp, struct vm_area_struct *vma)
{
unsigned long size = vma->vm_end - vma->vm_start;
struct bsr_dev *dev = filp->private_data;
+ int ret;
- if (size > dev->bsr_len || (size & (PAGE_SIZE-1)))
- return -EINVAL;
-
- vma->vm_flags |= (VM_IO | VM_DONTEXPAND);
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
- if (io_remap_pfn_range(vma, vma->vm_start, dev->bsr_addr >> PAGE_SHIFT,
- size, vma->vm_page_prot))
+ /* check for the case of a small BSR device and map one 4k page for it */
+ if (dev->bsr_len < PAGE_SIZE && size == PAGE_SIZE)
+ ret = remap_4k_pfn(vma, vma->vm_start, dev->bsr_addr >> 12,
+ vma->vm_page_prot);
+ else if (size <= dev->bsr_len)
+ ret = io_remap_pfn_range(vma, vma->vm_start,
+ dev->bsr_addr >> PAGE_SHIFT,
+ size, vma->vm_page_prot);
+ else
+ return -EINVAL;
+
+ if (ret)
return -EAGAIN;
return 0;
@@ -205,6 +214,11 @@ static int bsr_add_node(struct device_node *bn)
cur->bsr_stride = bsr_stride[i];
cur->bsr_dev = MKDEV(bsr_major, i + total_bsr_devs);
+ /* if we have a bsr_len of > 4k and less than PAGE_SIZE (64k pages) */
+ /* we can only map 4k of it, so only advertise the 4k in sysfs */
+ if (cur->bsr_len > 4096 && cur->bsr_len < PAGE_SIZE)
+ cur->bsr_len = 4096;
+
switch(cur->bsr_bytes) {
case 8:
cur->bsr_type = BSR_8;
@@ -218,9 +232,11 @@ static int bsr_add_node(struct device_node *bn)
case 128:
cur->bsr_type = BSR_128;
break;
+ case 4096:
+ cur->bsr_type = BSR_4096;
+ break;
default:
cur->bsr_type = BSR_UNKNOWN;
- printk(KERN_INFO "unknown BSR size %d\n",cur->bsr_bytes);
}
cur->bsr_num = bsr_types[cur->bsr_type];
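Taken together, the bsr.c hunks handle BSR devices smaller than the kernel page size: a length between 4k and PAGE_SIZE is advertised as 4096 in sysfs, and bsr_mmap() backs such a mapping with remap_4k_pfn() (a powerpc helper that maps a single 4 KB hardware page even with a 64 KB base page size) instead of io_remap_pfn_range(). A condensed sketch of the decision, illustrative only with shortened names:

	/* illustrative: distilled mapping decision from bsr_mmap() above */
	static int map_bsr(struct vm_area_struct *vma, unsigned long len,
			   unsigned long dev_len, unsigned long dev_addr)
	{
		if (dev_len < PAGE_SIZE && len == PAGE_SIZE)
			/* small BSR: back the vma with one 4k hardware page */
			return remap_4k_pfn(vma, vma->vm_start, dev_addr >> 12,
					    vma->vm_page_prot);
		if (len <= dev_len)
			return io_remap_pfn_range(vma, vma->vm_start,
						  dev_addr >> PAGE_SHIFT,
						  len, vma->vm_page_prot);
		return -EINVAL;			/* request larger than the device */
	}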
diff --git a/drivers/char/mxser.c b/drivers/char/mxser.c
index 9533f43a30b..52d953eb30c 100644
--- a/drivers/char/mxser.c
+++ b/drivers/char/mxser.c
@@ -1048,8 +1048,6 @@ static int mxser_open(struct tty_struct *tty, struct file *filp)
if (retval)
return retval;
- /* unmark here for very high baud rate (ex. 921600 bps) used */
- tty->low_latency = 1;
return 0;
}
diff --git a/drivers/char/nozomi.c b/drivers/char/nozomi.c
index d6102b644b5..574f1c79b6e 100644
--- a/drivers/char/nozomi.c
+++ b/drivers/char/nozomi.c
@@ -1591,8 +1591,6 @@ static int ntty_open(struct tty_struct *tty, struct file *file)
/* Enable interrupt downlink for channel */
if (port->port.count == 1) {
- /* FIXME: is this needed now ? */
- tty->low_latency = 1;
tty->driver_data = port;
tty_port_tty_set(&port->port, tty);
DBG1("open: %d", port->token_dl);
diff --git a/drivers/char/synclink_gt.c b/drivers/char/synclink_gt.c
index 1386625fc4c..a2e67e6df3a 100644
--- a/drivers/char/synclink_gt.c
+++ b/drivers/char/synclink_gt.c
@@ -467,7 +467,6 @@ static unsigned int free_tbuf_count(struct slgt_info *info);
static unsigned int tbuf_bytes(struct slgt_info *info);
static void reset_tbufs(struct slgt_info *info);
static void tdma_reset(struct slgt_info *info);
-static void tdma_start(struct slgt_info *info);
static void tx_load(struct slgt_info *info, const char *buf, unsigned int count);
static void get_signals(struct slgt_info *info);
@@ -795,6 +794,18 @@ static void set_termios(struct tty_struct *tty, struct ktermios *old_termios)
}
}
+static void update_tx_timer(struct slgt_info *info)
+{
+ /*
+ * use worst case speed of 1200bps to calculate transmit timeout
+ * based on data in buffers (tbuf_bytes) and FIFO (128 bytes)
+ */
+ if (info->params.mode == MGSL_MODE_HDLC) {
+ int timeout = (tbuf_bytes(info) * 7) + 1000;
+ mod_timer(&info->tx_timer, jiffies + msecs_to_jiffies(timeout));
+ }
+}
+
static int write(struct tty_struct *tty,
const unsigned char *buf, int count)
{
@@ -838,8 +849,18 @@ start:
spin_lock_irqsave(&info->lock,flags);
if (!info->tx_active)
tx_start(info);
- else
- tdma_start(info);
+ else if (!(rd_reg32(info, TDCSR) & BIT0)) {
+ /* transmit still active but transmit DMA stopped */
+ unsigned int i = info->tbuf_current;
+ if (!i)
+ i = info->tbuf_count;
+ i--;
+ /* if the DMA buf is unsent, must try again later after tx idle */
+ if (desc_count(info->tbufs[i]))
+ ret = 0;
+ }
+ if (ret > 0)
+ update_tx_timer(info);
spin_unlock_irqrestore(&info->lock,flags);
}
@@ -1502,10 +1523,9 @@ static int hdlcdev_xmit(struct sk_buff *skb, struct net_device *dev)
/* save start time for transmit timeout detection */
dev->trans_start = jiffies;
- /* start hardware transmitter if necessary */
spin_lock_irqsave(&info->lock,flags);
- if (!info->tx_active)
- tx_start(info);
+ tx_start(info);
+ update_tx_timer(info);
spin_unlock_irqrestore(&info->lock,flags);
return 0;
@@ -3946,50 +3966,19 @@ static void tx_start(struct slgt_info *info)
slgt_irq_on(info, IRQ_TXUNDER + IRQ_TXIDLE);
/* clear tx idle and underrun status bits */
wr_reg16(info, SSR, (unsigned short)(IRQ_TXIDLE + IRQ_TXUNDER));
- if (info->params.mode == MGSL_MODE_HDLC)
- mod_timer(&info->tx_timer, jiffies +
- msecs_to_jiffies(5000));
} else {
slgt_irq_off(info, IRQ_TXDATA);
slgt_irq_on(info, IRQ_TXIDLE);
/* clear tx idle status bit */
wr_reg16(info, SSR, IRQ_TXIDLE);
}
- tdma_start(info);
+ /* set 1st descriptor address and start DMA */
+ wr_reg32(info, TDDAR, info->tbufs[info->tbuf_start].pdesc);
+ wr_reg32(info, TDCSR, BIT2 + BIT0);
info->tx_active = true;
}
}
-/*
- * start transmit DMA if inactive and there are unsent buffers
- */
-static void tdma_start(struct slgt_info *info)
-{
- unsigned int i;
-
- if (rd_reg32(info, TDCSR) & BIT0)
- return;
-
- /* transmit DMA inactive, check for unsent buffers */
- i = info->tbuf_start;
- while (!desc_count(info->tbufs[i])) {
- if (++i == info->tbuf_count)
- i = 0;
- if (i == info->tbuf_current)
- return;
- }
- info->tbuf_start = i;
-
- /* there are unsent buffers, start transmit DMA */
-
- /* reset needed if previous error condition */
- tdma_reset(info);
-
- /* set 1st descriptor address */
- wr_reg32(info, TDDAR, info->tbufs[info->tbuf_start].pdesc);
- wr_reg32(info, TDCSR, BIT2 + BIT0); /* IRQ + DMA enable */
-}
-
static void tx_stop(struct slgt_info *info)
{
unsigned short val;
@@ -5004,8 +4993,7 @@ static void tx_timeout(unsigned long context)
info->icount.txtimeout++;
}
spin_lock_irqsave(&info->lock,flags);
- info->tx_active = false;
- info->tx_count = 0;
+ tx_stop(info);
spin_unlock_irqrestore(&info->lock,flags);
#if SYNCLINK_GENERIC_HDLC
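The new update_tx_timer() arms the transmit timeout from the amount of queued data instead of a fixed 5 seconds: roughly 7 ms per buffered byte at the assumed worst-case rate, plus a 1000 ms allowance for the 128-byte FIFO and scheduling slack. A sketch of the same calculation, illustrative only; for example, 4 KB of queued data gives 4096 * 7 + 1000 = 29672 ms, about 29.7 seconds:

	/* illustrative: timeout formula mirrored from update_tx_timer() above */
	static unsigned int tx_timeout_ms(unsigned int queued_bytes)
	{
		return queued_bytes * 7 + 1000;	/* ~7 ms/byte + fixed margin */
	}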
diff --git a/drivers/char/tty_ldisc.c b/drivers/char/tty_ldisc.c
index a19e935847b..913aa8d3f1c 100644
--- a/drivers/char/tty_ldisc.c
+++ b/drivers/char/tty_ldisc.c
@@ -867,15 +867,22 @@ void tty_ldisc_release(struct tty_struct *tty, struct tty_struct *o_tty)
tty_ldisc_wait_idle(tty);
/*
- * Shutdown the current line discipline, and reset it to N_TTY.
- *
- * FIXME: this MUST get fixed for the new reflocking
+ * Now kill off the ldisc
*/
+ tty_ldisc_close(tty, tty->ldisc);
+ tty_ldisc_put(tty->ldisc);
+ /* Force an oops if we mess this up */
+ tty->ldisc = NULL;
+
+ /* Ensure the next open requests the N_TTY ldisc */
+ tty_set_termios_ldisc(tty, N_TTY);
- tty_ldisc_reinit(tty);
/* This will need doing differently if we need to lock */
if (o_tty)
tty_ldisc_release(o_tty, NULL);
+
+ /* And the memory resources remaining (buffers, termios) will be
+ disposed of when the kref hits zero */
}
/**
diff --git a/drivers/char/tty_port.c b/drivers/char/tty_port.c
index 62dadfc95e3..4e862a75f7f 100644
--- a/drivers/char/tty_port.c
+++ b/drivers/char/tty_port.c
@@ -193,7 +193,7 @@ int tty_port_block_til_ready(struct tty_port *port,
{
int do_clocal = 0, retval;
unsigned long flags;
- DECLARE_WAITQUEUE(wait, current);
+ DEFINE_WAIT(wait);
int cd;
/* block if port is in the process of being closed */
diff --git a/drivers/dma/txx9dmac.c b/drivers/dma/txx9dmac.c
index 9aa9ea9822c..88dab52926f 100644
--- a/drivers/dma/txx9dmac.c
+++ b/drivers/dma/txx9dmac.c
@@ -432,23 +432,27 @@ txx9dmac_descriptor_complete(struct txx9dmac_chan *dc,
list_splice_init(&txd->tx_list, &dc->free_list);
list_move(&desc->desc_node, &dc->free_list);
- /*
- * We use dma_unmap_page() regardless of how the buffers were
- * mapped before they were submitted...
- */
if (!ds) {
dma_addr_t dmaaddr;
if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
dmaaddr = is_dmac64(dc) ?
desc->hwdesc.DAR : desc->hwdesc32.DAR;
- dma_unmap_page(chan2parent(&dc->chan), dmaaddr,
- desc->len, DMA_FROM_DEVICE);
+ if (txd->flags & DMA_COMPL_DEST_UNMAP_SINGLE)
+ dma_unmap_single(chan2parent(&dc->chan),
+ dmaaddr, desc->len, DMA_FROM_DEVICE);
+ else
+ dma_unmap_page(chan2parent(&dc->chan),
+ dmaaddr, desc->len, DMA_FROM_DEVICE);
}
if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
dmaaddr = is_dmac64(dc) ?
desc->hwdesc.SAR : desc->hwdesc32.SAR;
- dma_unmap_page(chan2parent(&dc->chan), dmaaddr,
- desc->len, DMA_TO_DEVICE);
+ if (txd->flags & DMA_COMPL_SRC_UNMAP_SINGLE)
+ dma_unmap_single(chan2parent(&dc->chan),
+ dmaaddr, desc->len, DMA_TO_DEVICE);
+ else
+ dma_unmap_page(chan2parent(&dc->chan),
+ dmaaddr, desc->len, DMA_TO_DEVICE);
}
}
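The txx9dmac completion path now honours the DMA_COMPL_*_UNMAP_SINGLE flags, so a buffer that was mapped with dma_map_single() is unmapped with dma_unmap_single() rather than dma_unmap_page(). A condensed sketch of the destination-side selection; illustrative only, the wrapper name is made up while the flags and unmap helpers are the existing kernel ones:

	#include <linux/dmaengine.h>
	#include <linux/dma-mapping.h>

	/* illustrative: pick the unmap primitive that matches the original mapping */
	static void unmap_dst(struct device *dev, dma_addr_t addr, size_t len,
			      enum dma_ctrl_flags flags)
	{
		if (flags & DMA_COMPL_SKIP_DEST_UNMAP)
			return;
		if (flags & DMA_COMPL_DEST_UNMAP_SINGLE)
			dma_unmap_single(dev, addr, len, DMA_FROM_DEVICE);
		else
			dma_unmap_page(dev, addr, len, DMA_FROM_DEVICE);
	}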
diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
index c36bf40568c..858fe603722 100644
--- a/drivers/edac/amd64_edac.c
+++ b/drivers/edac/amd64_edac.c
@@ -754,13 +754,13 @@ static void amd64_cpu_display_info(struct amd64_pvt *pvt)
static enum edac_type amd64_determine_edac_cap(struct amd64_pvt *pvt)
{
int bit;
- enum dev_type edac_cap = EDAC_NONE;
+ enum dev_type edac_cap = EDAC_FLAG_NONE;
bit = (boot_cpu_data.x86 > 0xf || pvt->ext_model >= OPTERON_CPU_REV_F)
? 19
: 17;
- if (pvt->dclr0 >> BIT(bit))
+ if (pvt->dclr0 & BIT(bit))
edac_cap = EDAC_FLAG_SECDED;
return edac_cap;
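Two small fixes in this hunk: the default capability is EDAC_FLAG_NONE rather than EDAC_NONE, and the DCLR0 test uses a bitwise AND; the old expression pvt->dclr0 >> BIT(bit) shifted by 2^bit positions instead of testing bit 17 or 19. A tiny stand-alone illustration of the difference, with made-up values:

	#include <stdio.h>
	#define BIT(n) (1u << (n))

	int main(void)
	{
		unsigned int dclr0 = BIT(19);	/* pretend the ECC-capable bit is set */

		/* old test: dclr0 >> BIT(19) shifts by 524288 -- undefined, never a bit test */
		if (dclr0 & BIT(19))		/* new test: mask the bit directly */
			printf("SECDED capable\n");
		return 0;
	}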
@@ -1269,7 +1269,7 @@ static int f10_early_channel_count(struct amd64_pvt *pvt)
if (channels == 0)
channels = 1;
- debugf0("DIMM count= %d\n", channels);
+ debugf0("MCT channel count: %d\n", channels);
return channels;
@@ -2966,7 +2966,12 @@ static int amd64_check_ecc_enabled(struct amd64_pvt *pvt)
" Use of the override can cause "
"unknown side effects.\n");
ret = -ENODEV;
- }
+ } else
+ /*
+ * enable further driver loading if the ECC-enable check
+ * has been overridden.
+ */
+ ret = 0;
} else {
amd64_printk(KERN_INFO,
"ECC is enabled by BIOS, Proceeding "
@@ -3006,7 +3011,6 @@ static void amd64_setup_mci_misc_attributes(struct mem_ctl_info *mci)
mci->mtype_cap = MEM_FLAG_DDR2 | MEM_FLAG_RDDR2;
mci->edac_ctl_cap = EDAC_FLAG_NONE;
- mci->edac_cap = EDAC_FLAG_NONE;
if (pvt->nbcap & K8_NBCAP_SECDED)
mci->edac_ctl_cap |= EDAC_FLAG_SECDED;
@@ -3052,7 +3056,7 @@ static int amd64_probe_one_instance(struct pci_dev *dram_f2_ctl,
if (!pvt)
goto err_exit;
- pvt->mc_node_id = get_mc_node_id_from_pdev(dram_f2_ctl);
+ pvt->mc_node_id = get_node_id(dram_f2_ctl);
pvt->dram_f2_ctl = dram_f2_ctl;
pvt->ext_model = boot_cpu_data.x86_model >> 4;
@@ -3179,8 +3183,7 @@ static int __devinit amd64_init_one_instance(struct pci_dev *pdev,
{
int ret = 0;
- debugf0("(MC node=%d,mc_type='%s')\n",
- get_mc_node_id_from_pdev(pdev),
+ debugf0("(MC node=%d,mc_type='%s')\n", get_node_id(pdev),
get_amd_family_name(mc_type->driver_data));
ret = pci_enable_device(pdev);
@@ -3319,15 +3322,17 @@ static int __init amd64_edac_init(void)
err = amd64_init_2nd_stage(pvt_lookup[nb]);
if (err)
- goto err_exit;
+ goto err_2nd_stage;
}
amd64_setup_pci_device();
return 0;
+err_2nd_stage:
+ debugf0("2nd stage failed\n");
+
err_exit:
- debugf0("'finish_setup' stage failed\n");
pci_unregister_driver(&amd64_pci_driver);
return err;
diff --git a/drivers/edac/amd64_edac.h b/drivers/edac/amd64_edac.h
index a159957e167..ba73015af8e 100644
--- a/drivers/edac/amd64_edac.h
+++ b/drivers/edac/amd64_edac.h
@@ -444,7 +444,7 @@ enum {
#define K8_MSR_MC4ADDR 0x0412
/* AMD sets the first MC device at device ID 0x18. */
-static inline int get_mc_node_id_from_pdev(struct pci_dev *pdev)
+static inline int get_node_id(struct pci_dev *pdev)
{
return PCI_SLOT(pdev->devfn) - 0x18;
}
diff --git a/drivers/edac/edac_core.h b/drivers/edac/edac_core.h
index 3493c6bdb82..871c13b4c14 100644
--- a/drivers/edac/edac_core.h
+++ b/drivers/edac/edac_core.h
@@ -150,6 +150,8 @@ enum mem_type {
MEM_FB_DDR2, /* fully buffered DDR2 */
MEM_RDDR2, /* Registered DDR2 RAM */
MEM_XDR, /* Rambus XDR */
+ MEM_DDR3, /* DDR3 RAM */
+ MEM_RDDR3, /* Registered DDR3 RAM */
};
#define MEM_FLAG_EMPTY BIT(MEM_EMPTY)
@@ -167,6 +169,8 @@ enum mem_type {
#define MEM_FLAG_FB_DDR2 BIT(MEM_FB_DDR2)
#define MEM_FLAG_RDDR2 BIT(MEM_RDDR2)
#define MEM_FLAG_XDR BIT(MEM_XDR)
+#define MEM_FLAG_DDR3 BIT(MEM_DDR3)
+#define MEM_FLAG_RDDR3 BIT(MEM_RDDR3)
/* chipset Error Detection and Correction capabilities and mode */
enum edac_type {
diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c
index ad218fe4942..e1d4ce08348 100644
--- a/drivers/edac/edac_mc_sysfs.c
+++ b/drivers/edac/edac_mc_sysfs.c
@@ -94,7 +94,9 @@ static const char *mem_types[] = {
[MEM_DDR2] = "Unbuffered-DDR2",
[MEM_FB_DDR2] = "FullyBuffered-DDR2",
[MEM_RDDR2] = "Registered-DDR2",
- [MEM_XDR] = "XDR"
+ [MEM_XDR] = "XDR",
+ [MEM_DDR3] = "Unbuffered-DDR3",
+ [MEM_RDDR3] = "Registered-DDR3"
};
static const char *dev_types[] = {
diff --git a/drivers/edac/mpc85xx_edac.c b/drivers/edac/mpc85xx_edac.c
index 7c8c2d72916..3f2ccfc6407 100644
--- a/drivers/edac/mpc85xx_edac.c
+++ b/drivers/edac/mpc85xx_edac.c
@@ -757,6 +757,9 @@ static void __devinit mpc85xx_init_csrows(struct mem_ctl_info *mci)
case DSC_SDTYPE_DDR2:
mtype = MEM_RDDR2;
break;
+ case DSC_SDTYPE_DDR3:
+ mtype = MEM_RDDR3;
+ break;
default:
mtype = MEM_UNKNOWN;
break;
@@ -769,6 +772,9 @@ static void __devinit mpc85xx_init_csrows(struct mem_ctl_info *mci)
case DSC_SDTYPE_DDR2:
mtype = MEM_DDR2;
break;
+ case DSC_SDTYPE_DDR3:
+ mtype = MEM_DDR3;
+ break;
default:
mtype = MEM_UNKNOWN;
break;
diff --git a/drivers/edac/mpc85xx_edac.h b/drivers/edac/mpc85xx_edac.h
index 135b3539a03..52432ee7c4b 100644
--- a/drivers/edac/mpc85xx_edac.h
+++ b/drivers/edac/mpc85xx_edac.h
@@ -53,6 +53,7 @@
#define DSC_SDTYPE_DDR 0x02000000
#define DSC_SDTYPE_DDR2 0x03000000
+#define DSC_SDTYPE_DDR3 0x07000000
#define DSC_X32_EN 0x00000020
/* Err_Int_En */
diff --git a/drivers/gpio/pl061.c b/drivers/gpio/pl061.c
index aa8e7cb020d..4ee4c8367a3 100644
--- a/drivers/gpio/pl061.c
+++ b/drivers/gpio/pl061.c
@@ -109,6 +109,16 @@ static void pl061_set_value(struct gpio_chip *gc, unsigned offset, int value)
writeb(!!value << offset, chip->base + (1 << (offset + 2)));
}
+static int pl061_to_irq(struct gpio_chip *gc, unsigned offset)
+{
+ struct pl061_gpio *chip = container_of(gc, struct pl061_gpio, gc);
+
+ if (chip->irq_base == (unsigned) -1)
+ return -EINVAL;
+
+ return chip->irq_base + offset;
+}
+
/*
* PL061 GPIO IRQ
*/
@@ -200,7 +210,7 @@ static void pl061_irq_handler(unsigned irq, struct irq_desc *desc)
desc->chip->ack(irq);
list_for_each(ptr, chip_list) {
unsigned long pending;
- int gpio;
+ int offset;
chip = list_entry(ptr, struct pl061_gpio, list);
pending = readb(chip->base + GPIOMIS);
@@ -209,8 +219,8 @@ static void pl061_irq_handler(unsigned irq, struct irq_desc *desc)
if (pending == 0)
continue;
- for_each_bit(gpio, &pending, PL061_GPIO_NR)
- generic_handle_irq(gpio_to_irq(gpio));
+ for_each_bit(offset, &pending, PL061_GPIO_NR)
+ generic_handle_irq(pl061_to_irq(&chip->gc, offset));
}
desc->chip->unmask(irq);
}
@@ -221,7 +231,7 @@ static int __init pl061_probe(struct amba_device *dev, struct amba_id *id)
struct pl061_gpio *chip;
struct list_head *chip_list;
int ret, irq, i;
- static unsigned long init_irq[BITS_TO_LONGS(NR_IRQS)];
+ static DECLARE_BITMAP(init_irq, NR_IRQS);
pdata = dev->dev.platform_data;
if (pdata == NULL)
@@ -251,6 +261,7 @@ static int __init pl061_probe(struct amba_device *dev, struct amba_id *id)
chip->gc.direction_output = pl061_direction_output;
chip->gc.get = pl061_get_value;
chip->gc.set = pl061_set_value;
+ chip->gc.to_irq = pl061_to_irq;
chip->gc.base = pdata->gpio_base;
chip->gc.ngpio = PL061_GPIO_NR;
chip->gc.label = dev_name(&dev->dev);
@@ -280,6 +291,7 @@ static int __init pl061_probe(struct amba_device *dev, struct amba_id *id)
if (!test_and_set_bit(irq, init_irq)) { /* list initialized? */
chip_list = kmalloc(sizeof(*chip_list), GFP_KERNEL);
if (chip_list == NULL) {
+ clear_bit(irq, init_irq);
ret = -ENOMEM;
goto iounmap;
}
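The new pl061_to_irq() implements the gpio_chip .to_irq hook, so gpio_to_irq() resolves a GPIO offset to irq_base + offset, and the interrupt handler now translates pending bits through the same mapping instead of calling gpio_to_irq() on a raw bit number. A minimal sketch of the mapping, illustrative only:

	/* illustrative: GPIO offset -> Linux IRQ number mapping used above */
	static int chip_offset_to_irq(unsigned int irq_base, unsigned int offset)
	{
		if (irq_base == (unsigned int)-1)	/* no IRQs wired for this chip */
			return -EINVAL;
		return irq_base + offset;
	}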
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index c961fe415ae..39b393d38bb 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -81,6 +81,7 @@ config DRM_I830
config DRM_I915
tristate "i915 driver"
+ depends on AGP_INTEL
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA
select FB_CFB_IMAGEBLIT
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index 4e89ab08b7b..fe23f29f7cb 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -16,6 +16,7 @@ drm-y := drm_auth.o drm_bufs.o drm_cache.o \
drm-$(CONFIG_COMPAT) += drm_ioc32.o
obj-$(CONFIG_DRM) += drm.o
+obj-$(CONFIG_DRM_TTM) += ttm/
obj-$(CONFIG_DRM_TDFX) += tdfx/
obj-$(CONFIG_DRM_R128) += r128/
obj-$(CONFIG_DRM_RADEON)+= radeon/
@@ -26,4 +27,3 @@ obj-$(CONFIG_DRM_I915) += i915/
obj-$(CONFIG_DRM_SIS) += sis/
obj-$(CONFIG_DRM_SAVAGE)+= savage/
obj-$(CONFIG_DRM_VIA) +=via/
-obj-$(CONFIG_DRM_TTM) += ttm/
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 7d0835226f6..80cc6d06d61 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -294,10 +294,10 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_device *dev,
unsigned vactive = (pt->vactive_vblank_hi & 0xf0) << 4 | pt->vactive_lo;
unsigned hblank = (pt->hactive_hblank_hi & 0xf) << 8 | pt->hblank_lo;
unsigned vblank = (pt->vactive_vblank_hi & 0xf) << 8 | pt->vblank_lo;
- unsigned hsync_offset = (pt->hsync_vsync_offset_pulse_width_hi & 0x3) << 8 | pt->hsync_offset_lo;
- unsigned hsync_pulse_width = (pt->hsync_vsync_offset_pulse_width_hi & 0xc) << 6 | pt->hsync_pulse_width_lo;
- unsigned vsync_offset = (pt->hsync_vsync_offset_pulse_width_hi & 0x30) | (pt->vsync_offset_pulse_width_lo & 0xf);
- unsigned vsync_pulse_width = (pt->hsync_vsync_offset_pulse_width_hi & 0xc0) >> 2 | pt->vsync_offset_pulse_width_lo >> 4;
+ unsigned hsync_offset = (pt->hsync_vsync_offset_pulse_width_hi & 0xc0) << 2 | pt->hsync_offset_lo;
+ unsigned hsync_pulse_width = (pt->hsync_vsync_offset_pulse_width_hi & 0x30) << 4 | pt->hsync_pulse_width_lo;
+ unsigned vsync_offset = (pt->hsync_vsync_offset_pulse_width_hi & 0xc) >> 2 | pt->vsync_offset_pulse_width_lo >> 4;
+ unsigned vsync_pulse_width = (pt->hsync_vsync_offset_pulse_width_hi & 0x3) << 4 | (pt->vsync_offset_pulse_width_lo & 0xf);
/* ignore tiny modes */
if (hactive < 64 || vactive < 64)
@@ -347,8 +347,8 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_device *dev,
mode->flags |= (pt->misc & DRM_EDID_PT_VSYNC_POSITIVE) ?
DRM_MODE_FLAG_PVSYNC : DRM_MODE_FLAG_NVSYNC;
- mode->width_mm = pt->width_mm_lo | (pt->width_height_mm_hi & 0xf) << 8;
- mode->height_mm = pt->height_mm_lo | (pt->width_height_mm_hi & 0xf0) << 4;
+ mode->width_mm = pt->width_mm_lo | (pt->width_height_mm_hi & 0xf0) << 4;
+ mode->height_mm = pt->height_mm_lo | (pt->width_height_mm_hi & 0xf) << 8;
if (quirks & EDID_QUIRK_DETAILED_IN_CM) {
mode->width_mm *= 10;
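The corrected shifts follow the EDID detailed-timing layout, where byte 11 packs the upper bits of four fields: bits 7:6 extend the horizontal sync offset, 5:4 the horizontal sync pulse width, 3:2 the vertical sync offset and 1:0 the vertical sync pulse width; byte 14 similarly carries the image-size high nibbles, with width in the upper nibble. A sketch of one field as rebuilt above, illustrative only:

	/* illustrative: rebuild the 10-bit hsync offset from EDID bytes 8 and 11 */
	static unsigned int edid_hsync_offset(unsigned char hi, unsigned char lo)
	{
		return ((hi & 0xc0) << 2) | lo;	/* bits 7:6 of byte 11 become bits 9:8 */
	}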
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 51c5a050aa7..30d6b99fb30 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -13,6 +13,8 @@ i915-y := i915_drv.o i915_dma.o i915_irq.o i915_mem.o \
intel_crt.o \
intel_lvds.o \
intel_bios.o \
+ intel_dp.o \
+ intel_dp_i2c.o \
intel_hdmi.o \
intel_sdvo.o \
intel_modes.o \
diff --git a/drivers/gpu/drm/i915/dvo.h b/drivers/gpu/drm/i915/dvo.h
index e747ac42fe3..288fc50627e 100644
--- a/drivers/gpu/drm/i915/dvo.h
+++ b/drivers/gpu/drm/i915/dvo.h
@@ -37,7 +37,7 @@ struct intel_dvo_device {
/* GPIO register used for i2c bus to control this device */
u32 gpio;
int slave_addr;
- struct intel_i2c_chan *i2c_bus;
+ struct i2c_adapter *i2c_bus;
const struct intel_dvo_dev_ops *dev_ops;
void *dev_priv;
@@ -52,7 +52,7 @@ struct intel_dvo_dev_ops {
* Returns NULL if the device does not exist.
*/
bool (*init)(struct intel_dvo_device *dvo,
- struct intel_i2c_chan *i2cbus);
+ struct i2c_adapter *i2cbus);
/*
* Called to allow the output a chance to create properties after the
diff --git a/drivers/gpu/drm/i915/dvo_ch7017.c b/drivers/gpu/drm/i915/dvo_ch7017.c
index 03d4b4973b0..621815b531d 100644
--- a/drivers/gpu/drm/i915/dvo_ch7017.c
+++ b/drivers/gpu/drm/i915/dvo_ch7017.c
@@ -176,19 +176,20 @@ static void ch7017_dpms(struct intel_dvo_device *dvo, int mode);
static bool ch7017_read(struct intel_dvo_device *dvo, int addr, uint8_t *val)
{
- struct intel_i2c_chan *i2cbus = dvo->i2c_bus;
+ struct i2c_adapter *adapter = dvo->i2c_bus;
+ struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter);
u8 out_buf[2];
u8 in_buf[2];
struct i2c_msg msgs[] = {
{
- .addr = i2cbus->slave_addr,
+ .addr = dvo->slave_addr,
.flags = 0,
.len = 1,
.buf = out_buf,
},
{
- .addr = i2cbus->slave_addr,
+ .addr = dvo->slave_addr,
.flags = I2C_M_RD,
.len = 1,
.buf = in_buf,
@@ -208,10 +209,11 @@ static bool ch7017_read(struct intel_dvo_device *dvo, int addr, uint8_t *val)
static bool ch7017_write(struct intel_dvo_device *dvo, int addr, uint8_t val)
{
- struct intel_i2c_chan *i2cbus = dvo->i2c_bus;
+ struct i2c_adapter *adapter = dvo->i2c_bus;
+ struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter);
uint8_t out_buf[2];
struct i2c_msg msg = {
- .addr = i2cbus->slave_addr,
+ .addr = dvo->slave_addr,
.flags = 0,
.len = 2,
.buf = out_buf,
@@ -228,8 +230,9 @@ static bool ch7017_write(struct intel_dvo_device *dvo, int addr, uint8_t val)
/** Probes for a CH7017 on the given bus and slave address. */
static bool ch7017_init(struct intel_dvo_device *dvo,
- struct intel_i2c_chan *i2cbus)
+ struct i2c_adapter *adapter)
{
+ struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter);
struct ch7017_priv *priv;
uint8_t val;
@@ -237,8 +240,7 @@ static bool ch7017_init(struct intel_dvo_device *dvo,
if (priv == NULL)
return false;
- dvo->i2c_bus = i2cbus;
- dvo->i2c_bus->slave_addr = dvo->slave_addr;
+ dvo->i2c_bus = adapter;
dvo->dev_priv = priv;
if (!ch7017_read(dvo, CH7017_DEVICE_ID, &val))
@@ -248,7 +250,7 @@ static bool ch7017_init(struct intel_dvo_device *dvo,
val != CH7018_DEVICE_ID_VALUE &&
val != CH7019_DEVICE_ID_VALUE) {
DRM_DEBUG("ch701x not detected, got %d: from %s Slave %d.\n",
- val, i2cbus->adapter.name,i2cbus->slave_addr);
+ val, i2cbus->adapter.name,dvo->slave_addr);
goto fail;
}
diff --git a/drivers/gpu/drm/i915/dvo_ch7xxx.c b/drivers/gpu/drm/i915/dvo_ch7xxx.c
index d2fd95dbd03..a9b89628968 100644
--- a/drivers/gpu/drm/i915/dvo_ch7xxx.c
+++ b/drivers/gpu/drm/i915/dvo_ch7xxx.c
@@ -123,19 +123,20 @@ static char *ch7xxx_get_id(uint8_t vid)
static bool ch7xxx_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
{
struct ch7xxx_priv *ch7xxx= dvo->dev_priv;
- struct intel_i2c_chan *i2cbus = dvo->i2c_bus;
+ struct i2c_adapter *adapter = dvo->i2c_bus;
+ struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter);
u8 out_buf[2];
u8 in_buf[2];
struct i2c_msg msgs[] = {
{
- .addr = i2cbus->slave_addr,
+ .addr = dvo->slave_addr,
.flags = 0,
.len = 1,
.buf = out_buf,
},
{
- .addr = i2cbus->slave_addr,
+ .addr = dvo->slave_addr,
.flags = I2C_M_RD,
.len = 1,
.buf = in_buf,
@@ -152,7 +153,7 @@ static bool ch7xxx_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
if (!ch7xxx->quiet) {
DRM_DEBUG("Unable to read register 0x%02x from %s:%02x.\n",
- addr, i2cbus->adapter.name, i2cbus->slave_addr);
+ addr, i2cbus->adapter.name, dvo->slave_addr);
}
return false;
}
@@ -161,10 +162,11 @@ static bool ch7xxx_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
static bool ch7xxx_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch)
{
struct ch7xxx_priv *ch7xxx = dvo->dev_priv;
- struct intel_i2c_chan *i2cbus = dvo->i2c_bus;
+ struct i2c_adapter *adapter = dvo->i2c_bus;
+ struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter);
uint8_t out_buf[2];
struct i2c_msg msg = {
- .addr = i2cbus->slave_addr,
+ .addr = dvo->slave_addr,
.flags = 0,
.len = 2,
.buf = out_buf,
@@ -178,14 +180,14 @@ static bool ch7xxx_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch)
if (!ch7xxx->quiet) {
DRM_DEBUG("Unable to write register 0x%02x to %s:%d.\n",
- addr, i2cbus->adapter.name, i2cbus->slave_addr);
+ addr, i2cbus->adapter.name, dvo->slave_addr);
}
return false;
}
static bool ch7xxx_init(struct intel_dvo_device *dvo,
- struct intel_i2c_chan *i2cbus)
+ struct i2c_adapter *adapter)
{
/* this will detect the CH7xxx chip on the specified i2c bus */
struct ch7xxx_priv *ch7xxx;
@@ -196,8 +198,7 @@ static bool ch7xxx_init(struct intel_dvo_device *dvo,
if (ch7xxx == NULL)
return false;
- dvo->i2c_bus = i2cbus;
- dvo->i2c_bus->slave_addr = dvo->slave_addr;
+ dvo->i2c_bus = adapter;
dvo->dev_priv = ch7xxx;
ch7xxx->quiet = true;
@@ -207,7 +208,7 @@ static bool ch7xxx_init(struct intel_dvo_device *dvo,
name = ch7xxx_get_id(vendor);
if (!name) {
DRM_DEBUG("ch7xxx not detected; got 0x%02x from %s slave %d.\n",
- vendor, i2cbus->adapter.name, i2cbus->slave_addr);
+ vendor, adapter->name, dvo->slave_addr);
goto out;
}
@@ -217,7 +218,7 @@ static bool ch7xxx_init(struct intel_dvo_device *dvo,
if (device != CH7xxx_DID) {
DRM_DEBUG("ch7xxx not detected; got 0x%02x from %s slave %d.\n",
- vendor, i2cbus->adapter.name, i2cbus->slave_addr);
+ vendor, adapter->name, dvo->slave_addr);
goto out;
}
diff --git a/drivers/gpu/drm/i915/dvo_ivch.c b/drivers/gpu/drm/i915/dvo_ivch.c
index 0c8d375e8e3..aa176f9921f 100644
--- a/drivers/gpu/drm/i915/dvo_ivch.c
+++ b/drivers/gpu/drm/i915/dvo_ivch.c
@@ -169,13 +169,14 @@ static void ivch_dump_regs(struct intel_dvo_device *dvo);
static bool ivch_read(struct intel_dvo_device *dvo, int addr, uint16_t *data)
{
struct ivch_priv *priv = dvo->dev_priv;
- struct intel_i2c_chan *i2cbus = dvo->i2c_bus;
+ struct i2c_adapter *adapter = dvo->i2c_bus;
+ struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter);
u8 out_buf[1];
u8 in_buf[2];
struct i2c_msg msgs[] = {
{
- .addr = i2cbus->slave_addr,
+ .addr = dvo->slave_addr,
.flags = I2C_M_RD,
.len = 0,
},
@@ -186,7 +187,7 @@ static bool ivch_read(struct intel_dvo_device *dvo, int addr, uint16_t *data)
.buf = out_buf,
},
{
- .addr = i2cbus->slave_addr,
+ .addr = dvo->slave_addr,
.flags = I2C_M_RD | I2C_M_NOSTART,
.len = 2,
.buf = in_buf,
@@ -202,7 +203,7 @@ static bool ivch_read(struct intel_dvo_device *dvo, int addr, uint16_t *data)
if (!priv->quiet) {
DRM_DEBUG("Unable to read register 0x%02x from %s:%02x.\n",
- addr, i2cbus->adapter.name, i2cbus->slave_addr);
+ addr, i2cbus->adapter.name, dvo->slave_addr);
}
return false;
}
@@ -211,10 +212,11 @@ static bool ivch_read(struct intel_dvo_device *dvo, int addr, uint16_t *data)
static bool ivch_write(struct intel_dvo_device *dvo, int addr, uint16_t data)
{
struct ivch_priv *priv = dvo->dev_priv;
- struct intel_i2c_chan *i2cbus = dvo->i2c_bus;
+ struct i2c_adapter *adapter = dvo->i2c_bus;
+ struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter);
u8 out_buf[3];
struct i2c_msg msg = {
- .addr = i2cbus->slave_addr,
+ .addr = dvo->slave_addr,
.flags = 0,
.len = 3,
.buf = out_buf,
@@ -229,7 +231,7 @@ static bool ivch_write(struct intel_dvo_device *dvo, int addr, uint16_t data)
if (!priv->quiet) {
DRM_DEBUG("Unable to write register 0x%02x to %s:%d.\n",
- addr, i2cbus->adapter.name, i2cbus->slave_addr);
+ addr, i2cbus->adapter.name, dvo->slave_addr);
}
return false;
@@ -237,7 +239,7 @@ static bool ivch_write(struct intel_dvo_device *dvo, int addr, uint16_t data)
/** Probes the given bus and slave address for an ivch */
static bool ivch_init(struct intel_dvo_device *dvo,
- struct intel_i2c_chan *i2cbus)
+ struct i2c_adapter *adapter)
{
struct ivch_priv *priv;
uint16_t temp;
@@ -246,8 +248,7 @@ static bool ivch_init(struct intel_dvo_device *dvo,
if (priv == NULL)
return false;
- dvo->i2c_bus = i2cbus;
- dvo->i2c_bus->slave_addr = dvo->slave_addr;
+ dvo->i2c_bus = adapter;
dvo->dev_priv = priv;
priv->quiet = true;
diff --git a/drivers/gpu/drm/i915/dvo_sil164.c b/drivers/gpu/drm/i915/dvo_sil164.c
index 033a4bb070b..e1c1f7341e5 100644
--- a/drivers/gpu/drm/i915/dvo_sil164.c
+++ b/drivers/gpu/drm/i915/dvo_sil164.c
@@ -76,19 +76,20 @@ struct sil164_priv {
static bool sil164_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
{
struct sil164_priv *sil = dvo->dev_priv;
- struct intel_i2c_chan *i2cbus = dvo->i2c_bus;
+ struct i2c_adapter *adapter = dvo->i2c_bus;
+ struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter);
u8 out_buf[2];
u8 in_buf[2];
struct i2c_msg msgs[] = {
{
- .addr = i2cbus->slave_addr,
+ .addr = dvo->slave_addr,
.flags = 0,
.len = 1,
.buf = out_buf,
},
{
- .addr = i2cbus->slave_addr,
+ .addr = dvo->slave_addr,
.flags = I2C_M_RD,
.len = 1,
.buf = in_buf,
@@ -105,7 +106,7 @@ static bool sil164_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
if (!sil->quiet) {
DRM_DEBUG("Unable to read register 0x%02x from %s:%02x.\n",
- addr, i2cbus->adapter.name, i2cbus->slave_addr);
+ addr, i2cbus->adapter.name, dvo->slave_addr);
}
return false;
}
@@ -113,10 +114,11 @@ static bool sil164_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
static bool sil164_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch)
{
struct sil164_priv *sil= dvo->dev_priv;
- struct intel_i2c_chan *i2cbus = dvo->i2c_bus;
+ struct i2c_adapter *adapter = dvo->i2c_bus;
+ struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter);
uint8_t out_buf[2];
struct i2c_msg msg = {
- .addr = i2cbus->slave_addr,
+ .addr = dvo->slave_addr,
.flags = 0,
.len = 2,
.buf = out_buf,
@@ -130,7 +132,7 @@ static bool sil164_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch)
if (!sil->quiet) {
DRM_DEBUG("Unable to write register 0x%02x to %s:%d.\n",
- addr, i2cbus->adapter.name, i2cbus->slave_addr);
+ addr, i2cbus->adapter.name, dvo->slave_addr);
}
return false;
@@ -138,7 +140,7 @@ static bool sil164_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch)
/* Silicon Image 164 driver for chip on i2c bus */
static bool sil164_init(struct intel_dvo_device *dvo,
- struct intel_i2c_chan *i2cbus)
+ struct i2c_adapter *adapter)
{
/* this will detect the SIL164 chip on the specified i2c bus */
struct sil164_priv *sil;
@@ -148,8 +150,7 @@ static bool sil164_init(struct intel_dvo_device *dvo,
if (sil == NULL)
return false;
- dvo->i2c_bus = i2cbus;
- dvo->i2c_bus->slave_addr = dvo->slave_addr;
+ dvo->i2c_bus = adapter;
dvo->dev_priv = sil;
sil->quiet = true;
@@ -158,7 +159,7 @@ static bool sil164_init(struct intel_dvo_device *dvo,
if (ch != (SIL164_VID & 0xff)) {
DRM_DEBUG("sil164 not detected got %d: from %s Slave %d.\n",
- ch, i2cbus->adapter.name, i2cbus->slave_addr);
+ ch, adapter->name, dvo->slave_addr);
goto out;
}
@@ -167,7 +168,7 @@ static bool sil164_init(struct intel_dvo_device *dvo,
if (ch != (SIL164_DID & 0xff)) {
DRM_DEBUG("sil164 not detected got %d: from %s Slave %d.\n",
- ch, i2cbus->adapter.name, i2cbus->slave_addr);
+ ch, adapter->name, dvo->slave_addr);
goto out;
}
sil->quiet = false;
diff --git a/drivers/gpu/drm/i915/dvo_tfp410.c b/drivers/gpu/drm/i915/dvo_tfp410.c
index 207fda806eb..9ecc907384e 100644
--- a/drivers/gpu/drm/i915/dvo_tfp410.c
+++ b/drivers/gpu/drm/i915/dvo_tfp410.c
@@ -101,19 +101,20 @@ struct tfp410_priv {
static bool tfp410_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
{
struct tfp410_priv *tfp = dvo->dev_priv;
- struct intel_i2c_chan *i2cbus = dvo->i2c_bus;
+ struct i2c_adapter *adapter = dvo->i2c_bus;
+ struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter);
u8 out_buf[2];
u8 in_buf[2];
struct i2c_msg msgs[] = {
{
- .addr = i2cbus->slave_addr,
+ .addr = dvo->slave_addr,
.flags = 0,
.len = 1,
.buf = out_buf,
},
{
- .addr = i2cbus->slave_addr,
+ .addr = dvo->slave_addr,
.flags = I2C_M_RD,
.len = 1,
.buf = in_buf,
@@ -130,7 +131,7 @@ static bool tfp410_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
if (!tfp->quiet) {
DRM_DEBUG("Unable to read register 0x%02x from %s:%02x.\n",
- addr, i2cbus->adapter.name, i2cbus->slave_addr);
+ addr, i2cbus->adapter.name, dvo->slave_addr);
}
return false;
}
@@ -138,10 +139,11 @@ static bool tfp410_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
static bool tfp410_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch)
{
struct tfp410_priv *tfp = dvo->dev_priv;
- struct intel_i2c_chan *i2cbus = dvo->i2c_bus;
+ struct i2c_adapter *adapter = dvo->i2c_bus;
+ struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter);
uint8_t out_buf[2];
struct i2c_msg msg = {
- .addr = i2cbus->slave_addr,
+ .addr = dvo->slave_addr,
.flags = 0,
.len = 2,
.buf = out_buf,
@@ -155,7 +157,7 @@ static bool tfp410_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch)
if (!tfp->quiet) {
DRM_DEBUG("Unable to write register 0x%02x to %s:%d.\n",
- addr, i2cbus->adapter.name, i2cbus->slave_addr);
+ addr, i2cbus->adapter.name, dvo->slave_addr);
}
return false;
@@ -174,7 +176,7 @@ static int tfp410_getid(struct intel_dvo_device *dvo, int addr)
/* Ti TFP410 driver for chip on i2c bus */
static bool tfp410_init(struct intel_dvo_device *dvo,
- struct intel_i2c_chan *i2cbus)
+ struct i2c_adapter *adapter)
{
/* this will detect the tfp410 chip on the specified i2c bus */
struct tfp410_priv *tfp;
@@ -184,20 +186,19 @@ static bool tfp410_init(struct intel_dvo_device *dvo,
if (tfp == NULL)
return false;
- dvo->i2c_bus = i2cbus;
- dvo->i2c_bus->slave_addr = dvo->slave_addr;
+ dvo->i2c_bus = adapter;
dvo->dev_priv = tfp;
tfp->quiet = true;
if ((id = tfp410_getid(dvo, TFP410_VID_LO)) != TFP410_VID) {
DRM_DEBUG("tfp410 not detected got VID %X: from %s Slave %d.\n",
- id, i2cbus->adapter.name, i2cbus->slave_addr);
+ id, adapter->name, dvo->slave_addr);
goto out;
}
if ((id = tfp410_getid(dvo, TFP410_DID_LO)) != TFP410_DID) {
DRM_DEBUG("tfp410 not detected got DID %X: from %s Slave %d.\n",
- id, i2cbus->adapter.name, i2cbus->slave_addr);
+ id, adapter->name, dvo->slave_addr);
goto out;
}
tfp->quiet = false;
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 98560e1e899..e3cb4025e32 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -67,8 +67,6 @@ static int i915_suspend(struct drm_device *dev, pm_message_t state)
pci_save_state(dev->pdev);
- i915_save_state(dev);
-
/* If KMS is active, we do the leavevt stuff here */
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
if (i915_gem_idle(dev))
@@ -77,6 +75,8 @@ static int i915_suspend(struct drm_device *dev, pm_message_t state)
drm_irq_uninstall(dev);
}
+ i915_save_state(dev);
+
intel_opregion_free(dev, 1);
if (state.event == PM_EVENT_SUSPEND) {
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 7a84f04e843..bb4c2d387b6 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -306,6 +306,17 @@ typedef struct drm_i915_private {
u32 saveCURBPOS;
u32 saveCURBBASE;
u32 saveCURSIZE;
+ u32 saveDP_B;
+ u32 saveDP_C;
+ u32 saveDP_D;
+ u32 savePIPEA_GMCH_DATA_M;
+ u32 savePIPEB_GMCH_DATA_M;
+ u32 savePIPEA_GMCH_DATA_N;
+ u32 savePIPEB_GMCH_DATA_N;
+ u32 savePIPEA_DP_LINK_M;
+ u32 savePIPEB_DP_LINK_M;
+ u32 savePIPEA_DP_LINK_N;
+ u32 savePIPEB_DP_LINK_N;
struct {
struct drm_mm gtt_space;
@@ -857,6 +868,7 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
#define HAS_128_BYTE_Y_TILING(dev) (IS_I9XX(dev) && !(IS_I915G(dev) || \
IS_I915GM(dev)))
#define SUPPORTS_INTEGRATED_HDMI(dev) (IS_G4X(dev) || IS_IGDNG(dev))
+#define SUPPORTS_INTEGRATED_DP(dev) (IS_G4X(dev) || IS_IGDNG(dev))
#define I915_HAS_HOTPLUG(dev) (IS_I945G(dev) || IS_I945GM(dev) || IS_I965G(dev))
#define PRIMARY_RINGBUFFER_SIZE (128*1024)
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index fd2b8bdffe3..876b65cb762 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1006,7 +1006,7 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
mutex_lock(&dev->struct_mutex);
#if WATCH_BUF
- DRM_INFO("set_domain_ioctl %p(%d), %08x %08x\n",
+ DRM_INFO("set_domain_ioctl %p(%zd), %08x %08x\n",
obj, obj->size, read_domains, write_domain);
#endif
if (read_domains & I915_GEM_DOMAIN_GTT) {
@@ -1050,7 +1050,7 @@ i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
}
#if WATCH_BUF
- DRM_INFO("%s: sw_finish %d (%p %d)\n",
+ DRM_INFO("%s: sw_finish %d (%p %zd)\n",
__func__, args->handle, obj, obj->size);
#endif
obj_priv = obj->driver_private;
@@ -2423,7 +2423,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
}
#if WATCH_BUF
- DRM_INFO("Binding object of size %d at 0x%08x\n",
+ DRM_INFO("Binding object of size %zd at 0x%08x\n",
obj->size, obj_priv->gtt_offset);
#endif
ret = i915_gem_object_get_pages(obj);
@@ -4227,6 +4227,7 @@ i915_gem_lastclose(struct drm_device *dev)
void
i915_gem_load(struct drm_device *dev)
{
+ int i;
drm_i915_private_t *dev_priv = dev->dev_private;
spin_lock_init(&dev_priv->mm.active_list_lock);
@@ -4246,6 +4247,18 @@ i915_gem_load(struct drm_device *dev)
else
dev_priv->num_fence_regs = 8;
+ /* Initialize fence registers to zero */
+ if (IS_I965G(dev)) {
+ for (i = 0; i < 16; i++)
+ I915_WRITE64(FENCE_REG_965_0 + (i * 8), 0);
+ } else {
+ for (i = 0; i < 8; i++)
+ I915_WRITE(FENCE_REG_830_0 + (i * 4), 0);
+ if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
+ for (i = 0; i < 8; i++)
+ I915_WRITE(FENCE_REG_945_8 + (i * 4), 0);
+ }
+
i915_gem_detect_bit_6_swizzle(dev);
}
diff --git a/drivers/gpu/drm/i915/i915_gem_debug.c b/drivers/gpu/drm/i915/i915_gem_debug.c
index 8d0b943e2c5..e602614bd3f 100644
--- a/drivers/gpu/drm/i915/i915_gem_debug.c
+++ b/drivers/gpu/drm/i915/i915_gem_debug.c
@@ -87,7 +87,7 @@ i915_gem_dump_object(struct drm_gem_object *obj, int len,
chunk_len = page_len - chunk;
if (chunk_len > 128)
chunk_len = 128;
- i915_gem_dump_page(obj_priv->page_list[page],
+ i915_gem_dump_page(obj_priv->pages[page],
chunk, chunk + chunk_len,
obj_priv->gtt_offset +
page * PAGE_SIZE,
@@ -143,7 +143,7 @@ i915_gem_object_check_coherency(struct drm_gem_object *obj, int handle)
uint32_t *backing_map = NULL;
int bad_count = 0;
- DRM_INFO("%s: checking coherency of object %p@0x%08x (%d, %dkb):\n",
+ DRM_INFO("%s: checking coherency of object %p@0x%08x (%d, %zdkb):\n",
__func__, obj, obj_priv->gtt_offset, handle,
obj->size / 1024);
@@ -157,7 +157,7 @@ i915_gem_object_check_coherency(struct drm_gem_object *obj, int handle)
for (page = 0; page < obj->size / PAGE_SIZE; page++) {
int i;
- backing_map = kmap_atomic(obj_priv->page_list[page], KM_USER0);
+ backing_map = kmap_atomic(obj_priv->pages[page], KM_USER0);
if (backing_map == NULL) {
DRM_ERROR("failed to map backing page\n");
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index 5c1ceec49f5..daeae62e1c2 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -114,11 +114,13 @@ intel_alloc_mchbar_resource(struct drm_device *dev)
mchbar_addr = ((u64)temp_hi << 32) | temp_lo;
/* If ACPI doesn't have it, assume we need to allocate it ourselves */
+#ifdef CONFIG_PNP
if (mchbar_addr &&
pnp_range_reserved(mchbar_addr, mchbar_addr + MCHBAR_SIZE)) {
ret = 0;
goto out_put;
}
+#endif
/* Get some space for it */
ret = pci_bus_alloc_resource(bridge_dev->bus, &dev_priv->mch_res,
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index b86b7b7130c..228546f6eaa 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -232,7 +232,17 @@ static void i915_hotplug_work_func(struct work_struct *work)
drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
hotplug_work);
struct drm_device *dev = dev_priv->dev;
-
+ struct drm_mode_config *mode_config = &dev->mode_config;
+ struct drm_connector *connector;
+
+ if (mode_config->num_connector) {
+ list_for_each_entry(connector, &mode_config->connector_list, head) {
+ struct intel_output *intel_output = to_intel_output(connector);
+
+ if (intel_output->hot_plug)
+ (*intel_output->hot_plug) (intel_output);
+ }
+ }
/* Just fire off a uevent and let userspace tell us what to do */
drm_sysfs_hotplug_event(dev);
}
diff --git a/drivers/gpu/drm/i915/i915_opregion.c b/drivers/gpu/drm/i915/i915_opregion.c
index dc425e74a26..e4b4e8898e3 100644
--- a/drivers/gpu/drm/i915/i915_opregion.c
+++ b/drivers/gpu/drm/i915/i915_opregion.c
@@ -419,7 +419,7 @@ void intel_opregion_free(struct drm_device *dev, int suspend)
return;
if (!suspend)
- acpi_video_exit();
+ acpi_video_unregister();
opregion->acpi->drdy = 0;
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index f6237a0b113..88bf7521405 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -569,6 +569,19 @@
#define C0DRB3 0x10206
#define C1DRB3 0x10606
+/* Clocking configuration register */
+#define CLKCFG 0x10c00
+#define CLKCFG_FSB_400 (0 << 0) /* hrawclk 100 */
+#define CLKCFG_FSB_533 (1 << 0) /* hrawclk 133 */
+#define CLKCFG_FSB_667 (3 << 0) /* hrawclk 166 */
+#define CLKCFG_FSB_800 (2 << 0) /* hrawclk 200 */
+#define CLKCFG_FSB_1067 (6 << 0) /* hrawclk 266 */
+#define CLKCFG_FSB_1333 (7 << 0) /* hrawclk 333 */
+/* this is a guess, could be 5 as well */
+#define CLKCFG_FSB_1600 (4 << 0) /* hrawclk 400 */
+#define CLKCFG_FSB_1600_ALT (5 << 0) /* hrawclk 400 */
+#define CLKCFG_FSB_MASK (7 << 0)
+
/** GM965 GM45 render standby register */
#define MCHBAR_RENDER_STANDBY 0x111B8
@@ -834,9 +847,25 @@
#define HORIZ_INTERP_MASK (3 << 6)
#define HORIZ_AUTO_SCALE (1 << 5)
#define PANEL_8TO6_DITHER_ENABLE (1 << 3)
+#define PFIT_FILTER_FUZZY (0 << 24)
+#define PFIT_SCALING_AUTO (0 << 26)
+#define PFIT_SCALING_PROGRAMMED (1 << 26)
+#define PFIT_SCALING_PILLAR (2 << 26)
+#define PFIT_SCALING_LETTER (3 << 26)
#define PFIT_PGM_RATIOS 0x61234
#define PFIT_VERT_SCALE_MASK 0xfff00000
#define PFIT_HORIZ_SCALE_MASK 0x0000fff0
+/* Pre-965 */
+#define PFIT_VERT_SCALE_SHIFT 20
+#define PFIT_VERT_SCALE_MASK 0xfff00000
+#define PFIT_HORIZ_SCALE_SHIFT 4
+#define PFIT_HORIZ_SCALE_MASK 0x0000fff0
+/* 965+ */
+#define PFIT_VERT_SCALE_SHIFT_965 16
+#define PFIT_VERT_SCALE_MASK_965 0x1fff0000
+#define PFIT_HORIZ_SCALE_SHIFT_965 0
+#define PFIT_HORIZ_SCALE_MASK_965 0x00001fff
+
#define PFIT_AUTO_RATIOS 0x61238
/* Backlight control */
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index a98e2831ed3..8d8e083d14a 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -322,6 +322,20 @@ int i915_save_state(struct drm_device *dev)
dev_priv->savePP_OFF_DELAYS = I915_READ(PP_OFF_DELAYS);
dev_priv->savePP_DIVISOR = I915_READ(PP_DIVISOR);
+ /* Display Port state */
+ if (SUPPORTS_INTEGRATED_DP(dev)) {
+ dev_priv->saveDP_B = I915_READ(DP_B);
+ dev_priv->saveDP_C = I915_READ(DP_C);
+ dev_priv->saveDP_D = I915_READ(DP_D);
+ dev_priv->savePIPEA_GMCH_DATA_M = I915_READ(PIPEA_GMCH_DATA_M);
+ dev_priv->savePIPEB_GMCH_DATA_M = I915_READ(PIPEB_GMCH_DATA_M);
+ dev_priv->savePIPEA_GMCH_DATA_N = I915_READ(PIPEA_GMCH_DATA_N);
+ dev_priv->savePIPEB_GMCH_DATA_N = I915_READ(PIPEB_GMCH_DATA_N);
+ dev_priv->savePIPEA_DP_LINK_M = I915_READ(PIPEA_DP_LINK_M);
+ dev_priv->savePIPEB_DP_LINK_M = I915_READ(PIPEB_DP_LINK_M);
+ dev_priv->savePIPEA_DP_LINK_N = I915_READ(PIPEA_DP_LINK_N);
+ dev_priv->savePIPEB_DP_LINK_N = I915_READ(PIPEB_DP_LINK_N);
+ }
/* FIXME: save TV & SDVO state */
/* FBC state */
@@ -404,7 +418,19 @@ int i915_restore_state(struct drm_device *dev)
for (i = 0; i < 8; i++)
I915_WRITE(FENCE_REG_945_8 + (i * 4), dev_priv->saveFENCE[i+8]);
}
-
+
+ /* Display port ratios (must be done before clock is set) */
+ if (SUPPORTS_INTEGRATED_DP(dev)) {
+ I915_WRITE(PIPEA_GMCH_DATA_M, dev_priv->savePIPEA_GMCH_DATA_M);
+ I915_WRITE(PIPEB_GMCH_DATA_M, dev_priv->savePIPEB_GMCH_DATA_M);
+ I915_WRITE(PIPEA_GMCH_DATA_N, dev_priv->savePIPEA_GMCH_DATA_N);
+ I915_WRITE(PIPEB_GMCH_DATA_N, dev_priv->savePIPEB_GMCH_DATA_N);
+ I915_WRITE(PIPEA_DP_LINK_M, dev_priv->savePIPEA_DP_LINK_M);
+ I915_WRITE(PIPEB_DP_LINK_M, dev_priv->savePIPEB_DP_LINK_M);
+ I915_WRITE(PIPEA_DP_LINK_N, dev_priv->savePIPEA_DP_LINK_N);
+ I915_WRITE(PIPEB_DP_LINK_N, dev_priv->savePIPEB_DP_LINK_N);
+ }
+
/* Pipe & plane A info */
/* Prime the clock */
if (dev_priv->saveDPLL_A & DPLL_VCO_ENABLE) {
@@ -518,6 +544,12 @@ int i915_restore_state(struct drm_device *dev)
I915_WRITE(PP_DIVISOR, dev_priv->savePP_DIVISOR);
I915_WRITE(PP_CONTROL, dev_priv->savePP_CONTROL);
+ /* Display Port state */
+ if (SUPPORTS_INTEGRATED_DP(dev)) {
+ I915_WRITE(DP_B, dev_priv->saveDP_B);
+ I915_WRITE(DP_C, dev_priv->saveDP_C);
+ I915_WRITE(DP_D, dev_priv->saveDP_D);
+ }
/* FIXME: restore TV & SDVO state */
/* FBC info */
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index cdd126d068a..716409a5724 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -99,9 +99,11 @@ parse_lfp_panel_data(struct drm_i915_private *dev_priv,
{
struct bdb_lvds_options *lvds_options;
struct bdb_lvds_lfp_data *lvds_lfp_data;
+ struct bdb_lvds_lfp_data_ptrs *lvds_lfp_data_ptrs;
struct bdb_lvds_lfp_data_entry *entry;
struct lvds_dvo_timing *dvo_timing;
struct drm_display_mode *panel_fixed_mode;
+ int lfp_data_size;
/* Defaults if we can't find VBT info */
dev_priv->lvds_dither = 0;
@@ -119,9 +121,17 @@ parse_lfp_panel_data(struct drm_i915_private *dev_priv,
if (!lvds_lfp_data)
return;
+ lvds_lfp_data_ptrs = find_section(bdb, BDB_LVDS_LFP_DATA_PTRS);
+ if (!lvds_lfp_data_ptrs)
+ return;
+
dev_priv->lvds_vbt = 1;
- entry = &lvds_lfp_data->data[lvds_options->panel_type];
+ lfp_data_size = lvds_lfp_data_ptrs->ptr[1].dvo_timing_offset -
+ lvds_lfp_data_ptrs->ptr[0].dvo_timing_offset;
+ entry = (struct bdb_lvds_lfp_data_entry *)
+ ((uint8_t *)lvds_lfp_data->data + (lfp_data_size *
+ lvds_options->panel_type));
dvo_timing = &entry->dvo_timing;
panel_fixed_mode = kzalloc(sizeof(*panel_fixed_mode), GFP_KERNEL);
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 3e1c7816211..73e7b9cecac 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -29,6 +29,7 @@
#include "intel_drv.h"
#include "i915_drm.h"
#include "i915_drv.h"
+#include "intel_dp.h"
#include "drm_crtc_helper.h"
@@ -127,19 +128,6 @@ struct intel_limit {
#define I9XX_P2_LVDS_FAST 7
#define I9XX_P2_LVDS_SLOW_LIMIT 112000
-#define INTEL_LIMIT_I8XX_DVO_DAC 0
-#define INTEL_LIMIT_I8XX_LVDS 1
-#define INTEL_LIMIT_I9XX_SDVO_DAC 2
-#define INTEL_LIMIT_I9XX_LVDS 3
-#define INTEL_LIMIT_G4X_SDVO 4
-#define INTEL_LIMIT_G4X_HDMI_DAC 5
-#define INTEL_LIMIT_G4X_SINGLE_CHANNEL_LVDS 6
-#define INTEL_LIMIT_G4X_DUAL_CHANNEL_LVDS 7
-#define INTEL_LIMIT_IGD_SDVO_DAC 8
-#define INTEL_LIMIT_IGD_LVDS 9
-#define INTEL_LIMIT_IGDNG_SDVO_DAC 10
-#define INTEL_LIMIT_IGDNG_LVDS 11
-
/*The parameter is for SDVO on G4x platform*/
#define G4X_DOT_SDVO_MIN 25000
#define G4X_DOT_SDVO_MAX 270000
@@ -218,6 +206,25 @@ struct intel_limit {
#define G4X_P2_DUAL_CHANNEL_LVDS_FAST 7
#define G4X_P2_DUAL_CHANNEL_LVDS_LIMIT 0
+/*The parameter is for DISPLAY PORT on G4x platform*/
+#define G4X_DOT_DISPLAY_PORT_MIN 161670
+#define G4X_DOT_DISPLAY_PORT_MAX 227000
+#define G4X_N_DISPLAY_PORT_MIN 1
+#define G4X_N_DISPLAY_PORT_MAX 2
+#define G4X_M_DISPLAY_PORT_MIN 97
+#define G4X_M_DISPLAY_PORT_MAX 108
+#define G4X_M1_DISPLAY_PORT_MIN 0x10
+#define G4X_M1_DISPLAY_PORT_MAX 0x12
+#define G4X_M2_DISPLAY_PORT_MIN 0x05
+#define G4X_M2_DISPLAY_PORT_MAX 0x06
+#define G4X_P_DISPLAY_PORT_MIN 10
+#define G4X_P_DISPLAY_PORT_MAX 20
+#define G4X_P1_DISPLAY_PORT_MIN 1
+#define G4X_P1_DISPLAY_PORT_MAX 2
+#define G4X_P2_DISPLAY_PORT_SLOW 10
+#define G4X_P2_DISPLAY_PORT_FAST 10
+#define G4X_P2_DISPLAY_PORT_LIMIT 0
+
/* IGDNG */
/* as we calculate clock using (register_value + 2) for
N/M1/M2, so here the range value for them is (actual_value-2).
@@ -256,8 +263,11 @@ static bool
intel_igdng_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
int target, int refclk, intel_clock_t *best_clock);
-static const intel_limit_t intel_limits[] = {
- { /* INTEL_LIMIT_I8XX_DVO_DAC */
+static bool
+intel_find_pll_g4x_dp(const intel_limit_t *, struct drm_crtc *crtc,
+ int target, int refclk, intel_clock_t *best_clock);
+
+static const intel_limit_t intel_limits_i8xx_dvo = {
.dot = { .min = I8XX_DOT_MIN, .max = I8XX_DOT_MAX },
.vco = { .min = I8XX_VCO_MIN, .max = I8XX_VCO_MAX },
.n = { .min = I8XX_N_MIN, .max = I8XX_N_MAX },
@@ -269,8 +279,9 @@ static const intel_limit_t intel_limits[] = {
.p2 = { .dot_limit = I8XX_P2_SLOW_LIMIT,
.p2_slow = I8XX_P2_SLOW, .p2_fast = I8XX_P2_FAST },
.find_pll = intel_find_best_PLL,
- },
- { /* INTEL_LIMIT_I8XX_LVDS */
+};
+
+static const intel_limit_t intel_limits_i8xx_lvds = {
.dot = { .min = I8XX_DOT_MIN, .max = I8XX_DOT_MAX },
.vco = { .min = I8XX_VCO_MIN, .max = I8XX_VCO_MAX },
.n = { .min = I8XX_N_MIN, .max = I8XX_N_MAX },
@@ -282,8 +293,9 @@ static const intel_limit_t intel_limits[] = {
.p2 = { .dot_limit = I8XX_P2_SLOW_LIMIT,
.p2_slow = I8XX_P2_LVDS_SLOW, .p2_fast = I8XX_P2_LVDS_FAST },
.find_pll = intel_find_best_PLL,
- },
- { /* INTEL_LIMIT_I9XX_SDVO_DAC */
+};
+
+static const intel_limit_t intel_limits_i9xx_sdvo = {
.dot = { .min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX },
.vco = { .min = I9XX_VCO_MIN, .max = I9XX_VCO_MAX },
.n = { .min = I9XX_N_MIN, .max = I9XX_N_MAX },
@@ -295,8 +307,9 @@ static const intel_limit_t intel_limits[] = {
.p2 = { .dot_limit = I9XX_P2_SDVO_DAC_SLOW_LIMIT,
.p2_slow = I9XX_P2_SDVO_DAC_SLOW, .p2_fast = I9XX_P2_SDVO_DAC_FAST },
.find_pll = intel_find_best_PLL,
- },
- { /* INTEL_LIMIT_I9XX_LVDS */
+};
+
+static const intel_limit_t intel_limits_i9xx_lvds = {
.dot = { .min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX },
.vco = { .min = I9XX_VCO_MIN, .max = I9XX_VCO_MAX },
.n = { .min = I9XX_N_MIN, .max = I9XX_N_MAX },
@@ -311,9 +324,10 @@ static const intel_limit_t intel_limits[] = {
.p2 = { .dot_limit = I9XX_P2_LVDS_SLOW_LIMIT,
.p2_slow = I9XX_P2_LVDS_SLOW, .p2_fast = I9XX_P2_LVDS_FAST },
.find_pll = intel_find_best_PLL,
- },
+};
+
/* below parameter and function is for G4X Chipset Family*/
- { /* INTEL_LIMIT_G4X_SDVO */
+static const intel_limit_t intel_limits_g4x_sdvo = {
.dot = { .min = G4X_DOT_SDVO_MIN, .max = G4X_DOT_SDVO_MAX },
.vco = { .min = G4X_VCO_MIN, .max = G4X_VCO_MAX},
.n = { .min = G4X_N_SDVO_MIN, .max = G4X_N_SDVO_MAX },
@@ -327,8 +341,9 @@ static const intel_limit_t intel_limits[] = {
.p2_fast = G4X_P2_SDVO_FAST
},
.find_pll = intel_g4x_find_best_PLL,
- },
- { /* INTEL_LIMIT_G4X_HDMI_DAC */
+};
+
+static const intel_limit_t intel_limits_g4x_hdmi = {
.dot = { .min = G4X_DOT_HDMI_DAC_MIN, .max = G4X_DOT_HDMI_DAC_MAX },
.vco = { .min = G4X_VCO_MIN, .max = G4X_VCO_MAX},
.n = { .min = G4X_N_HDMI_DAC_MIN, .max = G4X_N_HDMI_DAC_MAX },
@@ -342,8 +357,9 @@ static const intel_limit_t intel_limits[] = {
.p2_fast = G4X_P2_HDMI_DAC_FAST
},
.find_pll = intel_g4x_find_best_PLL,
- },
- { /* INTEL_LIMIT_G4X_SINGLE_CHANNEL_LVDS */
+};
+
+static const intel_limit_t intel_limits_g4x_single_channel_lvds = {
.dot = { .min = G4X_DOT_SINGLE_CHANNEL_LVDS_MIN,
.max = G4X_DOT_SINGLE_CHANNEL_LVDS_MAX },
.vco = { .min = G4X_VCO_MIN,
@@ -365,8 +381,9 @@ static const intel_limit_t intel_limits[] = {
.p2_fast = G4X_P2_SINGLE_CHANNEL_LVDS_FAST
},
.find_pll = intel_g4x_find_best_PLL,
- },
- { /* INTEL_LIMIT_G4X_DUAL_CHANNEL_LVDS */
+};
+
+static const intel_limit_t intel_limits_g4x_dual_channel_lvds = {
.dot = { .min = G4X_DOT_DUAL_CHANNEL_LVDS_MIN,
.max = G4X_DOT_DUAL_CHANNEL_LVDS_MAX },
.vco = { .min = G4X_VCO_MIN,
@@ -388,8 +405,32 @@ static const intel_limit_t intel_limits[] = {
.p2_fast = G4X_P2_DUAL_CHANNEL_LVDS_FAST
},
.find_pll = intel_g4x_find_best_PLL,
- },
- { /* INTEL_LIMIT_IGD_SDVO */
+};
+
+static const intel_limit_t intel_limits_g4x_display_port = {
+ .dot = { .min = G4X_DOT_DISPLAY_PORT_MIN,
+ .max = G4X_DOT_DISPLAY_PORT_MAX },
+ .vco = { .min = G4X_VCO_MIN,
+ .max = G4X_VCO_MAX},
+ .n = { .min = G4X_N_DISPLAY_PORT_MIN,
+ .max = G4X_N_DISPLAY_PORT_MAX },
+ .m = { .min = G4X_M_DISPLAY_PORT_MIN,
+ .max = G4X_M_DISPLAY_PORT_MAX },
+ .m1 = { .min = G4X_M1_DISPLAY_PORT_MIN,
+ .max = G4X_M1_DISPLAY_PORT_MAX },
+ .m2 = { .min = G4X_M2_DISPLAY_PORT_MIN,
+ .max = G4X_M2_DISPLAY_PORT_MAX },
+ .p = { .min = G4X_P_DISPLAY_PORT_MIN,
+ .max = G4X_P_DISPLAY_PORT_MAX },
+ .p1 = { .min = G4X_P1_DISPLAY_PORT_MIN,
+ .max = G4X_P1_DISPLAY_PORT_MAX},
+ .p2 = { .dot_limit = G4X_P2_DISPLAY_PORT_LIMIT,
+ .p2_slow = G4X_P2_DISPLAY_PORT_SLOW,
+ .p2_fast = G4X_P2_DISPLAY_PORT_FAST },
+ .find_pll = intel_find_pll_g4x_dp,
+};
+
+static const intel_limit_t intel_limits_igd_sdvo = {
.dot = { .min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX},
.vco = { .min = IGD_VCO_MIN, .max = IGD_VCO_MAX },
.n = { .min = IGD_N_MIN, .max = IGD_N_MAX },
@@ -401,8 +442,9 @@ static const intel_limit_t intel_limits[] = {
.p2 = { .dot_limit = I9XX_P2_SDVO_DAC_SLOW_LIMIT,
.p2_slow = I9XX_P2_SDVO_DAC_SLOW, .p2_fast = I9XX_P2_SDVO_DAC_FAST },
.find_pll = intel_find_best_PLL,
- },
- { /* INTEL_LIMIT_IGD_LVDS */
+};
+
+static const intel_limit_t intel_limits_igd_lvds = {
.dot = { .min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX },
.vco = { .min = IGD_VCO_MIN, .max = IGD_VCO_MAX },
.n = { .min = IGD_N_MIN, .max = IGD_N_MAX },
@@ -415,8 +457,9 @@ static const intel_limit_t intel_limits[] = {
.p2 = { .dot_limit = I9XX_P2_LVDS_SLOW_LIMIT,
.p2_slow = I9XX_P2_LVDS_SLOW, .p2_fast = I9XX_P2_LVDS_SLOW },
.find_pll = intel_find_best_PLL,
- },
- { /* INTEL_LIMIT_IGDNG_SDVO_DAC */
+};
+
+static const intel_limit_t intel_limits_igdng_sdvo = {
.dot = { .min = IGDNG_DOT_MIN, .max = IGDNG_DOT_MAX },
.vco = { .min = IGDNG_VCO_MIN, .max = IGDNG_VCO_MAX },
.n = { .min = IGDNG_N_MIN, .max = IGDNG_N_MAX },
@@ -429,8 +472,9 @@ static const intel_limit_t intel_limits[] = {
.p2_slow = IGDNG_P2_SDVO_DAC_SLOW,
.p2_fast = IGDNG_P2_SDVO_DAC_FAST },
.find_pll = intel_igdng_find_best_PLL,
- },
- { /* INTEL_LIMIT_IGDNG_LVDS */
+};
+
+static const intel_limit_t intel_limits_igdng_lvds = {
.dot = { .min = IGDNG_DOT_MIN, .max = IGDNG_DOT_MAX },
.vco = { .min = IGDNG_VCO_MIN, .max = IGDNG_VCO_MAX },
.n = { .min = IGDNG_N_MIN, .max = IGDNG_N_MAX },
@@ -443,16 +487,15 @@ static const intel_limit_t intel_limits[] = {
.p2_slow = IGDNG_P2_LVDS_SLOW,
.p2_fast = IGDNG_P2_LVDS_FAST },
.find_pll = intel_igdng_find_best_PLL,
- },
};
static const intel_limit_t *intel_igdng_limit(struct drm_crtc *crtc)
{
const intel_limit_t *limit;
if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
- limit = &intel_limits[INTEL_LIMIT_IGDNG_LVDS];
+ limit = &intel_limits_igdng_lvds;
else
- limit = &intel_limits[INTEL_LIMIT_IGDNG_SDVO_DAC];
+ limit = &intel_limits_igdng_sdvo;
return limit;
}
@@ -467,19 +510,19 @@ static const intel_limit_t *intel_g4x_limit(struct drm_crtc *crtc)
if ((I915_READ(LVDS) & LVDS_CLKB_POWER_MASK) ==
LVDS_CLKB_POWER_UP)
/* LVDS with dual channel */
- limit = &intel_limits
- [INTEL_LIMIT_G4X_DUAL_CHANNEL_LVDS];
+ limit = &intel_limits_g4x_dual_channel_lvds;
else
/* LVDS with single channel */
- limit = &intel_limits
- [INTEL_LIMIT_G4X_SINGLE_CHANNEL_LVDS];
+ limit = &intel_limits_g4x_single_channel_lvds;
} else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI) ||
intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG)) {
- limit = &intel_limits[INTEL_LIMIT_G4X_HDMI_DAC];
+ limit = &intel_limits_g4x_hdmi;
} else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO)) {
- limit = &intel_limits[INTEL_LIMIT_G4X_SDVO];
+ limit = &intel_limits_g4x_sdvo;
+ } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) {
+ limit = &intel_limits_g4x_display_port;
} else /* The option is for other outputs */
- limit = &intel_limits[INTEL_LIMIT_I9XX_SDVO_DAC];
+ limit = &intel_limits_i9xx_sdvo;
return limit;
}
@@ -495,19 +538,19 @@ static const intel_limit_t *intel_limit(struct drm_crtc *crtc)
limit = intel_g4x_limit(crtc);
} else if (IS_I9XX(dev) && !IS_IGD(dev)) {
if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
- limit = &intel_limits[INTEL_LIMIT_I9XX_LVDS];
+ limit = &intel_limits_i9xx_lvds;
else
- limit = &intel_limits[INTEL_LIMIT_I9XX_SDVO_DAC];
+ limit = &intel_limits_i9xx_sdvo;
} else if (IS_IGD(dev)) {
if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
- limit = &intel_limits[INTEL_LIMIT_IGD_LVDS];
+ limit = &intel_limits_igd_lvds;
else
- limit = &intel_limits[INTEL_LIMIT_IGD_SDVO_DAC];
+ limit = &intel_limits_igd_sdvo;
} else {
if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
- limit = &intel_limits[INTEL_LIMIT_I8XX_LVDS];
+ limit = &intel_limits_i8xx_lvds;
else
- limit = &intel_limits[INTEL_LIMIT_I8XX_DVO_DAC];
+ limit = &intel_limits_i8xx_dvo;
}
return limit;
}
@@ -764,6 +807,35 @@ out:
return found;
}
+/* DisplayPort has only two frequencies, 162MHz and 270MHz */
+static bool
+intel_find_pll_g4x_dp(const intel_limit_t *limit, struct drm_crtc *crtc,
+ int target, int refclk, intel_clock_t *best_clock)
+{
+ intel_clock_t clock;
+ if (target < 200000) {
+ clock.dot = 161670;
+ clock.p = 20;
+ clock.p1 = 2;
+ clock.p2 = 10;
+ clock.n = 0x01;
+ clock.m = 97;
+ clock.m1 = 0x10;
+ clock.m2 = 0x05;
+ } else {
+ clock.dot = 270000;
+ clock.p = 10;
+ clock.p1 = 1;
+ clock.p2 = 10;
+ clock.n = 0x02;
+ clock.m = 108;
+ clock.m1 = 0x12;
+ clock.m2 = 0x06;
+ }
+ memcpy(best_clock, &clock, sizeof(intel_clock_t));
+ return true;
+}
+
void
intel_wait_for_vblank(struct drm_device *dev)
{
@@ -1541,7 +1613,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
intel_clock_t clock;
u32 dpll = 0, fp = 0, dspcntr, pipeconf;
bool ok, is_sdvo = false, is_dvo = false;
- bool is_crt = false, is_lvds = false, is_tv = false;
+ bool is_crt = false, is_lvds = false, is_tv = false, is_dp = false;
struct drm_mode_config *mode_config = &dev->mode_config;
struct drm_connector *connector;
const intel_limit_t *limit;
@@ -1585,6 +1657,9 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
case INTEL_OUTPUT_ANALOG:
is_crt = true;
break;
+ case INTEL_OUTPUT_DISPLAYPORT:
+ is_dp = true;
+ break;
}
num_outputs++;
@@ -1600,6 +1675,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
} else {
refclk = 48000;
}
+
/*
* Returns a set of divisors for the desired target clock with the given
@@ -1662,6 +1738,8 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
else if (IS_IGDNG(dev))
dpll |= (sdvo_pixel_multiply - 1) << PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;
}
+ if (is_dp)
+ dpll |= DPLL_DVO_HIGH_SPEED;
/* compute bitmask from p1 value */
if (IS_IGD(dev))
@@ -1809,6 +1887,8 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
I915_WRITE(lvds_reg, lvds);
I915_READ(lvds_reg);
}
+ if (is_dp)
+ intel_dp_set_m_n(crtc, mode, adjusted_mode);
I915_WRITE(fp_reg, fp);
I915_WRITE(dpll_reg, dpll);
@@ -2475,6 +2555,8 @@ static void intel_setup_outputs(struct drm_device *dev)
found = intel_sdvo_init(dev, SDVOB);
if (!found && SUPPORTS_INTEGRATED_HDMI(dev))
intel_hdmi_init(dev, SDVOB);
+ if (!found && SUPPORTS_INTEGRATED_DP(dev))
+ intel_dp_init(dev, DP_B);
}
/* Before G4X SDVOC doesn't have its own detect register */
@@ -2487,7 +2569,11 @@ static void intel_setup_outputs(struct drm_device *dev)
found = intel_sdvo_init(dev, SDVOC);
if (!found && SUPPORTS_INTEGRATED_HDMI(dev))
intel_hdmi_init(dev, SDVOC);
+ if (!found && SUPPORTS_INTEGRATED_DP(dev))
+ intel_dp_init(dev, DP_C);
}
+ if (SUPPORTS_INTEGRATED_DP(dev) && (I915_READ(DP_D) & DP_DETECTED))
+ intel_dp_init(dev, DP_D);
} else
intel_dvo_init(dev);
@@ -2530,6 +2616,11 @@ static void intel_setup_outputs(struct drm_device *dev)
(1 << 1));
clone_mask = (1 << INTEL_OUTPUT_TVOUT);
break;
+ case INTEL_OUTPUT_DISPLAYPORT:
+ crtc_mask = ((1 << 0) |
+ (1 << 1));
+ clone_mask = (1 << INTEL_OUTPUT_DISPLAYPORT);
+ break;
}
encoder->possible_crtcs = crtc_mask;
encoder->possible_clones = intel_connector_clones(dev, clone_mask);
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
new file mode 100644
index 00000000000..8f8d37d5663
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -0,0 +1,1153 @@
+/*
+ * Copyright © 2008 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ * Keith Packard <keithp@keithp.com>
+ *
+ */
+
+#include <linux/i2c.h>
+#include "drmP.h"
+#include "drm.h"
+#include "drm_crtc.h"
+#include "drm_crtc_helper.h"
+#include "intel_drv.h"
+#include "i915_drm.h"
+#include "i915_drv.h"
+#include "intel_dp.h"
+
+#define DP_LINK_STATUS_SIZE 6
+#define DP_LINK_CHECK_TIMEOUT (10 * 1000)
+
+#define DP_LINK_CONFIGURATION_SIZE 9
+
+struct intel_dp_priv {
+ uint32_t output_reg;
+ uint32_t DP;
+ uint8_t link_configuration[DP_LINK_CONFIGURATION_SIZE];
+ uint32_t save_DP;
+ uint8_t save_link_configuration[DP_LINK_CONFIGURATION_SIZE];
+ bool has_audio;
+ int dpms_mode;
+ uint8_t link_bw;
+ uint8_t lane_count;
+ uint8_t dpcd[4];
+ struct intel_output *intel_output;
+ struct i2c_adapter adapter;
+ struct i2c_algo_dp_aux_data algo;
+};
+
+static void
+intel_dp_link_train(struct intel_output *intel_output, uint32_t DP,
+ uint8_t link_configuration[DP_LINK_CONFIGURATION_SIZE]);
+
+static void
+intel_dp_link_down(struct intel_output *intel_output, uint32_t DP);
+
+static int
+intel_dp_max_lane_count(struct intel_output *intel_output)
+{
+ struct intel_dp_priv *dp_priv = intel_output->dev_priv;
+ int max_lane_count = 4;
+
+ if (dp_priv->dpcd[0] >= 0x11) {
+ max_lane_count = dp_priv->dpcd[2] & 0x1f;
+ switch (max_lane_count) {
+ case 1: case 2: case 4:
+ break;
+ default:
+ max_lane_count = 4;
+ }
+ }
+ return max_lane_count;
+}
+
+static int
+intel_dp_max_link_bw(struct intel_output *intel_output)
+{
+ struct intel_dp_priv *dp_priv = intel_output->dev_priv;
+ int max_link_bw = dp_priv->dpcd[1];
+
+ switch (max_link_bw) {
+ case DP_LINK_BW_1_62:
+ case DP_LINK_BW_2_7:
+ break;
+ default:
+ max_link_bw = DP_LINK_BW_1_62;
+ break;
+ }
+ return max_link_bw;
+}
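+
+/*
+ * For instance, a hypothetical sink whose DPCD reads dpcd[0] = 0x11
+ * (rev 1.1), dpcd[1] = 0x0a (DP_LINK_BW_2_7) and dpcd[2] = 0x84 would
+ * report max_lane_count = 0x84 & 0x1f = 4 and max_link_bw =
+ * DP_LINK_BW_2_7, i.e. a 270 MHz symbol clock per lane.
+ */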
+
+static int
+intel_dp_link_clock(uint8_t link_bw)
+{
+ if (link_bw == DP_LINK_BW_2_7)
+ return 270000;
+ else
+ return 162000;
+}
+
+/* I think this is a fiction */
+static int
+intel_dp_link_required(int pixel_clock)
+{
+ return pixel_clock * 3;
+}
+
+static int
+intel_dp_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ struct intel_output *intel_output = to_intel_output(connector);
+ int max_link_clock = intel_dp_link_clock(intel_dp_max_link_bw(intel_output));
+ int max_lanes = intel_dp_max_lane_count(intel_output);
+
+ if (intel_dp_link_required(mode->clock) > max_link_clock * max_lanes)
+ return MODE_CLOCK_HIGH;
+
+ if (mode->clock < 10000)
+ return MODE_CLOCK_LOW;
+
+ return MODE_OK;
+}
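+
+/*
+ * As a rough example, a 1920x1080@60 mode with a 148500 kHz dot clock
+ * needs intel_dp_link_required(148500) = 445500; four lanes at the
+ * 2.7 GHz rate give 270000 * 4 = 1080000, so the mode is accepted,
+ * while a single 1.62 GHz lane (162000) would return MODE_CLOCK_HIGH.
+ */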
+
+static uint32_t
+pack_aux(uint8_t *src, int src_bytes)
+{
+ int i;
+ uint32_t v = 0;
+
+ if (src_bytes > 4)
+ src_bytes = 4;
+ for (i = 0; i < src_bytes; i++)
+ v |= ((uint32_t) src[i]) << ((3-i) * 8);
+ return v;
+}
+
+static void
+unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes)
+{
+ int i;
+ if (dst_bytes > 4)
+ dst_bytes = 4;
+ for (i = 0; i < dst_bytes; i++)
+ dst[i] = src >> ((3-i) * 8);
+}
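+
+/*
+ * pack_aux() above places bytes MSB-first in the 32-bit register word:
+ * packing { 0x12, 0x34, 0x56 } yields 0x12345600, and
+ * unpack_aux(0x12345600, dst, 3) recovers { 0x12, 0x34, 0x56 }.
+ */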
+
+/* hrawclock is 1/4 the FSB frequency */
+static int
+intel_hrawclk(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ uint32_t clkcfg;
+
+ clkcfg = I915_READ(CLKCFG);
+ switch (clkcfg & CLKCFG_FSB_MASK) {
+ case CLKCFG_FSB_400:
+ return 100;
+ case CLKCFG_FSB_533:
+ return 133;
+ case CLKCFG_FSB_667:
+ return 166;
+ case CLKCFG_FSB_800:
+ return 200;
+ case CLKCFG_FSB_1067:
+ return 266;
+ case CLKCFG_FSB_1333:
+ return 333;
+ /* these two are just a guess; one of them might be right */
+ case CLKCFG_FSB_1600:
+ case CLKCFG_FSB_1600_ALT:
+ return 400;
+ default:
+ return 133;
+ }
+}
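+
+/*
+ * For example, on a 1067 MT/s FSB the CLKCFG field decodes to
+ * CLKCFG_FSB_1067, so intel_hrawclk() returns 266 (MHz) and the AUX
+ * channel code below programs a divider of 266 / 2 = 133 to get the
+ * roughly 2 MHz AUX bit clock it wants.
+ */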
+
+static int
+intel_dp_aux_ch(struct intel_output *intel_output,
+ uint8_t *send, int send_bytes,
+ uint8_t *recv, int recv_size)
+{
+ struct intel_dp_priv *dp_priv = intel_output->dev_priv;
+ uint32_t output_reg = dp_priv->output_reg;
+ struct drm_device *dev = intel_output->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ uint32_t ch_ctl = output_reg + 0x10;
+ uint32_t ch_data = ch_ctl + 4;
+ int i;
+ int recv_bytes;
+ uint32_t ctl;
+ uint32_t status;
+ uint32_t aux_clock_divider;
+ int try;
+
+ /* The AUX clock divider is derived from hrawclk and should
+ * run the AUX channel at roughly 2MHz, so take the hrawclk
+ * value (in MHz) and divide it by 2.
+ */
+ aux_clock_divider = intel_hrawclk(dev) / 2;
+ /* Must try at least 3 times according to DP spec */
+ for (try = 0; try < 5; try++) {
+ /* Load the send data into the aux channel data registers */
+ for (i = 0; i < send_bytes; i += 4) {
+ uint32_t d = pack_aux(send + i, send_bytes - i);
+
+ I915_WRITE(ch_data + i, d);
+ }
+
+ ctl = (DP_AUX_CH_CTL_SEND_BUSY |
+ DP_AUX_CH_CTL_TIME_OUT_400us |
+ (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
+ (5 << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
+ (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT) |
+ DP_AUX_CH_CTL_DONE |
+ DP_AUX_CH_CTL_TIME_OUT_ERROR |
+ DP_AUX_CH_CTL_RECEIVE_ERROR);
+
+ /* Send the command and wait for it to complete */
+ I915_WRITE(ch_ctl, ctl);
+ (void) I915_READ(ch_ctl);
+ for (;;) {
+ udelay(100);
+ status = I915_READ(ch_ctl);
+ if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
+ break;
+ }
+
+ /* Clear done status and any errors */
+ I915_WRITE(ch_ctl, (ctl |
+ DP_AUX_CH_CTL_DONE |
+ DP_AUX_CH_CTL_TIME_OUT_ERROR |
+ DP_AUX_CH_CTL_RECEIVE_ERROR));
+ (void) I915_READ(ch_ctl);
+ if ((status & DP_AUX_CH_CTL_TIME_OUT_ERROR) == 0)
+ break;
+ }
+
+ if ((status & DP_AUX_CH_CTL_DONE) == 0) {
+ printk(KERN_ERR "dp_aux_ch not done status 0x%08x\n", status);
+ return -EBUSY;
+ }
+
+ /* Check for timeout or receive error.
+ * Timeouts occur when the sink is not connected
+ */
+ if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
+ printk(KERN_ERR "dp_aux_ch receive error status 0x%08x\n", status);
+ return -EIO;
+ }
+ if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
+ printk(KERN_ERR "dp_aux_ch timeout status 0x%08x\n", status);
+ return -ETIMEDOUT;
+ }
+
+ /* Unload any bytes sent back from the other side */
+ recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
+ DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);
+
+ if (recv_bytes > recv_size)
+ recv_bytes = recv_size;
+
+ for (i = 0; i < recv_bytes; i += 4) {
+ uint32_t d = I915_READ(ch_data + i);
+
+ unpack_aux(d, recv + i, recv_bytes - i);
+ }
+
+ return recv_bytes;
+}
+
+/* Write data to the aux channel in native mode */
+static int
+intel_dp_aux_native_write(struct intel_output *intel_output,
+ uint16_t address, uint8_t *send, int send_bytes)
+{
+ int ret;
+ uint8_t msg[20];
+ int msg_bytes;
+ uint8_t ack;
+
+ if (send_bytes > 16)
+ return -1;
+ msg[0] = AUX_NATIVE_WRITE << 4;
+ msg[1] = address >> 8;
+ msg[2] = address;
+ msg[3] = send_bytes - 1;
+ memcpy(&msg[4], send, send_bytes);
+ msg_bytes = send_bytes + 4;
+ for (;;) {
+ ret = intel_dp_aux_ch(intel_output, msg, msg_bytes, &ack, 1);
+ if (ret < 0)
+ return ret;
+ if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK)
+ break;
+ else if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_DEFER)
+ udelay(100);
+ else
+ return -EIO;
+ }
+ return send_bytes;
+}
+
+/* Write a single byte to the aux channel in native mode */
+static int
+intel_dp_aux_native_write_1(struct intel_output *intel_output,
+ uint16_t address, uint8_t byte)
+{
+ return intel_dp_aux_native_write(intel_output, address, &byte, 1);
+}
+
+/* read bytes from a native aux channel */
+static int
+intel_dp_aux_native_read(struct intel_output *intel_output,
+ uint16_t address, uint8_t *recv, int recv_bytes)
+{
+ uint8_t msg[4];
+ int msg_bytes;
+ uint8_t reply[20];
+ int reply_bytes;
+ uint8_t ack;
+ int ret;
+
+ msg[0] = AUX_NATIVE_READ << 4;
+ msg[1] = address >> 8;
+ msg[2] = address & 0xff;
+ msg[3] = recv_bytes - 1;
+
+ msg_bytes = 4;
+ reply_bytes = recv_bytes + 1;
+
+ for (;;) {
+ ret = intel_dp_aux_ch(intel_output, msg, msg_bytes,
+ reply, reply_bytes);
+ if (ret == 0)
+ return -EPROTO;
+ if (ret < 0)
+ return ret;
+ ack = reply[0];
+ if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK) {
+ memcpy(recv, reply + 1, ret - 1);
+ return ret - 1;
+ }
+ else if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_DEFER)
+ udelay(100);
+ else
+ return -EIO;
+ }
+}
+
+static int
+intel_dp_i2c_aux_ch(struct i2c_adapter *adapter,
+ uint8_t *send, int send_bytes,
+ uint8_t *recv, int recv_bytes)
+{
+ struct intel_dp_priv *dp_priv = container_of(adapter,
+ struct intel_dp_priv,
+ adapter);
+ struct intel_output *intel_output = dp_priv->intel_output;
+
+ return intel_dp_aux_ch(intel_output,
+ send, send_bytes, recv, recv_bytes);
+}
+
+static int
+intel_dp_i2c_init(struct intel_output *intel_output, const char *name)
+{
+ struct intel_dp_priv *dp_priv = intel_output->dev_priv;
+
+ DRM_ERROR("i2c_init %s\n", name);
+ dp_priv->algo.running = false;
+ dp_priv->algo.address = 0;
+ dp_priv->algo.aux_ch = intel_dp_i2c_aux_ch;
+
+ memset(&dp_priv->adapter, '\0', sizeof (dp_priv->adapter));
+ dp_priv->adapter.owner = THIS_MODULE;
+ dp_priv->adapter.class = I2C_CLASS_DDC;
+ strncpy (dp_priv->adapter.name, name, sizeof dp_priv->adapter.name - 1);
+ dp_priv->adapter.name[sizeof dp_priv->adapter.name - 1] = '\0';
+ dp_priv->adapter.algo_data = &dp_priv->algo;
+ dp_priv->adapter.dev.parent = &intel_output->base.kdev;
+
+ return i2c_dp_aux_add_bus(&dp_priv->adapter);
+}
+
+static bool
+intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct intel_output *intel_output = enc_to_intel_output(encoder);
+ struct intel_dp_priv *dp_priv = intel_output->dev_priv;
+ int lane_count, clock;
+ int max_lane_count = intel_dp_max_lane_count(intel_output);
+ int max_clock = intel_dp_max_link_bw(intel_output) == DP_LINK_BW_2_7 ? 1 : 0;
+ static int bws[2] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7 };
+
+ for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) {
+ for (clock = 0; clock <= max_clock; clock++) {
+ int link_avail = intel_dp_link_clock(bws[clock]) * lane_count;
+
+ if (intel_dp_link_required(mode->clock) <= link_avail) {
+ dp_priv->link_bw = bws[clock];
+ dp_priv->lane_count = lane_count;
+ adjusted_mode->clock = intel_dp_link_clock(dp_priv->link_bw);
+ printk(KERN_ERR "link bw %02x lane count %d clock %d\n",
+ dp_priv->link_bw, dp_priv->lane_count,
+ adjusted_mode->clock);
+ return true;
+ }
+ }
+ }
+ return false;
+}
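+
+/*
+ * Example of the search above: for a hypothetical mode with a
+ * 200000 kHz dot clock, intel_dp_link_required() gives 600000.
+ * One and two lanes fall short at either rate (at most 2 * 270000 =
+ * 540000), so the first fit is four lanes at DP_LINK_BW_1_62
+ * (4 * 162000 = 648000); lane_count becomes 4, link_bw 0x06 and
+ * adjusted_mode->clock 162000.
+ */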
+
+struct intel_dp_m_n {
+ uint32_t tu;
+ uint32_t gmch_m;
+ uint32_t gmch_n;
+ uint32_t link_m;
+ uint32_t link_n;
+};
+
+static void
+intel_reduce_ratio(uint32_t *num, uint32_t *den)
+{
+ while (*num > 0xffffff || *den > 0xffffff) {
+ *num >>= 1;
+ *den >>= 1;
+ }
+}
+
+static void
+intel_dp_compute_m_n(int bytes_per_pixel,
+ int nlanes,
+ int pixel_clock,
+ int link_clock,
+ struct intel_dp_m_n *m_n)
+{
+ m_n->tu = 64;
+ m_n->gmch_m = pixel_clock * bytes_per_pixel;
+ m_n->gmch_n = link_clock * nlanes;
+ intel_reduce_ratio(&m_n->gmch_m, &m_n->gmch_n);
+ m_n->link_m = pixel_clock;
+ m_n->link_n = link_clock;
+ intel_reduce_ratio(&m_n->link_m, &m_n->link_n);
+}
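+
+/*
+ * Worked example: 3 bytes per pixel, 4 lanes, a 154000 kHz pixel clock
+ * and a 270000 kHz link clock give gmch_m = 462000, gmch_n = 1080000,
+ * link_m = 154000 and link_n = 270000; all of these already fit in
+ * 24 bits, so intel_reduce_ratio() leaves them untouched and tu stays 64.
+ */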
+
+void
+intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_mode_config *mode_config = &dev->mode_config;
+ struct drm_connector *connector;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int lane_count = 4;
+ struct intel_dp_m_n m_n;
+
+ /*
+ * Find the lane count in the intel_output private
+ */
+ list_for_each_entry(connector, &mode_config->connector_list, head) {
+ struct intel_output *intel_output = to_intel_output(connector);
+ struct intel_dp_priv *dp_priv = intel_output->dev_priv;
+
+ if (!connector->encoder || connector->encoder->crtc != crtc)
+ continue;
+
+ if (intel_output->type == INTEL_OUTPUT_DISPLAYPORT) {
+ lane_count = dp_priv->lane_count;
+ break;
+ }
+ }
+
+ /*
+ * Compute the GMCH and Link ratios. The '3' here is
+ * the number of bytes_per_pixel post-LUT, which we always
+ * set up for 8-bits of R/G/B, or 3 bytes total.
+ */
+ intel_dp_compute_m_n(3, lane_count,
+ mode->clock, adjusted_mode->clock, &m_n);
+
+ if (intel_crtc->pipe == 0) {
+ I915_WRITE(PIPEA_GMCH_DATA_M,
+ ((m_n.tu - 1) << PIPE_GMCH_DATA_M_TU_SIZE_SHIFT) |
+ m_n.gmch_m);
+ I915_WRITE(PIPEA_GMCH_DATA_N,
+ m_n.gmch_n);
+ I915_WRITE(PIPEA_DP_LINK_M, m_n.link_m);
+ I915_WRITE(PIPEA_DP_LINK_N, m_n.link_n);
+ } else {
+ I915_WRITE(PIPEB_GMCH_DATA_M,
+ ((m_n.tu - 1) << PIPE_GMCH_DATA_M_TU_SIZE_SHIFT) |
+ m_n.gmch_m);
+ I915_WRITE(PIPEB_GMCH_DATA_N,
+ m_n.gmch_n);
+ I915_WRITE(PIPEB_DP_LINK_M, m_n.link_m);
+ I915_WRITE(PIPEB_DP_LINK_N, m_n.link_n);
+ }
+}
+
+static void
+intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct intel_output *intel_output = enc_to_intel_output(encoder);
+ struct intel_dp_priv *dp_priv = intel_output->dev_priv;
+ struct drm_crtc *crtc = intel_output->enc.crtc;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+
+ dp_priv->DP = (DP_LINK_TRAIN_OFF |
+ DP_VOLTAGE_0_4 |
+ DP_PRE_EMPHASIS_0 |
+ DP_SYNC_VS_HIGH |
+ DP_SYNC_HS_HIGH);
+
+ switch (dp_priv->lane_count) {
+ case 1:
+ dp_priv->DP |= DP_PORT_WIDTH_1;
+ break;
+ case 2:
+ dp_priv->DP |= DP_PORT_WIDTH_2;
+ break;
+ case 4:
+ dp_priv->DP |= DP_PORT_WIDTH_4;
+ break;
+ }
+ if (dp_priv->has_audio)
+ dp_priv->DP |= DP_AUDIO_OUTPUT_ENABLE;
+
+ memset(dp_priv->link_configuration, 0, DP_LINK_CONFIGURATION_SIZE);
+ dp_priv->link_configuration[0] = dp_priv->link_bw;
+ dp_priv->link_configuration[1] = dp_priv->lane_count;
+
+ /*
+ * Check for DPCD version 1.1 or later and
+ * enable enhanced framing in that case
+ */
+ if (dp_priv->dpcd[0] >= 0x11) {
+ dp_priv->link_configuration[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
+ dp_priv->DP |= DP_ENHANCED_FRAMING;
+ }
+
+ if (intel_crtc->pipe == 1)
+ dp_priv->DP |= DP_PIPEB_SELECT;
+}
+
+static void
+intel_dp_dpms(struct drm_encoder *encoder, int mode)
+{
+ struct intel_output *intel_output = enc_to_intel_output(encoder);
+ struct intel_dp_priv *dp_priv = intel_output->dev_priv;
+ struct drm_device *dev = intel_output->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ uint32_t dp_reg = I915_READ(dp_priv->output_reg);
+
+ if (mode != DRM_MODE_DPMS_ON) {
+ if (dp_reg & DP_PORT_EN)
+ intel_dp_link_down(intel_output, dp_priv->DP);
+ } else {
+ if (!(dp_reg & DP_PORT_EN))
+ intel_dp_link_train(intel_output, dp_priv->DP, dp_priv->link_configuration);
+ }
+ dp_priv->dpms_mode = mode;
+}
+
+/*
+ * Fetch AUX CH registers 0x202 - 0x207 which contain
+ * link status information
+ */
+static bool
+intel_dp_get_link_status(struct intel_output *intel_output,
+ uint8_t link_status[DP_LINK_STATUS_SIZE])
+{
+ int ret;
+
+ ret = intel_dp_aux_native_read(intel_output,
+ DP_LANE0_1_STATUS,
+ link_status, DP_LINK_STATUS_SIZE);
+ if (ret != DP_LINK_STATUS_SIZE)
+ return false;
+ return true;
+}
+
+static uint8_t
+intel_dp_link_status(uint8_t link_status[DP_LINK_STATUS_SIZE],
+ int r)
+{
+ return link_status[r - DP_LANE0_1_STATUS];
+}
+
+static void
+intel_dp_save(struct drm_connector *connector)
+{
+ struct intel_output *intel_output = to_intel_output(connector);
+ struct drm_device *dev = intel_output->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_dp_priv *dp_priv = intel_output->dev_priv;
+
+ dp_priv->save_DP = I915_READ(dp_priv->output_reg);
+ intel_dp_aux_native_read(intel_output, DP_LINK_BW_SET,
+ dp_priv->save_link_configuration,
+ sizeof (dp_priv->save_link_configuration));
+}
+
+static uint8_t
+intel_get_adjust_request_voltage(uint8_t link_status[DP_LINK_STATUS_SIZE],
+ int lane)
+{
+ int i = DP_ADJUST_REQUEST_LANE0_1 + (lane >> 1);
+ int s = ((lane & 1) ?
+ DP_ADJUST_VOLTAGE_SWING_LANE1_SHIFT :
+ DP_ADJUST_VOLTAGE_SWING_LANE0_SHIFT);
+ uint8_t l = intel_dp_link_status(link_status, i);
+
+ return ((l >> s) & 3) << DP_TRAIN_VOLTAGE_SWING_SHIFT;
+}
+
+static uint8_t
+intel_get_adjust_request_pre_emphasis(uint8_t link_status[DP_LINK_STATUS_SIZE],
+ int lane)
+{
+ int i = DP_ADJUST_REQUEST_LANE0_1 + (lane >> 1);
+ int s = ((lane & 1) ?
+ DP_ADJUST_PRE_EMPHASIS_LANE1_SHIFT :
+ DP_ADJUST_PRE_EMPHASIS_LANE0_SHIFT);
+ uint8_t l = intel_dp_link_status(link_status, i);
+
+ return ((l >> s) & 3) << DP_TRAIN_PRE_EMPHASIS_SHIFT;
+}
+
+
+#if 0
+static char *voltage_names[] = {
+ "0.4V", "0.6V", "0.8V", "1.2V"
+};
+static char *pre_emph_names[] = {
+ "0dB", "3.5dB", "6dB", "9.5dB"
+};
+static char *link_train_names[] = {
+ "pattern 1", "pattern 2", "idle", "off"
+};
+#endif
+
+/*
+ * These are source-specific values; current Intel hardware supports
+ * a maximum voltage of 800mV and a maximum pre-emphasis of 6dB
+ */
+#define I830_DP_VOLTAGE_MAX DP_TRAIN_VOLTAGE_SWING_800
+
+static uint8_t
+intel_dp_pre_emphasis_max(uint8_t voltage_swing)
+{
+ switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
+ case DP_TRAIN_VOLTAGE_SWING_400:
+ return DP_TRAIN_PRE_EMPHASIS_6;
+ case DP_TRAIN_VOLTAGE_SWING_600:
+ return DP_TRAIN_PRE_EMPHASIS_6;
+ case DP_TRAIN_VOLTAGE_SWING_800:
+ return DP_TRAIN_PRE_EMPHASIS_3_5;
+ case DP_TRAIN_VOLTAGE_SWING_1200:
+ default:
+ return DP_TRAIN_PRE_EMPHASIS_0;
+ }
+}
+
+static void
+intel_get_adjust_train(struct intel_output *intel_output,
+ uint8_t link_status[DP_LINK_STATUS_SIZE],
+ int lane_count,
+ uint8_t train_set[4])
+{
+ uint8_t v = 0;
+ uint8_t p = 0;
+ int lane;
+
+ for (lane = 0; lane < lane_count; lane++) {
+ uint8_t this_v = intel_get_adjust_request_voltage(link_status, lane);
+ uint8_t this_p = intel_get_adjust_request_pre_emphasis(link_status, lane);
+
+ if (this_v > v)
+ v = this_v;
+ if (this_p > p)
+ p = this_p;
+ }
+
+ if (v >= I830_DP_VOLTAGE_MAX)
+ v = I830_DP_VOLTAGE_MAX | DP_TRAIN_MAX_SWING_REACHED;
+
+ if (p >= intel_dp_pre_emphasis_max(v))
+ p = intel_dp_pre_emphasis_max(v) | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
+
+ for (lane = 0; lane < 4; lane++)
+ train_set[lane] = v | p;
+}
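+
+/*
+ * For example, if the link status shows lane 0 asking for 0.6V/3.5dB
+ * and lane 1 asking for 0.8V/0dB, the loop above keeps the maxima
+ * (0.8V and 3.5dB). 0.8V is I830_DP_VOLTAGE_MAX, so the max-swing flag
+ * is set, and since 3.5dB is also the pre-emphasis ceiling at that
+ * swing, the max-pre-emphasis flag is set too; the combined value is
+ * then written to all four train_set entries.
+ */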
+
+static uint32_t
+intel_dp_signal_levels(uint8_t train_set, int lane_count)
+{
+ uint32_t signal_levels = 0;
+
+ switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
+ case DP_TRAIN_VOLTAGE_SWING_400:
+ default:
+ signal_levels |= DP_VOLTAGE_0_4;
+ break;
+ case DP_TRAIN_VOLTAGE_SWING_600:
+ signal_levels |= DP_VOLTAGE_0_6;
+ break;
+ case DP_TRAIN_VOLTAGE_SWING_800:
+ signal_levels |= DP_VOLTAGE_0_8;
+ break;
+ case DP_TRAIN_VOLTAGE_SWING_1200:
+ signal_levels |= DP_VOLTAGE_1_2;
+ break;
+ }
+ switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
+ case DP_TRAIN_PRE_EMPHASIS_0:
+ default:
+ signal_levels |= DP_PRE_EMPHASIS_0;
+ break;
+ case DP_TRAIN_PRE_EMPHASIS_3_5:
+ signal_levels |= DP_PRE_EMPHASIS_3_5;
+ break;
+ case DP_TRAIN_PRE_EMPHASIS_6:
+ signal_levels |= DP_PRE_EMPHASIS_6;
+ break;
+ case DP_TRAIN_PRE_EMPHASIS_9_5:
+ signal_levels |= DP_PRE_EMPHASIS_9_5;
+ break;
+ }
+ return signal_levels;
+}
+
+static uint8_t
+intel_get_lane_status(uint8_t link_status[DP_LINK_STATUS_SIZE],
+ int lane)
+{
+ int i = DP_LANE0_1_STATUS + (lane >> 1);
+ int s = (lane & 1) * 4;
+ uint8_t l = intel_dp_link_status(link_status, i);
+
+ return (l >> s) & 0xf;
+}
+
+/* Check that clock recovery is done on all channels */
+static bool
+intel_clock_recovery_ok(uint8_t link_status[DP_LINK_STATUS_SIZE], int lane_count)
+{
+ int lane;
+ uint8_t lane_status;
+
+ for (lane = 0; lane < lane_count; lane++) {
+ lane_status = intel_get_lane_status(link_status, lane);
+ if ((lane_status & DP_LANE_CR_DONE) == 0)
+ return false;
+ }
+ return true;
+}
+
+/* Check to see if channel eq is done on all channels */
+#define CHANNEL_EQ_BITS (DP_LANE_CR_DONE|\
+ DP_LANE_CHANNEL_EQ_DONE|\
+ DP_LANE_SYMBOL_LOCKED)
+static bool
+intel_channel_eq_ok(uint8_t link_status[DP_LINK_STATUS_SIZE], int lane_count)
+{
+ uint8_t lane_align;
+ uint8_t lane_status;
+ int lane;
+
+ lane_align = intel_dp_link_status(link_status,
+ DP_LANE_ALIGN_STATUS_UPDATED);
+ if ((lane_align & DP_INTERLANE_ALIGN_DONE) == 0)
+ return false;
+ for (lane = 0; lane < lane_count; lane++) {
+ lane_status = intel_get_lane_status(link_status, lane);
+ if ((lane_status & CHANNEL_EQ_BITS) != CHANNEL_EQ_BITS)
+ return false;
+ }
+ return true;
+}
+
+static bool
+intel_dp_set_link_train(struct intel_output *intel_output,
+ uint32_t dp_reg_value,
+ uint8_t dp_train_pat,
+ uint8_t train_set[4],
+ bool first)
+{
+ struct drm_device *dev = intel_output->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_dp_priv *dp_priv = intel_output->dev_priv;
+ int ret;
+
+ I915_WRITE(dp_priv->output_reg, dp_reg_value);
+ POSTING_READ(dp_priv->output_reg);
+ if (first)
+ intel_wait_for_vblank(dev);
+
+ intel_dp_aux_native_write_1(intel_output,
+ DP_TRAINING_PATTERN_SET,
+ dp_train_pat);
+
+ ret = intel_dp_aux_native_write(intel_output,
+ DP_TRAINING_LANE0_SET, train_set, 4);
+ if (ret != 4)
+ return false;
+
+ return true;
+}
+
+static void
+intel_dp_link_train(struct intel_output *intel_output, uint32_t DP,
+ uint8_t link_configuration[DP_LINK_CONFIGURATION_SIZE])
+{
+ struct drm_device *dev = intel_output->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_dp_priv *dp_priv = intel_output->dev_priv;
+ uint8_t train_set[4];
+ uint8_t link_status[DP_LINK_STATUS_SIZE];
+ int i;
+ uint8_t voltage;
+ bool clock_recovery = false;
+ bool channel_eq = false;
+ bool first = true;
+ int tries;
+
+ /* Write the link configuration data */
+ intel_dp_aux_native_write(intel_output, 0x100,
+ link_configuration, DP_LINK_CONFIGURATION_SIZE);
+
+ DP |= DP_PORT_EN;
+ DP &= ~DP_LINK_TRAIN_MASK;
+ memset(train_set, 0, 4);
+ voltage = 0xff;
+ tries = 0;
+ clock_recovery = false;
+ for (;;) {
+ /* Use train_set[0] to set the voltage and pre emphasis values */
+ uint32_t signal_levels = intel_dp_signal_levels(train_set[0], dp_priv->lane_count);
+ DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
+
+ if (!intel_dp_set_link_train(intel_output, DP | DP_LINK_TRAIN_PAT_1,
+ DP_TRAINING_PATTERN_1, train_set, first))
+ break;
+ first = false;
+ /* Set training pattern 1 */
+
+ udelay(100);
+ if (!intel_dp_get_link_status(intel_output, link_status))
+ break;
+
+ if (intel_clock_recovery_ok(link_status, dp_priv->lane_count)) {
+ clock_recovery = true;
+ break;
+ }
+
+ /* Check to see if we've tried the max voltage */
+ for (i = 0; i < dp_priv->lane_count; i++)
+ if ((train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
+ break;
+ if (i == dp_priv->lane_count)
+ break;
+
+ /* Check to see if we've tried the same voltage 5 times */
+ if ((train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) {
+ ++tries;
+ if (tries == 5)
+ break;
+ } else
+ tries = 0;
+ voltage = train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
+
+ /* Compute new train_set as requested by target */
+ intel_get_adjust_train(intel_output, link_status, dp_priv->lane_count, train_set);
+ }
+
+ /* channel equalization */
+ tries = 0;
+ channel_eq = false;
+ for (;;) {
+ /* Use train_set[0] to set the voltage and pre emphasis values */
+ uint32_t signal_levels = intel_dp_signal_levels(train_set[0], dp_priv->lane_count);
+ DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
+
+ /* channel eq pattern */
+ if (!intel_dp_set_link_train(intel_output, DP | DP_LINK_TRAIN_PAT_2,
+ DP_TRAINING_PATTERN_2, train_set,
+ false))
+ break;
+
+ udelay(400);
+ if (!intel_dp_get_link_status(intel_output, link_status))
+ break;
+
+ if (intel_channel_eq_ok(link_status, dp_priv->lane_count)) {
+ channel_eq = true;
+ break;
+ }
+
+ /* Try 5 times */
+ if (tries > 5)
+ break;
+
+ /* Compute new train_set as requested by target */
+ intel_get_adjust_train(intel_output, link_status, dp_priv->lane_count, train_set);
+ ++tries;
+ }
+
+ I915_WRITE(dp_priv->output_reg, DP | DP_LINK_TRAIN_OFF);
+ POSTING_READ(dp_priv->output_reg);
+ intel_dp_aux_native_write_1(intel_output,
+ DP_TRAINING_PATTERN_SET, DP_TRAINING_PATTERN_DISABLE);
+}
+
+static void
+intel_dp_link_down(struct intel_output *intel_output, uint32_t DP)
+{
+ struct drm_device *dev = intel_output->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_dp_priv *dp_priv = intel_output->dev_priv;
+
+ I915_WRITE(dp_priv->output_reg, DP & ~DP_PORT_EN);
+ POSTING_READ(dp_priv->output_reg);
+}
+
+static void
+intel_dp_restore(struct drm_connector *connector)
+{
+ struct intel_output *intel_output = to_intel_output(connector);
+ struct intel_dp_priv *dp_priv = intel_output->dev_priv;
+
+ if (dp_priv->save_DP & DP_PORT_EN)
+ intel_dp_link_train(intel_output, dp_priv->save_DP, dp_priv->save_link_configuration);
+ else
+ intel_dp_link_down(intel_output, dp_priv->save_DP);
+}
+
+/*
+ * According to DP spec
+ * 5.1.2:
+ * 1. Read DPCD
+ * 2. Configure link according to Receiver Capabilities
+ * 3. Use Link Training from 2.5.3.3 and 3.5.1.3
+ * 4. Check link status on receipt of hot-plug interrupt
+ */
+
+static void
+intel_dp_check_link_status(struct intel_output *intel_output)
+{
+ struct intel_dp_priv *dp_priv = intel_output->dev_priv;
+ uint8_t link_status[DP_LINK_STATUS_SIZE];
+
+ if (!intel_output->enc.crtc)
+ return;
+
+ if (!intel_dp_get_link_status(intel_output, link_status)) {
+ intel_dp_link_down(intel_output, dp_priv->DP);
+ return;
+ }
+
+ if (!intel_channel_eq_ok(link_status, dp_priv->lane_count))
+ intel_dp_link_train(intel_output, dp_priv->DP, dp_priv->link_configuration);
+}
+
+/**
+ * Uses PORT_HOTPLUG_EN and PORT_HOTPLUG_STAT to detect DP connection.
+ *
+ * \return connector_status_connected if a DP sink is detected.
+ * \return connector_status_disconnected otherwise.
+ */
+static enum drm_connector_status
+intel_dp_detect(struct drm_connector *connector)
+{
+ struct intel_output *intel_output = to_intel_output(connector);
+ struct drm_device *dev = intel_output->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_dp_priv *dp_priv = intel_output->dev_priv;
+ uint32_t temp, bit;
+ enum drm_connector_status status;
+
+ dp_priv->has_audio = false;
+
+ temp = I915_READ(PORT_HOTPLUG_EN);
+
+ I915_WRITE(PORT_HOTPLUG_EN,
+ temp |
+ DPB_HOTPLUG_INT_EN |
+ DPC_HOTPLUG_INT_EN |
+ DPD_HOTPLUG_INT_EN);
+
+ POSTING_READ(PORT_HOTPLUG_EN);
+
+ switch (dp_priv->output_reg) {
+ case DP_B:
+ bit = DPB_HOTPLUG_INT_STATUS;
+ break;
+ case DP_C:
+ bit = DPC_HOTPLUG_INT_STATUS;
+ break;
+ case DP_D:
+ bit = DPD_HOTPLUG_INT_STATUS;
+ break;
+ default:
+ return connector_status_unknown;
+ }
+
+ temp = I915_READ(PORT_HOTPLUG_STAT);
+
+ if ((temp & bit) == 0)
+ return connector_status_disconnected;
+
+ status = connector_status_disconnected;
+ if (intel_dp_aux_native_read(intel_output,
+ 0x000, dp_priv->dpcd,
+ sizeof (dp_priv->dpcd)) == sizeof (dp_priv->dpcd))
+ {
+ if (dp_priv->dpcd[0] != 0)
+ status = connector_status_connected;
+ }
+ return status;
+}
+
+static int intel_dp_get_modes(struct drm_connector *connector)
+{
+ struct intel_output *intel_output = to_intel_output(connector);
+
+ /* We should parse the EDID data and find out if it has an audio sink
+ */
+
+ return intel_ddc_get_modes(intel_output);
+}
+
+static void
+intel_dp_destroy(struct drm_connector *connector)
+{
+ struct intel_output *intel_output = to_intel_output(connector);
+
+ if (intel_output->i2c_bus)
+ intel_i2c_destroy(intel_output->i2c_bus);
+ drm_sysfs_connector_remove(connector);
+ drm_connector_cleanup(connector);
+ kfree(intel_output);
+}
+
+static const struct drm_encoder_helper_funcs intel_dp_helper_funcs = {
+ .dpms = intel_dp_dpms,
+ .mode_fixup = intel_dp_mode_fixup,
+ .prepare = intel_encoder_prepare,
+ .mode_set = intel_dp_mode_set,
+ .commit = intel_encoder_commit,
+};
+
+static const struct drm_connector_funcs intel_dp_connector_funcs = {
+ .dpms = drm_helper_connector_dpms,
+ .save = intel_dp_save,
+ .restore = intel_dp_restore,
+ .detect = intel_dp_detect,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .destroy = intel_dp_destroy,
+};
+
+static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
+ .get_modes = intel_dp_get_modes,
+ .mode_valid = intel_dp_mode_valid,
+ .best_encoder = intel_best_encoder,
+};
+
+static void intel_dp_enc_destroy(struct drm_encoder *encoder)
+{
+ drm_encoder_cleanup(encoder);
+}
+
+static const struct drm_encoder_funcs intel_dp_enc_funcs = {
+ .destroy = intel_dp_enc_destroy,
+};
+
+void
+intel_dp_hot_plug(struct intel_output *intel_output)
+{
+ struct intel_dp_priv *dp_priv = intel_output->dev_priv;
+
+ if (dp_priv->dpms_mode == DRM_MODE_DPMS_ON)
+ intel_dp_check_link_status(intel_output);
+}
+
+void
+intel_dp_init(struct drm_device *dev, int output_reg)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_connector *connector;
+ struct intel_output *intel_output;
+ struct intel_dp_priv *dp_priv;
+
+ intel_output = kcalloc(sizeof(struct intel_output) +
+ sizeof(struct intel_dp_priv), 1, GFP_KERNEL);
+ if (!intel_output)
+ return;
+
+ dp_priv = (struct intel_dp_priv *)(intel_output + 1);
+
+ connector = &intel_output->base;
+ drm_connector_init(dev, connector, &intel_dp_connector_funcs,
+ DRM_MODE_CONNECTOR_DisplayPort);
+ drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);
+
+ intel_output->type = INTEL_OUTPUT_DISPLAYPORT;
+
+ connector->interlace_allowed = true;
+ connector->doublescan_allowed = 0;
+
+ dp_priv->intel_output = intel_output;
+ dp_priv->output_reg = output_reg;
+ dp_priv->has_audio = false;
+ dp_priv->dpms_mode = DRM_MODE_DPMS_ON;
+ intel_output->dev_priv = dp_priv;
+
+ drm_encoder_init(dev, &intel_output->enc, &intel_dp_enc_funcs,
+ DRM_MODE_ENCODER_TMDS);
+ drm_encoder_helper_add(&intel_output->enc, &intel_dp_helper_funcs);
+
+ drm_mode_connector_attach_encoder(&intel_output->base,
+ &intel_output->enc);
+ drm_sysfs_connector_add(connector);
+
+ /* Set up the DDC bus. */
+ intel_dp_i2c_init(intel_output,
+ (output_reg == DP_B) ? "DPDDC-B" :
+ (output_reg == DP_C) ? "DPDDC-C" : "DPDDC-D");
+ intel_output->ddc_bus = &dp_priv->adapter;
+ intel_output->hot_plug = intel_dp_hot_plug;
+
+ /* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
+ * 0xd. Failure to do so will result in spurious interrupts being
+ * generated on the port when a cable is not attached.
+ */
+ if (IS_G4X(dev) && !IS_GM45(dev)) {
+ u32 temp = I915_READ(PEG_BAND_GAP_DATA);
+ I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
+ }
+}
diff --git a/drivers/gpu/drm/i915/intel_dp.h b/drivers/gpu/drm/i915/intel_dp.h
new file mode 100644
index 00000000000..2b38054d3b6
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_dp.h
@@ -0,0 +1,144 @@
+/*
+ * Copyright © 2008 Keith Packard
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that copyright
+ * notice and this permission notice appear in supporting documentation, and
+ * that the name of the copyright holders not be used in advertising or
+ * publicity pertaining to distribution of the software without specific,
+ * written prior permission. The copyright holders make no representations
+ * about the suitability of this software for any purpose. It is provided "as
+ * is" without express or implied warranty.
+ *
+ * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THIS SOFTWARE.
+ */
+
+#ifndef _INTEL_DP_H_
+#define _INTEL_DP_H_
+
+/* From the VESA DisplayPort spec */
+
+#define AUX_NATIVE_WRITE 0x8
+#define AUX_NATIVE_READ 0x9
+#define AUX_I2C_WRITE 0x0
+#define AUX_I2C_READ 0x1
+#define AUX_I2C_STATUS 0x2
+#define AUX_I2C_MOT 0x4
+
+#define AUX_NATIVE_REPLY_ACK (0x0 << 4)
+#define AUX_NATIVE_REPLY_NACK (0x1 << 4)
+#define AUX_NATIVE_REPLY_DEFER (0x2 << 4)
+#define AUX_NATIVE_REPLY_MASK (0x3 << 4)
+
+#define AUX_I2C_REPLY_ACK (0x0 << 6)
+#define AUX_I2C_REPLY_NACK (0x1 << 6)
+#define AUX_I2C_REPLY_DEFER (0x2 << 6)
+#define AUX_I2C_REPLY_MASK (0x3 << 6)
+
+/* AUX CH addresses */
+#define DP_LINK_BW_SET 0x100
+# define DP_LINK_BW_1_62 0x06
+# define DP_LINK_BW_2_7 0x0a
+
+#define DP_LANE_COUNT_SET 0x101
+# define DP_LANE_COUNT_MASK 0x0f
+# define DP_LANE_COUNT_ENHANCED_FRAME_EN (1 << 7)
+
+#define DP_TRAINING_PATTERN_SET 0x102
+
+# define DP_TRAINING_PATTERN_DISABLE 0
+# define DP_TRAINING_PATTERN_1 1
+# define DP_TRAINING_PATTERN_2 2
+# define DP_TRAINING_PATTERN_MASK 0x3
+
+# define DP_LINK_QUAL_PATTERN_DISABLE (0 << 2)
+# define DP_LINK_QUAL_PATTERN_D10_2 (1 << 2)
+# define DP_LINK_QUAL_PATTERN_ERROR_RATE (2 << 2)
+# define DP_LINK_QUAL_PATTERN_PRBS7 (3 << 2)
+# define DP_LINK_QUAL_PATTERN_MASK (3 << 2)
+
+# define DP_RECOVERED_CLOCK_OUT_EN (1 << 4)
+# define DP_LINK_SCRAMBLING_DISABLE (1 << 5)
+
+# define DP_SYMBOL_ERROR_COUNT_BOTH (0 << 6)
+# define DP_SYMBOL_ERROR_COUNT_DISPARITY (1 << 6)
+# define DP_SYMBOL_ERROR_COUNT_SYMBOL (2 << 6)
+# define DP_SYMBOL_ERROR_COUNT_MASK (3 << 6)
+
+#define DP_TRAINING_LANE0_SET 0x103
+#define DP_TRAINING_LANE1_SET 0x104
+#define DP_TRAINING_LANE2_SET 0x105
+#define DP_TRAINING_LANE3_SET 0x106
+
+# define DP_TRAIN_VOLTAGE_SWING_MASK 0x3
+# define DP_TRAIN_VOLTAGE_SWING_SHIFT 0
+# define DP_TRAIN_MAX_SWING_REACHED (1 << 2)
+# define DP_TRAIN_VOLTAGE_SWING_400 (0 << 0)
+# define DP_TRAIN_VOLTAGE_SWING_600 (1 << 0)
+# define DP_TRAIN_VOLTAGE_SWING_800 (2 << 0)
+# define DP_TRAIN_VOLTAGE_SWING_1200 (3 << 0)
+
+# define DP_TRAIN_PRE_EMPHASIS_MASK (3 << 3)
+# define DP_TRAIN_PRE_EMPHASIS_0 (0 << 3)
+# define DP_TRAIN_PRE_EMPHASIS_3_5 (1 << 3)
+# define DP_TRAIN_PRE_EMPHASIS_6 (2 << 3)
+# define DP_TRAIN_PRE_EMPHASIS_9_5 (3 << 3)
+
+# define DP_TRAIN_PRE_EMPHASIS_SHIFT 3
+# define DP_TRAIN_MAX_PRE_EMPHASIS_REACHED (1 << 5)
+
+#define DP_DOWNSPREAD_CTRL 0x107
+# define DP_SPREAD_AMP_0_5 (1 << 4)
+
+#define DP_MAIN_LINK_CHANNEL_CODING_SET 0x108
+# define DP_SET_ANSI_8B10B (1 << 0)
+
+#define DP_LANE0_1_STATUS 0x202
+#define DP_LANE2_3_STATUS 0x203
+
+# define DP_LANE_CR_DONE (1 << 0)
+# define DP_LANE_CHANNEL_EQ_DONE (1 << 1)
+# define DP_LANE_SYMBOL_LOCKED (1 << 2)
+
+#define DP_LANE_ALIGN_STATUS_UPDATED 0x204
+
+#define DP_INTERLANE_ALIGN_DONE (1 << 0)
+#define DP_DOWNSTREAM_PORT_STATUS_CHANGED (1 << 6)
+#define DP_LINK_STATUS_UPDATED (1 << 7)
+
+#define DP_SINK_STATUS 0x205
+
+#define DP_RECEIVE_PORT_0_STATUS (1 << 0)
+#define DP_RECEIVE_PORT_1_STATUS (1 << 1)
+
+#define DP_ADJUST_REQUEST_LANE0_1 0x206
+#define DP_ADJUST_REQUEST_LANE2_3 0x207
+
+#define DP_ADJUST_VOLTAGE_SWING_LANE0_MASK 0x03
+#define DP_ADJUST_VOLTAGE_SWING_LANE0_SHIFT 0
+#define DP_ADJUST_PRE_EMPHASIS_LANE0_MASK 0x0c
+#define DP_ADJUST_PRE_EMPHASIS_LANE0_SHIFT 2
+#define DP_ADJUST_VOLTAGE_SWING_LANE1_MASK 0x30
+#define DP_ADJUST_VOLTAGE_SWING_LANE1_SHIFT 4
+#define DP_ADJUST_PRE_EMPHASIS_LANE1_MASK 0xc0
+#define DP_ADJUST_PRE_EMPHASIS_LANE1_SHIFT 6
+
+struct i2c_algo_dp_aux_data {
+ bool running;
+ u16 address;
+ int (*aux_ch) (struct i2c_adapter *adapter,
+ uint8_t *send, int send_bytes,
+ uint8_t *recv, int recv_bytes);
+};
+
+int
+i2c_dp_aux_add_bus(struct i2c_adapter *adapter);
+
+#endif /* _INTEL_DP_H_ */
diff --git a/drivers/gpu/drm/i915/intel_dp_i2c.c b/drivers/gpu/drm/i915/intel_dp_i2c.c
new file mode 100644
index 00000000000..4e60f14b1a6
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_dp_i2c.c
@@ -0,0 +1,272 @@
+/*
+ * Copyright © 2009 Keith Packard
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that copyright
+ * notice and this permission notice appear in supporting documentation, and
+ * that the name of the copyright holders not be used in advertising or
+ * publicity pertaining to distribution of the software without specific,
+ * written prior permission. The copyright holders make no representations
+ * about the suitability of this software for any purpose. It is provided "as
+ * is" without express or implied warranty.
+ *
+ * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THIS SOFTWARE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/sched.h>
+#include <linux/i2c.h>
+#include "intel_dp.h"
+
+/* Run a single AUX_CH I2C transaction, writing/reading data as necessary */
+
+#define MODE_I2C_START 1
+#define MODE_I2C_WRITE 2
+#define MODE_I2C_READ 4
+#define MODE_I2C_STOP 8
+
+static int
+i2c_algo_dp_aux_transaction(struct i2c_adapter *adapter, int mode,
+ uint8_t write_byte, uint8_t *read_byte)
+{
+ struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
+ uint16_t address = algo_data->address;
+ uint8_t msg[5];
+ uint8_t reply[2];
+ int msg_bytes;
+ int reply_bytes;
+ int ret;
+
+ /* Set up the command byte */
+ if (mode & MODE_I2C_READ)
+ msg[0] = AUX_I2C_READ << 4;
+ else
+ msg[0] = AUX_I2C_WRITE << 4;
+
+ if (!(mode & MODE_I2C_STOP))
+ msg[0] |= AUX_I2C_MOT << 4;
+
+ msg[1] = address >> 8;
+ msg[2] = address;
+
+ switch (mode) {
+ case MODE_I2C_WRITE:
+ msg[3] = 0;
+ msg[4] = write_byte;
+ msg_bytes = 5;
+ reply_bytes = 1;
+ break;
+ case MODE_I2C_READ:
+ msg[3] = 0;
+ msg_bytes = 4;
+ reply_bytes = 2;
+ break;
+ default:
+ msg_bytes = 3;
+ reply_bytes = 1;
+ break;
+ }
+
+ for (;;) {
+ ret = (*algo_data->aux_ch)(adapter,
+ msg, msg_bytes,
+ reply, reply_bytes);
+ if (ret < 0) {
+ printk(KERN_ERR "aux_ch failed %d\n", ret);
+ return ret;
+ }
+ switch (reply[0] & AUX_I2C_REPLY_MASK) {
+ case AUX_I2C_REPLY_ACK:
+ if (mode == MODE_I2C_READ) {
+ *read_byte = reply[1];
+ }
+ return reply_bytes - 1;
+ case AUX_I2C_REPLY_NACK:
+ printk(KERN_ERR "aux_ch nack\n");
+ return -EREMOTEIO;
+ case AUX_I2C_REPLY_DEFER:
+ printk(KERN_ERR "aux_ch defer\n");
+ udelay(100);
+ break;
+ default:
+ printk(KERN_ERR "aux_ch invalid reply 0x%02x\n", reply[0]);
+ return -EREMOTEIO;
+ }
+ }
+}
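+
+/*
+ * To illustrate the message layout: a start of a read from i2c
+ * address 0x50 (mode = MODE_I2C_START | MODE_I2C_READ) sends the
+ * three header bytes { 0x50, 0x00, 0x50 } (AUX_I2C_READ and
+ * AUX_I2C_MOT in the command nibble, then the 16-bit address), and
+ * each following MODE_I2C_READ adds a zero length byte and expects
+ * a two byte reply: the AUX status byte plus the data byte.
+ */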
+
+/*
+ * I2C over AUX CH
+ */
+
+/*
+ * Send the address. If the I2C link is running, this 'restarts'
+ * the connection with the new address, this is used for doing
+ * a write followed by a read (as needed for DDC)
+ */
+static int
+i2c_algo_dp_aux_address(struct i2c_adapter *adapter, u16 address, bool reading)
+{
+ struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
+ int mode = MODE_I2C_START;
+ int ret;
+
+ if (reading)
+ mode |= MODE_I2C_READ;
+ else
+ mode |= MODE_I2C_WRITE;
+ algo_data->address = address;
+ algo_data->running = true;
+ ret = i2c_algo_dp_aux_transaction(adapter, mode, 0, NULL);
+ return ret;
+}
+
+/*
+ * Stop the I2C transaction. This closes out the link, sending
+ * a bare address packet with the MOT bit turned off
+ */
+static void
+i2c_algo_dp_aux_stop(struct i2c_adapter *adapter, bool reading)
+{
+ struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
+ int mode = MODE_I2C_STOP;
+
+ if (reading)
+ mode |= MODE_I2C_READ;
+ else
+ mode |= MODE_I2C_WRITE;
+ if (algo_data->running) {
+ (void) i2c_algo_dp_aux_transaction(adapter, mode, 0, NULL);
+ algo_data->running = false;
+ }
+}
+
+/*
+ * Write a single byte to the current I2C address; the
+ * I2C link must be running or this returns -EIO.
+ */
+static int
+i2c_algo_dp_aux_put_byte(struct i2c_adapter *adapter, u8 byte)
+{
+ struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
+ int ret;
+
+ if (!algo_data->running)
+ return -EIO;
+
+ ret = i2c_algo_dp_aux_transaction(adapter, MODE_I2C_WRITE, byte, NULL);
+ return ret;
+}
+
+/*
+ * Read a single byte from the current I2C address; the
+ * I2C link must be running or this returns -EIO.
+ */
+static int
+i2c_algo_dp_aux_get_byte(struct i2c_adapter *adapter, u8 *byte_ret)
+{
+ struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
+ int ret;
+
+ if (!algo_data->running)
+ return -EIO;
+
+ ret = i2c_algo_dp_aux_transaction(adapter, MODE_I2C_READ, 0, byte_ret);
+ return ret;
+}
+
+static int
+i2c_algo_dp_aux_xfer(struct i2c_adapter *adapter,
+ struct i2c_msg *msgs,
+ int num)
+{
+ int ret = 0;
+ bool reading = false;
+ int m;
+ int b;
+
+ for (m = 0; m < num; m++) {
+ u16 len = msgs[m].len;
+ u8 *buf = msgs[m].buf;
+ reading = (msgs[m].flags & I2C_M_RD) != 0;
+ ret = i2c_algo_dp_aux_address(adapter, msgs[m].addr, reading);
+ if (ret < 0)
+ break;
+ if (reading) {
+ for (b = 0; b < len; b++) {
+ ret = i2c_algo_dp_aux_get_byte(adapter, &buf[b]);
+ if (ret < 0)
+ break;
+ }
+ } else {
+ for (b = 0; b < len; b++) {
+ ret = i2c_algo_dp_aux_put_byte(adapter, buf[b]);
+ if (ret < 0)
+ break;
+ }
+ }
+ if (ret < 0)
+ break;
+ }
+ if (ret >= 0)
+ ret = num;
+ i2c_algo_dp_aux_stop(adapter, reading);
+ printk(KERN_ERR "dp_aux_xfer return %d\n", ret);
+ return ret;
+}
+
+static u32
+i2c_algo_dp_aux_functionality(struct i2c_adapter *adapter)
+{
+ return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL |
+ I2C_FUNC_SMBUS_READ_BLOCK_DATA |
+ I2C_FUNC_SMBUS_BLOCK_PROC_CALL |
+ I2C_FUNC_10BIT_ADDR;
+}
+
+static const struct i2c_algorithm i2c_dp_aux_algo = {
+ .master_xfer = i2c_algo_dp_aux_xfer,
+ .functionality = i2c_algo_dp_aux_functionality,
+};
+
+static void
+i2c_dp_aux_reset_bus(struct i2c_adapter *adapter)
+{
+ (void) i2c_algo_dp_aux_address(adapter, 0, false);
+ i2c_algo_dp_aux_stop(adapter, false);
+}
+
+static int
+i2c_dp_aux_prepare_bus(struct i2c_adapter *adapter)
+{
+ adapter->algo = &i2c_dp_aux_algo;
+ adapter->retries = 3;
+ i2c_dp_aux_reset_bus(adapter);
+ return 0;
+}
+
+int
+i2c_dp_aux_add_bus(struct i2c_adapter *adapter)
+{
+ int error;
+
+ error = i2c_dp_aux_prepare_bus(adapter);
+ if (error)
+ return error;
+ error = i2c_add_adapter(adapter);
+ return error;
+}
+EXPORT_SYMBOL(i2c_dp_aux_add_bus);
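+
+/*
+ * Usage sketch (illustrative only, not part of this file): a caller embeds
+ * a struct i2c_algo_dp_aux_data, supplies its hardware AUX channel handler
+ * in .aux_ch, points adapter->algo_data at it and then registers the bus.
+ * The callback name below is hypothetical.
+ *
+ *   static struct i2c_algo_dp_aux_data aux_data = {
+ *       .running = false,
+ *       .address = 0,
+ *       .aux_ch  = my_aux_ch,
+ *   };
+ *
+ *   adapter->algo_data = &aux_data;
+ *   ret = i2c_dp_aux_add_bus(adapter);
+ */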
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index cd4b9c5f715..004541c935a 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -54,6 +54,7 @@
#define INTEL_OUTPUT_LVDS 4
#define INTEL_OUTPUT_TVOUT 5
#define INTEL_OUTPUT_HDMI 6
+#define INTEL_OUTPUT_DISPLAYPORT 7
#define INTEL_DVO_CHIP_NONE 0
#define INTEL_DVO_CHIP_LVDS 1
@@ -65,7 +66,6 @@ struct intel_i2c_chan {
u32 reg; /* GPIO reg */
struct i2c_adapter adapter;
struct i2c_algo_bit_data algo;
- u8 slave_addr;
};
struct intel_framebuffer {
@@ -79,11 +79,12 @@ struct intel_output {
struct drm_encoder enc;
int type;
- struct intel_i2c_chan *i2c_bus; /* for control functions */
- struct intel_i2c_chan *ddc_bus; /* for DDC only stuff */
+ struct i2c_adapter *i2c_bus;
+ struct i2c_adapter *ddc_bus;
bool load_detect_temp;
bool needs_tv_clock;
void *dev_priv;
+ void (*hot_plug)(struct intel_output *);
};
struct intel_crtc {
@@ -104,9 +105,9 @@ struct intel_crtc {
#define enc_to_intel_output(x) container_of(x, struct intel_output, enc)
#define to_intel_framebuffer(x) container_of(x, struct intel_framebuffer, base)
-struct intel_i2c_chan *intel_i2c_create(struct drm_device *dev, const u32 reg,
- const char *name);
-void intel_i2c_destroy(struct intel_i2c_chan *chan);
+struct i2c_adapter *intel_i2c_create(struct drm_device *dev, const u32 reg,
+ const char *name);
+void intel_i2c_destroy(struct i2c_adapter *adapter);
int intel_ddc_get_modes(struct intel_output *intel_output);
extern bool intel_ddc_probe(struct intel_output *intel_output);
void intel_i2c_quirk_set(struct drm_device *dev, bool enable);
@@ -116,6 +117,10 @@ extern bool intel_sdvo_init(struct drm_device *dev, int output_device);
extern void intel_dvo_init(struct drm_device *dev);
extern void intel_tv_init(struct drm_device *dev);
extern void intel_lvds_init(struct drm_device *dev);
+extern void intel_dp_init(struct drm_device *dev, int dp_reg);
+void
+intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode);
extern void intel_crtc_load_lut(struct drm_crtc *crtc);
extern void intel_encoder_prepare (struct drm_encoder *encoder);
diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c
index 1ee3007d6ec..13bff20930e 100644
--- a/drivers/gpu/drm/i915/intel_dvo.c
+++ b/drivers/gpu/drm/i915/intel_dvo.c
@@ -384,10 +384,9 @@ void intel_dvo_init(struct drm_device *dev)
{
struct intel_output *intel_output;
struct intel_dvo_device *dvo;
- struct intel_i2c_chan *i2cbus = NULL;
+ struct i2c_adapter *i2cbus = NULL;
int ret = 0;
int i;
- int gpio_inited = 0;
int encoder_type = DRM_MODE_ENCODER_NONE;
intel_output = kzalloc (sizeof(struct intel_output), GFP_KERNEL);
if (!intel_output)
@@ -420,14 +419,11 @@ void intel_dvo_init(struct drm_device *dev)
* It appears that everything is on GPIOE except for panels
* on i830 laptops, which are on GPIOB (DVOA).
*/
- if (gpio_inited != gpio) {
- if (i2cbus != NULL)
- intel_i2c_destroy(i2cbus);
- if (!(i2cbus = intel_i2c_create(dev, gpio,
- gpio == GPIOB ? "DVOI2C_B" : "DVOI2C_E"))) {
- continue;
- }
- gpio_inited = gpio;
+ if (i2cbus != NULL)
+ intel_i2c_destroy(i2cbus);
+ if (!(i2cbus = intel_i2c_create(dev, gpio,
+ gpio == GPIOB ? "DVOI2C_B" : "DVOI2C_E"))) {
+ continue;
}
if (dvo->dev_ops!= NULL)
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index 4ea2a651b92..9e30daae37d 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -31,6 +31,7 @@
#include "drmP.h"
#include "drm.h"
#include "drm_crtc.h"
+#include "drm_edid.h"
#include "intel_drv.h"
#include "i915_drm.h"
#include "i915_drv.h"
@@ -56,8 +57,7 @@ static void intel_hdmi_mode_set(struct drm_encoder *encoder,
sdvox = SDVO_ENCODING_HDMI |
SDVO_BORDER_ENABLE |
SDVO_VSYNC_ACTIVE_HIGH |
- SDVO_HSYNC_ACTIVE_HIGH |
- SDVO_NULL_PACKETS_DURING_VSYNC;
+ SDVO_HSYNC_ACTIVE_HIGH;
if (hdmi_priv->has_hdmi_sink)
sdvox |= SDVO_AUDIO_ENABLE;
@@ -129,20 +129,26 @@ static bool intel_hdmi_mode_fixup(struct drm_encoder *encoder,
return true;
}
-static void
-intel_hdmi_sink_detect(struct drm_connector *connector)
+static enum drm_connector_status
+intel_hdmi_edid_detect(struct drm_connector *connector)
{
struct intel_output *intel_output = to_intel_output(connector);
struct intel_hdmi_priv *hdmi_priv = intel_output->dev_priv;
struct edid *edid = NULL;
+ enum drm_connector_status status = connector_status_disconnected;
edid = drm_get_edid(&intel_output->base,
- &intel_output->ddc_bus->adapter);
- if (edid != NULL) {
- hdmi_priv->has_hdmi_sink = drm_detect_hdmi_monitor(edid);
- kfree(edid);
+ intel_output->ddc_bus);
+ hdmi_priv->has_hdmi_sink = false;
+ if (edid) {
+ if (edid->input & DRM_EDID_INPUT_DIGITAL) {
+ status = connector_status_connected;
+ hdmi_priv->has_hdmi_sink = drm_detect_hdmi_monitor(edid);
+ }
intel_output->base.display_info.raw_edid = NULL;
+ kfree(edid);
}
+ return status;
}
static enum drm_connector_status
@@ -154,11 +160,7 @@ igdng_hdmi_detect(struct drm_connector *connector)
/* FIXME hotplug detect */
hdmi_priv->has_hdmi_sink = false;
- intel_hdmi_sink_detect(connector);
- if (hdmi_priv->has_hdmi_sink)
- return connector_status_connected;
- else
- return connector_status_disconnected;
+ return intel_hdmi_edid_detect(connector);
}
static enum drm_connector_status
@@ -201,10 +203,9 @@ intel_hdmi_detect(struct drm_connector *connector)
return connector_status_unknown;
}
- if ((I915_READ(PORT_HOTPLUG_STAT) & bit) != 0) {
- intel_hdmi_sink_detect(connector);
- return connector_status_connected;
- } else
+ if ((I915_READ(PORT_HOTPLUG_STAT) & bit) != 0)
+ return intel_hdmi_edid_detect(connector);
+ else
return connector_status_disconnected;
}
diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c
index f7061f68d05..62b8bead765 100644
--- a/drivers/gpu/drm/i915/intel_i2c.c
+++ b/drivers/gpu/drm/i915/intel_i2c.c
@@ -124,6 +124,7 @@ static void set_data(void *data, int state_high)
* @output: driver specific output device
* @reg: GPIO reg to use
* @name: name for this bus
+ * @slave_addr: slave address (if fixed)
*
* Creates and registers a new i2c bus with the Linux i2c layer, for use
* in output probing and control (e.g. DDC or SDVO control functions).
@@ -139,8 +140,8 @@ static void set_data(void *data, int state_high)
* %GPIOH
* see PRM for details on how these different busses are used.
*/
-struct intel_i2c_chan *intel_i2c_create(struct drm_device *dev, const u32 reg,
- const char *name)
+struct i2c_adapter *intel_i2c_create(struct drm_device *dev, const u32 reg,
+ const char *name)
{
struct intel_i2c_chan *chan;
@@ -174,7 +175,7 @@ struct intel_i2c_chan *intel_i2c_create(struct drm_device *dev, const u32 reg,
intel_i2c_quirk_set(dev, false);
udelay(20);
- return chan;
+ return &chan->adapter;
out_free:
kfree(chan);
@@ -187,11 +188,16 @@ out_free:
*
* Unregister the adapter from the i2c layer, then free the structure.
*/
-void intel_i2c_destroy(struct intel_i2c_chan *chan)
+void intel_i2c_destroy(struct i2c_adapter *adapter)
{
- if (!chan)
+ struct intel_i2c_chan *chan;
+
+ if (!adapter)
return;
+ chan = container_of(adapter,
+ struct intel_i2c_chan,
+ adapter);
i2c_del_adapter(&chan->adapter);
kfree(chan);
}
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index f073ed8432e..9564ca44a97 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -39,6 +39,21 @@
#define I915_LVDS "i915_lvds"
+/*
+ * the following four scaling options are defined.
+ * #define DRM_MODE_SCALE_NON_GPU 0
+ * #define DRM_MODE_SCALE_FULLSCREEN 1
+ * #define DRM_MODE_SCALE_NO_SCALE 2
+ * #define DRM_MODE_SCALE_ASPECT 3
+ */
+
+/* Private structure for the integrated LVDS support */
+struct intel_lvds_priv {
+ int fitting_mode;
+ u32 pfit_control;
+ u32 pfit_pgm_ratios;
+};
+
/**
* Sets the backlight level.
*
@@ -213,10 +228,27 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
+ /*
+ * Floating point operations are not supported in the kernel, so
+ * PANEL_RATIO_FACTOR is defined to let the panel ratio be computed
+ * with fixed-point arithmetic instead.
+ */
+#define PANEL_RATIO_FACTOR 8192
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
struct drm_encoder *tmp_encoder;
+ struct intel_output *intel_output = enc_to_intel_output(encoder);
+ struct intel_lvds_priv *lvds_priv = intel_output->dev_priv;
+ u32 pfit_control = 0, pfit_pgm_ratios = 0;
+ int left_border = 0, right_border = 0, top_border = 0;
+ int bottom_border = 0;
+ bool border = false;
+ int panel_ratio, desired_ratio, vert_scale, horiz_scale;
+ int horiz_ratio, vert_ratio;
+ u32 hsync_width, vsync_width;
+ u32 hblank_width, vblank_width;
+ u32 hsync_pos, vsync_pos;
/* Should never happen!! */
if (!IS_I965G(dev) && intel_crtc->pipe == 0) {
@@ -232,7 +264,9 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
return false;
}
}
-
+ /* If we don't have a panel mode, there is nothing we can do */
+ if (dev_priv->panel_fixed_mode == NULL)
+ return true;
/*
* If we have timings from the BIOS for the panel, put them in
* to the adjusted mode. The CRTC will be set up for this mode,
@@ -256,6 +290,243 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
drm_mode_set_crtcinfo(adjusted_mode, CRTC_INTERLACE_HALVE_V);
}
+ /* Make sure pre-965s set dither correctly */
+ if (!IS_I965G(dev)) {
+ if (dev_priv->panel_wants_dither || dev_priv->lvds_dither)
+ pfit_control |= PANEL_8TO6_DITHER_ENABLE;
+ }
+
+ /* Native modes don't need fitting */
+ if (adjusted_mode->hdisplay == mode->hdisplay &&
+ adjusted_mode->vdisplay == mode->vdisplay) {
+ pfit_pgm_ratios = 0;
+ border = 0;
+ goto out;
+ }
+
+ /* 965+ wants fuzzy fitting */
+ if (IS_I965G(dev))
+ pfit_control |= (intel_crtc->pipe << PFIT_PIPE_SHIFT) |
+ PFIT_FILTER_FUZZY;
+
+ hsync_width = adjusted_mode->crtc_hsync_end -
+ adjusted_mode->crtc_hsync_start;
+ vsync_width = adjusted_mode->crtc_vsync_end -
+ adjusted_mode->crtc_vsync_start;
+ hblank_width = adjusted_mode->crtc_hblank_end -
+ adjusted_mode->crtc_hblank_start;
+ vblank_width = adjusted_mode->crtc_vblank_end -
+ adjusted_mode->crtc_vblank_start;
+ /*
+ * Deal with panel fitting options. Figure out how to stretch the
+ * image based on its aspect ratio & the current panel fitting mode.
+ */
+ panel_ratio = adjusted_mode->hdisplay * PANEL_RATIO_FACTOR /
+ adjusted_mode->vdisplay;
+ desired_ratio = mode->hdisplay * PANEL_RATIO_FACTOR /
+ mode->vdisplay;
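+ /*
+ * Worked example (illustrative): for a 1024x768 panel displaying an
+ * 800x600 mode,
+ *   panel_ratio   = 1024 * 8192 / 768 = 10922
+ *   desired_ratio =  800 * 8192 / 600 = 10922
+ * so the two aspect ratios compare equal without any floating point.
+ */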
+ /*
+ * Enable automatic panel scaling for non-native modes so that they fill
+ * the screen. Should be enabled before the pipe is enabled, according
+ * to the register description and the PRM.
+ * Change the value here to see the borders for debugging.
+ */
+ I915_WRITE(BCLRPAT_A, 0);
+ I915_WRITE(BCLRPAT_B, 0);
+
+ switch (lvds_priv->fitting_mode) {
+ case DRM_MODE_SCALE_NO_SCALE:
+ /*
+ * For centered modes, we have to calculate border widths &
+ * heights and modify the values programmed into the CRTC.
+ */
+ left_border = (adjusted_mode->hdisplay - mode->hdisplay) / 2;
+ right_border = left_border;
+ if (mode->hdisplay & 1)
+ right_border++;
+ top_border = (adjusted_mode->vdisplay - mode->vdisplay) / 2;
+ bottom_border = top_border;
+ if (mode->vdisplay & 1)
+ bottom_border++;
+ /* Set active & border values */
+ adjusted_mode->crtc_hdisplay = mode->hdisplay;
+ /* Keep the border even */
+ if (right_border & 1)
+ right_border++;
+ /* use the border directly instead of border minus one */
+ adjusted_mode->crtc_hblank_start = mode->hdisplay +
+ right_border;
+ /* keep the blank width constant */
+ adjusted_mode->crtc_hblank_end =
+ adjusted_mode->crtc_hblank_start + hblank_width;
+ /* get the hsync pos relative to hblank start */
+ hsync_pos = (hblank_width - hsync_width) / 2;
+ /* keep the hsync pos even */
+ if (hsync_pos & 1)
+ hsync_pos++;
+ adjusted_mode->crtc_hsync_start =
+ adjusted_mode->crtc_hblank_start + hsync_pos;
+ /* keep the hsync width constant */
+ adjusted_mode->crtc_hsync_end =
+ adjusted_mode->crtc_hsync_start + hsync_width;
+ adjusted_mode->crtc_vdisplay = mode->vdisplay;
+ /* use the border instead of border minus one */
+ adjusted_mode->crtc_vblank_start = mode->vdisplay +
+ bottom_border;
+ /* keep the vblank width constant */
+ adjusted_mode->crtc_vblank_end =
+ adjusted_mode->crtc_vblank_start + vblank_width;
+ /* get the vsync start position relative to vblank start */
+ vsync_pos = (vblank_width - vsync_width) / 2;
+ adjusted_mode->crtc_vsync_start =
+ adjusted_mode->crtc_vblank_start + vsync_pos;
+ /* keep the vsync width constant */
+ adjusted_mode->crtc_vsync_end =
+ adjusted_mode->crtc_vsync_start + vsync_width;
+ border = 1;
+ break;
+ case DRM_MODE_SCALE_ASPECT:
+ /* Scale but preserve the aspect ratio */
+ pfit_control |= PFIT_ENABLE;
+ if (IS_I965G(dev)) {
+ /* 965+ is easy, it does everything in hw */
+ if (panel_ratio > desired_ratio)
+ pfit_control |= PFIT_SCALING_PILLAR;
+ else if (panel_ratio < desired_ratio)
+ pfit_control |= PFIT_SCALING_LETTER;
+ else
+ pfit_control |= PFIT_SCALING_AUTO;
+ } else {
+ /*
+ * For earlier chips we have to calculate the scaling
+ * ratio by hand and program it into the
+ * PFIT_PGM_RATIO register
+ */
+ u32 horiz_bits, vert_bits, bits = 12;
+ horiz_ratio = mode->hdisplay * PANEL_RATIO_FACTOR/
+ adjusted_mode->hdisplay;
+ vert_ratio = mode->vdisplay * PANEL_RATIO_FACTOR/
+ adjusted_mode->vdisplay;
+ horiz_scale = adjusted_mode->hdisplay *
+ PANEL_RATIO_FACTOR / mode->hdisplay;
+ vert_scale = adjusted_mode->vdisplay *
+ PANEL_RATIO_FACTOR / mode->vdisplay;
+
+ /* retain aspect ratio */
+ if (panel_ratio > desired_ratio) { /* Pillar */
+ u32 scaled_width;
+ scaled_width = mode->hdisplay * vert_scale /
+ PANEL_RATIO_FACTOR;
+ horiz_ratio = vert_ratio;
+ pfit_control |= (VERT_AUTO_SCALE |
+ VERT_INTERP_BILINEAR |
+ HORIZ_INTERP_BILINEAR);
+ /* Pillar will have left/right borders */
+ left_border = (adjusted_mode->hdisplay -
+ scaled_width) / 2;
+ right_border = left_border;
+ if (mode->hdisplay & 1) /* odd resolutions */
+ right_border++;
+ /* keep the border even */
+ if (right_border & 1)
+ right_border++;
+ adjusted_mode->crtc_hdisplay = scaled_width;
+ /* use border instead of border minus one */
+ adjusted_mode->crtc_hblank_start =
+ scaled_width + right_border;
+ /* keep the hblank width constant */
+ adjusted_mode->crtc_hblank_end =
+ adjusted_mode->crtc_hblank_start +
+ hblank_width;
+ /*
+ * get the hsync start pos relative to
+ * hblank start
+ */
+ hsync_pos = (hblank_width - hsync_width) / 2;
+ /* keep the hsync_pos even */
+ if (hsync_pos & 1)
+ hsync_pos++;
+ adjusted_mode->crtc_hsync_start =
+ adjusted_mode->crtc_hblank_start +
+ hsync_pos;
+ /* keep the hsync width constant */
+ adjusted_mode->crtc_hsync_end =
+ adjusted_mode->crtc_hsync_start +
+ hsync_width;
+ border = 1;
+ } else if (panel_ratio < desired_ratio) { /* letter */
+ u32 scaled_height = mode->vdisplay *
+ horiz_scale / PANEL_RATIO_FACTOR;
+ vert_ratio = horiz_ratio;
+ pfit_control |= (HORIZ_AUTO_SCALE |
+ VERT_INTERP_BILINEAR |
+ HORIZ_INTERP_BILINEAR);
+ /* Letterbox will have top/bottom border */
+ top_border = (adjusted_mode->vdisplay -
+ scaled_height) / 2;
+ bottom_border = top_border;
+ if (mode->vdisplay & 1)
+ bottom_border++;
+ adjusted_mode->crtc_vdisplay = scaled_height;
+ /* use border instead of border minus one */
+ adjusted_mode->crtc_vblank_start =
+ scaled_height + bottom_border;
+ /* keep the vblank width constant */
+ adjusted_mode->crtc_vblank_end =
+ adjusted_mode->crtc_vblank_start +
+ vblank_width;
+ /*
+ * get the vsync start pos relative to
+ * vblank start
+ */
+ vsync_pos = (vblank_width - vsync_width) / 2;
+ adjusted_mode->crtc_vsync_start =
+ adjusted_mode->crtc_vblank_start +
+ vsync_pos;
+ /* keep the vsync width constant */
+ adjusted_mode->crtc_vsync_end =
+ adjusted_mode->crtc_vsync_start +
+ vsync_width;
+ border = 1;
+ } else {
+ /* Aspect ratios match, let the hw scale in both directions */
+ pfit_control |= (VERT_AUTO_SCALE |
+ HORIZ_AUTO_SCALE |
+ VERT_INTERP_BILINEAR |
+ HORIZ_INTERP_BILINEAR);
+ }
+ horiz_bits = (1 << bits) * horiz_ratio /
+ PANEL_RATIO_FACTOR;
+ vert_bits = (1 << bits) * vert_ratio /
+ PANEL_RATIO_FACTOR;
+ pfit_pgm_ratios =
+ ((vert_bits << PFIT_VERT_SCALE_SHIFT) &
+ PFIT_VERT_SCALE_MASK) |
+ ((horiz_bits << PFIT_HORIZ_SCALE_SHIFT) &
+ PFIT_HORIZ_SCALE_MASK);
+ }
+ break;
+
+ case DRM_MODE_SCALE_FULLSCREEN:
+ /*
+ * Full scaling, even if it changes the aspect ratio.
+ * Fortunately this is all done for us in hw.
+ */
+ pfit_control |= PFIT_ENABLE;
+ if (IS_I965G(dev))
+ pfit_control |= PFIT_SCALING_AUTO;
+ else
+ pfit_control |= (VERT_AUTO_SCALE | HORIZ_AUTO_SCALE |
+ VERT_INTERP_BILINEAR |
+ HORIZ_INTERP_BILINEAR);
+ break;
+ default:
+ break;
+ }
+
+out:
+ lvds_priv->pfit_control = pfit_control;
+ lvds_priv->pfit_pgm_ratios = pfit_pgm_ratios;
/*
* XXX: It would be nice to support lower refresh rates on the
* panels to reduce power consumption, and perhaps match the
@@ -301,8 +572,8 @@ static void intel_lvds_mode_set(struct drm_encoder *encoder,
{
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
- u32 pfit_control;
+ struct intel_output *intel_output = enc_to_intel_output(encoder);
+ struct intel_lvds_priv *lvds_priv = intel_output->dev_priv;
/*
* The LVDS pin pair will already have been turned on in the
@@ -319,22 +590,8 @@ static void intel_lvds_mode_set(struct drm_encoder *encoder,
* screen. Should be enabled before the pipe is enabled, according to
* register description and PRM.
*/
- if (mode->hdisplay != adjusted_mode->hdisplay ||
- mode->vdisplay != adjusted_mode->vdisplay)
- pfit_control = (PFIT_ENABLE | VERT_AUTO_SCALE |
- HORIZ_AUTO_SCALE | VERT_INTERP_BILINEAR |
- HORIZ_INTERP_BILINEAR);
- else
- pfit_control = 0;
-
- if (!IS_I965G(dev)) {
- if (dev_priv->panel_wants_dither || dev_priv->lvds_dither)
- pfit_control |= PANEL_8TO6_DITHER_ENABLE;
- }
- else
- pfit_control |= intel_crtc->pipe << PFIT_PIPE_SHIFT;
-
- I915_WRITE(PFIT_CONTROL, pfit_control);
+ I915_WRITE(PFIT_PGM_RATIOS, lvds_priv->pfit_pgm_ratios);
+ I915_WRITE(PFIT_CONTROL, lvds_priv->pfit_control);
}
/**
@@ -406,6 +663,34 @@ static int intel_lvds_set_property(struct drm_connector *connector,
struct drm_property *property,
uint64_t value)
{
+ struct drm_device *dev = connector->dev;
+ struct intel_output *intel_output =
+ to_intel_output(connector);
+
+ if (property == dev->mode_config.scaling_mode_property &&
+ connector->encoder) {
+ struct drm_crtc *crtc = connector->encoder->crtc;
+ struct intel_lvds_priv *lvds_priv = intel_output->dev_priv;
+ if (value == DRM_MODE_SCALE_NON_GPU) {
+ DRM_DEBUG_KMS(I915_LVDS,
+ "non_GPU property is unsupported\n");
+ return 0;
+ }
+ if (lvds_priv->fitting_mode == value) {
+ /* the LVDS scaling property is not changed */
+ return 0;
+ }
+ lvds_priv->fitting_mode = value;
+ if (crtc && crtc->enabled) {
+ /*
+ * If the CRTC is enabled, the display will be changed
+ * according to the new panel fitting mode.
+ */
+ drm_crtc_helper_set_mode(crtc, &crtc->mode,
+ crtc->x, crtc->y, crtc->fb);
+ }
+ }
+
return 0;
}
@@ -456,7 +741,7 @@ static const struct dmi_system_id intel_no_lvds[] = {
.callback = intel_no_lvds_dmi_callback,
.ident = "Apple Mac Mini (Core series)",
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
+ DMI_MATCH(DMI_SYS_VENDOR, "Apple"),
DMI_MATCH(DMI_PRODUCT_NAME, "Macmini1,1"),
},
},
@@ -464,7 +749,7 @@ static const struct dmi_system_id intel_no_lvds[] = {
.callback = intel_no_lvds_dmi_callback,
.ident = "Apple Mac Mini (Core 2 series)",
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
+ DMI_MATCH(DMI_SYS_VENDOR, "Apple"),
DMI_MATCH(DMI_PRODUCT_NAME, "Macmini2,1"),
},
},
@@ -518,6 +803,7 @@ void intel_lvds_init(struct drm_device *dev)
struct drm_encoder *encoder;
struct drm_display_mode *scan; /* *modes, *bios_mode; */
struct drm_crtc *crtc;
+ struct intel_lvds_priv *lvds_priv;
u32 lvds;
int pipe, gpio = GPIOC;
@@ -531,7 +817,8 @@ void intel_lvds_init(struct drm_device *dev)
gpio = PCH_GPIOC;
}
- intel_output = kzalloc(sizeof(struct intel_output), GFP_KERNEL);
+ intel_output = kzalloc(sizeof(struct intel_output) +
+ sizeof(struct intel_lvds_priv), GFP_KERNEL);
if (!intel_output) {
return;
}
@@ -553,7 +840,18 @@ void intel_lvds_init(struct drm_device *dev)
connector->interlace_allowed = false;
connector->doublescan_allowed = false;
+ lvds_priv = (struct intel_lvds_priv *)(intel_output + 1);
+ intel_output->dev_priv = lvds_priv;
+ /* create the scaling mode property */
+ drm_mode_create_scaling_mode_property(dev);
+ /*
+ * the initial panel fitting mode will be FULL_SCREEN.
+ */
+ drm_connector_attach_property(&intel_output->base,
+ dev->mode_config.scaling_mode_property,
+ DRM_MODE_SCALE_FULLSCREEN);
+ lvds_priv->fitting_mode = DRM_MODE_SCALE_FULLSCREEN;
/*
* LVDS discovery:
* 1) check for EDID on DDC
@@ -649,5 +947,5 @@ failed:
if (intel_output->ddc_bus)
intel_i2c_destroy(intel_output->ddc_bus);
drm_connector_cleanup(connector);
- kfree(connector);
+ kfree(intel_output);
}
diff --git a/drivers/gpu/drm/i915/intel_modes.c b/drivers/gpu/drm/i915/intel_modes.c
index e0910fefce8..67e2f4632a2 100644
--- a/drivers/gpu/drm/i915/intel_modes.c
+++ b/drivers/gpu/drm/i915/intel_modes.c
@@ -53,10 +53,9 @@ bool intel_ddc_probe(struct intel_output *intel_output)
}
};
- intel_i2c_quirk_set(intel_output->ddc_bus->drm_dev, true);
- ret = i2c_transfer(&intel_output->ddc_bus->adapter, msgs, 2);
- intel_i2c_quirk_set(intel_output->ddc_bus->drm_dev, false);
-
+ intel_i2c_quirk_set(intel_output->base.dev, true);
+ ret = i2c_transfer(intel_output->ddc_bus, msgs, 2);
+ intel_i2c_quirk_set(intel_output->base.dev, false);
if (ret == 2)
return true;
@@ -74,10 +73,9 @@ int intel_ddc_get_modes(struct intel_output *intel_output)
struct edid *edid;
int ret = 0;
- intel_i2c_quirk_set(intel_output->ddc_bus->drm_dev, true);
- edid = drm_get_edid(&intel_output->base,
- &intel_output->ddc_bus->adapter);
- intel_i2c_quirk_set(intel_output->ddc_bus->drm_dev, false);
+ intel_i2c_quirk_set(intel_output->base.dev, true);
+ edid = drm_get_edid(&intel_output->base, intel_output->ddc_bus);
+ intel_i2c_quirk_set(intel_output->base.dev, false);
if (edid) {
drm_mode_connector_update_edid_property(&intel_output->base,
edid);
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index 9a00adb3a50..f03473779fe 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -38,8 +38,7 @@
#undef SDVO_DEBUG
#define I915_SDVO "i915_sdvo"
struct intel_sdvo_priv {
- struct intel_i2c_chan *i2c_bus;
- int slaveaddr;
+ u8 slave_addr;
/* Register for the SDVO device: SDVOB or SDVOC */
int output_device;
@@ -146,13 +145,13 @@ static bool intel_sdvo_read_byte(struct intel_output *intel_output, u8 addr,
struct i2c_msg msgs[] = {
{
- .addr = sdvo_priv->i2c_bus->slave_addr,
+ .addr = sdvo_priv->slave_addr >> 1,
.flags = 0,
.len = 1,
.buf = out_buf,
},
{
- .addr = sdvo_priv->i2c_bus->slave_addr,
+ .addr = sdvo_priv->slave_addr >> 1,
.flags = I2C_M_RD,
.len = 1,
.buf = buf,
@@ -162,7 +161,7 @@ static bool intel_sdvo_read_byte(struct intel_output *intel_output, u8 addr,
out_buf[0] = addr;
out_buf[1] = 0;
- if ((ret = i2c_transfer(&sdvo_priv->i2c_bus->adapter, msgs, 2)) == 2)
+ if ((ret = i2c_transfer(intel_output->i2c_bus, msgs, 2)) == 2)
{
*ch = buf[0];
return true;
@@ -175,10 +174,11 @@ static bool intel_sdvo_read_byte(struct intel_output *intel_output, u8 addr,
static bool intel_sdvo_write_byte(struct intel_output *intel_output, int addr,
u8 ch)
{
+ struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
u8 out_buf[2];
struct i2c_msg msgs[] = {
{
- .addr = intel_output->i2c_bus->slave_addr,
+ .addr = sdvo_priv->slave_addr >> 1,
.flags = 0,
.len = 2,
.buf = out_buf,
@@ -188,7 +188,7 @@ static bool intel_sdvo_write_byte(struct intel_output *intel_output, int addr,
out_buf[0] = addr;
out_buf[1] = ch;
- if (i2c_transfer(&intel_output->i2c_bus->adapter, msgs, 1) == 1)
+ if (i2c_transfer(intel_output->i2c_bus, msgs, 1) == 1)
{
return true;
}
@@ -1369,9 +1369,8 @@ intel_sdvo_hdmi_sink_detect(struct drm_connector *connector)
struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
struct edid *edid = NULL;
- intel_sdvo_set_control_bus_switch(intel_output, sdvo_priv->ddc_bus);
edid = drm_get_edid(&intel_output->base,
- &intel_output->ddc_bus->adapter);
+ intel_output->ddc_bus);
if (edid != NULL) {
sdvo_priv->is_hdmi = drm_detect_hdmi_monitor(edid);
kfree(edid);
@@ -1549,7 +1548,6 @@ static void intel_sdvo_get_tv_modes(struct drm_connector *connector)
static void intel_sdvo_get_lvds_modes(struct drm_connector *connector)
{
struct intel_output *intel_output = to_intel_output(connector);
- struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
struct drm_i915_private *dev_priv = connector->dev->dev_private;
/*
@@ -1557,8 +1555,6 @@ static void intel_sdvo_get_lvds_modes(struct drm_connector *connector)
* Assume that the preferred modes are
* arranged in priority order.
*/
- /* set the bus switch and get the modes */
- intel_sdvo_set_control_bus_switch(intel_output, sdvo_priv->ddc_bus);
intel_ddc_get_modes(intel_output);
if (list_empty(&connector->probed_modes) == false)
return;
@@ -1709,7 +1705,7 @@ intel_sdvo_chan_to_intel_output(struct intel_i2c_chan *chan)
list_for_each_entry(connector,
&dev->mode_config.connector_list, head) {
- if (to_intel_output(connector)->ddc_bus == chan) {
+ if (to_intel_output(connector)->ddc_bus == &chan->adapter) {
intel_output = to_intel_output(connector);
break;
}
@@ -1723,7 +1719,7 @@ static int intel_sdvo_master_xfer(struct i2c_adapter *i2c_adap,
struct intel_output *intel_output;
struct intel_sdvo_priv *sdvo_priv;
struct i2c_algo_bit_data *algo_data;
- struct i2c_algorithm *algo;
+ const struct i2c_algorithm *algo;
algo_data = (struct i2c_algo_bit_data *)i2c_adap->algo_data;
intel_output =
@@ -1733,7 +1729,7 @@ static int intel_sdvo_master_xfer(struct i2c_adapter *i2c_adap,
return -EINVAL;
sdvo_priv = intel_output->dev_priv;
- algo = (struct i2c_algorithm *)intel_output->i2c_bus->adapter.algo;
+ algo = intel_output->i2c_bus->algo;
intel_sdvo_set_control_bus_switch(intel_output, sdvo_priv->ddc_bus);
return algo->master_xfer(i2c_adap, msgs, num);
@@ -1785,13 +1781,11 @@ bool intel_sdvo_init(struct drm_device *dev, int output_device)
struct drm_connector *connector;
struct intel_output *intel_output;
struct intel_sdvo_priv *sdvo_priv;
- struct intel_i2c_chan *i2cbus = NULL;
- struct intel_i2c_chan *ddcbus = NULL;
+
int connector_type;
u8 ch[0x40];
int i;
- int encoder_type, output_id;
- u8 slave_addr;
+ int encoder_type;
intel_output = kcalloc(sizeof(struct intel_output)+sizeof(struct intel_sdvo_priv), 1, GFP_KERNEL);
if (!intel_output) {
@@ -1799,29 +1793,24 @@ bool intel_sdvo_init(struct drm_device *dev, int output_device)
}
sdvo_priv = (struct intel_sdvo_priv *)(intel_output + 1);
+ sdvo_priv->output_device = output_device;
+
+ intel_output->dev_priv = sdvo_priv;
intel_output->type = INTEL_OUTPUT_SDVO;
/* setup the DDC bus. */
if (output_device == SDVOB)
- i2cbus = intel_i2c_create(dev, GPIOE, "SDVOCTRL_E for SDVOB");
+ intel_output->i2c_bus = intel_i2c_create(dev, GPIOE, "SDVOCTRL_E for SDVOB");
else
- i2cbus = intel_i2c_create(dev, GPIOE, "SDVOCTRL_E for SDVOC");
+ intel_output->i2c_bus = intel_i2c_create(dev, GPIOE, "SDVOCTRL_E for SDVOC");
- if (!i2cbus)
+ if (!intel_output->i2c_bus)
goto err_inteloutput;
- slave_addr = intel_sdvo_get_slave_addr(dev, output_device);
- sdvo_priv->i2c_bus = i2cbus;
+ sdvo_priv->slave_addr = intel_sdvo_get_slave_addr(dev, output_device);
- if (output_device == SDVOB) {
- output_id = 1;
- } else {
- output_id = 2;
- }
- sdvo_priv->i2c_bus->slave_addr = slave_addr >> 1;
- sdvo_priv->output_device = output_device;
- intel_output->i2c_bus = i2cbus;
- intel_output->dev_priv = sdvo_priv;
+ /* Save the bit-banging i2c functionality for use by the DDC wrapper */
+ intel_sdvo_i2c_bit_algo.functionality = intel_output->i2c_bus->algo->functionality;
/* Read the regs to test if we can talk to the device */
for (i = 0; i < 0x40; i++) {
@@ -1835,17 +1824,15 @@ bool intel_sdvo_init(struct drm_device *dev, int output_device)
/* setup the DDC bus. */
if (output_device == SDVOB)
- ddcbus = intel_i2c_create(dev, GPIOE, "SDVOB DDC BUS");
+ intel_output->ddc_bus = intel_i2c_create(dev, GPIOE, "SDVOB DDC BUS");
else
- ddcbus = intel_i2c_create(dev, GPIOE, "SDVOC DDC BUS");
+ intel_output->ddc_bus = intel_i2c_create(dev, GPIOE, "SDVOC DDC BUS");
- if (ddcbus == NULL)
+ if (intel_output->ddc_bus == NULL)
goto err_i2c;
- intel_sdvo_i2c_bit_algo.functionality =
- intel_output->i2c_bus->adapter.algo->functionality;
- ddcbus->adapter.algo = &intel_sdvo_i2c_bit_algo;
- intel_output->ddc_bus = ddcbus;
+ /* Wrap with our custom algo which switches to DDC mode */
+ intel_output->ddc_bus->algo = &intel_sdvo_i2c_bit_algo;
/* In defaut case sdvo lvds is false */
sdvo_priv->is_lvds = false;
@@ -1965,9 +1952,10 @@ bool intel_sdvo_init(struct drm_device *dev, int output_device)
return true;
err_i2c:
- if (ddcbus != NULL)
+ if (intel_output->ddc_bus != NULL)
intel_i2c_destroy(intel_output->ddc_bus);
- intel_i2c_destroy(intel_output->i2c_bus);
+ if (intel_output->i2c_bus != NULL)
+ intel_i2c_destroy(intel_output->i2c_bus);
err_inteloutput:
kfree(intel_output);
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index ea68992e441..a43c98e3f07 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -1383,34 +1383,31 @@ intel_tv_detect_type (struct drm_crtc *crtc, struct intel_output *intel_output)
/*
* Detect TV by polling)
*/
- if (intel_output->load_detect_temp) {
- /* TV not currently running, prod it with destructive detect */
- save_tv_dac = tv_dac;
- tv_ctl = I915_READ(TV_CTL);
- save_tv_ctl = tv_ctl;
- tv_ctl &= ~TV_ENC_ENABLE;
- tv_ctl &= ~TV_TEST_MODE_MASK;
- tv_ctl |= TV_TEST_MODE_MONITOR_DETECT;
- tv_dac &= ~TVDAC_SENSE_MASK;
- tv_dac &= ~DAC_A_MASK;
- tv_dac &= ~DAC_B_MASK;
- tv_dac &= ~DAC_C_MASK;
- tv_dac |= (TVDAC_STATE_CHG_EN |
- TVDAC_A_SENSE_CTL |
- TVDAC_B_SENSE_CTL |
- TVDAC_C_SENSE_CTL |
- DAC_CTL_OVERRIDE |
- DAC_A_0_7_V |
- DAC_B_0_7_V |
- DAC_C_0_7_V);
- I915_WRITE(TV_CTL, tv_ctl);
- I915_WRITE(TV_DAC, tv_dac);
- intel_wait_for_vblank(dev);
- tv_dac = I915_READ(TV_DAC);
- I915_WRITE(TV_DAC, save_tv_dac);
- I915_WRITE(TV_CTL, save_tv_ctl);
- intel_wait_for_vblank(dev);
- }
+ save_tv_dac = tv_dac;
+ tv_ctl = I915_READ(TV_CTL);
+ save_tv_ctl = tv_ctl;
+ tv_ctl &= ~TV_ENC_ENABLE;
+ tv_ctl &= ~TV_TEST_MODE_MASK;
+ tv_ctl |= TV_TEST_MODE_MONITOR_DETECT;
+ tv_dac &= ~TVDAC_SENSE_MASK;
+ tv_dac &= ~DAC_A_MASK;
+ tv_dac &= ~DAC_B_MASK;
+ tv_dac &= ~DAC_C_MASK;
+ tv_dac |= (TVDAC_STATE_CHG_EN |
+ TVDAC_A_SENSE_CTL |
+ TVDAC_B_SENSE_CTL |
+ TVDAC_C_SENSE_CTL |
+ DAC_CTL_OVERRIDE |
+ DAC_A_0_7_V |
+ DAC_B_0_7_V |
+ DAC_C_0_7_V);
+ I915_WRITE(TV_CTL, tv_ctl);
+ I915_WRITE(TV_DAC, tv_dac);
+ intel_wait_for_vblank(dev);
+ tv_dac = I915_READ(TV_DAC);
+ I915_WRITE(TV_DAC, save_tv_dac);
+ I915_WRITE(TV_CTL, save_tv_ctl);
+ intel_wait_for_vblank(dev);
/*
* A B C
* 0 1 1 Composite
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index f30aa7274a5..f97563db4e5 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -35,6 +35,23 @@
#include "atom.h"
/*
+ * Clear GPU surface registers.
+ */
+static void radeon_surface_init(struct radeon_device *rdev)
+{
+ /* FIXME: check this out */
+ if (rdev->family < CHIP_R600) {
+ int i;
+
+ for (i = 0; i < 8; i++) {
+ WREG32(RADEON_SURFACE0_INFO +
+ i * (RADEON_SURFACE1_INFO - RADEON_SURFACE0_INFO),
+ 0);
+ }
+ }
+}
+
+/*
* GPU scratch registers helpers function.
*/
static void radeon_scratch_init(struct radeon_device *rdev)
@@ -496,6 +513,8 @@ int radeon_device_init(struct radeon_device *rdev,
radeon_errata(rdev);
/* Initialize scratch registers */
radeon_scratch_init(rdev);
+ /* Initialize surface registers */
+ radeon_surface_init(rdev);
/* TODO: disable VGA need to use VGA request */
/* BIOS*/
@@ -604,9 +623,6 @@ int radeon_device_init(struct radeon_device *rdev,
if (r) {
return r;
}
- if (rdev->fbdev_rfb && rdev->fbdev_rfb->obj) {
- rdev->fbdev_robj = rdev->fbdev_rfb->obj->driver_private;
- }
if (!ret) {
DRM_INFO("radeon: kernel modesetting successfully initialized.\n");
}
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index 09c9fb9f621..84ba69f4878 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -345,7 +345,7 @@ static void __exit radeon_exit(void)
drm_exit(driver);
}
-late_initcall(radeon_init);
+module_init(radeon_init);
module_exit(radeon_exit);
MODULE_AUTHOR(DRIVER_AUTHOR);
diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c
index fa86d398945..9e8f191eb64 100644
--- a/drivers/gpu/drm/radeon/radeon_fb.c
+++ b/drivers/gpu/drm/radeon/radeon_fb.c
@@ -478,14 +478,16 @@ int radeonfb_create(struct radeon_device *rdev,
{
struct fb_info *info;
struct radeon_fb_device *rfbdev;
- struct drm_framebuffer *fb;
+ struct drm_framebuffer *fb = NULL;
struct radeon_framebuffer *rfb;
struct drm_mode_fb_cmd mode_cmd;
struct drm_gem_object *gobj = NULL;
struct radeon_object *robj = NULL;
struct device *device = &rdev->pdev->dev;
int size, aligned_size, ret;
+ u64 fb_gpuaddr;
void *fbptr = NULL;
+ unsigned long tmp;
mode_cmd.width = surface_width;
mode_cmd.height = surface_height;
@@ -498,11 +500,12 @@ int radeonfb_create(struct radeon_device *rdev,
aligned_size = ALIGN(size, PAGE_SIZE);
ret = radeon_gem_object_create(rdev, aligned_size, 0,
- RADEON_GEM_DOMAIN_VRAM,
- false, ttm_bo_type_kernel,
- false, &gobj);
+ RADEON_GEM_DOMAIN_VRAM,
+ false, ttm_bo_type_kernel,
+ false, &gobj);
if (ret) {
- printk(KERN_ERR "failed to allocate framebuffer\n");
+ printk(KERN_ERR "failed to allocate framebuffer (%d %d)\n",
+ surface_width, surface_height);
ret = -ENOMEM;
goto out;
}
@@ -515,12 +518,19 @@ int radeonfb_create(struct radeon_device *rdev,
ret = -ENOMEM;
goto out_unref;
}
+ ret = radeon_object_pin(robj, RADEON_GEM_DOMAIN_VRAM, &fb_gpuaddr);
+ if (ret) {
+ printk(KERN_ERR "failed to pin framebuffer\n");
+ ret = -ENOMEM;
+ goto out_unref;
+ }
list_add(&fb->filp_head, &rdev->ddev->mode_config.fb_kernel_list);
rfb = to_radeon_framebuffer(fb);
*rfb_p = rfb;
rdev->fbdev_rfb = rfb;
+ rdev->fbdev_robj = robj;
info = framebuffer_alloc(sizeof(struct radeon_fb_device), device);
if (info == NULL) {
@@ -541,13 +551,13 @@ int radeonfb_create(struct radeon_device *rdev,
info->fix.xpanstep = 1; /* doing it in hw */
info->fix.ypanstep = 1; /* doing it in hw */
info->fix.ywrapstep = 0;
- info->fix.accel = FB_ACCEL_I830;
+ info->fix.accel = FB_ACCEL_NONE;
info->fix.type_aux = 0;
info->flags = FBINFO_DEFAULT;
info->fbops = &radeonfb_ops;
info->fix.line_length = fb->pitch;
- info->screen_base = fbptr;
- info->fix.smem_start = (unsigned long)fbptr;
+ tmp = fb_gpuaddr - rdev->mc.vram_location;
+ info->fix.smem_start = rdev->mc.aper_base + tmp;
info->fix.smem_len = size;
info->screen_base = fbptr;
info->screen_size = size;
@@ -562,8 +572,8 @@ int radeonfb_create(struct radeon_device *rdev,
info->var.width = -1;
info->var.xres = fb_width;
info->var.yres = fb_height;
- info->fix.mmio_start = pci_resource_start(rdev->pdev, 2);
- info->fix.mmio_len = pci_resource_len(rdev->pdev, 2);
+ info->fix.mmio_start = 0;
+ info->fix.mmio_len = 0;
info->pixmap.size = 64*1024;
info->pixmap.buf_align = 8;
info->pixmap.access_align = 32;
@@ -644,7 +654,7 @@ out_unref:
if (robj) {
radeon_object_kunmap(robj);
}
- if (ret) {
+ if (fb && ret) {
list_del(&fb->filp_head);
drm_gem_object_unreference(gobj);
drm_framebuffer_cleanup(fb);
@@ -813,6 +823,7 @@ int radeonfb_remove(struct drm_device *dev, struct drm_framebuffer *fb)
robj = rfb->obj->driver_private;
unregister_framebuffer(info);
radeon_object_kunmap(robj);
+ radeon_object_unpin(robj);
framebuffer_release(info);
}
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index 983e8df5e00..bac0d06c52a 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -223,7 +223,6 @@ int radeon_object_pin(struct radeon_object *robj, uint32_t domain,
{
uint32_t flags;
uint32_t tmp;
- void *fbptr;
int r;
flags = radeon_object_flags_from_domain(domain);
@@ -242,10 +241,6 @@ int radeon_object_pin(struct radeon_object *robj, uint32_t domain,
DRM_ERROR("radeon: failed to reserve object for pinning it.\n");
return r;
}
- if (robj->rdev->fbdev_robj == robj) {
- mutex_lock(&robj->rdev->fbdev_info->lock);
- radeon_object_kunmap(robj);
- }
tmp = robj->tobj.mem.placement;
ttm_flag_masked(&tmp, flags, TTM_PL_MASK_MEM);
robj->tobj.proposed_placement = tmp | TTM_PL_FLAG_NO_EVICT | TTM_PL_MASK_CACHING;
@@ -261,23 +256,12 @@ int radeon_object_pin(struct radeon_object *robj, uint32_t domain,
DRM_ERROR("radeon: failed to pin object.\n");
}
radeon_object_unreserve(robj);
- if (robj->rdev->fbdev_robj == robj) {
- if (!r) {
- r = radeon_object_kmap(robj, &fbptr);
- }
- if (!r) {
- robj->rdev->fbdev_info->screen_base = fbptr;
- robj->rdev->fbdev_info->fix.smem_start = (unsigned long)fbptr;
- }
- mutex_unlock(&robj->rdev->fbdev_info->lock);
- }
return r;
}
void radeon_object_unpin(struct radeon_object *robj)
{
uint32_t flags;
- void *fbptr;
int r;
spin_lock(&robj->tobj.lock);
@@ -297,10 +281,6 @@ void radeon_object_unpin(struct radeon_object *robj)
DRM_ERROR("radeon: failed to reserve object for unpinning it.\n");
return;
}
- if (robj->rdev->fbdev_robj == robj) {
- mutex_lock(&robj->rdev->fbdev_info->lock);
- radeon_object_kunmap(robj);
- }
flags = robj->tobj.mem.placement;
robj->tobj.proposed_placement = flags & ~TTM_PL_FLAG_NO_EVICT;
r = ttm_buffer_object_validate(&robj->tobj,
@@ -310,16 +290,6 @@ void radeon_object_unpin(struct radeon_object *robj)
DRM_ERROR("radeon: failed to unpin buffer.\n");
}
radeon_object_unreserve(robj);
- if (robj->rdev->fbdev_robj == robj) {
- if (!r) {
- r = radeon_object_kmap(robj, &fbptr);
- }
- if (!r) {
- robj->rdev->fbdev_info->screen_base = fbptr;
- robj->rdev->fbdev_info->fix.smem_start = (unsigned long)fbptr;
- }
- mutex_unlock(&robj->rdev->fbdev_info->lock);
- }
}
int radeon_object_wait(struct radeon_object *robj)
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index 517c8455963..bdec583901e 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -34,7 +34,6 @@
#include <linux/highmem.h>
#include <linux/wait.h>
#include <linux/vmalloc.h>
-#include <linux/version.h>
#include <linux/module.h>
void ttm_bo_free_old_node(struct ttm_buffer_object *bo)
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index 27b146c54fb..40b75032ea4 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -32,7 +32,6 @@
#include <ttm/ttm_bo_driver.h>
#include <ttm/ttm_placement.h>
#include <linux/mm.h>
-#include <linux/version.h>
#include <linux/rbtree.h>
#include <linux/module.h>
#include <linux/uaccess.h>
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index 0331fa74cd3..75dc8bd2459 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -28,7 +28,6 @@
* Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
*/
-#include <linux/version.h>
#include <linux/vmalloc.h>
#include <linux/sched.h>
#include <linux/highmem.h>
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index 3c259ee7ddd..8206442fbab 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -326,6 +326,16 @@ config I2C_DAVINCI
devices such as DaVinci NIC.
For details please see http://www.ti.com/davinci
+config I2C_DESIGNWARE
+ tristate "Synopsys DesignWare"
+ depends on HAVE_CLK
+ help
+ If you say yes to this option, support will be included for the
+ Synopsys DesignWare I2C adapter. Only master mode is supported.
+
+ This driver can also be built as a module. If so, the module
+ will be called i2c-designware.
+
config I2C_GPIO
tristate "GPIO-based bitbanging I2C"
depends on GENERIC_GPIO
diff --git a/drivers/i2c/busses/Makefile b/drivers/i2c/busses/Makefile
index edeabf00310..e654263bfc0 100644
--- a/drivers/i2c/busses/Makefile
+++ b/drivers/i2c/busses/Makefile
@@ -30,6 +30,7 @@ obj-$(CONFIG_I2C_AU1550) += i2c-au1550.o
obj-$(CONFIG_I2C_BLACKFIN_TWI) += i2c-bfin-twi.o
obj-$(CONFIG_I2C_CPM) += i2c-cpm.o
obj-$(CONFIG_I2C_DAVINCI) += i2c-davinci.o
+obj-$(CONFIG_I2C_DESIGNWARE) += i2c-designware.o
obj-$(CONFIG_I2C_GPIO) += i2c-gpio.o
obj-$(CONFIG_I2C_HIGHLANDER) += i2c-highlander.o
obj-$(CONFIG_I2C_IBM_IIC) += i2c-ibm_iic.o
diff --git a/drivers/i2c/busses/i2c-designware.c b/drivers/i2c/busses/i2c-designware.c
new file mode 100644
index 00000000000..b444762e9b9
--- /dev/null
+++ b/drivers/i2c/busses/i2c-designware.c
@@ -0,0 +1,624 @@
+/*
+ * Synopsys DesignWare I2C adapter driver (master only).
+ *
+ * Based on the TI DAVINCI I2C adapter driver.
+ *
+ * Copyright (C) 2006 Texas Instruments.
+ * Copyright (C) 2007 MontaVista Software Inc.
+ * Copyright (C) 2009 Provigent Ltd.
+ *
+ * ----------------------------------------------------------------------------
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ * ----------------------------------------------------------------------------
+ *
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/clk.h>
+#include <linux/errno.h>
+#include <linux/sched.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+
+/*
+ * Registers offset
+ */
+#define DW_IC_CON 0x0
+#define DW_IC_TAR 0x4
+#define DW_IC_DATA_CMD 0x10
+#define DW_IC_SS_SCL_HCNT 0x14
+#define DW_IC_SS_SCL_LCNT 0x18
+#define DW_IC_FS_SCL_HCNT 0x1c
+#define DW_IC_FS_SCL_LCNT 0x20
+#define DW_IC_INTR_STAT 0x2c
+#define DW_IC_INTR_MASK 0x30
+#define DW_IC_CLR_INTR 0x40
+#define DW_IC_ENABLE 0x6c
+#define DW_IC_STATUS 0x70
+#define DW_IC_TXFLR 0x74
+#define DW_IC_RXFLR 0x78
+#define DW_IC_COMP_PARAM_1 0xf4
+#define DW_IC_TX_ABRT_SOURCE 0x80
+
+#define DW_IC_CON_MASTER 0x1
+#define DW_IC_CON_SPEED_STD 0x2
+#define DW_IC_CON_SPEED_FAST 0x4
+#define DW_IC_CON_10BITADDR_MASTER 0x10
+#define DW_IC_CON_RESTART_EN 0x20
+#define DW_IC_CON_SLAVE_DISABLE 0x40
+
+#define DW_IC_INTR_TX_EMPTY 0x10
+#define DW_IC_INTR_TX_ABRT 0x40
+#define DW_IC_INTR_STOP_DET 0x200
+
+#define DW_IC_STATUS_ACTIVITY 0x1
+
+#define DW_IC_ERR_TX_ABRT 0x1
+
+/*
+ * status codes
+ */
+#define STATUS_IDLE 0x0
+#define STATUS_WRITE_IN_PROGRESS 0x1
+#define STATUS_READ_IN_PROGRESS 0x2
+
+#define TIMEOUT 20 /* ms */
+
+/*
+ * hardware abort codes from the DW_IC_TX_ABRT_SOURCE register
+ *
+ * only the abort codes we expect are listed here;
+ * refer to the datasheet for the full list
+ */
+#define ABRT_7B_ADDR_NOACK 0
+#define ABRT_10ADDR1_NOACK 1
+#define ABRT_10ADDR2_NOACK 2
+#define ABRT_TXDATA_NOACK 3
+#define ABRT_GCALL_NOACK 4
+#define ABRT_GCALL_READ 5
+#define ABRT_SBYTE_ACKDET 7
+#define ABRT_SBYTE_NORSTRT 9
+#define ABRT_10B_RD_NORSTRT 10
+#define ARB_MASTER_DIS 11
+#define ARB_LOST 12
+
+static char *abort_sources[] = {
+ [ABRT_7B_ADDR_NOACK] =
+ "slave address not acknowledged (7bit mode)",
+ [ABRT_10ADDR1_NOACK] =
+ "first address byte not acknowledged (10bit mode)",
+ [ABRT_10ADDR2_NOACK] =
+ "second address byte not acknowledged (10bit mode)",
+ [ABRT_TXDATA_NOACK] =
+ "data not acknowledged",
+ [ABRT_GCALL_NOACK] =
+ "no acknowledgement for a general call",
+ [ABRT_GCALL_READ] =
+ "read after general call",
+ [ABRT_SBYTE_ACKDET] =
+ "start byte acknowledged",
+ [ABRT_SBYTE_NORSTRT] =
+ "trying to send start byte when restart is disabled",
+ [ABRT_10B_RD_NORSTRT] =
+ "trying to read when restart is disabled (10bit mode)",
+ [ARB_MASTER_DIS] =
+ "trying to use disabled adapter",
+ [ARB_LOST] =
+ "lost arbitration",
+};
+
+/**
+ * struct dw_i2c_dev - private i2c-designware data
+ * @dev: driver model device node
+ * @base: IO registers pointer
+ * @cmd_complete: tx completion indicator
+ * @pump_msg: continue in progress transfers
+ * @lock: protect this struct and IO registers
+ * @clk: input reference clock
+ * @cmd_err: run time hardware error code
+ * @msgs: points to an array of messages currently being transferred
+ * @msgs_num: the number of elements in msgs
+ * @msg_write_idx: the element index of the current tx message in the msgs
+ * array
+ * @tx_buf_len: the length of the current tx buffer
+ * @tx_buf: the current tx buffer
+ * @msg_read_idx: the element index of the current rx message in the msgs
+ * array
+ * @rx_buf_len: the length of the current rx buffer
+ * @rx_buf: the current rx buffer
+ * @msg_err: error status of the current transfer
+ * @status: i2c master status, one of STATUS_*
+ * @abort_source: copy of the TX_ABRT_SOURCE register
+ * @irq: interrupt number for the i2c master
+ * @adapter: i2c subsystem adapter node
+ * @tx_fifo_depth: depth of the hardware tx fifo
+ * @rx_fifo_depth: depth of the hardware rx fifo
+ */
+struct dw_i2c_dev {
+ struct device *dev;
+ void __iomem *base;
+ struct completion cmd_complete;
+ struct tasklet_struct pump_msg;
+ struct mutex lock;
+ struct clk *clk;
+ int cmd_err;
+ struct i2c_msg *msgs;
+ int msgs_num;
+ int msg_write_idx;
+ u16 tx_buf_len;
+ u8 *tx_buf;
+ int msg_read_idx;
+ u16 rx_buf_len;
+ u8 *rx_buf;
+ int msg_err;
+ unsigned int status;
+ u16 abort_source;
+ int irq;
+ struct i2c_adapter adapter;
+ unsigned int tx_fifo_depth;
+ unsigned int rx_fifo_depth;
+};
+
+/**
+ * i2c_dw_init() - initialize the designware i2c master hardware
+ * @dev: device private data
+ *
+ * This function configures and enables the I2C master.
+ * It is called during driver initialization and again if the controller
+ * times out at run time.
+ */
+static void i2c_dw_init(struct dw_i2c_dev *dev)
+{
+ u32 input_clock_khz = clk_get_rate(dev->clk) / 1000;
+ u16 ic_con;
+
+ /* Disable the adapter */
+ writeb(0, dev->base + DW_IC_ENABLE);
+
+ /* set standard and fast speed dividers for high/low periods */
+ writew((input_clock_khz * 40 / 10000)+1, /* std speed high, 4us */
+ dev->base + DW_IC_SS_SCL_HCNT);
+ writew((input_clock_khz * 47 / 10000)+1, /* std speed low, 4.7us */
+ dev->base + DW_IC_SS_SCL_LCNT);
+ writew((input_clock_khz * 6 / 10000)+1, /* fast speed high, 0.6us */
+ dev->base + DW_IC_FS_SCL_HCNT);
+ writew((input_clock_khz * 13 / 10000)+1, /* fast speed low, 1.3us */
+ dev->base + DW_IC_FS_SCL_LCNT);
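+
+ /*
+ * Worked example (hypothetical 100 MHz input clock, i.e. 100000 kHz):
+ *   SS_SCL_HCNT = 100000 * 40 / 10000 + 1 = 401  (4 us high)
+ *   SS_SCL_LCNT = 100000 * 47 / 10000 + 1 = 471  (4.7 us low)
+ *   FS_SCL_HCNT = 100000 *  6 / 10000 + 1 =  61  (0.6 us high)
+ *   FS_SCL_LCNT = 100000 * 13 / 10000 + 1 = 131  (1.3 us low)
+ */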
+
+ /* configure the i2c master */
+ ic_con = DW_IC_CON_MASTER | DW_IC_CON_SLAVE_DISABLE |
+ DW_IC_CON_RESTART_EN | DW_IC_CON_SPEED_FAST;
+ writew(ic_con, dev->base + DW_IC_CON);
+}
+
+/*
+ * Wait for the bus to become idle
+ */
+static int i2c_dw_wait_bus_not_busy(struct dw_i2c_dev *dev)
+{
+ int timeout = TIMEOUT;
+
+ while (readb(dev->base + DW_IC_STATUS) & DW_IC_STATUS_ACTIVITY) {
+ if (timeout <= 0) {
+ dev_warn(dev->dev, "timeout waiting for bus ready\n");
+ return -ETIMEDOUT;
+ }
+ timeout--;
+ mdelay(1);
+ }
+
+ return 0;
+}
+
+/*
+ * Initiate low level master read/write transaction.
+ * This function is called from i2c_dw_xfer when starting a transfer.
+ * This function is also called from dw_i2c_pump_msg to continue a transfer
+ * that is longer than the size of the TX FIFO.
+ */
+static void
+i2c_dw_xfer_msg(struct i2c_adapter *adap)
+{
+ struct dw_i2c_dev *dev = i2c_get_adapdata(adap);
+ struct i2c_msg *msgs = dev->msgs;
+ int num = dev->msgs_num;
+ u16 ic_con, intr_mask;
+ int tx_limit = dev->tx_fifo_depth - readb(dev->base + DW_IC_TXFLR);
+ int rx_limit = dev->rx_fifo_depth - readb(dev->base + DW_IC_RXFLR);
+ u16 addr = msgs[dev->msg_write_idx].addr;
+ u16 buf_len = dev->tx_buf_len;
+
+ if (!(dev->status & STATUS_WRITE_IN_PROGRESS)) {
+ /* Disable the adapter */
+ writeb(0, dev->base + DW_IC_ENABLE);
+
+ /* set the slave (target) address */
+ writew(msgs[dev->msg_write_idx].addr, dev->base + DW_IC_TAR);
+
+ /* if the slave address is a ten-bit address, enable 10BITADDR */
+ ic_con = readw(dev->base + DW_IC_CON);
+ if (msgs[dev->msg_write_idx].flags & I2C_M_TEN)
+ ic_con |= DW_IC_CON_10BITADDR_MASTER;
+ else
+ ic_con &= ~DW_IC_CON_10BITADDR_MASTER;
+ writew(ic_con, dev->base + DW_IC_CON);
+
+ /* Enable the adapter */
+ writeb(1, dev->base + DW_IC_ENABLE);
+ }
+
+ for (; dev->msg_write_idx < num; dev->msg_write_idx++) {
+ /* if target address has changed, we need to
+ * reprogram the target address in the i2c
+ * adapter when we are done with this transfer
+ */
+ if (msgs[dev->msg_write_idx].addr != addr)
+ return;
+
+ if (msgs[dev->msg_write_idx].len == 0) {
+ dev_err(dev->dev,
+ "%s: invalid message length\n", __func__);
+ dev->msg_err = -EINVAL;
+ return;
+ }
+
+ if (!(dev->status & STATUS_WRITE_IN_PROGRESS)) {
+ /* new i2c_msg */
+ dev->tx_buf = msgs[dev->msg_write_idx].buf;
+ buf_len = msgs[dev->msg_write_idx].len;
+ }
+
+ while (buf_len > 0 && tx_limit > 0 && rx_limit > 0) {
+ if (msgs[dev->msg_write_idx].flags & I2C_M_RD) {
+ writew(0x100, dev->base + DW_IC_DATA_CMD);
+ rx_limit--;
+ } else
+ writew(*(dev->tx_buf++),
+ dev->base + DW_IC_DATA_CMD);
+ tx_limit--; buf_len--;
+ }
+ }
+
+ intr_mask = DW_IC_INTR_STOP_DET | DW_IC_INTR_TX_ABRT;
+ if (buf_len > 0) { /* more bytes to be written */
+ intr_mask |= DW_IC_INTR_TX_EMPTY;
+ dev->status |= STATUS_WRITE_IN_PROGRESS;
+ } else
+ dev->status &= ~STATUS_WRITE_IN_PROGRESS;
+ writew(intr_mask, dev->base + DW_IC_INTR_MASK);
+
+ dev->tx_buf_len = buf_len;
+}
+
+static void
+i2c_dw_read(struct i2c_adapter *adap)
+{
+ struct dw_i2c_dev *dev = i2c_get_adapdata(adap);
+ struct i2c_msg *msgs = dev->msgs;
+ int num = dev->msgs_num;
+ u16 addr = msgs[dev->msg_read_idx].addr;
+ int rx_valid = readw(dev->base + DW_IC_RXFLR);
+
+ for (; dev->msg_read_idx < num; dev->msg_read_idx++) {
+ u16 len;
+ u8 *buf;
+
+ if (!(msgs[dev->msg_read_idx].flags & I2C_M_RD))
+ continue;
+
+ /* different i2c client, reprogram the i2c adapter */
+ if (msgs[dev->msg_read_idx].addr != addr)
+ return;
+
+ if (!(dev->status & STATUS_READ_IN_PROGRESS)) {
+ len = msgs[dev->msg_read_idx].len;
+ buf = msgs[dev->msg_read_idx].buf;
+ } else {
+ len = dev->rx_buf_len;
+ buf = dev->rx_buf;
+ }
+
+ for (; len > 0 && rx_valid > 0; len--, rx_valid--)
+ *buf++ = readb(dev->base + DW_IC_DATA_CMD);
+
+ if (len > 0) {
+ dev->status |= STATUS_READ_IN_PROGRESS;
+ dev->rx_buf_len = len;
+ dev->rx_buf = buf;
+ return;
+ } else
+ dev->status &= ~STATUS_READ_IN_PROGRESS;
+ }
+}
+
+/*
+ * Prepare controller for a transaction and call i2c_dw_xfer_msg
+ */
+static int
+i2c_dw_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
+{
+ struct dw_i2c_dev *dev = i2c_get_adapdata(adap);
+ int ret;
+
+ dev_dbg(dev->dev, "%s: msgs: %d\n", __func__, num);
+
+ mutex_lock(&dev->lock);
+
+ INIT_COMPLETION(dev->cmd_complete);
+ dev->msgs = msgs;
+ dev->msgs_num = num;
+ dev->cmd_err = 0;
+ dev->msg_write_idx = 0;
+ dev->msg_read_idx = 0;
+ dev->msg_err = 0;
+ dev->status = STATUS_IDLE;
+
+ ret = i2c_dw_wait_bus_not_busy(dev);
+ if (ret < 0)
+ goto done;
+
+ /* start the transfers */
+ i2c_dw_xfer_msg(adap);
+
+ /* wait for tx to complete */
+ ret = wait_for_completion_interruptible_timeout(&dev->cmd_complete, HZ);
+ if (ret == 0) {
+ dev_err(dev->dev, "controller timed out\n");
+ i2c_dw_init(dev);
+ ret = -ETIMEDOUT;
+ goto done;
+ } else if (ret < 0)
+ goto done;
+
+ if (dev->msg_err) {
+ ret = dev->msg_err;
+ goto done;
+ }
+
+ /* no error */
+ if (likely(!dev->cmd_err)) {
+ /* read rx fifo, and disable the adapter */
+ do {
+ i2c_dw_read(adap);
+ } while (dev->status & STATUS_READ_IN_PROGRESS);
+ writeb(0, dev->base + DW_IC_ENABLE);
+ ret = num;
+ goto done;
+ }
+
+ /* We have an error */
+ if (dev->cmd_err == DW_IC_ERR_TX_ABRT) {
+ unsigned long abort_source = dev->abort_source;
+ int i;
+
+ for_each_bit(i, &abort_source, ARRAY_SIZE(abort_sources)) {
+ dev_err(dev->dev, "%s: %s\n", __func__, abort_sources[i]);
+ }
+ }
+ ret = -EIO;
+
+done:
+ mutex_unlock(&dev->lock);
+
+ return ret;
+}
+
+static u32 i2c_dw_func(struct i2c_adapter *adap)
+{
+ return I2C_FUNC_I2C | I2C_FUNC_10BIT_ADDR;
+}
+
+static void dw_i2c_pump_msg(unsigned long data)
+{
+ struct dw_i2c_dev *dev = (struct dw_i2c_dev *) data;
+ u16 intr_mask;
+
+ i2c_dw_read(&dev->adapter);
+ i2c_dw_xfer_msg(&dev->adapter);
+
+ intr_mask = DW_IC_INTR_STOP_DET | DW_IC_INTR_TX_ABRT;
+ if (dev->status & STATUS_WRITE_IN_PROGRESS)
+ intr_mask |= DW_IC_INTR_TX_EMPTY;
+ writew(intr_mask, dev->base + DW_IC_INTR_MASK);
+}
+
+/*
+ * Interrupt service routine. This gets called whenever an I2C interrupt
+ * occurs.
+ */
+static irqreturn_t i2c_dw_isr(int this_irq, void *dev_id)
+{
+ struct dw_i2c_dev *dev = dev_id;
+ u16 stat;
+
+ stat = readw(dev->base + DW_IC_INTR_STAT);
+ dev_dbg(dev->dev, "%s: stat=0x%x\n", __func__, stat);
+ if (stat & DW_IC_INTR_TX_ABRT) {
+ dev->abort_source = readw(dev->base + DW_IC_TX_ABRT_SOURCE);
+ dev->cmd_err |= DW_IC_ERR_TX_ABRT;
+ dev->status = STATUS_IDLE;
+ } else if (stat & DW_IC_INTR_TX_EMPTY)
+ tasklet_schedule(&dev->pump_msg);
+
+ readb(dev->base + DW_IC_CLR_INTR); /* clear interrupts */
+ writew(0, dev->base + DW_IC_INTR_MASK); /* disable interrupts */
+ if (stat & (DW_IC_INTR_TX_ABRT | DW_IC_INTR_STOP_DET))
+ complete(&dev->cmd_complete);
+
+ return IRQ_HANDLED;
+}
+
+static struct i2c_algorithm i2c_dw_algo = {
+ .master_xfer = i2c_dw_xfer,
+ .functionality = i2c_dw_func,
+};
+
+static int __devinit dw_i2c_probe(struct platform_device *pdev)
+{
+ struct dw_i2c_dev *dev;
+ struct i2c_adapter *adap;
+ struct resource *mem, *irq, *ioarea;
+ int r;
+
+ /* NOTE: driver uses the static register mapping */
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!mem) {
+ dev_err(&pdev->dev, "no mem resource?\n");
+ return -EINVAL;
+ }
+
+ irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (!irq) {
+ dev_err(&pdev->dev, "no irq resource?\n");
+ return -EINVAL;
+ }
+
+ ioarea = request_mem_region(mem->start, resource_size(mem),
+ pdev->name);
+ if (!ioarea) {
+ dev_err(&pdev->dev, "I2C region already claimed\n");
+ return -EBUSY;
+ }
+
+ dev = kzalloc(sizeof(struct dw_i2c_dev), GFP_KERNEL);
+ if (!dev) {
+ r = -ENOMEM;
+ goto err_release_region;
+ }
+
+ init_completion(&dev->cmd_complete);
+ tasklet_init(&dev->pump_msg, dw_i2c_pump_msg, (unsigned long) dev);
+ mutex_init(&dev->lock);
+ dev->dev = get_device(&pdev->dev);
+ dev->irq = irq->start;
+ platform_set_drvdata(pdev, dev);
+
+ dev->clk = clk_get(&pdev->dev, NULL);
+ if (IS_ERR(dev->clk)) {
+ r = -ENODEV;
+ goto err_free_mem;
+ }
+ clk_enable(dev->clk);
+
+ dev->base = ioremap(mem->start, resource_size(mem));
+ if (dev->base == NULL) {
+ dev_err(&pdev->dev, "failure mapping io resources\n");
+ r = -EBUSY;
+ goto err_unuse_clocks;
+ }
+ {
+ u32 param1 = readl(dev->base + DW_IC_COMP_PARAM_1);
+
+ dev->tx_fifo_depth = ((param1 >> 16) & 0xff) + 1;
+ dev->rx_fifo_depth = ((param1 >> 8) & 0xff) + 1;
+ }
+ i2c_dw_init(dev);
+
+ writew(0, dev->base + DW_IC_INTR_MASK); /* disable IRQ */
+ r = request_irq(dev->irq, i2c_dw_isr, 0, pdev->name, dev);
+ if (r) {
+ dev_err(&pdev->dev, "failure requesting irq %i\n", dev->irq);
+ goto err_iounmap;
+ }
+
+ adap = &dev->adapter;
+ i2c_set_adapdata(adap, dev);
+ adap->owner = THIS_MODULE;
+ adap->class = I2C_CLASS_HWMON;
+ strlcpy(adap->name, "Synopsys DesignWare I2C adapter",
+ sizeof(adap->name));
+ adap->algo = &i2c_dw_algo;
+ adap->dev.parent = &pdev->dev;
+
+ adap->nr = pdev->id;
+ r = i2c_add_numbered_adapter(adap);
+ if (r) {
+ dev_err(&pdev->dev, "failure adding adapter\n");
+ goto err_free_irq;
+ }
+
+ return 0;
+
+err_free_irq:
+ free_irq(dev->irq, dev);
+err_iounmap:
+ iounmap(dev->base);
+err_unuse_clocks:
+ clk_disable(dev->clk);
+ clk_put(dev->clk);
+ dev->clk = NULL;
+err_free_mem:
+ platform_set_drvdata(pdev, NULL);
+ put_device(&pdev->dev);
+ kfree(dev);
+err_release_region:
+ release_mem_region(mem->start, resource_size(mem));
+
+ return r;
+}
+
+static int __devexit dw_i2c_remove(struct platform_device *pdev)
+{
+ struct dw_i2c_dev *dev = platform_get_drvdata(pdev);
+ struct resource *mem;
+
+ platform_set_drvdata(pdev, NULL);
+ i2c_del_adapter(&dev->adapter);
+ put_device(&pdev->dev);
+
+ clk_disable(dev->clk);
+ clk_put(dev->clk);
+ dev->clk = NULL;
+
+ writeb(0, dev->base + DW_IC_ENABLE);
+ free_irq(dev->irq, dev);
+ kfree(dev);
+
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ release_mem_region(mem->start, resource_size(mem));
+ return 0;
+}
+
+/* work with hotplug and coldplug */
+MODULE_ALIAS("platform:i2c_designware");
+
+static struct platform_driver dw_i2c_driver = {
+ .remove = __devexit_p(dw_i2c_remove),
+ .driver = {
+ .name = "i2c_designware",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init dw_i2c_init_driver(void)
+{
+ return platform_driver_probe(&dw_i2c_driver, dw_i2c_probe);
+}
+module_init(dw_i2c_init_driver);
+
+static void __exit dw_i2c_exit_driver(void)
+{
+ platform_driver_unregister(&dw_i2c_driver);
+}
+module_exit(dw_i2c_exit_driver);
+
+MODULE_AUTHOR("Baruch Siach <baruch@tkos.co.il>");
+MODULE_DESCRIPTION("Synopsys DesignWare I2C bus adapter");
+MODULE_LICENSE("GPL");
diff --git a/drivers/ide/cs5520.c b/drivers/ide/cs5520.c
index bd066bb9d61..09f98ed0731 100644
--- a/drivers/ide/cs5520.c
+++ b/drivers/ide/cs5520.c
@@ -135,6 +135,7 @@ static int __devinit cs5520_init_one(struct pci_dev *dev, const struct pci_devic
ide_pci_setup_ports(dev, d, &hw[0], &hws[0]);
hw[0].irq = 14;
+ hw[1].irq = 15;
return ide_host_add(d, hws, 2, NULL);
}
diff --git a/drivers/ide/ide-acpi.c b/drivers/ide/ide-acpi.c
index 77f79d26b26..c509c991646 100644
--- a/drivers/ide/ide-acpi.c
+++ b/drivers/ide/ide-acpi.c
@@ -92,6 +92,11 @@ int ide_acpi_init(void)
return 0;
}
+bool ide_port_acpi(ide_hwif_t *hwif)
+{
+ return ide_noacpi == 0 && hwif->acpidata;
+}
+
/**
* ide_get_dev_handle - finds acpi_handle and PCI device.function
* @dev: device to locate
@@ -352,9 +357,6 @@ int ide_acpi_exec_tfs(ide_drive_t *drive)
unsigned long gtf_address;
unsigned long obj_loc;
- if (ide_noacpi)
- return 0;
-
DEBPRINT("call get_GTF, drive=%s port=%d\n", drive->name, drive->dn);
ret = do_drive_get_GTF(drive, &gtf_length, &gtf_address, &obj_loc);
@@ -389,16 +391,6 @@ void ide_acpi_get_timing(ide_hwif_t *hwif)
struct acpi_buffer output;
union acpi_object *out_obj;
- if (ide_noacpi)
- return;
-
- DEBPRINT("ENTER:\n");
-
- if (!hwif->acpidata) {
- DEBPRINT("no ACPI data for %s\n", hwif->name);
- return;
- }
-
/* Setting up output buffer for _GTM */
output.length = ACPI_ALLOCATE_BUFFER;
output.pointer = NULL; /* ACPI-CA sets this; save/free it later */
@@ -479,16 +471,6 @@ void ide_acpi_push_timing(ide_hwif_t *hwif)
struct ide_acpi_drive_link *master = &hwif->acpidata->master;
struct ide_acpi_drive_link *slave = &hwif->acpidata->slave;
- if (ide_noacpi)
- return;
-
- DEBPRINT("ENTER:\n");
-
- if (!hwif->acpidata) {
- DEBPRINT("no ACPI data for %s\n", hwif->name);
- return;
- }
-
/* Give the GTM buffer + drive Identify data to the channel via the
* _STM method: */
/* setup input parameters buffer for _STM */
@@ -527,16 +509,11 @@ void ide_acpi_set_state(ide_hwif_t *hwif, int on)
ide_drive_t *drive;
int i;
- if (ide_noacpi || ide_noacpi_psx)
+ if (ide_noacpi_psx)
return;
DEBPRINT("ENTER:\n");
- if (!hwif->acpidata) {
- DEBPRINT("no ACPI data for %s\n", hwif->name);
- return;
- }
-
 /* channel first and then drives for power on and vice versa for power off */
if (on)
acpi_bus_set_power(hwif->acpidata->obj_handle, ACPI_STATE_D0);
@@ -616,7 +593,7 @@ void ide_acpi_port_init_devices(ide_hwif_t *hwif)
drive->name, err);
}
- if (!ide_acpionboot) {
+ if (ide_noacpi || ide_acpionboot == 0) {
DEBPRINT("ACPI methods disabled on boot\n");
return;
}
diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
index 4a19686fcfe..6a9a769bffc 100644
--- a/drivers/ide/ide-cd.c
+++ b/drivers/ide/ide-cd.c
@@ -592,9 +592,19 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive)
}
} else if (!blk_pc_request(rq)) {
ide_cd_request_sense_fixup(drive, cmd);
- /* complain if we still have data left to transfer */
+
uptodate = cmd->nleft ? 0 : 1;
- if (uptodate == 0)
+
+ /*
+ * suck out the remaining bytes from the drive in an
+ * attempt to complete the data xfer. (see BZ#13399)
+ */
+ if (!(stat & ATA_ERR) && !uptodate && thislen) {
+ ide_pio_bytes(drive, cmd, write, thislen);
+ uptodate = cmd->nleft ? 0 : 1;
+ }
+
+ if (!uptodate)
rq->cmd_flags |= REQ_FAILED;
}
goto out_end;
@@ -876,9 +886,12 @@ static int cdrom_read_capacity(ide_drive_t *drive, unsigned long *capacity,
return stat;
/*
- * Sanity check the given block size
+ * Sanity check the given block size, in so far as making
+ * sure the sectors_per_frame we give to the caller won't
+ * end up being bogus.
*/
blocklen = be32_to_cpu(capbuf.blocklen);
+ blocklen = (blocklen >> SECTOR_BITS) << SECTOR_BITS;
switch (blocklen) {
case 512:
case 1024:
@@ -886,10 +899,9 @@ static int cdrom_read_capacity(ide_drive_t *drive, unsigned long *capacity,
case 4096:
break;
default:
- printk(KERN_ERR PFX "%s: weird block size %u\n",
+ printk_once(KERN_ERR PFX "%s: weird block size %u; "
+ "setting default block size to 2048\n",
drive->name, blocklen);
- printk(KERN_ERR PFX "%s: default to 2kb block size\n",
- drive->name);
blocklen = 2048;
break;
}
diff --git a/drivers/ide/ide-devsets.c b/drivers/ide/ide-devsets.c
index 5bf958e5b1d..1099bf7cf96 100644
--- a/drivers/ide/ide-devsets.c
+++ b/drivers/ide/ide-devsets.c
@@ -183,6 +183,6 @@ ide_startstop_t ide_do_devset(ide_drive_t *drive, struct request *rq)
err = setfunc(drive, *(int *)&rq->cmd[1]);
if (err)
rq->errors = err;
- ide_complete_rq(drive, err, ide_rq_bytes(rq));
+ ide_complete_rq(drive, err, blk_rq_bytes(rq));
return ide_stopped;
}
diff --git a/drivers/ide/ide-dma.c b/drivers/ide/ide-dma.c
index 219e6fb78dc..ee58c88dee5 100644
--- a/drivers/ide/ide-dma.c
+++ b/drivers/ide/ide-dma.c
@@ -361,9 +361,6 @@ static int ide_tune_dma(ide_drive_t *drive)
if (__ide_dma_bad_drive(drive))
return 0;
- if (ide_id_dma_bug(drive))
- return 0;
-
if (hwif->host_flags & IDE_HFLAG_TRUST_BIOS_FOR_DMA)
return config_drive_for_dma(drive);
@@ -394,24 +391,6 @@ static int ide_dma_check(ide_drive_t *drive)
return -1;
}
-int ide_id_dma_bug(ide_drive_t *drive)
-{
- u16 *id = drive->id;
-
- if (id[ATA_ID_FIELD_VALID] & 4) {
- if ((id[ATA_ID_UDMA_MODES] >> 8) &&
- (id[ATA_ID_MWDMA_MODES] >> 8))
- goto err_out;
- } else if ((id[ATA_ID_MWDMA_MODES] >> 8) &&
- (id[ATA_ID_SWDMA_MODES] >> 8))
- goto err_out;
-
- return 0;
-err_out:
- printk(KERN_ERR "%s: bad DMA info in identify block\n", drive->name);
- return 1;
-}
-
int ide_set_dma(ide_drive_t *drive)
{
int rc;
diff --git a/drivers/ide/ide-eh.c b/drivers/ide/ide-eh.c
index 2b914197961..e9abf2c3c33 100644
--- a/drivers/ide/ide-eh.c
+++ b/drivers/ide/ide-eh.c
@@ -149,7 +149,7 @@ static inline void ide_complete_drive_reset(ide_drive_t *drive, int err)
if (rq && blk_special_request(rq) && rq->cmd[0] == REQ_DRIVE_RESET) {
if (err <= 0 && rq->errors == 0)
rq->errors = -EIO;
- ide_complete_rq(drive, err ? err : 0, ide_rq_bytes(rq));
+ ide_complete_rq(drive, err ? err : 0, blk_rq_bytes(rq));
}
}
diff --git a/drivers/ide/ide-floppy.c b/drivers/ide/ide-floppy.c
index 8b3f204f7d7..fefbdfc8db0 100644
--- a/drivers/ide/ide-floppy.c
+++ b/drivers/ide/ide-floppy.c
@@ -293,7 +293,7 @@ out_end:
drive->failed_pc = NULL;
if (blk_fs_request(rq) == 0 && rq->errors == 0)
rq->errors = -EIO;
- ide_complete_rq(drive, -EIO, ide_rq_bytes(rq));
+ ide_complete_rq(drive, -EIO, blk_rq_bytes(rq));
return ide_stopped;
}
diff --git a/drivers/ide/ide-io.c b/drivers/ide/ide-io.c
index 1059f809b80..d5f3c77bead 100644
--- a/drivers/ide/ide-io.c
+++ b/drivers/ide/ide-io.c
@@ -112,16 +112,6 @@ void ide_complete_cmd(ide_drive_t *drive, struct ide_cmd *cmd, u8 stat, u8 err)
}
}
-/* obsolete, blk_rq_bytes() should be used instead */
-unsigned int ide_rq_bytes(struct request *rq)
-{
- if (blk_pc_request(rq))
- return blk_rq_bytes(rq);
- else
- return blk_rq_cur_sectors(rq) << 9;
-}
-EXPORT_SYMBOL_GPL(ide_rq_bytes);
-
int ide_complete_rq(ide_drive_t *drive, int error, unsigned int nr_bytes)
{
ide_hwif_t *hwif = drive->hwif;
@@ -152,14 +142,14 @@ void ide_kill_rq(ide_drive_t *drive, struct request *rq)
if ((media == ide_floppy || media == ide_tape) && drv_req) {
rq->errors = 0;
- ide_complete_rq(drive, 0, blk_rq_bytes(rq));
} else {
if (media == ide_tape)
rq->errors = IDE_DRV_ERROR_GENERAL;
else if (blk_fs_request(rq) == 0 && rq->errors == 0)
rq->errors = -EIO;
- ide_complete_rq(drive, -EIO, ide_rq_bytes(rq));
}
+
+ ide_complete_rq(drive, -EIO, blk_rq_bytes(rq));
}
static void ide_tf_set_specify_cmd(ide_drive_t *drive, struct ide_taskfile *tf)
@@ -476,10 +466,14 @@ void do_ide_request(struct request_queue *q)
if (!ide_lock_port(hwif)) {
ide_hwif_t *prev_port;
-
- WARN_ON_ONCE(hwif->rq);
repeat:
prev_port = hwif->host->cur_port;
+
+ if (drive->dev_flags & IDE_DFLAG_BLOCKED)
+ rq = hwif->rq;
+ else
+ WARN_ON_ONCE(hwif->rq);
+
if (drive->dev_flags & IDE_DFLAG_SLEEPING &&
time_after(drive->sleep, jiffies)) {
ide_unlock_port(hwif);
@@ -506,43 +500,29 @@ repeat:
hwif->cur_dev = drive;
drive->dev_flags &= ~(IDE_DFLAG_SLEEPING | IDE_DFLAG_PARKED);
- spin_unlock_irq(&hwif->lock);
- spin_lock_irq(q->queue_lock);
- /*
- * we know that the queue isn't empty, but this can happen
- * if the q->prep_rq_fn() decides to kill a request
- */
- if (!rq)
+ if (rq == NULL) {
+ spin_unlock_irq(&hwif->lock);
+ spin_lock_irq(q->queue_lock);
+ /*
+ * we know that the queue isn't empty, but this can
+ * happen if ->prep_rq_fn() decides to kill a request
+ */
rq = blk_fetch_request(drive->queue);
+ spin_unlock_irq(q->queue_lock);
+ spin_lock_irq(&hwif->lock);
- spin_unlock_irq(q->queue_lock);
- spin_lock_irq(&hwif->lock);
-
- if (!rq) {
- ide_unlock_port(hwif);
- goto out;
+ if (rq == NULL) {
+ ide_unlock_port(hwif);
+ goto out;
+ }
}
/*
* Sanity: don't accept a request that isn't a PM request
- * if we are currently power managed. This is very important as
- * blk_stop_queue() doesn't prevent the blk_fetch_request()
- * above to return us whatever is in the queue. Since we call
- * ide_do_request() ourselves, we end up taking requests while
- * the queue is blocked...
- *
- * We let requests forced at head of queue with ide-preempt
- * though. I hope that doesn't happen too much, hopefully not
- * unless the subdriver triggers such a thing in its own PM
- * state machine.
+ * if we are currently power managed.
*/
- if ((drive->dev_flags & IDE_DFLAG_BLOCKED) &&
- blk_pm_request(rq) == 0 &&
- (rq->cmd_flags & REQ_PREEMPT) == 0) {
- /* there should be no pending command at this point */
- ide_unlock_port(hwif);
- goto plug_device;
- }
+ BUG_ON((drive->dev_flags & IDE_DFLAG_BLOCKED) &&
+ blk_pm_request(rq) == 0);
hwif->rq = rq;
diff --git a/drivers/ide/ide-ioctls.c b/drivers/ide/ide-ioctls.c
index 82f252c3ee6..e246d3d3fbc 100644
--- a/drivers/ide/ide-ioctls.c
+++ b/drivers/ide/ide-ioctls.c
@@ -64,7 +64,8 @@ static int ide_get_identity_ioctl(ide_drive_t *drive, unsigned int cmd,
goto out;
}
- id = kmalloc(size, GFP_KERNEL);
+ /* ata_id_to_hd_driveid() relies on 'id' to be fully allocated. */
+ id = kmalloc(ATA_ID_WORDS * 2, GFP_KERNEL);
if (id == NULL) {
rc = -ENOMEM;
goto out;
diff --git a/drivers/ide/ide-iops.c b/drivers/ide/ide-iops.c
index fa047150a1c..2892b242bbe 100644
--- a/drivers/ide/ide-iops.c
+++ b/drivers/ide/ide-iops.c
@@ -210,6 +210,7 @@ EXPORT_SYMBOL_GPL(ide_in_drive_list);
*/
static const struct drive_list_entry ivb_list[] = {
{ "QUANTUM FIREBALLlct10 05" , "A03.0900" },
+ { "QUANTUM FIREBALLlct20 30" , "APL.0900" },
{ "TSSTcorp CDDVDW SH-S202J" , "SB00" },
{ "TSSTcorp CDDVDW SH-S202J" , "SB01" },
{ "TSSTcorp CDDVDW SH-S202N" , "SB00" },
@@ -329,9 +330,6 @@ int ide_driveid_update(ide_drive_t *drive)
kfree(id);
- if ((drive->dev_flags & IDE_DFLAG_USING_DMA) && ide_id_dma_bug(drive))
- ide_dma_off(drive);
-
return 1;
out_err:
if (rc == 2)
diff --git a/drivers/ide/ide-pm.c b/drivers/ide/ide-pm.c
index c14ca144cff..ad7be2669dc 100644
--- a/drivers/ide/ide-pm.c
+++ b/drivers/ide/ide-pm.c
@@ -10,9 +10,11 @@ int generic_ide_suspend(struct device *dev, pm_message_t mesg)
struct request_pm_state rqpm;
int ret;
- /* call ACPI _GTM only once */
- if ((drive->dn & 1) == 0 || pair == NULL)
- ide_acpi_get_timing(hwif);
+ if (ide_port_acpi(hwif)) {
+ /* call ACPI _GTM only once */
+ if ((drive->dn & 1) == 0 || pair == NULL)
+ ide_acpi_get_timing(hwif);
+ }
memset(&rqpm, 0, sizeof(rqpm));
rq = blk_get_request(drive->queue, READ, __GFP_WAIT);
@@ -26,9 +28,11 @@ int generic_ide_suspend(struct device *dev, pm_message_t mesg)
ret = blk_execute_rq(drive->queue, NULL, rq, 0);
blk_put_request(rq);
- /* call ACPI _PS3 only after both devices are suspended */
- if (ret == 0 && ((drive->dn & 1) || pair == NULL))
- ide_acpi_set_state(hwif, 0);
+ if (ret == 0 && ide_port_acpi(hwif)) {
+ /* call ACPI _PS3 only after both devices are suspended */
+ if ((drive->dn & 1) || pair == NULL)
+ ide_acpi_set_state(hwif, 0);
+ }
return ret;
}
@@ -42,13 +46,15 @@ int generic_ide_resume(struct device *dev)
struct request_pm_state rqpm;
int err;
- /* call ACPI _PS0 / _STM only once */
- if ((drive->dn & 1) == 0 || pair == NULL) {
- ide_acpi_set_state(hwif, 1);
- ide_acpi_push_timing(hwif);
- }
+ if (ide_port_acpi(hwif)) {
+ /* call ACPI _PS0 / _STM only once */
+ if ((drive->dn & 1) == 0 || pair == NULL) {
+ ide_acpi_set_state(hwif, 1);
+ ide_acpi_push_timing(hwif);
+ }
- ide_acpi_exec_tfs(drive);
+ ide_acpi_exec_tfs(drive);
+ }
memset(&rqpm, 0, sizeof(rqpm));
rq = blk_get_request(drive->queue, READ, __GFP_WAIT);
diff --git a/drivers/ide/ide-probe.c b/drivers/ide/ide-probe.c
index 51af4eea0d3..1bb106f6221 100644
--- a/drivers/ide/ide-probe.c
+++ b/drivers/ide/ide-probe.c
@@ -818,6 +818,24 @@ static int ide_port_setup_devices(ide_hwif_t *hwif)
return j;
}
+static void ide_host_enable_irqs(struct ide_host *host)
+{
+ ide_hwif_t *hwif;
+ int i;
+
+ ide_host_for_each_port(i, hwif, host) {
+ if (hwif == NULL)
+ continue;
+
+ /* clear any pending IRQs */
+ hwif->tp_ops->read_status(hwif);
+
+ /* unmask IRQs */
+ if (hwif->io_ports.ctl_addr)
+ hwif->tp_ops->write_devctl(hwif, ATA_DEVCTL_OBS);
+ }
+}
+
/*
* This routine sets up the IRQ for an IDE interface.
*/
@@ -831,9 +849,6 @@ static int init_irq (ide_hwif_t *hwif)
if (irq_handler == NULL)
irq_handler = ide_intr;
- if (io_ports->ctl_addr)
- hwif->tp_ops->write_devctl(hwif, ATA_DEVCTL_OBS);
-
if (request_irq(hwif->irq, irq_handler, sa, hwif->name, hwif))
goto out_up;
@@ -1404,6 +1419,8 @@ int ide_host_register(struct ide_host *host, const struct ide_port_info *d,
ide_port_tune_devices(hwif);
}
+ ide_host_enable_irqs(host);
+
ide_host_for_each_port(i, hwif, host) {
if (hwif == NULL)
continue;
diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig
index 9b60b6b684d..7c8e7122aaa 100644
--- a/drivers/leds/Kconfig
+++ b/drivers/leds/Kconfig
@@ -75,6 +75,7 @@ config LEDS_ALIX2
depends on LEDS_CLASS && X86 && EXPERIMENTAL
help
This option enables support for the PCEngines ALIX.2 and ALIX.3 LEDs.
+ You have to set leds-alix2.force=1 for boards with Award BIOS.
config LEDS_H1940
tristate "LED Support for iPAQ H1940 device"
@@ -145,15 +146,16 @@ config LEDS_GPIO_OF
of_platform devices. For instance, LEDs which are listed in a "dts"
file.
-config LEDS_LP5521
- tristate "LED Support for the LP5521 LEDs"
+config LEDS_LP3944
+ tristate "LED Support for N.S. LP3944 (Fun Light) I2C chip"
depends on LEDS_CLASS && I2C
help
- If you say 'Y' here you get support for the National Semiconductor
- LP5521 LED driver used in n8x0 boards.
+ This option enables support for LEDs connected to the National
+ Semiconductor LP3944 Lighting Management Unit (LMU) also known as
+ Fun Light Chip.
- This driver can be built as a module by choosing 'M'. The module
- will be called leds-lp5521.
+ To compile this driver as a module, choose M here: the
+ module will be called leds-lp3944.
config LEDS_CLEVO_MAIL
tristate "Mail LED on Clevo notebook"
diff --git a/drivers/leds/Makefile b/drivers/leds/Makefile
index 2d41c4dcf92..e8cdcf77a4c 100644
--- a/drivers/leds/Makefile
+++ b/drivers/leds/Makefile
@@ -20,6 +20,7 @@ obj-$(CONFIG_LEDS_COBALT_RAQ) += leds-cobalt-raq.o
obj-$(CONFIG_LEDS_SUNFIRE) += leds-sunfire.o
obj-$(CONFIG_LEDS_PCA9532) += leds-pca9532.o
obj-$(CONFIG_LEDS_GPIO) += leds-gpio.o
+obj-$(CONFIG_LEDS_LP3944) += leds-lp3944.o
obj-$(CONFIG_LEDS_CLEVO_MAIL) += leds-clevo-mail.o
obj-$(CONFIG_LEDS_HP6XX) += leds-hp6xx.o
obj-$(CONFIG_LEDS_FSG) += leds-fsg.o
diff --git a/drivers/leds/leds-alix2.c b/drivers/leds/leds-alix2.c
index ddbd7730dfc..731d4eef342 100644
--- a/drivers/leds/leds-alix2.c
+++ b/drivers/leds/leds-alix2.c
@@ -14,7 +14,7 @@
static int force = 0;
module_param(force, bool, 0444);
-MODULE_PARM_DESC(force, "Assume system has ALIX.2 style LEDs");
+MODULE_PARM_DESC(force, "Assume system has ALIX.2/ALIX.3 style LEDs");
struct alix_led {
struct led_classdev cdev;
@@ -155,6 +155,11 @@ static int __init alix_led_init(void)
goto out;
}
+ /* enable output on GPIO for LED 1,2,3 */
+ outl(1 << 6, 0x6104);
+ outl(1 << 9, 0x6184);
+ outl(1 << 11, 0x6184);
+
pdev = platform_device_register_simple(KBUILD_MODNAME, -1, NULL, 0);
if (!IS_ERR(pdev)) {
ret = platform_driver_probe(&alix_led_driver, alix_led_probe);
diff --git a/drivers/leds/leds-bd2802.c b/drivers/leds/leds-bd2802.c
index 4149ecb3a9b..779d7f262c0 100644
--- a/drivers/leds/leds-bd2802.c
+++ b/drivers/leds/leds-bd2802.c
@@ -97,6 +97,10 @@ struct bd2802_led {
enum led_ids led_id;
enum led_colors color;
enum led_bits state;
+
+ /* General attributes of RGB LEDs */
+ int wave_pattern;
+ int rgb_current;
};
@@ -254,7 +258,7 @@ static void bd2802_set_on(struct bd2802_led *led, enum led_ids id,
bd2802_reset_cancel(led);
reg = bd2802_get_reg_addr(id, color, BD2802_REG_CURRENT1SETUP);
- bd2802_write_byte(led->client, reg, BD2802_CURRENT_032);
+ bd2802_write_byte(led->client, reg, led->rgb_current);
reg = bd2802_get_reg_addr(id, color, BD2802_REG_CURRENT2SETUP);
bd2802_write_byte(led->client, reg, BD2802_CURRENT_000);
reg = bd2802_get_reg_addr(id, color, BD2802_REG_WAVEPATTERN);
@@ -275,9 +279,9 @@ static void bd2802_set_blink(struct bd2802_led *led, enum led_ids id,
reg = bd2802_get_reg_addr(id, color, BD2802_REG_CURRENT1SETUP);
bd2802_write_byte(led->client, reg, BD2802_CURRENT_000);
reg = bd2802_get_reg_addr(id, color, BD2802_REG_CURRENT2SETUP);
- bd2802_write_byte(led->client, reg, BD2802_CURRENT_032);
+ bd2802_write_byte(led->client, reg, led->rgb_current);
reg = bd2802_get_reg_addr(id, color, BD2802_REG_WAVEPATTERN);
- bd2802_write_byte(led->client, reg, BD2802_PATTERN_HALF);
+ bd2802_write_byte(led->client, reg, led->wave_pattern);
bd2802_enable(led, id);
bd2802_update_state(led, id, color, BD2802_BLINK);
@@ -406,7 +410,7 @@ static void bd2802_enable_adv_conf(struct bd2802_led *led)
ret = device_create_file(&led->client->dev,
bd2802_addr_attributes[i]);
if (ret) {
- dev_err(&led->client->dev, "failed to sysfs file %s\n",
+ dev_err(&led->client->dev, "failed: sysfs file %s\n",
bd2802_addr_attributes[i]->attr.name);
goto failed_remove_files;
}
@@ -483,6 +487,52 @@ static struct device_attribute bd2802_adv_conf_attr = {
.store = bd2802_store_adv_conf,
};
+#define BD2802_CONTROL_ATTR(attr_name, name_str) \
+static ssize_t bd2802_show_##attr_name(struct device *dev, \
+ struct device_attribute *attr, char *buf) \
+{ \
+ struct bd2802_led *led = i2c_get_clientdata(to_i2c_client(dev));\
+ ssize_t ret; \
+ down_read(&led->rwsem); \
+ ret = sprintf(buf, "0x%02x\n", led->attr_name); \
+ up_read(&led->rwsem); \
+ return ret; \
+} \
+static ssize_t bd2802_store_##attr_name(struct device *dev, \
+ struct device_attribute *attr, const char *buf, size_t count) \
+{ \
+ struct bd2802_led *led = i2c_get_clientdata(to_i2c_client(dev));\
+ unsigned long val; \
+ int ret; \
+ if (!count) \
+ return -EINVAL; \
+ ret = strict_strtoul(buf, 16, &val); \
+ if (ret) \
+ return ret; \
+ down_write(&led->rwsem); \
+ led->attr_name = val; \
+ up_write(&led->rwsem); \
+ return count; \
+} \
+static struct device_attribute bd2802_##attr_name##_attr = { \
+ .attr = { \
+ .name = name_str, \
+ .mode = 0644, \
+ .owner = THIS_MODULE \
+ }, \
+ .show = bd2802_show_##attr_name, \
+ .store = bd2802_store_##attr_name, \
+};
+
+BD2802_CONTROL_ATTR(wave_pattern, "wave_pattern");
+BD2802_CONTROL_ATTR(rgb_current, "rgb_current");
+
+static struct device_attribute *bd2802_attributes[] = {
+ &bd2802_adv_conf_attr,
+ &bd2802_wave_pattern_attr,
+ &bd2802_rgb_current_attr,
+};
+
static void bd2802_led_work(struct work_struct *work)
{
struct bd2802_led *led = container_of(work, struct bd2802_led, work);
@@ -538,7 +588,6 @@ static int bd2802_register_led_classdev(struct bd2802_led *led)
led->cdev_led1r.brightness = LED_OFF;
led->cdev_led1r.brightness_set = bd2802_set_led1r_brightness;
led->cdev_led1r.blink_set = bd2802_set_led1r_blink;
- led->cdev_led1r.flags |= LED_CORE_SUSPENDRESUME;
ret = led_classdev_register(&led->client->dev, &led->cdev_led1r);
if (ret < 0) {
@@ -551,7 +600,6 @@ static int bd2802_register_led_classdev(struct bd2802_led *led)
led->cdev_led1g.brightness = LED_OFF;
led->cdev_led1g.brightness_set = bd2802_set_led1g_brightness;
led->cdev_led1g.blink_set = bd2802_set_led1g_blink;
- led->cdev_led1g.flags |= LED_CORE_SUSPENDRESUME;
ret = led_classdev_register(&led->client->dev, &led->cdev_led1g);
if (ret < 0) {
@@ -564,7 +612,6 @@ static int bd2802_register_led_classdev(struct bd2802_led *led)
led->cdev_led1b.brightness = LED_OFF;
led->cdev_led1b.brightness_set = bd2802_set_led1b_brightness;
led->cdev_led1b.blink_set = bd2802_set_led1b_blink;
- led->cdev_led1b.flags |= LED_CORE_SUSPENDRESUME;
ret = led_classdev_register(&led->client->dev, &led->cdev_led1b);
if (ret < 0) {
@@ -577,7 +624,6 @@ static int bd2802_register_led_classdev(struct bd2802_led *led)
led->cdev_led2r.brightness = LED_OFF;
led->cdev_led2r.brightness_set = bd2802_set_led2r_brightness;
led->cdev_led2r.blink_set = bd2802_set_led2r_blink;
- led->cdev_led2r.flags |= LED_CORE_SUSPENDRESUME;
ret = led_classdev_register(&led->client->dev, &led->cdev_led2r);
if (ret < 0) {
@@ -590,7 +636,6 @@ static int bd2802_register_led_classdev(struct bd2802_led *led)
led->cdev_led2g.brightness = LED_OFF;
led->cdev_led2g.brightness_set = bd2802_set_led2g_brightness;
led->cdev_led2g.blink_set = bd2802_set_led2g_blink;
- led->cdev_led2g.flags |= LED_CORE_SUSPENDRESUME;
ret = led_classdev_register(&led->client->dev, &led->cdev_led2g);
if (ret < 0) {
@@ -640,7 +685,7 @@ static int __devinit bd2802_probe(struct i2c_client *client,
{
struct bd2802_led *led;
struct bd2802_led_platform_data *pdata;
- int ret;
+ int ret, i;
led = kzalloc(sizeof(struct bd2802_led), GFP_KERNEL);
if (!led) {
@@ -670,13 +715,20 @@ static int __devinit bd2802_probe(struct i2c_client *client,
/* To save the power, reset BD2802 after detecting */
gpio_set_value(led->pdata->reset_gpio, 0);
+ /* Default attributes */
+ led->wave_pattern = BD2802_PATTERN_HALF;
+ led->rgb_current = BD2802_CURRENT_032;
+
init_rwsem(&led->rwsem);
- ret = device_create_file(&client->dev, &bd2802_adv_conf_attr);
- if (ret) {
- dev_err(&client->dev, "failed to create sysfs file %s\n",
- bd2802_adv_conf_attr.attr.name);
- goto failed_free;
+ for (i = 0; i < ARRAY_SIZE(bd2802_attributes); i++) {
+ ret = device_create_file(&led->client->dev,
+ bd2802_attributes[i]);
+ if (ret) {
+ dev_err(&led->client->dev, "failed: sysfs file %s\n",
+ bd2802_attributes[i]->attr.name);
+ goto failed_unregister_dev_file;
+ }
}
ret = bd2802_register_led_classdev(led);
@@ -686,7 +738,8 @@ static int __devinit bd2802_probe(struct i2c_client *client,
return 0;
failed_unregister_dev_file:
- device_remove_file(&client->dev, &bd2802_adv_conf_attr);
+ for (i--; i >= 0; i--)
+ device_remove_file(&led->client->dev, bd2802_attributes[i]);
failed_free:
i2c_set_clientdata(client, NULL);
kfree(led);
@@ -697,12 +750,14 @@ failed_free:
static int __exit bd2802_remove(struct i2c_client *client)
{
struct bd2802_led *led = i2c_get_clientdata(client);
+ int i;
- bd2802_unregister_led_classdev(led);
gpio_set_value(led->pdata->reset_gpio, 0);
+ bd2802_unregister_led_classdev(led);
if (led->adf_on)
bd2802_disable_adv_conf(led);
- device_remove_file(&client->dev, &bd2802_adv_conf_attr);
+ for (i = 0; i < ARRAY_SIZE(bd2802_attributes); i++)
+ device_remove_file(&led->client->dev, bd2802_attributes[i]);
i2c_set_clientdata(client, NULL);
kfree(led);
@@ -723,8 +778,7 @@ static int bd2802_resume(struct i2c_client *client)
struct bd2802_led *led = i2c_get_clientdata(client);
if (!bd2802_is_all_off(led) || led->adf_on) {
- gpio_set_value(led->pdata->reset_gpio, 1);
- udelay(100);
+ bd2802_reset_cancel(led);
bd2802_restore_state(led);
}
@@ -762,4 +816,4 @@ module_exit(bd2802_exit);
MODULE_AUTHOR("Kim Kyuwon <q1.kim@samsung.com>");
MODULE_DESCRIPTION("BD2802 LED driver");
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/leds/leds-gpio.c b/drivers/leds/leds-gpio.c
index d2109054de8..6b06638eb5b 100644
--- a/drivers/leds/leds-gpio.c
+++ b/drivers/leds/leds-gpio.c
@@ -76,7 +76,7 @@ static int __devinit create_gpio_led(const struct gpio_led *template,
struct gpio_led_data *led_dat, struct device *parent,
int (*blink_set)(unsigned, unsigned long *, unsigned long *))
{
- int ret;
+ int ret, state;
/* skip leds that aren't available */
if (!gpio_is_valid(template->gpio)) {
@@ -99,11 +99,15 @@ static int __devinit create_gpio_led(const struct gpio_led *template,
led_dat->cdev.blink_set = gpio_blink_set;
}
led_dat->cdev.brightness_set = gpio_led_set;
- led_dat->cdev.brightness = LED_OFF;
+ if (template->default_state == LEDS_GPIO_DEFSTATE_KEEP)
+ state = !!gpio_get_value(led_dat->gpio) ^ led_dat->active_low;
+ else
+ state = (template->default_state == LEDS_GPIO_DEFSTATE_ON);
+ led_dat->cdev.brightness = state ? LED_FULL : LED_OFF;
if (!template->retain_state_suspended)
led_dat->cdev.flags |= LED_CORE_SUSPENDRESUME;
- ret = gpio_direction_output(led_dat->gpio, led_dat->active_low);
+ ret = gpio_direction_output(led_dat->gpio, led_dat->active_low ^ state);
if (ret < 0)
goto err;
@@ -129,7 +133,7 @@ static void delete_gpio_led(struct gpio_led_data *led)
}
#ifdef CONFIG_LEDS_GPIO_PLATFORM
-static int gpio_led_probe(struct platform_device *pdev)
+static int __devinit gpio_led_probe(struct platform_device *pdev)
{
struct gpio_led_platform_data *pdata = pdev->dev.platform_data;
struct gpio_led_data *leds_data;
@@ -223,12 +227,22 @@ static int __devinit of_gpio_leds_probe(struct of_device *ofdev,
memset(&led, 0, sizeof(led));
for_each_child_of_node(np, child) {
enum of_gpio_flags flags;
+ const char *state;
led.gpio = of_get_gpio_flags(child, 0, &flags);
led.active_low = flags & OF_GPIO_ACTIVE_LOW;
led.name = of_get_property(child, "label", NULL) ? : child->name;
led.default_trigger =
of_get_property(child, "linux,default-trigger", NULL);
+ state = of_get_property(child, "default-state", NULL);
+ if (state) {
+ if (!strcmp(state, "keep"))
+ led.default_state = LEDS_GPIO_DEFSTATE_KEEP;
+ else if (!strcmp(state, "on"))
+ led.default_state = LEDS_GPIO_DEFSTATE_ON;
+ else
+ led.default_state = LEDS_GPIO_DEFSTATE_OFF;
+ }
ret = create_gpio_led(&led, &pdata->led_data[pdata->num_leds++],
&ofdev->dev, NULL);
diff --git a/drivers/leds/leds-lp3944.c b/drivers/leds/leds-lp3944.c
new file mode 100644
index 00000000000..5946208ba26
--- /dev/null
+++ b/drivers/leds/leds-lp3944.c
@@ -0,0 +1,466 @@
+/*
+ * leds-lp3944.c - driver for National Semiconductor LP3944 Funlight Chip
+ *
+ * Copyright (C) 2009 Antonio Ospite <ospite@studenti.unina.it>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+/*
+ * I2C driver for National Semiconductor LP3944 Funlight Chip
+ * http://www.national.com/pf/LP/LP3944.html
+ *
+ * This helper chip can drive up to 8 leds, with two programmable DIM modes;
+ * it could even be used as a gpio expander but this driver assumes it is used
+ * as a led controller.
+ *
+ * The DIM modes are used to set _blink_ patterns for leds; the pattern is
+ * specified by supplying two parameters:
+ * - period: from 0s to 1.6s
+ * - duty cycle: percentage of the period the led is on, from 0 to 100
+ *
+ * The LP3944 can be found on the Motorola A910 smartphone, where it drives the
+ * rgb leds, the camera flash light and the display backlights.
+ */
+
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/leds.h>
+#include <linux/mutex.h>
+#include <linux/workqueue.h>
+#include <linux/leds-lp3944.h>
+
+/* Read Only Registers */
+#define LP3944_REG_INPUT1 0x00 /* LEDs 0-7 InputRegister (Read Only) */
+#define LP3944_REG_REGISTER1 0x01 /* None (Read Only) */
+
+#define LP3944_REG_PSC0 0x02 /* Frequency Prescaler 0 (R/W) */
+#define LP3944_REG_PWM0 0x03 /* PWM Register 0 (R/W) */
+#define LP3944_REG_PSC1 0x04 /* Frequency Prescaler 1 (R/W) */
+#define LP3944_REG_PWM1 0x05 /* PWM Register 1 (R/W) */
+#define LP3944_REG_LS0 0x06 /* LEDs 0-3 Selector (R/W) */
+#define LP3944_REG_LS1 0x07 /* LEDs 4-7 Selector (R/W) */
+
+/* These registers are not used to control leds in the LP3944; they can store
+ * arbitrary values which the chip will ignore.
+ */
+#define LP3944_REG_REGISTER8 0x08
+#define LP3944_REG_REGISTER9 0x09
+
+#define LP3944_DIM0 0
+#define LP3944_DIM1 1
+
+/* period in ms */
+#define LP3944_PERIOD_MIN 0
+#define LP3944_PERIOD_MAX 1600
+
+/* duty cycle is a percentage */
+#define LP3944_DUTY_CYCLE_MIN 0
+#define LP3944_DUTY_CYCLE_MAX 100
+
+#define ldev_to_led(c) container_of(c, struct lp3944_led_data, ldev)
+
+/* Saved data */
+struct lp3944_led_data {
+ u8 id;
+ enum lp3944_type type;
+ enum lp3944_status status;
+ struct led_classdev ldev;
+ struct i2c_client *client;
+ struct work_struct work;
+};
+
+struct lp3944_data {
+ struct mutex lock;
+ struct i2c_client *client;
+ struct lp3944_led_data leds[LP3944_LEDS_MAX];
+};
+
+static int lp3944_reg_read(struct i2c_client *client, u8 reg, u8 *value)
+{
+ int tmp;
+
+ tmp = i2c_smbus_read_byte_data(client, reg);
+ if (tmp < 0)
+ return -EINVAL;
+
+ *value = tmp;
+
+ return 0;
+}
+
+static int lp3944_reg_write(struct i2c_client *client, u8 reg, u8 value)
+{
+ return i2c_smbus_write_byte_data(client, reg, value);
+}
+
+/**
+ * Set the period for DIM status
+ *
+ * @client: the i2c client
+ * @dim: either LP3944_DIM0 or LP3944_DIM1
+ * @period: period of a blink, that is, an on/off cycle, expressed in ms.
+ */
+static int lp3944_dim_set_period(struct i2c_client *client, u8 dim, u16 period)
+{
+ u8 psc_reg;
+ u8 psc_value;
+ int err;
+
+ if (dim == LP3944_DIM0)
+ psc_reg = LP3944_REG_PSC0;
+ else if (dim == LP3944_DIM1)
+ psc_reg = LP3944_REG_PSC1;
+ else
+ return -EINVAL;
+
+ /* Convert period to Prescaler value */
+ if (period > LP3944_PERIOD_MAX)
+ return -EINVAL;
+
+ psc_value = (period * 255) / LP3944_PERIOD_MAX;
+
+ err = lp3944_reg_write(client, psc_reg, psc_value);
+
+ return err;
+}
+
+/**
+ * Set the duty cycle for DIM status
+ *
+ * @client: the i2c client
+ * @dim: either LP3944_DIM0 or LP3944_DIM1
+ * @duty_cycle: percentage of a period during which a led is ON
+ */
+static int lp3944_dim_set_dutycycle(struct i2c_client *client, u8 dim,
+ u8 duty_cycle)
+{
+ u8 pwm_reg;
+ u8 pwm_value;
+ int err;
+
+ if (dim == LP3944_DIM0)
+ pwm_reg = LP3944_REG_PWM0;
+ else if (dim == LP3944_DIM1)
+ pwm_reg = LP3944_REG_PWM1;
+ else
+ return -EINVAL;
+
+ /* Convert duty cycle to PWM value */
+ if (duty_cycle > LP3944_DUTY_CYCLE_MAX)
+ return -EINVAL;
+
+ pwm_value = (duty_cycle * 255) / LP3944_DUTY_CYCLE_MAX;
+
+ err = lp3944_reg_write(client, pwm_reg, pwm_value);
+
+ return err;
+}
+
+/**
+ * Set the led status
+ *
+ * @led: a lp3944_led_data structure
+ * @status: one of LP3944_LED_STATUS_OFF
+ * LP3944_LED_STATUS_ON
+ * LP3944_LED_STATUS_DIM0
+ * LP3944_LED_STATUS_DIM1
+ */
+static int lp3944_led_set(struct lp3944_led_data *led, u8 status)
+{
+ struct lp3944_data *data = i2c_get_clientdata(led->client);
+ u8 id = led->id;
+ u8 reg;
+ u8 val = 0;
+ int err;
+
+ dev_dbg(&led->client->dev, "%s: %s, status before normalization:%d\n",
+ __func__, led->ldev.name, status);
+
+ switch (id) {
+ case LP3944_LED0:
+ case LP3944_LED1:
+ case LP3944_LED2:
+ case LP3944_LED3:
+ reg = LP3944_REG_LS0;
+ break;
+ case LP3944_LED4:
+ case LP3944_LED5:
+ case LP3944_LED6:
+ case LP3944_LED7:
+ id -= LP3944_LED4;
+ reg = LP3944_REG_LS1;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (status > LP3944_LED_STATUS_DIM1)
+ return -EINVAL;
+
+ /* invert only 0 and 1 and leave the other values unchanged;
+ * remember we are abusing status to set blink patterns
+ */
+ if (led->type == LP3944_LED_TYPE_LED_INVERTED && status < 2)
+ status = 1 - status;
+
+ mutex_lock(&data->lock);
+ lp3944_reg_read(led->client, reg, &val);
+
+ val &= ~(LP3944_LED_STATUS_MASK << (id << 1));
+ val |= (status << (id << 1));
+
+ dev_dbg(&led->client->dev, "%s: %s, reg:%d id:%d status:%d val:%#x\n",
+ __func__, led->ldev.name, reg, id, status, val);
+
+ /* set led status */
+ err = lp3944_reg_write(led->client, reg, val);
+ mutex_unlock(&data->lock);
+
+ return err;
+}
+
+static int lp3944_led_set_blink(struct led_classdev *led_cdev,
+ unsigned long *delay_on,
+ unsigned long *delay_off)
+{
+ struct lp3944_led_data *led = ldev_to_led(led_cdev);
+ u16 period;
+ u8 duty_cycle;
+ int err;
+
+ /* units are in ms */
+ if (*delay_on + *delay_off > LP3944_PERIOD_MAX)
+ return -EINVAL;
+
+ if (*delay_on == 0 && *delay_off == 0) {
+ /* Special case: the leds subsystem requires a default user
+ * friendly blink pattern for the LED. Let's blink the led
+ * slowly (1Hz).
+ */
+ *delay_on = 500;
+ *delay_off = 500;
+ }
+
+ period = (*delay_on) + (*delay_off);
+
+ /* duty_cycle is the percentage of period during which the led is ON */
+ duty_cycle = 100 * (*delay_on) / period;
+
+ /* invert the duty cycle for inverted leds; this has the same effect as
+ * swapping delay_on and delay_off
+ */
+ if (led->type == LP3944_LED_TYPE_LED_INVERTED)
+ duty_cycle = 100 - duty_cycle;
+
+ /* NOTE: always using the first DIM mode; this means that all leds
+ * will have the same blinking pattern.
+ *
+ * We could find a way later to have two leds blinking in hardware
+ * with different patterns at the same time, falling back to software
+ * control for the other ones.
+ */
+ err = lp3944_dim_set_period(led->client, LP3944_DIM0, period);
+ if (err)
+ return err;
+
+ err = lp3944_dim_set_dutycycle(led->client, LP3944_DIM0, duty_cycle);
+ if (err)
+ return err;
+
+ dev_dbg(&led->client->dev, "%s: OK hardware accelerated blink!\n",
+ __func__);
+
+ led->status = LP3944_LED_STATUS_DIM0;
+ schedule_work(&led->work);
+
+ return 0;
+}
+
+static void lp3944_led_set_brightness(struct led_classdev *led_cdev,
+ enum led_brightness brightness)
+{
+ struct lp3944_led_data *led = ldev_to_led(led_cdev);
+
+ dev_dbg(&led->client->dev, "%s: %s, %d\n",
+ __func__, led_cdev->name, brightness);
+
+ led->status = brightness;
+ schedule_work(&led->work);
+}
+
+static void lp3944_led_work(struct work_struct *work)
+{
+ struct lp3944_led_data *led;
+
+ led = container_of(work, struct lp3944_led_data, work);
+ lp3944_led_set(led, led->status);
+}
+
+static int lp3944_configure(struct i2c_client *client,
+ struct lp3944_data *data,
+ struct lp3944_platform_data *pdata)
+{
+ int i, err = 0;
+
+ for (i = 0; i < pdata->leds_size; i++) {
+ struct lp3944_led *pled = &pdata->leds[i];
+ struct lp3944_led_data *led = &data->leds[i];
+ led->client = client;
+ led->id = i;
+
+ switch (pled->type) {
+
+ case LP3944_LED_TYPE_LED:
+ case LP3944_LED_TYPE_LED_INVERTED:
+ led->type = pled->type;
+ led->status = pled->status;
+ led->ldev.name = pled->name;
+ led->ldev.max_brightness = 1;
+ led->ldev.brightness_set = lp3944_led_set_brightness;
+ led->ldev.blink_set = lp3944_led_set_blink;
+ led->ldev.flags = LED_CORE_SUSPENDRESUME;
+
+ INIT_WORK(&led->work, lp3944_led_work);
+ err = led_classdev_register(&client->dev, &led->ldev);
+ if (err < 0) {
+ dev_err(&client->dev,
+ "couldn't register LED %s\n",
+ led->ldev.name);
+ goto exit;
+ }
+
+ /* to expose the default value to userspace */
+ led->ldev.brightness = led->status;
+
+ /* Set the default led status */
+ err = lp3944_led_set(led, led->status);
+ if (err < 0) {
+ dev_err(&client->dev,
+ "%s couldn't set STATUS %d\n",
+ led->ldev.name, led->status);
+ goto exit;
+ }
+ break;
+
+ case LP3944_LED_TYPE_NONE:
+ default:
+ break;
+
+ }
+ }
+ return 0;
+
+exit:
+ if (i > 0)
+ for (i = i - 1; i >= 0; i--)
+ switch (pdata->leds[i].type) {
+
+ case LP3944_LED_TYPE_LED:
+ case LP3944_LED_TYPE_LED_INVERTED:
+ led_classdev_unregister(&data->leds[i].ldev);
+ cancel_work_sync(&data->leds[i].work);
+ break;
+
+ case LP3944_LED_TYPE_NONE:
+ default:
+ break;
+ }
+
+ return err;
+}
+
+static int __devinit lp3944_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct lp3944_platform_data *lp3944_pdata = client->dev.platform_data;
+ struct lp3944_data *data;
+
+ if (lp3944_pdata == NULL) {
+ dev_err(&client->dev, "no platform data\n");
+ return -EINVAL;
+ }
+
+ /* Let's see whether this adapter can support what we need. */
+ if (!i2c_check_functionality(client->adapter,
+ I2C_FUNC_SMBUS_BYTE_DATA)) {
+ dev_err(&client->dev, "insufficient functionality!\n");
+ return -ENODEV;
+ }
+
+ data = kzalloc(sizeof(struct lp3944_data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->client = client;
+ i2c_set_clientdata(client, data);
+
+ mutex_init(&data->lock);
+
+ dev_info(&client->dev, "lp3944 enabled\n");
+
+ lp3944_configure(client, data, lp3944_pdata);
+ return 0;
+}
+
+static int __devexit lp3944_remove(struct i2c_client *client)
+{
+ struct lp3944_platform_data *pdata = client->dev.platform_data;
+ struct lp3944_data *data = i2c_get_clientdata(client);
+ int i;
+
+ for (i = 0; i < pdata->leds_size; i++)
+ switch (data->leds[i].type) {
+ case LP3944_LED_TYPE_LED:
+ case LP3944_LED_TYPE_LED_INVERTED:
+ led_classdev_unregister(&data->leds[i].ldev);
+ cancel_work_sync(&data->leds[i].work);
+ break;
+
+ case LP3944_LED_TYPE_NONE:
+ default:
+ break;
+ }
+
+ kfree(data);
+ i2c_set_clientdata(client, NULL);
+
+ return 0;
+}
+
+/* lp3944 i2c driver struct */
+static const struct i2c_device_id lp3944_id[] = {
+ {"lp3944", 0},
+ {}
+};
+
+MODULE_DEVICE_TABLE(i2c, lp3944_id);
+
+static struct i2c_driver lp3944_driver = {
+ .driver = {
+ .name = "lp3944",
+ },
+ .probe = lp3944_probe,
+ .remove = __devexit_p(lp3944_remove),
+ .id_table = lp3944_id,
+};
+
+static int __init lp3944_module_init(void)
+{
+ return i2c_add_driver(&lp3944_driver);
+}
+
+static void __exit lp3944_module_exit(void)
+{
+ i2c_del_driver(&lp3944_driver);
+}
+
+module_init(lp3944_module_init);
+module_exit(lp3944_module_exit);
+
+MODULE_AUTHOR("Antonio Ospite <ospite@studenti.unina.it>");
+MODULE_DESCRIPTION("LP3944 Fun Light Chip");
+MODULE_LICENSE("GPL");
diff --git a/drivers/leds/leds-pca9532.c b/drivers/leds/leds-pca9532.c
index 3937244fdca..dba8921240f 100644
--- a/drivers/leds/leds-pca9532.c
+++ b/drivers/leds/leds-pca9532.c
@@ -35,7 +35,7 @@ struct pca9532_data {
struct pca9532_led leds[16];
struct mutex update_lock;
struct input_dev *idev;
- struct work_struct work;
+ struct work_struct work;
u8 pwm[2];
u8 psc[2];
};
@@ -87,14 +87,14 @@ static int pca9532_calcpwm(struct i2c_client *client, int pwm, int blink,
if (b > 0xFF)
return -EINVAL;
data->pwm[pwm] = b;
- data->psc[pwm] = blink;
- return 0;
+ data->psc[pwm] = blink;
+ return 0;
}
static int pca9532_setpwm(struct i2c_client *client, int pwm)
{
- struct pca9532_data *data = i2c_get_clientdata(client);
- mutex_lock(&data->update_lock);
+ struct pca9532_data *data = i2c_get_clientdata(client);
+ mutex_lock(&data->update_lock);
i2c_smbus_write_byte_data(client, PCA9532_REG_PWM(pwm),
data->pwm[pwm]);
i2c_smbus_write_byte_data(client, PCA9532_REG_PSC(pwm),
@@ -132,11 +132,11 @@ static void pca9532_set_brightness(struct led_classdev *led_cdev,
led->state = PCA9532_ON;
else {
led->state = PCA9532_PWM0; /* Thecus: hardcode one pwm */
- err = pca9532_calcpwm(led->client, 0, 0, value);
+ err = pca9532_calcpwm(led->client, 0, 0, value);
if (err)
return; /* XXX: led api doesn't allow error code? */
}
- schedule_work(&led->work);
+ schedule_work(&led->work);
}
static int pca9532_set_blink(struct led_classdev *led_cdev,
@@ -145,7 +145,7 @@ static int pca9532_set_blink(struct led_classdev *led_cdev,
struct pca9532_led *led = ldev_to_led(led_cdev);
struct i2c_client *client = led->client;
int psc;
- int err = 0;
+ int err = 0;
if (*delay_on == 0 && *delay_off == 0) {
 /* led subsystem asks us for a blink rate */
@@ -157,11 +157,11 @@ static int pca9532_set_blink(struct led_classdev *led_cdev,
/* Thecus specific: only use PSC/PWM 0 */
psc = (*delay_on * 152-1)/1000;
- err = pca9532_calcpwm(client, 0, psc, led_cdev->brightness);
- if (err)
- return err;
- schedule_work(&led->work);
- return 0;
+ err = pca9532_calcpwm(client, 0, psc, led_cdev->brightness);
+ if (err)
+ return err;
+ schedule_work(&led->work);
+ return 0;
}
static int pca9532_event(struct input_dev *dev, unsigned int type,
@@ -178,15 +178,15 @@ static int pca9532_event(struct input_dev *dev, unsigned int type,
else
data->pwm[1] = 0;
- schedule_work(&data->work);
+ schedule_work(&data->work);
- return 0;
+ return 0;
}
static void pca9532_input_work(struct work_struct *work)
{
- struct pca9532_data *data;
- data = container_of(work, struct pca9532_data, work);
+ struct pca9532_data *data;
+ data = container_of(work, struct pca9532_data, work);
mutex_lock(&data->update_lock);
i2c_smbus_write_byte_data(data->client, PCA9532_REG_PWM(1),
data->pwm[1]);
@@ -195,11 +195,11 @@ static void pca9532_input_work(struct work_struct *work)
static void pca9532_led_work(struct work_struct *work)
{
- struct pca9532_led *led;
- led = container_of(work, struct pca9532_led, work);
- if (led->state == PCA9532_PWM0)
- pca9532_setpwm(led->client, 0);
- pca9532_setled(led);
+ struct pca9532_led *led;
+ led = container_of(work, struct pca9532_led, work);
+ if (led->state == PCA9532_PWM0)
+ pca9532_setpwm(led->client, 0);
+ pca9532_setled(led);
}
static int pca9532_configure(struct i2c_client *client,
@@ -232,7 +232,7 @@ static int pca9532_configure(struct i2c_client *client,
led->ldev.brightness = LED_OFF;
led->ldev.brightness_set = pca9532_set_brightness;
led->ldev.blink_set = pca9532_set_blink;
- INIT_WORK(&led->work, pca9532_led_work);
+ INIT_WORK(&led->work, pca9532_led_work);
err = led_classdev_register(&client->dev, &led->ldev);
if (err < 0) {
dev_err(&client->dev,
@@ -262,11 +262,11 @@ static int pca9532_configure(struct i2c_client *client,
BIT_MASK(SND_TONE);
data->idev->event = pca9532_event;
input_set_drvdata(data->idev, data);
- INIT_WORK(&data->work, pca9532_input_work);
+ INIT_WORK(&data->work, pca9532_input_work);
err = input_register_device(data->idev);
if (err) {
input_free_device(data->idev);
- cancel_work_sync(&data->work);
+ cancel_work_sync(&data->work);
data->idev = NULL;
goto exit;
}
@@ -283,13 +283,13 @@ exit:
break;
case PCA9532_TYPE_LED:
led_classdev_unregister(&data->leds[i].ldev);
- cancel_work_sync(&data->leds[i].work);
+ cancel_work_sync(&data->leds[i].work);
break;
case PCA9532_TYPE_N2100_BEEP:
if (data->idev != NULL) {
input_unregister_device(data->idev);
input_free_device(data->idev);
- cancel_work_sync(&data->work);
+ cancel_work_sync(&data->work);
data->idev = NULL;
}
break;
@@ -340,13 +340,13 @@ static int pca9532_remove(struct i2c_client *client)
break;
case PCA9532_TYPE_LED:
led_classdev_unregister(&data->leds[i].ldev);
- cancel_work_sync(&data->leds[i].work);
+ cancel_work_sync(&data->leds[i].work);
break;
case PCA9532_TYPE_N2100_BEEP:
if (data->idev != NULL) {
input_unregister_device(data->idev);
input_free_device(data->idev);
- cancel_work_sync(&data->work);
+ cancel_work_sync(&data->work);
data->idev = NULL;
}
break;
diff --git a/drivers/lguest/lg.h b/drivers/lguest/lg.h
index d4e8979735c..9c3138265f8 100644
--- a/drivers/lguest/lg.h
+++ b/drivers/lguest/lg.h
@@ -82,7 +82,7 @@ struct lg_cpu {
struct lg_eventfd {
unsigned long addr;
- struct file *event;
+ struct eventfd_ctx *event;
};
struct lg_eventfd_map {
diff --git a/drivers/lguest/lguest_user.c b/drivers/lguest/lguest_user.c
index 32e29712105..9f9a2953b38 100644
--- a/drivers/lguest/lguest_user.c
+++ b/drivers/lguest/lguest_user.c
@@ -50,7 +50,7 @@ static int add_eventfd(struct lguest *lg, unsigned long addr, int fd)
/* Now append new entry. */
new->map[new->num].addr = addr;
- new->map[new->num].event = eventfd_fget(fd);
+ new->map[new->num].event = eventfd_ctx_fdget(fd);
if (IS_ERR(new->map[new->num].event)) {
kfree(new);
return PTR_ERR(new->map[new->num].event);
@@ -357,7 +357,7 @@ static int close(struct inode *inode, struct file *file)
/* Release any eventfds they registered. */
for (i = 0; i < lg->eventfds->num; i++)
- fput(lg->eventfds->map[i].event);
+ eventfd_ctx_put(lg->eventfds->map[i].event);
kfree(lg->eventfds);
/* If lg->dead doesn't contain an error code it will be NULL or a
diff --git a/drivers/macintosh/macio_asic.c b/drivers/macintosh/macio_asic.c
index 6e149f4a1ff..a0f68386c12 100644
--- a/drivers/macintosh/macio_asic.c
+++ b/drivers/macintosh/macio_asic.c
@@ -378,6 +378,17 @@ static struct macio_dev * macio_add_one_device(struct macio_chip *chip,
dev->ofdev.dev.bus = &macio_bus_type;
dev->ofdev.dev.release = macio_release_dev;
+#ifdef CONFIG_PCI
+ /* Set the DMA ops to the ones from the PCI device; this could be
+ * fishy if we didn't know that on PowerMac it's always direct ops
+ * or iommu ops that will work fine
+ */
+ dev->ofdev.dev.archdata.dma_ops =
+ chip->lbus.pdev->dev.archdata.dma_ops;
+ dev->ofdev.dev.archdata.dma_data =
+ chip->lbus.pdev->dev.archdata.dma_data;
+#endif /* CONFIG_PCI */
+
#ifdef DEBUG
printk("preparing mdev @%p, ofdev @%p, dev @%p, kobj @%p\n",
dev, &dev->ofdev, &dev->ofdev.dev, &dev->ofdev.dev.kobj);
diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig
index 36e0675be9f..020f9573fd8 100644
--- a/drivers/md/Kconfig
+++ b/drivers/md/Kconfig
@@ -231,6 +231,17 @@ config DM_MIRROR
Allow volume managers to mirror logical volumes, also
needed for live data migration tools such as 'pvmove'.
+config DM_LOG_USERSPACE
+ tristate "Mirror userspace logging (EXPERIMENTAL)"
+ depends on DM_MIRROR && EXPERIMENTAL && NET
+ select CONNECTOR
+ ---help---
+ The userspace logging module provides a mechanism for
+ relaying the dm-dirty-log API to userspace. Log designs
+ which are more suited to userspace implementation (e.g.
+ shared storage logs) or experimental logs can be implemented
+ by leveraging this framework.
+
config DM_ZERO
tristate "Zero target"
depends on BLK_DEV_DM
@@ -249,6 +260,25 @@ config DM_MULTIPATH
---help---
Allow volume managers to support multipath hardware.
+config DM_MULTIPATH_QL
+ tristate "I/O Path Selector based on the number of in-flight I/Os"
+ depends on DM_MULTIPATH
+ ---help---
+ This path selector is a dynamic load balancer which selects
+ the path with the least number of in-flight I/Os.
+
+ If unsure, say N.
+
+config DM_MULTIPATH_ST
+ tristate "I/O Path Selector based on the service time"
+ depends on DM_MULTIPATH
+ ---help---
+ This path selector is a dynamic load balancer which selects
+ the path expected to complete the incoming I/O in the shortest
+ time.
+
+ If unsure, say N.
+
config DM_DELAY
tristate "I/O delaying target (EXPERIMENTAL)"
depends on BLK_DEV_DM && EXPERIMENTAL
diff --git a/drivers/md/Makefile b/drivers/md/Makefile
index 45cc5951d92..1dc4185bd78 100644
--- a/drivers/md/Makefile
+++ b/drivers/md/Makefile
@@ -8,6 +8,8 @@ dm-multipath-y += dm-path-selector.o dm-mpath.o
dm-snapshot-y += dm-snap.o dm-exception-store.o dm-snap-transient.o \
dm-snap-persistent.o
dm-mirror-y += dm-raid1.o
+dm-log-userspace-y \
+ += dm-log-userspace-base.o dm-log-userspace-transfer.o
md-mod-y += md.o bitmap.o
raid456-y += raid5.o
raid6_pq-y += raid6algos.o raid6recov.o raid6tables.o \
@@ -36,8 +38,11 @@ obj-$(CONFIG_BLK_DEV_DM) += dm-mod.o
obj-$(CONFIG_DM_CRYPT) += dm-crypt.o
obj-$(CONFIG_DM_DELAY) += dm-delay.o
obj-$(CONFIG_DM_MULTIPATH) += dm-multipath.o dm-round-robin.o
+obj-$(CONFIG_DM_MULTIPATH_QL) += dm-queue-length.o
+obj-$(CONFIG_DM_MULTIPATH_ST) += dm-service-time.o
obj-$(CONFIG_DM_SNAPSHOT) += dm-snapshot.o
obj-$(CONFIG_DM_MIRROR) += dm-mirror.o dm-log.o dm-region-hash.o
+obj-$(CONFIG_DM_LOG_USERSPACE) += dm-log-userspace.o
obj-$(CONFIG_DM_ZERO) += dm-zero.o
quiet_cmd_unroll = UNROLL $@
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 53394e863c7..9933eb861c7 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -1132,6 +1132,7 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
goto bad_crypt_queue;
}
+ ti->num_flush_requests = 1;
ti->private = cc;
return 0;
@@ -1189,6 +1190,13 @@ static int crypt_map(struct dm_target *ti, struct bio *bio,
union map_info *map_context)
{
struct dm_crypt_io *io;
+ struct crypt_config *cc;
+
+ if (unlikely(bio_empty_barrier(bio))) {
+ cc = ti->private;
+ bio->bi_bdev = cc->dev->bdev;
+ return DM_MAPIO_REMAPPED;
+ }
io = crypt_io_alloc(ti, bio, bio->bi_sector - ti->begin);
@@ -1305,9 +1313,17 @@ static int crypt_merge(struct dm_target *ti, struct bvec_merge_data *bvm,
return min(max_size, q->merge_bvec_fn(q, bvm, biovec));
}
+static int crypt_iterate_devices(struct dm_target *ti,
+ iterate_devices_callout_fn fn, void *data)
+{
+ struct crypt_config *cc = ti->private;
+
+ return fn(ti, cc->dev, cc->start, data);
+}
+
static struct target_type crypt_target = {
.name = "crypt",
- .version= {1, 6, 0},
+ .version = {1, 7, 0},
.module = THIS_MODULE,
.ctr = crypt_ctr,
.dtr = crypt_dtr,
@@ -1318,6 +1334,7 @@ static struct target_type crypt_target = {
.resume = crypt_resume,
.message = crypt_message,
.merge = crypt_merge,
+ .iterate_devices = crypt_iterate_devices,
};
static int __init dm_crypt_init(void)
diff --git a/drivers/md/dm-delay.c b/drivers/md/dm-delay.c
index 559dbb52bc8..4e5b843cd4d 100644
--- a/drivers/md/dm-delay.c
+++ b/drivers/md/dm-delay.c
@@ -197,6 +197,7 @@ out:
mutex_init(&dc->timer_lock);
atomic_set(&dc->may_delay, 1);
+ ti->num_flush_requests = 1;
ti->private = dc;
return 0;
@@ -278,8 +279,9 @@ static int delay_map(struct dm_target *ti, struct bio *bio,
if ((bio_data_dir(bio) == WRITE) && (dc->dev_write)) {
bio->bi_bdev = dc->dev_write->bdev;
- bio->bi_sector = dc->start_write +
- (bio->bi_sector - ti->begin);
+ if (bio_sectors(bio))
+ bio->bi_sector = dc->start_write +
+ (bio->bi_sector - ti->begin);
return delay_bio(dc, dc->write_delay, bio);
}
@@ -316,9 +318,26 @@ static int delay_status(struct dm_target *ti, status_type_t type,
return 0;
}
+static int delay_iterate_devices(struct dm_target *ti,
+ iterate_devices_callout_fn fn, void *data)
+{
+ struct delay_c *dc = ti->private;
+ int ret = 0;
+
+ ret = fn(ti, dc->dev_read, dc->start_read, data);
+ if (ret)
+ goto out;
+
+ if (dc->dev_write)
+ ret = fn(ti, dc->dev_write, dc->start_write, data);
+
+out:
+ return ret;
+}
+
static struct target_type delay_target = {
.name = "delay",
- .version = {1, 0, 2},
+ .version = {1, 1, 0},
.module = THIS_MODULE,
.ctr = delay_ctr,
.dtr = delay_dtr,
@@ -326,6 +345,7 @@ static struct target_type delay_target = {
.presuspend = delay_presuspend,
.resume = delay_resume,
.status = delay_status,
+ .iterate_devices = delay_iterate_devices,
};
static int __init dm_delay_init(void)
diff --git a/drivers/md/dm-exception-store.c b/drivers/md/dm-exception-store.c
index 75d8081a904..3710ff88fc1 100644
--- a/drivers/md/dm-exception-store.c
+++ b/drivers/md/dm-exception-store.c
@@ -195,7 +195,7 @@ int dm_exception_store_create(struct dm_target *ti, int argc, char **argv,
struct dm_exception_store **store)
{
int r = 0;
- struct dm_exception_store_type *type;
+ struct dm_exception_store_type *type = NULL;
struct dm_exception_store *tmp_store;
char persistent;
@@ -211,12 +211,15 @@ int dm_exception_store_create(struct dm_target *ti, int argc, char **argv,
}
persistent = toupper(*argv[1]);
- if (persistent != 'P' && persistent != 'N') {
+ if (persistent == 'P')
+ type = get_type("P");
+ else if (persistent == 'N')
+ type = get_type("N");
+ else {
ti->error = "Persistent flag is not P or N";
return -EINVAL;
}
- type = get_type(argv[1]);
if (!type) {
ti->error = "Exception store type not recognised";
r = -EINVAL;
diff --git a/drivers/md/dm-exception-store.h b/drivers/md/dm-exception-store.h
index c92701dc500..2442c8c0789 100644
--- a/drivers/md/dm-exception-store.h
+++ b/drivers/md/dm-exception-store.h
@@ -156,7 +156,7 @@ static inline void dm_consecutive_chunk_count_inc(struct dm_snap_exception *e)
*/
static inline sector_t get_dev_size(struct block_device *bdev)
{
- return bdev->bd_inode->i_size >> SECTOR_SHIFT;
+ return i_size_read(bdev->bd_inode) >> SECTOR_SHIFT;
}
static inline chunk_t sector_to_chunk(struct dm_exception_store *store,
diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c
index e73aabd61cd..3a2e6a2f8bd 100644
--- a/drivers/md/dm-io.c
+++ b/drivers/md/dm-io.c
@@ -22,6 +22,7 @@ struct dm_io_client {
/* FIXME: can we shrink this ? */
struct io {
unsigned long error_bits;
+ unsigned long eopnotsupp_bits;
atomic_t count;
struct task_struct *sleeper;
struct dm_io_client *client;
@@ -107,8 +108,11 @@ static inline unsigned bio_get_region(struct bio *bio)
*---------------------------------------------------------------*/
static void dec_count(struct io *io, unsigned int region, int error)
{
- if (error)
+ if (error) {
set_bit(region, &io->error_bits);
+ if (error == -EOPNOTSUPP)
+ set_bit(region, &io->eopnotsupp_bits);
+ }
if (atomic_dec_and_test(&io->count)) {
if (io->sleeper)
@@ -360,7 +364,9 @@ static int sync_io(struct dm_io_client *client, unsigned int num_regions,
return -EIO;
}
+retry:
io.error_bits = 0;
+ io.eopnotsupp_bits = 0;
atomic_set(&io.count, 1); /* see dispatch_io() */
io.sleeper = current;
io.client = client;
@@ -377,6 +383,11 @@ static int sync_io(struct dm_io_client *client, unsigned int num_regions,
}
set_current_state(TASK_RUNNING);
+ if (io.eopnotsupp_bits && (rw & (1 << BIO_RW_BARRIER))) {
+ rw &= ~(1 << BIO_RW_BARRIER);
+ goto retry;
+ }
+
if (error_bits)
*error_bits = io.error_bits;
@@ -397,6 +408,7 @@ static int async_io(struct dm_io_client *client, unsigned int num_regions,
io = mempool_alloc(client->pool, GFP_NOIO);
io->error_bits = 0;
+ io->eopnotsupp_bits = 0;
atomic_set(&io->count, 1); /* see dispatch_io() */
io->sleeper = NULL;
io->client = client;
diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
index 1128d3fba79..7f77f18fcaf 100644
--- a/drivers/md/dm-ioctl.c
+++ b/drivers/md/dm-ioctl.c
@@ -276,7 +276,7 @@ retry:
up_write(&_hash_lock);
}
-static int dm_hash_rename(const char *old, const char *new)
+static int dm_hash_rename(uint32_t cookie, const char *old, const char *new)
{
char *new_name, *old_name;
struct hash_cell *hc;
@@ -333,7 +333,7 @@ static int dm_hash_rename(const char *old, const char *new)
dm_table_put(table);
}
- dm_kobject_uevent(hc->md);
+ dm_kobject_uevent(hc->md, KOBJ_CHANGE, cookie);
dm_put(hc->md);
up_write(&_hash_lock);
@@ -680,6 +680,9 @@ static int dev_remove(struct dm_ioctl *param, size_t param_size)
__hash_remove(hc);
up_write(&_hash_lock);
+
+ dm_kobject_uevent(md, KOBJ_REMOVE, param->event_nr);
+
dm_put(md);
param->data_size = 0;
return 0;
@@ -715,7 +718,7 @@ static int dev_rename(struct dm_ioctl *param, size_t param_size)
return r;
param->data_size = 0;
- return dm_hash_rename(param->name, new_name);
+ return dm_hash_rename(param->event_nr, param->name, new_name);
}
static int dev_set_geometry(struct dm_ioctl *param, size_t param_size)
@@ -842,8 +845,11 @@ static int do_resume(struct dm_ioctl *param)
if (dm_suspended(md))
r = dm_resume(md);
- if (!r)
+
+ if (!r) {
+ dm_kobject_uevent(md, KOBJ_CHANGE, param->event_nr);
r = __dev_status(md, param);
+ }
dm_put(md);
return r;
@@ -1044,6 +1050,12 @@ static int populate_table(struct dm_table *table,
next = spec->next;
}
+ r = dm_table_set_type(table);
+ if (r) {
+ DMWARN("unable to set table type");
+ return r;
+ }
+
return dm_table_complete(table);
}
@@ -1089,6 +1101,13 @@ static int table_load(struct dm_ioctl *param, size_t param_size)
goto out;
}
+ r = dm_table_alloc_md_mempools(t);
+ if (r) {
+ DMWARN("unable to allocate mempools for this table");
+ dm_table_destroy(t);
+ goto out;
+ }
+
down_write(&_hash_lock);
hc = dm_get_mdptr(md);
if (!hc || hc->md != md) {
diff --git a/drivers/md/dm-linear.c b/drivers/md/dm-linear.c
index 79fb53e51c7..9184b6deb86 100644
--- a/drivers/md/dm-linear.c
+++ b/drivers/md/dm-linear.c
@@ -53,6 +53,7 @@ static int linear_ctr(struct dm_target *ti, unsigned int argc, char **argv)
goto bad;
}
+ ti->num_flush_requests = 1;
ti->private = lc;
return 0;
@@ -81,7 +82,8 @@ static void linear_map_bio(struct dm_target *ti, struct bio *bio)
struct linear_c *lc = ti->private;
bio->bi_bdev = lc->dev->bdev;
- bio->bi_sector = linear_map_sector(ti, bio->bi_sector);
+ if (bio_sectors(bio))
+ bio->bi_sector = linear_map_sector(ti, bio->bi_sector);
}
static int linear_map(struct dm_target *ti, struct bio *bio,
@@ -132,9 +134,17 @@ static int linear_merge(struct dm_target *ti, struct bvec_merge_data *bvm,
return min(max_size, q->merge_bvec_fn(q, bvm, biovec));
}
+static int linear_iterate_devices(struct dm_target *ti,
+ iterate_devices_callout_fn fn, void *data)
+{
+ struct linear_c *lc = ti->private;
+
+ return fn(ti, lc->dev, lc->start, data);
+}
+
static struct target_type linear_target = {
.name = "linear",
- .version= {1, 0, 3},
+ .version = {1, 1, 0},
.module = THIS_MODULE,
.ctr = linear_ctr,
.dtr = linear_dtr,
@@ -142,6 +152,7 @@ static struct target_type linear_target = {
.status = linear_status,
.ioctl = linear_ioctl,
.merge = linear_merge,
+ .iterate_devices = linear_iterate_devices,
};
int __init dm_linear_init(void)
diff --git a/drivers/md/dm-log-userspace-base.c b/drivers/md/dm-log-userspace-base.c
new file mode 100644
index 00000000000..e69b9656099
--- /dev/null
+++ b/drivers/md/dm-log-userspace-base.c
@@ -0,0 +1,696 @@
+/*
+ * Copyright (C) 2006-2009 Red Hat, Inc.
+ *
+ * This file is released under the LGPL.
+ */
+
+#include <linux/bio.h>
+#include <linux/dm-dirty-log.h>
+#include <linux/device-mapper.h>
+#include <linux/dm-log-userspace.h>
+
+#include "dm-log-userspace-transfer.h"
+
+struct flush_entry {
+ int type;
+ region_t region;
+ struct list_head list;
+};
+
+struct log_c {
+ struct dm_target *ti;
+ uint32_t region_size;
+ region_t region_count;
+ char uuid[DM_UUID_LEN];
+
+ char *usr_argv_str;
+ uint32_t usr_argc;
+
+ /*
+ * in_sync_hint gets set when doing is_remote_recovering. It
+ * represents the first region that needs recovery. IOW, the
+ * first zero bit of sync_bits. This can be useful to limit
+ * traffic for calls like is_remote_recovering and get_resync_work,
+ * but take care in its use for anything else.
+ */
+ uint64_t in_sync_hint;
+
+ spinlock_t flush_lock;
+ struct list_head flush_list; /* only for clear and mark requests */
+};
+
+static mempool_t *flush_entry_pool;
+
+static void *flush_entry_alloc(gfp_t gfp_mask, void *pool_data)
+{
+ return kmalloc(sizeof(struct flush_entry), gfp_mask);
+}
+
+static void flush_entry_free(void *element, void *pool_data)
+{
+ kfree(element);
+}
+
+static int userspace_do_request(struct log_c *lc, const char *uuid,
+ int request_type, char *data, size_t data_size,
+ char *rdata, size_t *rdata_size)
+{
+ int r;
+
+ /*
+ * If the server isn't there, -ESRCH is returned,
+ * and we must keep trying until the server is
+ * restored.
+ */
+retry:
+ r = dm_consult_userspace(uuid, request_type, data,
+ data_size, rdata, rdata_size);
+
+ if (r != -ESRCH)
+ return r;
+
+ DMERR(" Userspace log server not found.");
+ while (1) {
+ set_current_state(TASK_INTERRUPTIBLE);
+ schedule_timeout(2*HZ);
+ DMWARN("Attempting to contact userspace log server...");
+ r = dm_consult_userspace(uuid, DM_ULOG_CTR, lc->usr_argv_str,
+ strlen(lc->usr_argv_str) + 1,
+ NULL, NULL);
+ if (!r)
+ break;
+ }
+ DMINFO("Reconnected to userspace log server... DM_ULOG_CTR complete");
+ r = dm_consult_userspace(uuid, DM_ULOG_RESUME, NULL,
+ 0, NULL, NULL);
+ if (!r)
+ goto retry;
+
+ DMERR("Error trying to resume userspace log: %d", r);
+
+ return -ESRCH;
+}
+
+static int build_constructor_string(struct dm_target *ti,
+ unsigned argc, char **argv,
+ char **ctr_str)
+{
+ int i, str_size;
+ char *str = NULL;
+
+ *ctr_str = NULL;
+
+ for (i = 0, str_size = 0; i < argc; i++)
+ str_size += strlen(argv[i]) + 1; /* +1 for space between args */
+
+ str_size += 20; /* Max number of chars in a printed u64 number */
+
+ str = kzalloc(str_size, GFP_KERNEL);
+ if (!str) {
+ DMWARN("Unable to allocate memory for constructor string");
+ return -ENOMEM;
+ }
+
+ for (i = 0, str_size = 0; i < argc; i++)
+ str_size += sprintf(str + str_size, "%s ", argv[i]);
+ str_size += sprintf(str + str_size, "%llu",
+ (unsigned long long)ti->len);
+
+ *ctr_str = str;
+ return str_size;
+}
+
+/*
+ * userspace_ctr
+ *
+ * argv contains:
+ * <UUID> <other args>
+ * Where 'other args' are the userspace implementation-specific log
+ * arguments. An example might be:
+ * <UUID> clustered_disk <arg count> <log dev> <region_size> [[no]sync]
+ *
+ * So, this module will strip off the <UUID> for identification purposes
+ * when communicating with userspace about a log; but will pass on everything
+ * else.
+ */
+static int userspace_ctr(struct dm_dirty_log *log, struct dm_target *ti,
+ unsigned argc, char **argv)
+{
+ int r = 0;
+ int str_size;
+ char *ctr_str = NULL;
+ struct log_c *lc = NULL;
+ uint64_t rdata;
+ size_t rdata_size = sizeof(rdata);
+
+ if (argc < 3) {
+ DMWARN("Too few arguments to userspace dirty log");
+ return -EINVAL;
+ }
+
+ lc = kmalloc(sizeof(*lc), GFP_KERNEL);
+ if (!lc) {
+ DMWARN("Unable to allocate userspace log context.");
+ return -ENOMEM;
+ }
+
+ lc->ti = ti;
+
+ if (strlen(argv[0]) > (DM_UUID_LEN - 1)) {
+ DMWARN("UUID argument too long.");
+ kfree(lc);
+ return -EINVAL;
+ }
+
+ strncpy(lc->uuid, argv[0], DM_UUID_LEN);
+ spin_lock_init(&lc->flush_lock);
+ INIT_LIST_HEAD(&lc->flush_list);
+
+ str_size = build_constructor_string(ti, argc - 1, argv + 1, &ctr_str);
+ if (str_size < 0) {
+ kfree(lc);
+ return str_size;
+ }
+
+ /* Send table string */
+ r = dm_consult_userspace(lc->uuid, DM_ULOG_CTR,
+ ctr_str, str_size, NULL, NULL);
+
+ if (r == -ESRCH) {
+ DMERR("Userspace log server not found");
+ goto out;
+ }
+
+ /* Since the region size does not change, get it now */
+ rdata_size = sizeof(rdata);
+ r = dm_consult_userspace(lc->uuid, DM_ULOG_GET_REGION_SIZE,
+ NULL, 0, (char *)&rdata, &rdata_size);
+
+ if (r) {
+ DMERR("Failed to get region size of dirty log");
+ goto out;
+ }
+
+ lc->region_size = (uint32_t)rdata;
+ lc->region_count = dm_sector_div_up(ti->len, lc->region_size);
+
+out:
+ if (r) {
+ kfree(lc);
+ kfree(ctr_str);
+ } else {
+ lc->usr_argv_str = ctr_str;
+ lc->usr_argc = argc;
+ log->context = lc;
+ }
+
+ return r;
+}
+
+static void userspace_dtr(struct dm_dirty_log *log)
+{
+ int r;
+ struct log_c *lc = log->context;
+
+ r = dm_consult_userspace(lc->uuid, DM_ULOG_DTR,
+ NULL, 0,
+ NULL, NULL);
+
+ kfree(lc->usr_argv_str);
+ kfree(lc);
+
+ return;
+}
+
+static int userspace_presuspend(struct dm_dirty_log *log)
+{
+ int r;
+ struct log_c *lc = log->context;
+
+ r = dm_consult_userspace(lc->uuid, DM_ULOG_PRESUSPEND,
+ NULL, 0,
+ NULL, NULL);
+
+ return r;
+}
+
+static int userspace_postsuspend(struct dm_dirty_log *log)
+{
+ int r;
+ struct log_c *lc = log->context;
+
+ r = dm_consult_userspace(lc->uuid, DM_ULOG_POSTSUSPEND,
+ NULL, 0,
+ NULL, NULL);
+
+ return r;
+}
+
+static int userspace_resume(struct dm_dirty_log *log)
+{
+ int r;
+ struct log_c *lc = log->context;
+
+ lc->in_sync_hint = 0;
+ r = dm_consult_userspace(lc->uuid, DM_ULOG_RESUME,
+ NULL, 0,
+ NULL, NULL);
+
+ return r;
+}
+
+static uint32_t userspace_get_region_size(struct dm_dirty_log *log)
+{
+ struct log_c *lc = log->context;
+
+ return lc->region_size;
+}
+
+/*
+ * userspace_is_clean
+ *
+ * Check whether a region is clean. If there is any sort of
+ * failure when consulting the server, we return not clean.
+ *
+ * Returns: 1 if clean, 0 otherwise
+ */
+static int userspace_is_clean(struct dm_dirty_log *log, region_t region)
+{
+ int r;
+ uint64_t region64 = (uint64_t)region;
+ int64_t is_clean;
+ size_t rdata_size;
+ struct log_c *lc = log->context;
+
+ rdata_size = sizeof(is_clean);
+ r = userspace_do_request(lc, lc->uuid, DM_ULOG_IS_CLEAN,
+ (char *)&region64, sizeof(region64),
+ (char *)&is_clean, &rdata_size);
+
+ return (r) ? 0 : (int)is_clean;
+}
+
+/*
+ * userspace_in_sync
+ *
+ * Check if the region is in-sync. If there is any sort
+ * of failure when consulting the server, we assume that
+ * the region is not in sync.
+ *
+ * If 'can_block' is not set, return -EWOULDBLOCK immediately.
+ *
+ * Returns: 1 if in-sync, 0 if not-in-sync, -EWOULDBLOCK
+ */
+static int userspace_in_sync(struct dm_dirty_log *log, region_t region,
+ int can_block)
+{
+ int r;
+ uint64_t region64 = region;
+ int64_t in_sync;
+ size_t rdata_size;
+ struct log_c *lc = log->context;
+
+ /*
+ * We can never respond directly - even if in_sync_hint is
+ * set. This is because another machine could see a device
+ * failure and mark the region out-of-sync. If we don't go
+ * to userspace to ask, we might think the region is in-sync
+ * and allow a read to pick up data that is stale. (This is
+ * very unlikely if a device actually fails; but it is very
+ * likely if a connection to one device from one machine fails.)
+ *
+ * There still might be a problem if the mirror caches the region
+ * state as in-sync... but then this call would not be made. So,
+ * that is a mirror problem.
+ */
+ if (!can_block)
+ return -EWOULDBLOCK;
+
+ rdata_size = sizeof(in_sync);
+ r = userspace_do_request(lc, lc->uuid, DM_ULOG_IN_SYNC,
+ (char *)&region64, sizeof(region64),
+ (char *)&in_sync, &rdata_size);
+ return (r) ? 0 : (int)in_sync;
+}
+
+/*
+ * userspace_flush
+ *
+ * This function is ok to block.
+ * The flush happens in two stages. First, it sends all
+ * clear/mark requests that are on the list. Then it
+ * tells the server to commit them. This gives the
+ * server a chance to optimise the commit, instead of
+ * doing it for every request.
+ *
+ * Additionally, we could implement another thread that
+ * sends the requests up to the server - reducing the
+ * load on flush. Then the flush would have fewer entries in
+ * the list and would only be responsible for the finishing commit.
+ *
+ * Returns: 0 on success, < 0 on failure
+ */
+static int userspace_flush(struct dm_dirty_log *log)
+{
+ int r = 0;
+ unsigned long flags;
+ struct log_c *lc = log->context;
+ LIST_HEAD(flush_list);
+ struct flush_entry *fe, *tmp_fe;
+
+ spin_lock_irqsave(&lc->flush_lock, flags);
+ list_splice_init(&lc->flush_list, &flush_list);
+ spin_unlock_irqrestore(&lc->flush_lock, flags);
+
+ if (list_empty(&flush_list))
+ return 0;
+
+ /*
+ * FIXME: Count up requests, group request types,
+ * allocate memory to stick all requests in and
+ * send to server in one go. Failing the allocation,
+ * do it one by one.
+ */
+
+ list_for_each_entry(fe, &flush_list, list) {
+ r = userspace_do_request(lc, lc->uuid, fe->type,
+ (char *)&fe->region,
+ sizeof(fe->region),
+ NULL, NULL);
+ if (r)
+ goto fail;
+ }
+
+ r = userspace_do_request(lc, lc->uuid, DM_ULOG_FLUSH,
+ NULL, 0, NULL, NULL);
+
+fail:
+ /*
+ * We can safely remove these entries, even on failure.
+ * Calling code will receive an error and will know that
+ * the log facility has failed.
+ */
+ list_for_each_entry_safe(fe, tmp_fe, &flush_list, list) {
+ list_del(&fe->list);
+ mempool_free(fe, flush_entry_pool);
+ }
+
+ if (r)
+ dm_table_event(lc->ti->table);
+
+ return r;
+}
+
+/*
+ * userspace_mark_region
+ *
+ * This function should avoid blocking unless absolutely required.
+ * (Memory allocation is valid for blocking.)
+ */
+static void userspace_mark_region(struct dm_dirty_log *log, region_t region)
+{
+ unsigned long flags;
+ struct log_c *lc = log->context;
+ struct flush_entry *fe;
+
+ /* Wait for an allocation, but _never_ fail */
+ fe = mempool_alloc(flush_entry_pool, GFP_NOIO);
+ BUG_ON(!fe);
+
+ spin_lock_irqsave(&lc->flush_lock, flags);
+ fe->type = DM_ULOG_MARK_REGION;
+ fe->region = region;
+ list_add(&fe->list, &lc->flush_list);
+ spin_unlock_irqrestore(&lc->flush_lock, flags);
+
+ return;
+}
+
+/*
+ * userspace_clear_region
+ *
+ * This function must not block.
+ * So, the alloc can't block. In the worst case, it is ok to
+ * fail. It would simply mean we can't clear the region.
+ * Does nothing to current sync context, but does mean
+ * the region will be re-sync'ed on a reload of the mirror
+ * even though it is in-sync.
+ */
+static void userspace_clear_region(struct dm_dirty_log *log, region_t region)
+{
+ unsigned long flags;
+ struct log_c *lc = log->context;
+ struct flush_entry *fe;
+
+ /*
+ * If we fail to allocate, we skip the clearing of
+ * the region. This doesn't hurt us in any way, except
+ * to cause the region to be resync'ed when the
+ * device is activated next time.
+ */
+ fe = mempool_alloc(flush_entry_pool, GFP_ATOMIC);
+ if (!fe) {
+ DMERR("Failed to allocate memory to clear region.");
+ return;
+ }
+
+ spin_lock_irqsave(&lc->flush_lock, flags);
+ fe->type = DM_ULOG_CLEAR_REGION;
+ fe->region = region;
+ list_add(&fe->list, &lc->flush_list);
+ spin_unlock_irqrestore(&lc->flush_lock, flags);
+
+ return;
+}
+
+/*
+ * userspace_get_resync_work
+ *
+ * Get a region that needs recovery. It is valid to return
+ * an error for this function.
+ *
+ * Returns: 1 if region filled, 0 if no work, <0 on error
+ */
+static int userspace_get_resync_work(struct dm_dirty_log *log, region_t *region)
+{
+ int r;
+ size_t rdata_size;
+ struct log_c *lc = log->context;
+ struct {
+ int64_t i; /* 64-bit for mixed-arch compatibility */
+ region_t r;
+ } pkg;
+
+ if (lc->in_sync_hint >= lc->region_count)
+ return 0;
+
+ rdata_size = sizeof(pkg);
+ r = userspace_do_request(lc, lc->uuid, DM_ULOG_GET_RESYNC_WORK,
+ NULL, 0,
+ (char *)&pkg, &rdata_size);
+
+ *region = pkg.r;
+ return (r) ? r : (int)pkg.i;
+}
+
+/*
+ * userspace_set_region_sync
+ *
+ * Set the sync status of a given region. This function
+ * must not fail.
+ */
+static void userspace_set_region_sync(struct dm_dirty_log *log,
+ region_t region, int in_sync)
+{
+ int r;
+ struct log_c *lc = log->context;
+ struct {
+ region_t r;
+ int64_t i;
+ } pkg;
+
+ pkg.r = region;
+ pkg.i = (int64_t)in_sync;
+
+ r = userspace_do_request(lc, lc->uuid, DM_ULOG_SET_REGION_SYNC,
+ (char *)&pkg, sizeof(pkg),
+ NULL, NULL);
+
+ /*
+ * It would be nice to be able to report failures.
+ * However, it is easy enough to detect and resolve.
+ */
+ return;
+}
+
+/*
+ * userspace_get_sync_count
+ *
+ * If there is any sort of failure when consulting the server,
+ * we assume that the sync count is zero.
+ *
+ * Returns: sync count on success, 0 on failure
+ */
+static region_t userspace_get_sync_count(struct dm_dirty_log *log)
+{
+ int r;
+ size_t rdata_size;
+ uint64_t sync_count;
+ struct log_c *lc = log->context;
+
+ rdata_size = sizeof(sync_count);
+ r = userspace_do_request(lc, lc->uuid, DM_ULOG_GET_SYNC_COUNT,
+ NULL, 0,
+ (char *)&sync_count, &rdata_size);
+
+ if (r)
+ return 0;
+
+ if (sync_count >= lc->region_count)
+ lc->in_sync_hint = lc->region_count;
+
+ return (region_t)sync_count;
+}
+
+/*
+ * userspace_status
+ *
+ * Returns: amount of space consumed
+ */
+static int userspace_status(struct dm_dirty_log *log, status_type_t status_type,
+ char *result, unsigned maxlen)
+{
+ int r = 0;
+ size_t sz = (size_t)maxlen;
+ struct log_c *lc = log->context;
+
+ switch (status_type) {
+ case STATUSTYPE_INFO:
+ r = userspace_do_request(lc, lc->uuid, DM_ULOG_STATUS_INFO,
+ NULL, 0,
+ result, &sz);
+
+ if (r) {
+ sz = 0;
+ DMEMIT("%s 1 COM_FAILURE", log->type->name);
+ }
+ break;
+ case STATUSTYPE_TABLE:
+ sz = 0;
+ DMEMIT("%s %u %s %s", log->type->name, lc->usr_argc + 1,
+ lc->uuid, lc->usr_argv_str);
+ break;
+ }
+ return (r) ? 0 : (int)sz;
+}
+
+/*
+ * userspace_is_remote_recovering
+ *
+ * Returns: 1 if region recovering, 0 otherwise
+ */
+static int userspace_is_remote_recovering(struct dm_dirty_log *log,
+ region_t region)
+{
+ int r;
+ uint64_t region64 = region;
+ struct log_c *lc = log->context;
+ static unsigned long long limit;
+ struct {
+ int64_t is_recovering;
+ uint64_t in_sync_hint;
+ } pkg;
+ size_t rdata_size = sizeof(pkg);
+
+ /*
+ * Once the mirror has been reported to be in-sync,
+ * it will never again ask for recovery work. So,
+ * we can safely say there is not a remote machine
+ * recovering if the device is in-sync. (in_sync_hint
+ * must be reset at resume time.)
+ */
+ if (region < lc->in_sync_hint)
+ return 0;
+ else if (jiffies < limit)
+ return 1;
+
+ limit = jiffies + (HZ / 4);
+ r = userspace_do_request(lc, lc->uuid, DM_ULOG_IS_REMOTE_RECOVERING,
+ (char *)&region64, sizeof(region64),
+ (char *)&pkg, &rdata_size);
+ if (r)
+ return 1;
+
+ lc->in_sync_hint = pkg.in_sync_hint;
+
+ return (int)pkg.is_recovering;
+}
+
+static struct dm_dirty_log_type _userspace_type = {
+ .name = "userspace",
+ .module = THIS_MODULE,
+ .ctr = userspace_ctr,
+ .dtr = userspace_dtr,
+ .presuspend = userspace_presuspend,
+ .postsuspend = userspace_postsuspend,
+ .resume = userspace_resume,
+ .get_region_size = userspace_get_region_size,
+ .is_clean = userspace_is_clean,
+ .in_sync = userspace_in_sync,
+ .flush = userspace_flush,
+ .mark_region = userspace_mark_region,
+ .clear_region = userspace_clear_region,
+ .get_resync_work = userspace_get_resync_work,
+ .set_region_sync = userspace_set_region_sync,
+ .get_sync_count = userspace_get_sync_count,
+ .status = userspace_status,
+ .is_remote_recovering = userspace_is_remote_recovering,
+};
+
+static int __init userspace_dirty_log_init(void)
+{
+ int r = 0;
+
+ flush_entry_pool = mempool_create(100, flush_entry_alloc,
+ flush_entry_free, NULL);
+
+ if (!flush_entry_pool) {
+ DMWARN("Unable to create flush_entry_pool: No memory.");
+ return -ENOMEM;
+ }
+
+ r = dm_ulog_tfr_init();
+ if (r) {
+ DMWARN("Unable to initialize userspace log communications");
+ mempool_destroy(flush_entry_pool);
+ return r;
+ }
+
+ r = dm_dirty_log_type_register(&_userspace_type);
+ if (r) {
+ DMWARN("Couldn't register userspace dirty log type");
+ dm_ulog_tfr_exit();
+ mempool_destroy(flush_entry_pool);
+ return r;
+ }
+
+ DMINFO("version 1.0.0 loaded");
+ return 0;
+}
+
+static void __exit userspace_dirty_log_exit(void)
+{
+ dm_dirty_log_type_unregister(&_userspace_type);
+ dm_ulog_tfr_exit();
+ mempool_destroy(flush_entry_pool);
+
+ DMINFO("version 1.0.0 unloaded");
+ return;
+}
+
+module_init(userspace_dirty_log_init);
+module_exit(userspace_dirty_log_exit);
+
+MODULE_DESCRIPTION(DM_NAME " userspace dirty log link");
+MODULE_AUTHOR("Jonathan Brassow <dm-devel@redhat.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/md/dm-log-userspace-transfer.c b/drivers/md/dm-log-userspace-transfer.c
new file mode 100644
index 00000000000..0ca1ee768a1
--- /dev/null
+++ b/drivers/md/dm-log-userspace-transfer.c
@@ -0,0 +1,276 @@
+/*
+ * Copyright (C) 2006-2009 Red Hat, Inc.
+ *
+ * This file is released under the LGPL.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <net/sock.h>
+#include <linux/workqueue.h>
+#include <linux/connector.h>
+#include <linux/device-mapper.h>
+#include <linux/dm-log-userspace.h>
+
+#include "dm-log-userspace-transfer.h"
+
+static uint32_t dm_ulog_seq;
+
+/*
+ * Netlink/Connector is an unreliable protocol. How long should
+ * we wait for a response before assuming it was lost and retrying?
+ * (If we do receive a response after this time, it will be discarded
+ * and the response to the resent request will be waited for.)
+ */
+#define DM_ULOG_RETRY_TIMEOUT (15 * HZ)
+
+/*
+ * Pre-allocated space for speed
+ */
+#define DM_ULOG_PREALLOCED_SIZE 512
+static struct cn_msg *prealloced_cn_msg;
+static struct dm_ulog_request *prealloced_ulog_tfr;
+
+static struct cb_id ulog_cn_id = {
+ .idx = CN_IDX_DM,
+ .val = CN_VAL_DM_USERSPACE_LOG
+};
+
+static DEFINE_MUTEX(dm_ulog_lock);
+
+struct receiving_pkg {
+ struct list_head list;
+ struct completion complete;
+
+ uint32_t seq;
+
+ int error;
+ size_t *data_size;
+ char *data;
+};
+
+static DEFINE_SPINLOCK(receiving_list_lock);
+static struct list_head receiving_list;
+
+static int dm_ulog_sendto_server(struct dm_ulog_request *tfr)
+{
+ int r;
+ struct cn_msg *msg = prealloced_cn_msg;
+
+ memset(msg, 0, sizeof(struct cn_msg));
+
+ msg->id.idx = ulog_cn_id.idx;
+ msg->id.val = ulog_cn_id.val;
+ msg->ack = 0;
+ msg->seq = tfr->seq;
+ msg->len = sizeof(struct dm_ulog_request) + tfr->data_size;
+
+ r = cn_netlink_send(msg, 0, gfp_any());
+
+ return r;
+}
+
+/*
+ * Parameters for this function can be either msg or tfr, but not
+ * both. This function fills in the reply for a waiting request.
+ * If just msg is given, then the reply is simply an ACK from userspace
+ * that the request was received.
+ *
+ * Returns: 0 on success, -ENOENT on failure
+ */
+static int fill_pkg(struct cn_msg *msg, struct dm_ulog_request *tfr)
+{
+ uint32_t rtn_seq = (msg) ? msg->seq : (tfr) ? tfr->seq : 0;
+ struct receiving_pkg *pkg;
+
+ /*
+ * The 'receiving_pkg' entries in this list are allocated
+ * on the stack in 'dm_consult_userspace'.
+ * Each process that is waiting for a reply from the user
+ * space server will have an entry in this list.
+ *
+ * We are safe to do it this way because the stack space
+ * is unique to each process, but still addressable by
+ * other processes.
+ */
+ list_for_each_entry(pkg, &receiving_list, list) {
+ if (rtn_seq != pkg->seq)
+ continue;
+
+ if (msg) {
+ pkg->error = -msg->ack;
+ /*
+ * If we are trying again, we will need to know our
+ * storage capacity. Otherwise, along with the
+ * error code, we make explicit that we have no data.
+ */
+ if (pkg->error != -EAGAIN)
+ *(pkg->data_size) = 0;
+ } else if (tfr->data_size > *(pkg->data_size)) {
+ DMERR("Insufficient space to receive package [%u] "
+ "(%u vs %lu)", tfr->request_type,
+ tfr->data_size, *(pkg->data_size));
+
+ *(pkg->data_size) = 0;
+ pkg->error = -ENOSPC;
+ } else {
+ pkg->error = tfr->error;
+ memcpy(pkg->data, tfr->data, tfr->data_size);
+ *(pkg->data_size) = tfr->data_size;
+ }
+ complete(&pkg->complete);
+ return 0;
+ }
+
+ return -ENOENT;
+}
+
+/*
+ * This is the connector callback that delivers data
+ * that was sent from userspace.
+ */
+static void cn_ulog_callback(void *data)
+{
+ struct cn_msg *msg = (struct cn_msg *)data;
+ struct dm_ulog_request *tfr = (struct dm_ulog_request *)(msg + 1);
+
+ spin_lock(&receiving_list_lock);
+ if (msg->len == 0)
+ fill_pkg(msg, NULL);
+ else if (msg->len < sizeof(*tfr))
+ DMERR("Incomplete message received (expected %u, got %u): [%u]",
+ (unsigned)sizeof(*tfr), msg->len, msg->seq);
+ else
+ fill_pkg(NULL, tfr);
+ spin_unlock(&receiving_list_lock);
+}
+
+/**
+ * dm_consult_userspace
+ * @uuid: log's uuid (must be DM_UUID_LEN in size)
+ * @request_type: found in include/linux/dm-log-userspace.h
+ * @data: data to tx to the server
+ * @data_size: size of data in bytes
+ * @rdata: place to put return data from server
+ * @rdata_size: value-result (amount of space given/amount of space used)
+ *
+ * rdata_size is undefined on failure.
+ *
+ * Memory used to communicate with userspace is zero'ed
+ * before populating to ensure that no unwanted bits leak
+ * from kernel space to user-space. All userspace log communications
+ * between kernel and user space go through this function.
+ *
+ * Returns: 0 on success, -EXXX on failure
+ **/
+int dm_consult_userspace(const char *uuid, int request_type,
+ char *data, size_t data_size,
+ char *rdata, size_t *rdata_size)
+{
+ int r = 0;
+ size_t dummy = 0;
+ int overhead_size =
+ sizeof(struct dm_ulog_request *) + sizeof(struct cn_msg);
+ struct dm_ulog_request *tfr = prealloced_ulog_tfr;
+ struct receiving_pkg pkg;
+
+ if (data_size > (DM_ULOG_PREALLOCED_SIZE - overhead_size)) {
+ DMINFO("Size of tfr exceeds preallocated size");
+ return -EINVAL;
+ }
+
+ if (!rdata_size)
+ rdata_size = &dummy;
+resend:
+ /*
+ * We serialize the sending of requests so we can
+ * use the preallocated space.
+ */
+ mutex_lock(&dm_ulog_lock);
+
+ memset(tfr, 0, DM_ULOG_PREALLOCED_SIZE - overhead_size);
+ memcpy(tfr->uuid, uuid, DM_UUID_LEN);
+ tfr->seq = dm_ulog_seq++;
+
+ /*
+ * Must be valid request type (all other bits set to
+ * zero). This reserves other bits for possible future
+ * use.
+ */
+ tfr->request_type = request_type & DM_ULOG_REQUEST_MASK;
+
+ tfr->data_size = data_size;
+ if (data && data_size)
+ memcpy(tfr->data, data, data_size);
+
+ memset(&pkg, 0, sizeof(pkg));
+ init_completion(&pkg.complete);
+ pkg.seq = tfr->seq;
+ pkg.data_size = rdata_size;
+ pkg.data = rdata;
+ spin_lock(&receiving_list_lock);
+ list_add(&(pkg.list), &receiving_list);
+ spin_unlock(&receiving_list_lock);
+
+ r = dm_ulog_sendto_server(tfr);
+
+ mutex_unlock(&dm_ulog_lock);
+
+ if (r) {
+ DMERR("Unable to send log request [%u] to userspace: %d",
+ request_type, r);
+ spin_lock(&receiving_list_lock);
+ list_del_init(&(pkg.list));
+ spin_unlock(&receiving_list_lock);
+
+ goto out;
+ }
+
+ r = wait_for_completion_timeout(&(pkg.complete), DM_ULOG_RETRY_TIMEOUT);
+ spin_lock(&receiving_list_lock);
+ list_del_init(&(pkg.list));
+ spin_unlock(&receiving_list_lock);
+ if (!r) {
+ DMWARN("[%s] Request timed out: [%u/%u] - retrying",
+ (strlen(uuid) > 8) ?
+ (uuid + (strlen(uuid) - 8)) : (uuid),
+ request_type, pkg.seq);
+ goto resend;
+ }
+
+ r = pkg.error;
+ if (r == -EAGAIN)
+ goto resend;
+
+out:
+ return r;
+}
+
+int dm_ulog_tfr_init(void)
+{
+ int r;
+ void *prealloced;
+
+ INIT_LIST_HEAD(&receiving_list);
+
+ prealloced = kmalloc(DM_ULOG_PREALLOCED_SIZE, GFP_KERNEL);
+ if (!prealloced)
+ return -ENOMEM;
+
+ prealloced_cn_msg = prealloced;
+ prealloced_ulog_tfr = prealloced + sizeof(struct cn_msg);
+
+ r = cn_add_callback(&ulog_cn_id, "dmlogusr", cn_ulog_callback);
+ if (r) {
+ cn_del_callback(&ulog_cn_id);
+ return r;
+ }
+
+ return 0;
+}
+
+void dm_ulog_tfr_exit(void)
+{
+ cn_del_callback(&ulog_cn_id);
+ kfree(prealloced_cn_msg);
+}
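The peer of this code is a userspace daemon listening on a netlink connector socket for CN_IDX_DM/CN_VAL_DM_USERSPACE_LOG. A rough sketch of the receive side, assuming the usual connector userspace pattern of joining multicast group CN_IDX_DM and using the structures from <linux/connector.h> and <linux/dm-log-userspace.h>; buffer size and error handling are illustrative, not from the patch:

    /* Illustrative only -- not part of this patch. */
    #include <sys/types.h>
    #include <sys/socket.h>
    #include <linux/netlink.h>
    #include <linux/connector.h>
    #include <linux/dm-log-userspace.h>
    #include <unistd.h>

    int main(void)
    {
            char buf[1024];                         /* hypothetical sizing */
            struct sockaddr_nl addr = { .nl_family = AF_NETLINK,
                                        .nl_groups = CN_IDX_DM };
            int fd = socket(PF_NETLINK, SOCK_DGRAM, NETLINK_CONNECTOR);

            bind(fd, (struct sockaddr *)&addr, sizeof(addr));

            for (;;) {
                    ssize_t len = recv(fd, buf, sizeof(buf), 0);
                    struct nlmsghdr *nlh = (struct nlmsghdr *)buf;
                    struct cn_msg *cn = NLMSG_DATA(nlh);
                    struct dm_ulog_request *rq = (struct dm_ulog_request *)cn->data;

                    if (len <= 0)
                            break;
                    /*
                     * Service rq->request_type for the log named by rq->uuid,
                     * fill rq->data/rq->data_size and rq->error, leave rq->seq
                     * untouched, and send the cn_msg back so fill_pkg() can
                     * match the reply by sequence number.
                     */
            }
            close(fd);
            return 0;
    }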
diff --git a/drivers/md/dm-log-userspace-transfer.h b/drivers/md/dm-log-userspace-transfer.h
new file mode 100644
index 00000000000..c26d8e4e271
--- /dev/null
+++ b/drivers/md/dm-log-userspace-transfer.h
@@ -0,0 +1,18 @@
+/*
+ * Copyright (C) 2006-2009 Red Hat, Inc.
+ *
+ * This file is released under the LGPL.
+ */
+
+#ifndef __DM_LOG_USERSPACE_TRANSFER_H__
+#define __DM_LOG_USERSPACE_TRANSFER_H__
+
+#define DM_MSG_PREFIX "dm-log-userspace"
+
+int dm_ulog_tfr_init(void);
+void dm_ulog_tfr_exit(void);
+int dm_consult_userspace(const char *uuid, int request_type,
+ char *data, size_t data_size,
+ char *rdata, size_t *rdata_size);
+
+#endif /* __DM_LOG_USERSPACE_TRANSFER_H__ */
diff --git a/drivers/md/dm-log.c b/drivers/md/dm-log.c
index 6fa8ccf91c7..9443896ede0 100644
--- a/drivers/md/dm-log.c
+++ b/drivers/md/dm-log.c
@@ -412,11 +412,12 @@ static int create_log_context(struct dm_dirty_log *log, struct dm_target *ti,
/*
* Buffer holds both header and bitset.
*/
- buf_size = dm_round_up((LOG_OFFSET << SECTOR_SHIFT) +
- bitset_size,
- ti->limits.logical_block_size);
+ buf_size =
+ dm_round_up((LOG_OFFSET << SECTOR_SHIFT) + bitset_size,
+ bdev_logical_block_size(lc->header_location.
+ bdev));
- if (buf_size > dev->bdev->bd_inode->i_size) {
+ if (buf_size > i_size_read(dev->bdev->bd_inode)) {
DMWARN("log device %s too small: need %llu bytes",
dev->name, (unsigned long long)buf_size);
kfree(lc);
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index 6a386ab4f7e..c70604a2089 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -8,7 +8,6 @@
#include <linux/device-mapper.h>
#include "dm-path-selector.h"
-#include "dm-bio-record.h"
#include "dm-uevent.h"
#include <linux/ctype.h>
@@ -35,6 +34,7 @@ struct pgpath {
struct dm_path path;
struct work_struct deactivate_path;
+ struct work_struct activate_path;
};
#define path_to_pgpath(__pgp) container_of((__pgp), struct pgpath, path)
@@ -64,8 +64,6 @@ struct multipath {
spinlock_t lock;
const char *hw_handler_name;
- struct work_struct activate_path;
- struct pgpath *pgpath_to_activate;
unsigned nr_priority_groups;
struct list_head priority_groups;
unsigned pg_init_required; /* pg_init needs calling? */
@@ -84,7 +82,7 @@ struct multipath {
unsigned pg_init_count; /* Number of times pg_init called */
struct work_struct process_queued_ios;
- struct bio_list queued_ios;
+ struct list_head queued_ios;
unsigned queue_size;
struct work_struct trigger_event;
@@ -101,7 +99,7 @@ struct multipath {
*/
struct dm_mpath_io {
struct pgpath *pgpath;
- struct dm_bio_details details;
+ size_t nr_bytes;
};
typedef int (*action_fn) (struct pgpath *pgpath);
@@ -128,6 +126,7 @@ static struct pgpath *alloc_pgpath(void)
if (pgpath) {
pgpath->is_active = 1;
INIT_WORK(&pgpath->deactivate_path, deactivate_path);
+ INIT_WORK(&pgpath->activate_path, activate_path);
}
return pgpath;
@@ -160,7 +159,6 @@ static struct priority_group *alloc_priority_group(void)
static void free_pgpaths(struct list_head *pgpaths, struct dm_target *ti)
{
- unsigned long flags;
struct pgpath *pgpath, *tmp;
struct multipath *m = ti->private;
@@ -169,10 +167,6 @@ static void free_pgpaths(struct list_head *pgpaths, struct dm_target *ti)
if (m->hw_handler_name)
scsi_dh_detach(bdev_get_queue(pgpath->path.dev->bdev));
dm_put_device(ti, pgpath->path.dev);
- spin_lock_irqsave(&m->lock, flags);
- if (m->pgpath_to_activate == pgpath)
- m->pgpath_to_activate = NULL;
- spin_unlock_irqrestore(&m->lock, flags);
free_pgpath(pgpath);
}
}
@@ -198,11 +192,11 @@ static struct multipath *alloc_multipath(struct dm_target *ti)
m = kzalloc(sizeof(*m), GFP_KERNEL);
if (m) {
INIT_LIST_HEAD(&m->priority_groups);
+ INIT_LIST_HEAD(&m->queued_ios);
spin_lock_init(&m->lock);
m->queue_io = 1;
INIT_WORK(&m->process_queued_ios, process_queued_ios);
INIT_WORK(&m->trigger_event, trigger_event);
- INIT_WORK(&m->activate_path, activate_path);
m->mpio_pool = mempool_create_slab_pool(MIN_IOS, _mpio_cache);
if (!m->mpio_pool) {
kfree(m);
@@ -250,11 +244,12 @@ static void __switch_pg(struct multipath *m, struct pgpath *pgpath)
m->pg_init_count = 0;
}
-static int __choose_path_in_pg(struct multipath *m, struct priority_group *pg)
+static int __choose_path_in_pg(struct multipath *m, struct priority_group *pg,
+ size_t nr_bytes)
{
struct dm_path *path;
- path = pg->ps.type->select_path(&pg->ps, &m->repeat_count);
+ path = pg->ps.type->select_path(&pg->ps, &m->repeat_count, nr_bytes);
if (!path)
return -ENXIO;
@@ -266,7 +261,7 @@ static int __choose_path_in_pg(struct multipath *m, struct priority_group *pg)
return 0;
}
-static void __choose_pgpath(struct multipath *m)
+static void __choose_pgpath(struct multipath *m, size_t nr_bytes)
{
struct priority_group *pg;
unsigned bypassed = 1;
@@ -278,12 +273,12 @@ static void __choose_pgpath(struct multipath *m)
if (m->next_pg) {
pg = m->next_pg;
m->next_pg = NULL;
- if (!__choose_path_in_pg(m, pg))
+ if (!__choose_path_in_pg(m, pg, nr_bytes))
return;
}
/* Don't change PG until it has no remaining paths */
- if (m->current_pg && !__choose_path_in_pg(m, m->current_pg))
+ if (m->current_pg && !__choose_path_in_pg(m, m->current_pg, nr_bytes))
return;
/*
@@ -295,7 +290,7 @@ static void __choose_pgpath(struct multipath *m)
list_for_each_entry(pg, &m->priority_groups, list) {
if (pg->bypassed == bypassed)
continue;
- if (!__choose_path_in_pg(m, pg))
+ if (!__choose_path_in_pg(m, pg, nr_bytes))
return;
}
} while (bypassed--);
@@ -322,19 +317,21 @@ static int __must_push_back(struct multipath *m)
dm_noflush_suspending(m->ti));
}
-static int map_io(struct multipath *m, struct bio *bio,
+static int map_io(struct multipath *m, struct request *clone,
struct dm_mpath_io *mpio, unsigned was_queued)
{
int r = DM_MAPIO_REMAPPED;
+ size_t nr_bytes = blk_rq_bytes(clone);
unsigned long flags;
struct pgpath *pgpath;
+ struct block_device *bdev;
spin_lock_irqsave(&m->lock, flags);
/* Do we need to select a new pgpath? */
if (!m->current_pgpath ||
(!m->queue_io && (m->repeat_count && --m->repeat_count == 0)))
- __choose_pgpath(m);
+ __choose_pgpath(m, nr_bytes);
pgpath = m->current_pgpath;
@@ -344,21 +341,28 @@ static int map_io(struct multipath *m, struct bio *bio,
if ((pgpath && m->queue_io) ||
(!pgpath && m->queue_if_no_path)) {
/* Queue for the daemon to resubmit */
- bio_list_add(&m->queued_ios, bio);
+ list_add_tail(&clone->queuelist, &m->queued_ios);
m->queue_size++;
if ((m->pg_init_required && !m->pg_init_in_progress) ||
!m->queue_io)
queue_work(kmultipathd, &m->process_queued_ios);
pgpath = NULL;
r = DM_MAPIO_SUBMITTED;
- } else if (pgpath)
- bio->bi_bdev = pgpath->path.dev->bdev;
- else if (__must_push_back(m))
+ } else if (pgpath) {
+ bdev = pgpath->path.dev->bdev;
+ clone->q = bdev_get_queue(bdev);
+ clone->rq_disk = bdev->bd_disk;
+ } else if (__must_push_back(m))
r = DM_MAPIO_REQUEUE;
else
r = -EIO; /* Failed */
mpio->pgpath = pgpath;
+ mpio->nr_bytes = nr_bytes;
+
+ if (r == DM_MAPIO_REMAPPED && pgpath->pg->ps.type->start_io)
+ pgpath->pg->ps.type->start_io(&pgpath->pg->ps, &pgpath->path,
+ nr_bytes);
spin_unlock_irqrestore(&m->lock, flags);
@@ -396,30 +400,31 @@ static void dispatch_queued_ios(struct multipath *m)
{
int r;
unsigned long flags;
- struct bio *bio = NULL, *next;
struct dm_mpath_io *mpio;
union map_info *info;
+ struct request *clone, *n;
+ LIST_HEAD(cl);
spin_lock_irqsave(&m->lock, flags);
- bio = bio_list_get(&m->queued_ios);
+ list_splice_init(&m->queued_ios, &cl);
spin_unlock_irqrestore(&m->lock, flags);
- while (bio) {
- next = bio->bi_next;
- bio->bi_next = NULL;
+ list_for_each_entry_safe(clone, n, &cl, queuelist) {
+ list_del_init(&clone->queuelist);
- info = dm_get_mapinfo(bio);
+ info = dm_get_rq_mapinfo(clone);
mpio = info->ptr;
- r = map_io(m, bio, mpio, 1);
- if (r < 0)
- bio_endio(bio, r);
- else if (r == DM_MAPIO_REMAPPED)
- generic_make_request(bio);
- else if (r == DM_MAPIO_REQUEUE)
- bio_endio(bio, -EIO);
-
- bio = next;
+ r = map_io(m, clone, mpio, 1);
+ if (r < 0) {
+ mempool_free(mpio, m->mpio_pool);
+ dm_kill_unmapped_request(clone, r);
+ } else if (r == DM_MAPIO_REMAPPED)
+ dm_dispatch_request(clone);
+ else if (r == DM_MAPIO_REQUEUE) {
+ mempool_free(mpio, m->mpio_pool);
+ dm_requeue_unmapped_request(clone);
+ }
}
}
@@ -427,8 +432,8 @@ static void process_queued_ios(struct work_struct *work)
{
struct multipath *m =
container_of(work, struct multipath, process_queued_ios);
- struct pgpath *pgpath = NULL;
- unsigned init_required = 0, must_queue = 1;
+ struct pgpath *pgpath = NULL, *tmp;
+ unsigned must_queue = 1;
unsigned long flags;
spin_lock_irqsave(&m->lock, flags);
@@ -437,7 +442,7 @@ static void process_queued_ios(struct work_struct *work)
goto out;
if (!m->current_pgpath)
- __choose_pgpath(m);
+ __choose_pgpath(m, 0);
pgpath = m->current_pgpath;
@@ -446,19 +451,15 @@ static void process_queued_ios(struct work_struct *work)
must_queue = 0;
if (m->pg_init_required && !m->pg_init_in_progress && pgpath) {
- m->pgpath_to_activate = pgpath;
m->pg_init_count++;
m->pg_init_required = 0;
- m->pg_init_in_progress = 1;
- init_required = 1;
+ list_for_each_entry(tmp, &pgpath->pg->pgpaths, list) {
+ if (queue_work(kmpath_handlerd, &tmp->activate_path))
+ m->pg_init_in_progress++;
+ }
}
-
out:
spin_unlock_irqrestore(&m->lock, flags);
-
- if (init_required)
- queue_work(kmpath_handlerd, &m->activate_path);
-
if (!must_queue)
dispatch_queued_ios(m);
}
@@ -553,6 +554,12 @@ static int parse_path_selector(struct arg_set *as, struct priority_group *pg,
return -EINVAL;
}
+ if (ps_argc > as->argc) {
+ dm_put_path_selector(pst);
+ ti->error = "not enough arguments for path selector";
+ return -EINVAL;
+ }
+
r = pst->create(&pg->ps, ps_argc, as->argv);
if (r) {
dm_put_path_selector(pst);
@@ -591,9 +598,20 @@ static struct pgpath *parse_path(struct arg_set *as, struct path_selector *ps,
}
if (m->hw_handler_name) {
- r = scsi_dh_attach(bdev_get_queue(p->path.dev->bdev),
- m->hw_handler_name);
+ struct request_queue *q = bdev_get_queue(p->path.dev->bdev);
+
+ r = scsi_dh_attach(q, m->hw_handler_name);
+ if (r == -EBUSY) {
+ /*
+ * Already attached to a different hw_handler;
+ * try to reattach with the correct one.
+ */
+ scsi_dh_detach(q);
+ r = scsi_dh_attach(q, m->hw_handler_name);
+ }
+
if (r < 0) {
+ ti->error = "error attaching hardware handler";
dm_put_device(ti, p->path.dev);
goto bad;
}
@@ -699,6 +717,11 @@ static int parse_hw_handler(struct arg_set *as, struct multipath *m)
if (!hw_argc)
return 0;
+ if (hw_argc > as->argc) {
+ ti->error = "not enough arguments for hardware handler";
+ return -EINVAL;
+ }
+
m->hw_handler_name = kstrdup(shift(as), GFP_KERNEL);
request_module("scsi_dh_%s", m->hw_handler_name);
if (scsi_dh_handler_exist(m->hw_handler_name) == 0) {
@@ -823,6 +846,8 @@ static int multipath_ctr(struct dm_target *ti, unsigned int argc,
goto bad;
}
+ ti->num_flush_requests = 1;
+
return 0;
bad:
@@ -836,25 +861,29 @@ static void multipath_dtr(struct dm_target *ti)
flush_workqueue(kmpath_handlerd);
flush_workqueue(kmultipathd);
+ flush_scheduled_work();
free_multipath(m);
}
/*
- * Map bios, recording original fields for later in case we have to resubmit
+ * Map cloned requests
*/
-static int multipath_map(struct dm_target *ti, struct bio *bio,
+static int multipath_map(struct dm_target *ti, struct request *clone,
union map_info *map_context)
{
int r;
struct dm_mpath_io *mpio;
struct multipath *m = (struct multipath *) ti->private;
- mpio = mempool_alloc(m->mpio_pool, GFP_NOIO);
- dm_bio_record(&mpio->details, bio);
+ mpio = mempool_alloc(m->mpio_pool, GFP_ATOMIC);
+ if (!mpio)
+ /* ENOMEM, requeue */
+ return DM_MAPIO_REQUEUE;
+ memset(mpio, 0, sizeof(*mpio));
map_context->ptr = mpio;
- bio->bi_rw |= (1 << BIO_RW_FAILFAST_TRANSPORT);
- r = map_io(m, bio, mpio, 0);
+ clone->cmd_flags |= REQ_FAILFAST_TRANSPORT;
+ r = map_io(m, clone, mpio, 0);
if (r < 0 || r == DM_MAPIO_REQUEUE)
mempool_free(mpio, m->mpio_pool);
@@ -924,9 +953,13 @@ static int reinstate_path(struct pgpath *pgpath)
pgpath->is_active = 1;
- m->current_pgpath = NULL;
- if (!m->nr_valid_paths++ && m->queue_size)
+ if (!m->nr_valid_paths++ && m->queue_size) {
+ m->current_pgpath = NULL;
queue_work(kmultipathd, &m->process_queued_ios);
+ } else if (m->hw_handler_name && (m->current_pg == pgpath->pg)) {
+ if (queue_work(kmpath_handlerd, &pgpath->activate_path))
+ m->pg_init_in_progress++;
+ }
dm_path_uevent(DM_UEVENT_PATH_REINSTATED, m->ti,
pgpath->path.dev->name, m->nr_valid_paths);
@@ -1102,87 +1135,70 @@ static void pg_init_done(struct dm_path *path, int errors)
spin_lock_irqsave(&m->lock, flags);
if (errors) {
- DMERR("Could not failover device. Error %d.", errors);
- m->current_pgpath = NULL;
- m->current_pg = NULL;
+ if (pgpath == m->current_pgpath) {
+ DMERR("Could not failover device. Error %d.", errors);
+ m->current_pgpath = NULL;
+ m->current_pg = NULL;
+ }
} else if (!m->pg_init_required) {
m->queue_io = 0;
pg->bypassed = 0;
}
- m->pg_init_in_progress = 0;
- queue_work(kmultipathd, &m->process_queued_ios);
+ m->pg_init_in_progress--;
+ if (!m->pg_init_in_progress)
+ queue_work(kmultipathd, &m->process_queued_ios);
spin_unlock_irqrestore(&m->lock, flags);
}
static void activate_path(struct work_struct *work)
{
int ret;
- struct multipath *m =
- container_of(work, struct multipath, activate_path);
- struct dm_path *path;
- unsigned long flags;
+ struct pgpath *pgpath =
+ container_of(work, struct pgpath, activate_path);
- spin_lock_irqsave(&m->lock, flags);
- path = &m->pgpath_to_activate->path;
- m->pgpath_to_activate = NULL;
- spin_unlock_irqrestore(&m->lock, flags);
- if (!path)
- return;
- ret = scsi_dh_activate(bdev_get_queue(path->dev->bdev));
- pg_init_done(path, ret);
+ ret = scsi_dh_activate(bdev_get_queue(pgpath->path.dev->bdev));
+ pg_init_done(&pgpath->path, ret);
}
/*
* end_io handling
*/
-static int do_end_io(struct multipath *m, struct bio *bio,
+static int do_end_io(struct multipath *m, struct request *clone,
int error, struct dm_mpath_io *mpio)
{
+ /*
+ * We don't queue any clone request inside the multipath target
+ * during end I/O handling, since those clone requests don't have
+ * bio clones. If we queue them inside the multipath target,
+ * we need to make bio clones, that requires memory allocation.
+ * (See drivers/md/dm.c:end_clone_bio() about why the clone requests
+ * don't have bio clones.)
+ * Instead of queueing the clone request here, we queue the original
+ * request into dm core, which will remake a clone request and
+ * clone bios for it and resubmit it later.
+ */
+ int r = DM_ENDIO_REQUEUE;
unsigned long flags;
- if (!error)
+ if (!error && !clone->errors)
return 0; /* I/O complete */
- if ((error == -EWOULDBLOCK) && bio_rw_ahead(bio))
- return error;
-
if (error == -EOPNOTSUPP)
return error;
- spin_lock_irqsave(&m->lock, flags);
- if (!m->nr_valid_paths) {
- if (__must_push_back(m)) {
- spin_unlock_irqrestore(&m->lock, flags);
- return DM_ENDIO_REQUEUE;
- } else if (!m->queue_if_no_path) {
- spin_unlock_irqrestore(&m->lock, flags);
- return -EIO;
- } else {
- spin_unlock_irqrestore(&m->lock, flags);
- goto requeue;
- }
- }
- spin_unlock_irqrestore(&m->lock, flags);
-
if (mpio->pgpath)
fail_path(mpio->pgpath);
- requeue:
- dm_bio_restore(&mpio->details, bio);
-
- /* queue for the daemon to resubmit or fail */
spin_lock_irqsave(&m->lock, flags);
- bio_list_add(&m->queued_ios, bio);
- m->queue_size++;
- if (!m->queue_io)
- queue_work(kmultipathd, &m->process_queued_ios);
+ if (!m->nr_valid_paths && !m->queue_if_no_path && !__must_push_back(m))
+ r = -EIO;
spin_unlock_irqrestore(&m->lock, flags);
- return DM_ENDIO_INCOMPLETE; /* io not complete */
+ return r;
}
-static int multipath_end_io(struct dm_target *ti, struct bio *bio,
+static int multipath_end_io(struct dm_target *ti, struct request *clone,
int error, union map_info *map_context)
{
struct multipath *m = ti->private;
@@ -1191,14 +1207,13 @@ static int multipath_end_io(struct dm_target *ti, struct bio *bio,
struct path_selector *ps;
int r;
- r = do_end_io(m, bio, error, mpio);
+ r = do_end_io(m, clone, error, mpio);
if (pgpath) {
ps = &pgpath->pg->ps;
if (ps->type->end_io)
- ps->type->end_io(ps, &pgpath->path);
+ ps->type->end_io(ps, &pgpath->path, mpio->nr_bytes);
}
- if (r != DM_ENDIO_INCOMPLETE)
- mempool_free(mpio, m->mpio_pool);
+ mempool_free(mpio, m->mpio_pool);
return r;
}
@@ -1411,7 +1426,7 @@ static int multipath_ioctl(struct dm_target *ti, unsigned int cmd,
spin_lock_irqsave(&m->lock, flags);
if (!m->current_pgpath)
- __choose_pgpath(m);
+ __choose_pgpath(m, 0);
if (m->current_pgpath) {
bdev = m->current_pgpath->path.dev->bdev;
@@ -1428,22 +1443,113 @@ static int multipath_ioctl(struct dm_target *ti, unsigned int cmd,
return r ? : __blkdev_driver_ioctl(bdev, mode, cmd, arg);
}
+static int multipath_iterate_devices(struct dm_target *ti,
+ iterate_devices_callout_fn fn, void *data)
+{
+ struct multipath *m = ti->private;
+ struct priority_group *pg;
+ struct pgpath *p;
+ int ret = 0;
+
+ list_for_each_entry(pg, &m->priority_groups, list) {
+ list_for_each_entry(p, &pg->pgpaths, list) {
+ ret = fn(ti, p->path.dev, ti->begin, data);
+ if (ret)
+ goto out;
+ }
+ }
+
+out:
+ return ret;
+}
+
+static int __pgpath_busy(struct pgpath *pgpath)
+{
+ struct request_queue *q = bdev_get_queue(pgpath->path.dev->bdev);
+
+ return dm_underlying_device_busy(q);
+}
+
+/*
+ * We return "busy", only when we can map I/Os but underlying devices
+ * are busy (so even if we map I/Os now, the I/Os will wait on
+ * the underlying queue).
+ * In other words, if we want to kill I/Os or queue them inside us
+ * due to map unavailability, we don't return "busy". Otherwise,
+ * dm core won't give us the I/Os and we can't do what we want.
+ */
+static int multipath_busy(struct dm_target *ti)
+{
+ int busy = 0, has_active = 0;
+ struct multipath *m = ti->private;
+ struct priority_group *pg;
+ struct pgpath *pgpath;
+ unsigned long flags;
+
+ spin_lock_irqsave(&m->lock, flags);
+
+ /* Guess which priority_group will be used at next mapping time */
+ if (unlikely(!m->current_pgpath && m->next_pg))
+ pg = m->next_pg;
+ else if (likely(m->current_pg))
+ pg = m->current_pg;
+ else
+ /*
+ * We don't know which pg will be used at next mapping time.
+ * We don't call __choose_pgpath() here to avoid triggering
+ * pg_init just by busy checking.
+ * So we don't know whether underlying devices we will be using
+ * at next mapping time are busy or not. Just try mapping.
+ */
+ goto out;
+
+ /*
+ * If there is at least one non-busy active path, the path selector
+ * will be able to select it. So we consider such a pg as not busy.
+ */
+ busy = 1;
+ list_for_each_entry(pgpath, &pg->pgpaths, list)
+ if (pgpath->is_active) {
+ has_active = 1;
+
+ if (!__pgpath_busy(pgpath)) {
+ busy = 0;
+ break;
+ }
+ }
+
+ if (!has_active)
+ /*
+ * No active path in this pg, so this pg won't be used and
+ * the current_pg will be changed at next mapping time.
+ * We need to try mapping to determine it.
+ */
+ busy = 0;
+
+out:
+ spin_unlock_irqrestore(&m->lock, flags);
+
+ return busy;
+}
+
/*-----------------------------------------------------------------
* Module setup
*---------------------------------------------------------------*/
static struct target_type multipath_target = {
.name = "multipath",
- .version = {1, 0, 5},
+ .version = {1, 1, 0},
.module = THIS_MODULE,
.ctr = multipath_ctr,
.dtr = multipath_dtr,
- .map = multipath_map,
- .end_io = multipath_end_io,
+ .map_rq = multipath_map,
+ .rq_end_io = multipath_end_io,
.presuspend = multipath_presuspend,
.resume = multipath_resume,
.status = multipath_status,
.message = multipath_message,
.ioctl = multipath_ioctl,
+ .iterate_devices = multipath_iterate_devices,
+ .busy = multipath_busy,
};
static int __init dm_multipath_init(void)
diff --git a/drivers/md/dm-path-selector.h b/drivers/md/dm-path-selector.h
index 27357b85d73..e7d1fa8b045 100644
--- a/drivers/md/dm-path-selector.h
+++ b/drivers/md/dm-path-selector.h
@@ -56,7 +56,8 @@ struct path_selector_type {
* the path fails.
*/
struct dm_path *(*select_path) (struct path_selector *ps,
- unsigned *repeat_count);
+ unsigned *repeat_count,
+ size_t nr_bytes);
/*
* Notify the selector that a path has failed.
@@ -75,7 +76,10 @@ struct path_selector_type {
int (*status) (struct path_selector *ps, struct dm_path *path,
status_type_t type, char *result, unsigned int maxlen);
- int (*end_io) (struct path_selector *ps, struct dm_path *path);
+ int (*start_io) (struct path_selector *ps, struct dm_path *path,
+ size_t nr_bytes);
+ int (*end_io) (struct path_selector *ps, struct dm_path *path,
+ size_t nr_bytes);
};
/* Register a path selector */
diff --git a/drivers/md/dm-queue-length.c b/drivers/md/dm-queue-length.c
new file mode 100644
index 00000000000..f92b6cea9d9
--- /dev/null
+++ b/drivers/md/dm-queue-length.c
@@ -0,0 +1,263 @@
+/*
+ * Copyright (C) 2004-2005 IBM Corp. All Rights Reserved.
+ * Copyright (C) 2006-2009 NEC Corporation.
+ *
+ * dm-queue-length.c
+ *
+ * Module Author: Stefan Bader, IBM
+ * Modified by: Kiyoshi Ueda, NEC
+ *
+ * This file is released under the GPL.
+ *
+ * queue-length path selector - choose a path with the least number of
+ * in-flight I/Os.
+ */
+
+#include "dm.h"
+#include "dm-path-selector.h"
+
+#include <linux/slab.h>
+#include <linux/ctype.h>
+#include <linux/errno.h>
+#include <linux/module.h>
+#include <asm/atomic.h>
+
+#define DM_MSG_PREFIX "multipath queue-length"
+#define QL_MIN_IO 128
+#define QL_VERSION "0.1.0"
+
+struct selector {
+ struct list_head valid_paths;
+ struct list_head failed_paths;
+};
+
+struct path_info {
+ struct list_head list;
+ struct dm_path *path;
+ unsigned repeat_count;
+ atomic_t qlen; /* the number of in-flight I/Os */
+};
+
+static struct selector *alloc_selector(void)
+{
+ struct selector *s = kmalloc(sizeof(*s), GFP_KERNEL);
+
+ if (s) {
+ INIT_LIST_HEAD(&s->valid_paths);
+ INIT_LIST_HEAD(&s->failed_paths);
+ }
+
+ return s;
+}
+
+static int ql_create(struct path_selector *ps, unsigned argc, char **argv)
+{
+ struct selector *s = alloc_selector();
+
+ if (!s)
+ return -ENOMEM;
+
+ ps->context = s;
+ return 0;
+}
+
+static void ql_free_paths(struct list_head *paths)
+{
+ struct path_info *pi, *next;
+
+ list_for_each_entry_safe(pi, next, paths, list) {
+ list_del(&pi->list);
+ kfree(pi);
+ }
+}
+
+static void ql_destroy(struct path_selector *ps)
+{
+ struct selector *s = ps->context;
+
+ ql_free_paths(&s->valid_paths);
+ ql_free_paths(&s->failed_paths);
+ kfree(s);
+ ps->context = NULL;
+}
+
+static int ql_status(struct path_selector *ps, struct dm_path *path,
+ status_type_t type, char *result, unsigned maxlen)
+{
+ unsigned sz = 0;
+ struct path_info *pi;
+
+ /* When called with NULL path, return selector status/args. */
+ if (!path)
+ DMEMIT("0 ");
+ else {
+ pi = path->pscontext;
+
+ switch (type) {
+ case STATUSTYPE_INFO:
+ DMEMIT("%d ", atomic_read(&pi->qlen));
+ break;
+ case STATUSTYPE_TABLE:
+ DMEMIT("%u ", pi->repeat_count);
+ break;
+ }
+ }
+
+ return sz;
+}
+
+static int ql_add_path(struct path_selector *ps, struct dm_path *path,
+ int argc, char **argv, char **error)
+{
+ struct selector *s = ps->context;
+ struct path_info *pi;
+ unsigned repeat_count = QL_MIN_IO;
+
+ /*
+ * Arguments: [<repeat_count>]
+ * <repeat_count>: The number of I/Os before switching path.
+ * If not given, default (QL_MIN_IO) is used.
+ */
+ if (argc > 1) {
+ *error = "queue-length ps: incorrect number of arguments";
+ return -EINVAL;
+ }
+
+ if ((argc == 1) && (sscanf(argv[0], "%u", &repeat_count) != 1)) {
+ *error = "queue-length ps: invalid repeat count";
+ return -EINVAL;
+ }
+
+ /* Allocate the path information structure */
+ pi = kmalloc(sizeof(*pi), GFP_KERNEL);
+ if (!pi) {
+ *error = "queue-length ps: Error allocating path information";
+ return -ENOMEM;
+ }
+
+ pi->path = path;
+ pi->repeat_count = repeat_count;
+ atomic_set(&pi->qlen, 0);
+
+ path->pscontext = pi;
+
+ list_add_tail(&pi->list, &s->valid_paths);
+
+ return 0;
+}
+
+static void ql_fail_path(struct path_selector *ps, struct dm_path *path)
+{
+ struct selector *s = ps->context;
+ struct path_info *pi = path->pscontext;
+
+ list_move(&pi->list, &s->failed_paths);
+}
+
+static int ql_reinstate_path(struct path_selector *ps, struct dm_path *path)
+{
+ struct selector *s = ps->context;
+ struct path_info *pi = path->pscontext;
+
+ list_move_tail(&pi->list, &s->valid_paths);
+
+ return 0;
+}
+
+/*
+ * Select a path having the minimum number of in-flight I/Os
+ */
+static struct dm_path *ql_select_path(struct path_selector *ps,
+ unsigned *repeat_count, size_t nr_bytes)
+{
+ struct selector *s = ps->context;
+ struct path_info *pi = NULL, *best = NULL;
+
+ if (list_empty(&s->valid_paths))
+ return NULL;
+
+ /* Change preferred (first in list) path to evenly balance. */
+ list_move_tail(s->valid_paths.next, &s->valid_paths);
+
+ list_for_each_entry(pi, &s->valid_paths, list) {
+ if (!best ||
+ (atomic_read(&pi->qlen) < atomic_read(&best->qlen)))
+ best = pi;
+
+ if (!atomic_read(&best->qlen))
+ break;
+ }
+
+ if (!best)
+ return NULL;
+
+ *repeat_count = best->repeat_count;
+
+ return best->path;
+}
+
+static int ql_start_io(struct path_selector *ps, struct dm_path *path,
+ size_t nr_bytes)
+{
+ struct path_info *pi = path->pscontext;
+
+ atomic_inc(&pi->qlen);
+
+ return 0;
+}
+
+static int ql_end_io(struct path_selector *ps, struct dm_path *path,
+ size_t nr_bytes)
+{
+ struct path_info *pi = path->pscontext;
+
+ atomic_dec(&pi->qlen);
+
+ return 0;
+}
+
+static struct path_selector_type ql_ps = {
+ .name = "queue-length",
+ .module = THIS_MODULE,
+ .table_args = 1,
+ .info_args = 1,
+ .create = ql_create,
+ .destroy = ql_destroy,
+ .status = ql_status,
+ .add_path = ql_add_path,
+ .fail_path = ql_fail_path,
+ .reinstate_path = ql_reinstate_path,
+ .select_path = ql_select_path,
+ .start_io = ql_start_io,
+ .end_io = ql_end_io,
+};
+
+static int __init dm_ql_init(void)
+{
+ int r = dm_register_path_selector(&ql_ps);
+
+ if (r < 0)
+ DMERR("register failed %d", r);
+
+ DMINFO("version " QL_VERSION " loaded");
+
+ return r;
+}
+
+static void __exit dm_ql_exit(void)
+{
+ int r = dm_unregister_path_selector(&ql_ps);
+
+ if (r < 0)
+ DMERR("unregister failed %d", r);
+}
+
+module_init(dm_ql_init);
+module_exit(dm_ql_exit);
+
+MODULE_AUTHOR("Stefan Bader <Stefan.Bader at de.ibm.com>");
+MODULE_DESCRIPTION(
+ "(C) Copyright IBM Corp. 2004,2005 All Rights Reserved.\n"
+ DM_NAME " path selector to balance the number of in-flight I/Os"
+);
+MODULE_LICENSE("GPL");
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
index 076fbb4e967..ce8868c768c 100644
--- a/drivers/md/dm-raid1.c
+++ b/drivers/md/dm-raid1.c
@@ -1283,9 +1283,23 @@ static int mirror_status(struct dm_target *ti, status_type_t type,
return 0;
}
+static int mirror_iterate_devices(struct dm_target *ti,
+ iterate_devices_callout_fn fn, void *data)
+{
+ struct mirror_set *ms = ti->private;
+ int ret = 0;
+ unsigned i;
+
+ for (i = 0; !ret && i < ms->nr_mirrors; i++)
+ ret = fn(ti, ms->mirror[i].dev,
+ ms->mirror[i].offset, data);
+
+ return ret;
+}
+
static struct target_type mirror_target = {
.name = "mirror",
- .version = {1, 0, 20},
+ .version = {1, 12, 0},
.module = THIS_MODULE,
.ctr = mirror_ctr,
.dtr = mirror_dtr,
@@ -1295,6 +1309,7 @@ static struct target_type mirror_target = {
.postsuspend = mirror_postsuspend,
.resume = mirror_resume,
.status = mirror_status,
+ .iterate_devices = mirror_iterate_devices,
};
static int __init dm_mirror_init(void)
diff --git a/drivers/md/dm-region-hash.c b/drivers/md/dm-region-hash.c
index 7b899be0b08..36dbe29f2fd 100644
--- a/drivers/md/dm-region-hash.c
+++ b/drivers/md/dm-region-hash.c
@@ -283,7 +283,7 @@ static struct dm_region *__rh_alloc(struct dm_region_hash *rh, region_t region)
nreg = mempool_alloc(rh->region_pool, GFP_ATOMIC);
if (unlikely(!nreg))
- nreg = kmalloc(sizeof(*nreg), GFP_NOIO);
+ nreg = kmalloc(sizeof(*nreg), GFP_NOIO | __GFP_NOFAIL);
nreg->state = rh->log->type->in_sync(rh->log, region, 1) ?
DM_RH_CLEAN : DM_RH_NOSYNC;
diff --git a/drivers/md/dm-round-robin.c b/drivers/md/dm-round-robin.c
index cdfbf65b28c..24752f449be 100644
--- a/drivers/md/dm-round-robin.c
+++ b/drivers/md/dm-round-robin.c
@@ -161,7 +161,7 @@ static int rr_reinstate_path(struct path_selector *ps, struct dm_path *p)
}
static struct dm_path *rr_select_path(struct path_selector *ps,
- unsigned *repeat_count)
+ unsigned *repeat_count, size_t nr_bytes)
{
struct selector *s = (struct selector *) ps->context;
struct path_info *pi = NULL;
diff --git a/drivers/md/dm-service-time.c b/drivers/md/dm-service-time.c
new file mode 100644
index 00000000000..cfa668f46c4
--- /dev/null
+++ b/drivers/md/dm-service-time.c
@@ -0,0 +1,339 @@
+/*
+ * Copyright (C) 2007-2009 NEC Corporation. All Rights Reserved.
+ *
+ * Module Author: Kiyoshi Ueda
+ *
+ * This file is released under the GPL.
+ *
+ * Throughput oriented path selector.
+ */
+
+#include "dm.h"
+#include "dm-path-selector.h"
+
+#define DM_MSG_PREFIX "multipath service-time"
+#define ST_MIN_IO 1
+#define ST_MAX_RELATIVE_THROUGHPUT 100
+#define ST_MAX_RELATIVE_THROUGHPUT_SHIFT 7
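+/*
+ * In-flight sizes at or above this threshold could overflow size_t when
+ * multiplied by a relative_throughput value (at most
+ * 1 << ST_MAX_RELATIVE_THROUGHPUT_SHIFT), so st_compare_load() shifts
+ * them down before multiplying.
+ */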
+#define ST_MAX_INFLIGHT_SIZE ((size_t)-1 >> ST_MAX_RELATIVE_THROUGHPUT_SHIFT)
+#define ST_VERSION "0.2.0"
+
+struct selector {
+ struct list_head valid_paths;
+ struct list_head failed_paths;
+};
+
+struct path_info {
+ struct list_head list;
+ struct dm_path *path;
+ unsigned repeat_count;
+ unsigned relative_throughput;
+ atomic_t in_flight_size; /* Total size of in-flight I/Os */
+};
+
+static struct selector *alloc_selector(void)
+{
+ struct selector *s = kmalloc(sizeof(*s), GFP_KERNEL);
+
+ if (s) {
+ INIT_LIST_HEAD(&s->valid_paths);
+ INIT_LIST_HEAD(&s->failed_paths);
+ }
+
+ return s;
+}
+
+static int st_create(struct path_selector *ps, unsigned argc, char **argv)
+{
+ struct selector *s = alloc_selector();
+
+ if (!s)
+ return -ENOMEM;
+
+ ps->context = s;
+ return 0;
+}
+
+static void free_paths(struct list_head *paths)
+{
+ struct path_info *pi, *next;
+
+ list_for_each_entry_safe(pi, next, paths, list) {
+ list_del(&pi->list);
+ kfree(pi);
+ }
+}
+
+static void st_destroy(struct path_selector *ps)
+{
+ struct selector *s = ps->context;
+
+ free_paths(&s->valid_paths);
+ free_paths(&s->failed_paths);
+ kfree(s);
+ ps->context = NULL;
+}
+
+static int st_status(struct path_selector *ps, struct dm_path *path,
+ status_type_t type, char *result, unsigned maxlen)
+{
+ unsigned sz = 0;
+ struct path_info *pi;
+
+ if (!path)
+ DMEMIT("0 ");
+ else {
+ pi = path->pscontext;
+
+ switch (type) {
+ case STATUSTYPE_INFO:
+ DMEMIT("%d %u ", atomic_read(&pi->in_flight_size),
+ pi->relative_throughput);
+ break;
+ case STATUSTYPE_TABLE:
+ DMEMIT("%u %u ", pi->repeat_count,
+ pi->relative_throughput);
+ break;
+ }
+ }
+
+ return sz;
+}
+
+static int st_add_path(struct path_selector *ps, struct dm_path *path,
+ int argc, char **argv, char **error)
+{
+ struct selector *s = ps->context;
+ struct path_info *pi;
+ unsigned repeat_count = ST_MIN_IO;
+ unsigned relative_throughput = 1;
+
+ /*
+ * Arguments: [<repeat_count> [<relative_throughput>]]
+ * <repeat_count>: The number of I/Os before switching path.
+ * If not given, default (ST_MIN_IO) is used.
+ * <relative_throughput>: The relative throughput value of
+ * the path among all paths in the path-group.
+ * The valid range: 0-<ST_MAX_RELATIVE_THROUGHPUT>
+ * If not given, minimum value '1' is used.
+ * If '0' is given, the path isn't selected while
+ * other paths having a positive value are
+ * available.
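+ *
+ * Example (illustrative): the path arguments "100 2" make the selector
+ * reuse this path for 100 I/Os before re-selecting and weight it with
+ * a relative throughput of 2.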
+ */
+ if (argc > 2) {
+ *error = "service-time ps: incorrect number of arguments";
+ return -EINVAL;
+ }
+
+ if (argc && (sscanf(argv[0], "%u", &repeat_count) != 1)) {
+ *error = "service-time ps: invalid repeat count";
+ return -EINVAL;
+ }
+
+ if ((argc == 2) &&
+ (sscanf(argv[1], "%u", &relative_throughput) != 1 ||
+ relative_throughput > ST_MAX_RELATIVE_THROUGHPUT)) {
+ *error = "service-time ps: invalid relative_throughput value";
+ return -EINVAL;
+ }
+
+ /* allocate the path */
+ pi = kmalloc(sizeof(*pi), GFP_KERNEL);
+ if (!pi) {
+ *error = "service-time ps: Error allocating path context";
+ return -ENOMEM;
+ }
+
+ pi->path = path;
+ pi->repeat_count = repeat_count;
+ pi->relative_throughput = relative_throughput;
+ atomic_set(&pi->in_flight_size, 0);
+
+ path->pscontext = pi;
+
+ list_add_tail(&pi->list, &s->valid_paths);
+
+ return 0;
+}
+
+static void st_fail_path(struct path_selector *ps, struct dm_path *path)
+{
+ struct selector *s = ps->context;
+ struct path_info *pi = path->pscontext;
+
+ list_move(&pi->list, &s->failed_paths);
+}
+
+static int st_reinstate_path(struct path_selector *ps, struct dm_path *path)
+{
+ struct selector *s = ps->context;
+ struct path_info *pi = path->pscontext;
+
+ list_move_tail(&pi->list, &s->valid_paths);
+
+ return 0;
+}
+
+/*
+ * Compare the estimated service time of 2 paths, pi1 and pi2,
+ * for the incoming I/O.
+ *
+ * Returns:
+ * < 0 : pi1 is better
+ * 0 : no difference between pi1 and pi2
+ * > 0 : pi2 is better
+ *
+ * Description:
+ * Basically, the service time is estimated by:
+ * ('pi->in-flight-size' + 'incoming') / 'pi->relative_throughput'
+ * To reduce the calculation, some optimizations are made.
+ * (See comments inline)
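+ *
+ * Illustrative example (made-up numbers): pi1 with in_flight_size 100
+ * and relative_throughput 10, pi2 with in_flight_size 10 and
+ * relative_throughput 1, incoming size 20:
+ *   pi1: (100 + 20) / 10 = 12
+ *   pi2: (10 + 20) / 1   = 30
+ * so pi1 (faster but more loaded) is preferred.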
+ */
+static int st_compare_load(struct path_info *pi1, struct path_info *pi2,
+ size_t incoming)
+{
+ size_t sz1, sz2, st1, st2;
+
+ sz1 = atomic_read(&pi1->in_flight_size);
+ sz2 = atomic_read(&pi2->in_flight_size);
+
+ /*
+ * Case 1: Both have same throughput value. Choose less loaded path.
+ */
+ if (pi1->relative_throughput == pi2->relative_throughput)
+ return sz1 - sz2;
+
+ /*
+ * Case 2a: Both have same load. Choose higher throughput path.
+ * Case 2b: One path has no throughput value. Choose the other one.
+ */
+ if (sz1 == sz2 ||
+ !pi1->relative_throughput || !pi2->relative_throughput)
+ return pi2->relative_throughput - pi1->relative_throughput;
+
+ /*
+ * Case 3: Calculate service time. Choose faster path.
+ * Service time using pi1:
+ * st1 = (sz1 + incoming) / pi1->relative_throughput
+ * Service time using pi2:
+ * st2 = (sz2 + incoming) / pi2->relative_throughput
+ *
+ * To avoid the division, transform the expression to use
+ * multiplication.
+ * Because ->relative_throughput > 0 here, if st1 < st2,
+ * the following expressions are equivalent:
+ * (sz1 + incoming) / pi1->relative_throughput <
+ * (sz2 + incoming) / pi2->relative_throughput
+ * (sz1 + incoming) * pi2->relative_throughput <
+ * (sz2 + incoming) * pi1->relative_throughput
+ * So use the latter one.
+ */
+ sz1 += incoming;
+ sz2 += incoming;
+ if (unlikely(sz1 >= ST_MAX_INFLIGHT_SIZE ||
+ sz2 >= ST_MAX_INFLIGHT_SIZE)) {
+ /*
+ * The sizes may be too big to multiply by pi->relative_throughput
+ * without overflowing.
+ * To avoid the overflow and mis-selection, shift both down.
+ */
+ sz1 >>= ST_MAX_RELATIVE_THROUGHPUT_SHIFT;
+ sz2 >>= ST_MAX_RELATIVE_THROUGHPUT_SHIFT;
+ }
+ st1 = sz1 * pi2->relative_throughput;
+ st2 = sz2 * pi1->relative_throughput;
+ if (st1 != st2)
+ return st1 - st2;
+
+ /*
+ * Case 4: Service time is equal. Choose higher throughput path.
+ */
+ return pi2->relative_throughput - pi1->relative_throughput;
+}
+
+static struct dm_path *st_select_path(struct path_selector *ps,
+ unsigned *repeat_count, size_t nr_bytes)
+{
+ struct selector *s = ps->context;
+ struct path_info *pi = NULL, *best = NULL;
+
+ if (list_empty(&s->valid_paths))
+ return NULL;
+
+ /* Change preferred (first in list) path to evenly balance. */
+ list_move_tail(s->valid_paths.next, &s->valid_paths);
+
+ list_for_each_entry(pi, &s->valid_paths, list)
+ if (!best || (st_compare_load(pi, best, nr_bytes) < 0))
+ best = pi;
+
+ if (!best)
+ return NULL;
+
+ *repeat_count = best->repeat_count;
+
+ return best->path;
+}
+
+static int st_start_io(struct path_selector *ps, struct dm_path *path,
+ size_t nr_bytes)
+{
+ struct path_info *pi = path->pscontext;
+
+ atomic_add(nr_bytes, &pi->in_flight_size);
+
+ return 0;
+}
+
+static int st_end_io(struct path_selector *ps, struct dm_path *path,
+ size_t nr_bytes)
+{
+ struct path_info *pi = path->pscontext;
+
+ atomic_sub(nr_bytes, &pi->in_flight_size);
+
+ return 0;
+}
+
+static struct path_selector_type st_ps = {
+ .name = "service-time",
+ .module = THIS_MODULE,
+ .table_args = 2,
+ .info_args = 2,
+ .create = st_create,
+ .destroy = st_destroy,
+ .status = st_status,
+ .add_path = st_add_path,
+ .fail_path = st_fail_path,
+ .reinstate_path = st_reinstate_path,
+ .select_path = st_select_path,
+ .start_io = st_start_io,
+ .end_io = st_end_io,
+};
+
+static int __init dm_st_init(void)
+{
+ int r = dm_register_path_selector(&st_ps);
+
+ if (r < 0)
+ DMERR("register failed %d", r);
+
+ DMINFO("version " ST_VERSION " loaded");
+
+ return r;
+}
+
+static void __exit dm_st_exit(void)
+{
+ int r = dm_unregister_path_selector(&st_ps);
+
+ if (r < 0)
+ DMERR("unregister failed %d", r);
+}
+
+module_init(dm_st_init);
+module_exit(dm_st_exit);
+
+MODULE_DESCRIPTION(DM_NAME " throughput oriented path selector");
+MODULE_AUTHOR("Kiyoshi Ueda <k-ueda@ct.jp.nec.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/md/dm-snap-persistent.c b/drivers/md/dm-snap-persistent.c
index 2662a41337e..6e3fe4f1493 100644
--- a/drivers/md/dm-snap-persistent.c
+++ b/drivers/md/dm-snap-persistent.c
@@ -636,7 +636,7 @@ static void persistent_commit_exception(struct dm_exception_store *store,
/*
* Commit exceptions to disk.
*/
- if (ps->valid && area_io(ps, WRITE))
+ if (ps->valid && area_io(ps, WRITE_BARRIER))
ps->valid = 0;
/*
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
index d73f17fc777..d573165cd2b 100644
--- a/drivers/md/dm-snap.c
+++ b/drivers/md/dm-snap.c
@@ -678,6 +678,7 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
ti->private = s;
ti->split_io = s->store->chunk_size;
+ ti->num_flush_requests = 1;
return 0;
@@ -1030,6 +1031,11 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio,
chunk_t chunk;
struct dm_snap_pending_exception *pe = NULL;
+ if (unlikely(bio_empty_barrier(bio))) {
+ bio->bi_bdev = s->store->cow->bdev;
+ return DM_MAPIO_REMAPPED;
+ }
+
chunk = sector_to_chunk(s->store, bio->bi_sector);
/* Full snapshots are not usable */
@@ -1338,6 +1344,8 @@ static int origin_ctr(struct dm_target *ti, unsigned int argc, char **argv)
}
ti->private = dev;
+ ti->num_flush_requests = 1;
+
return 0;
}
@@ -1353,6 +1361,9 @@ static int origin_map(struct dm_target *ti, struct bio *bio,
struct dm_dev *dev = ti->private;
bio->bi_bdev = dev->bdev;
+ if (unlikely(bio_empty_barrier(bio)))
+ return DM_MAPIO_REMAPPED;
+
/* Only tell snapshots if this is a write */
return (bio_rw(bio) == WRITE) ? do_origin(dev, bio) : DM_MAPIO_REMAPPED;
}
diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
index 41569bc60ab..b240e85ae39 100644
--- a/drivers/md/dm-stripe.c
+++ b/drivers/md/dm-stripe.c
@@ -167,6 +167,7 @@ static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv)
sc->stripes = stripes;
sc->stripe_width = width;
ti->split_io = chunk_size;
+ ti->num_flush_requests = stripes;
sc->chunk_mask = ((sector_t) chunk_size) - 1;
for (sc->chunk_shift = 0; chunk_size; sc->chunk_shift++)
@@ -211,10 +212,18 @@ static int stripe_map(struct dm_target *ti, struct bio *bio,
union map_info *map_context)
{
struct stripe_c *sc = (struct stripe_c *) ti->private;
+ sector_t offset, chunk;
+ uint32_t stripe;
- sector_t offset = bio->bi_sector - ti->begin;
- sector_t chunk = offset >> sc->chunk_shift;
- uint32_t stripe = sector_div(chunk, sc->stripes);
+ if (unlikely(bio_empty_barrier(bio))) {
+ BUG_ON(map_context->flush_request >= sc->stripes);
+ bio->bi_bdev = sc->stripe[map_context->flush_request].dev->bdev;
+ return DM_MAPIO_REMAPPED;
+ }
+
+ offset = bio->bi_sector - ti->begin;
+ chunk = offset >> sc->chunk_shift;
+ stripe = sector_div(chunk, sc->stripes);
bio->bi_bdev = sc->stripe[stripe].dev->bdev;
bio->bi_sector = sc->stripe[stripe].physical_start +
@@ -304,15 +313,31 @@ static int stripe_end_io(struct dm_target *ti, struct bio *bio,
return error;
}
+static int stripe_iterate_devices(struct dm_target *ti,
+ iterate_devices_callout_fn fn, void *data)
+{
+ struct stripe_c *sc = ti->private;
+ int ret = 0;
+ unsigned i = 0;
+
+ do
+ ret = fn(ti, sc->stripe[i].dev,
+ sc->stripe[i].physical_start, data);
+ while (!ret && ++i < sc->stripes);
+
+ return ret;
+}
+
static struct target_type stripe_target = {
.name = "striped",
- .version = {1, 1, 0},
+ .version = {1, 2, 0},
.module = THIS_MODULE,
.ctr = stripe_ctr,
.dtr = stripe_dtr,
.map = stripe_map,
.end_io = stripe_end_io,
.status = stripe_status,
+ .iterate_devices = stripe_iterate_devices,
};
int __init dm_stripe_init(void)
diff --git a/drivers/md/dm-sysfs.c b/drivers/md/dm-sysfs.c
index a2a45e6c7c8..4b045903a4e 100644
--- a/drivers/md/dm-sysfs.c
+++ b/drivers/md/dm-sysfs.c
@@ -57,12 +57,21 @@ static ssize_t dm_attr_uuid_show(struct mapped_device *md, char *buf)
return strlen(buf);
}
+static ssize_t dm_attr_suspended_show(struct mapped_device *md, char *buf)
+{
+ sprintf(buf, "%d\n", dm_suspended(md));
+
+ return strlen(buf);
+}
+
static DM_ATTR_RO(name);
static DM_ATTR_RO(uuid);
+static DM_ATTR_RO(suspended);
static struct attribute *dm_attrs[] = {
&dm_attr_name.attr,
&dm_attr_uuid.attr,
+ &dm_attr_suspended.attr,
NULL,
};
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index e9a73bb242b..2cba557d9e6 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -41,6 +41,7 @@
struct dm_table {
struct mapped_device *md;
atomic_t holders;
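+ /* DM_TYPE_BIO_BASED or DM_TYPE_REQUEST_BASED; set by dm_table_set_type() */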
+ unsigned type;
/* btree table */
unsigned int depth;
@@ -62,15 +63,11 @@ struct dm_table {
/* a list of devices used by this table */
struct list_head devices;
- /*
- * These are optimistic limits taken from all the
- * targets, some targets will need smaller limits.
- */
- struct io_restrictions limits;
-
/* events get handed up using this callback */
void (*event_fn)(void *);
void *event_context;
+
+ struct dm_md_mempools *mempools;
};
/*
@@ -89,43 +86,6 @@ static unsigned int int_log(unsigned int n, unsigned int base)
}
/*
- * Returns the minimum that is _not_ zero, unless both are zero.
- */
-#define min_not_zero(l, r) (l == 0) ? r : ((r == 0) ? l : min(l, r))
-
-/*
- * Combine two io_restrictions, always taking the lower value.
- */
-static void combine_restrictions_low(struct io_restrictions *lhs,
- struct io_restrictions *rhs)
-{
- lhs->max_sectors =
- min_not_zero(lhs->max_sectors, rhs->max_sectors);
-
- lhs->max_phys_segments =
- min_not_zero(lhs->max_phys_segments, rhs->max_phys_segments);
-
- lhs->max_hw_segments =
- min_not_zero(lhs->max_hw_segments, rhs->max_hw_segments);
-
- lhs->logical_block_size = max(lhs->logical_block_size,
- rhs->logical_block_size);
-
- lhs->max_segment_size =
- min_not_zero(lhs->max_segment_size, rhs->max_segment_size);
-
- lhs->max_hw_sectors =
- min_not_zero(lhs->max_hw_sectors, rhs->max_hw_sectors);
-
- lhs->seg_boundary_mask =
- min_not_zero(lhs->seg_boundary_mask, rhs->seg_boundary_mask);
-
- lhs->bounce_pfn = min_not_zero(lhs->bounce_pfn, rhs->bounce_pfn);
-
- lhs->no_cluster |= rhs->no_cluster;
-}
-
-/*
* Calculate the index of the child node of the n'th node k'th key.
*/
static inline unsigned int get_child(unsigned int n, unsigned int k)
@@ -267,6 +227,8 @@ static void free_devices(struct list_head *devices)
list_for_each_safe(tmp, next, devices) {
struct dm_dev_internal *dd =
list_entry(tmp, struct dm_dev_internal, list);
+ DMWARN("dm_table_destroy: dm_put_device call missing for %s",
+ dd->dm_dev.name);
kfree(dd);
}
}
@@ -296,12 +258,10 @@ void dm_table_destroy(struct dm_table *t)
vfree(t->highs);
/* free the device list */
- if (t->devices.next != &t->devices) {
- DMWARN("devices still present during destroy: "
- "dm_table_remove_device calls missing");
-
+ if (t->devices.next != &t->devices)
free_devices(&t->devices);
- }
+
+ dm_free_md_mempools(t->mempools);
kfree(t);
}
@@ -385,15 +345,48 @@ static void close_dev(struct dm_dev_internal *d, struct mapped_device *md)
/*
* If possible, this checks an area of a destination device is valid.
*/
-static int check_device_area(struct dm_dev_internal *dd, sector_t start,
- sector_t len)
+static int device_area_is_valid(struct dm_target *ti, struct dm_dev *dev,
+ sector_t start, void *data)
{
- sector_t dev_size = dd->dm_dev.bdev->bd_inode->i_size >> SECTOR_SHIFT;
+ struct queue_limits *limits = data;
+ struct block_device *bdev = dev->bdev;
+ sector_t dev_size =
+ i_size_read(bdev->bd_inode) >> SECTOR_SHIFT;
+ unsigned short logical_block_size_sectors =
+ limits->logical_block_size >> SECTOR_SHIFT;
+ char b[BDEVNAME_SIZE];
if (!dev_size)
return 1;
- return ((start < dev_size) && (len <= (dev_size - start)));
+ if ((start >= dev_size) || (start + ti->len > dev_size)) {
+ DMWARN("%s: %s too small for target",
+ dm_device_name(ti->table->md), bdevname(bdev, b));
+ return 0;
+ }
+
+ if (logical_block_size_sectors <= 1)
+ return 1;
+
+ if (start & (logical_block_size_sectors - 1)) {
+ DMWARN("%s: start=%llu not aligned to h/w "
+ "logical block size %hu of %s",
+ dm_device_name(ti->table->md),
+ (unsigned long long)start,
+ limits->logical_block_size, bdevname(bdev, b));
+ return 0;
+ }
+
+ if (ti->len & (logical_block_size_sectors - 1)) {
+ DMWARN("%s: len=%llu not aligned to h/w "
+ "logical block size %hu of %s",
+ dm_device_name(ti->table->md),
+ (unsigned long long)ti->len,
+ limits->logical_block_size, bdevname(bdev, b));
+ return 0;
+ }
+
+ return 1;
}
/*
@@ -479,38 +472,32 @@ static int __table_get_device(struct dm_table *t, struct dm_target *ti,
}
atomic_inc(&dd->count);
- if (!check_device_area(dd, start, len)) {
- DMWARN("device %s too small for target", path);
- dm_put_device(ti, &dd->dm_dev);
- return -EINVAL;
- }
-
*result = &dd->dm_dev;
-
return 0;
}
-void dm_set_device_limits(struct dm_target *ti, struct block_device *bdev)
+/*
+ * Returns the minimum that is _not_ zero, unless both are zero.
+ */
+#define min_not_zero(l, r) (l == 0) ? r : ((r == 0) ? l : min(l, r))
+
+int dm_set_device_limits(struct dm_target *ti, struct dm_dev *dev,
+ sector_t start, void *data)
{
+ struct queue_limits *limits = data;
+ struct block_device *bdev = dev->bdev;
struct request_queue *q = bdev_get_queue(bdev);
- struct io_restrictions *rs = &ti->limits;
char b[BDEVNAME_SIZE];
if (unlikely(!q)) {
DMWARN("%s: Cannot set limits for nonexistent device %s",
dm_device_name(ti->table->md), bdevname(bdev, b));
- return;
+ return 0;
}
- /*
- * Combine the device limits low.
- *
- * FIXME: if we move an io_restriction struct
- * into q this would just be a call to
- * combine_restrictions_low()
- */
- rs->max_sectors =
- min_not_zero(rs->max_sectors, queue_max_sectors(q));
+ if (blk_stack_limits(limits, &q->limits, start << 9) < 0)
+ DMWARN("%s: target device %s is misaligned",
+ dm_device_name(ti->table->md), bdevname(bdev, b));
/*
* Check if merge fn is supported.
@@ -519,48 +506,21 @@ void dm_set_device_limits(struct dm_target *ti, struct block_device *bdev)
*/
if (q->merge_bvec_fn && !ti->type->merge)
- rs->max_sectors =
- min_not_zero(rs->max_sectors,
+ limits->max_sectors =
+ min_not_zero(limits->max_sectors,
(unsigned int) (PAGE_SIZE >> 9));
-
- rs->max_phys_segments =
- min_not_zero(rs->max_phys_segments,
- queue_max_phys_segments(q));
-
- rs->max_hw_segments =
- min_not_zero(rs->max_hw_segments, queue_max_hw_segments(q));
-
- rs->logical_block_size = max(rs->logical_block_size,
- queue_logical_block_size(q));
-
- rs->max_segment_size =
- min_not_zero(rs->max_segment_size, queue_max_segment_size(q));
-
- rs->max_hw_sectors =
- min_not_zero(rs->max_hw_sectors, queue_max_hw_sectors(q));
-
- rs->seg_boundary_mask =
- min_not_zero(rs->seg_boundary_mask,
- queue_segment_boundary(q));
-
- rs->bounce_pfn = min_not_zero(rs->bounce_pfn, queue_bounce_pfn(q));
-
- rs->no_cluster |= !test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags);
+ return 0;
}
EXPORT_SYMBOL_GPL(dm_set_device_limits);
int dm_get_device(struct dm_target *ti, const char *path, sector_t start,
sector_t len, fmode_t mode, struct dm_dev **result)
{
- int r = __table_get_device(ti->table, ti, path,
- start, len, mode, result);
-
- if (!r)
- dm_set_device_limits(ti, (*result)->bdev);
-
- return r;
+ return __table_get_device(ti->table, ti, path,
+ start, len, mode, result);
}
+
/*
* Decrement a devices use count and remove it if necessary.
*/
@@ -675,24 +635,78 @@ int dm_split_args(int *argc, char ***argvp, char *input)
return 0;
}
-static void check_for_valid_limits(struct io_restrictions *rs)
+/*
+ * Impose necessary and sufficient conditions on a device's table such
+ * that any incoming bio which respects its logical_block_size can be
+ * processed successfully. If it falls across the boundary between
+ * two or more targets, the size of each piece it gets split into must
+ * be compatible with the logical_block_size of the target processing it.
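+ *
+ * Example (illustrative): with a table-wide logical block size of 4096
+ * bytes (8 sectors), a first target of 12 sectors ends mid-block; an
+ * aligned 8-sector bio crossing that boundary would leave 4 sectors for
+ * the next target, which a 4096-byte logical_block_size cannot accept,
+ * so such a table is rejected.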
+ */
+static int validate_hardware_logical_block_alignment(struct dm_table *table,
+ struct queue_limits *limits)
{
- if (!rs->max_sectors)
- rs->max_sectors = SAFE_MAX_SECTORS;
- if (!rs->max_hw_sectors)
- rs->max_hw_sectors = SAFE_MAX_SECTORS;
- if (!rs->max_phys_segments)
- rs->max_phys_segments = MAX_PHYS_SEGMENTS;
- if (!rs->max_hw_segments)
- rs->max_hw_segments = MAX_HW_SEGMENTS;
- if (!rs->logical_block_size)
- rs->logical_block_size = 1 << SECTOR_SHIFT;
- if (!rs->max_segment_size)
- rs->max_segment_size = MAX_SEGMENT_SIZE;
- if (!rs->seg_boundary_mask)
- rs->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK;
- if (!rs->bounce_pfn)
- rs->bounce_pfn = -1;
+ /*
+ * This function uses arithmetic modulo the logical_block_size
+ * (in units of 512-byte sectors).
+ */
+ unsigned short device_logical_block_size_sects =
+ limits->logical_block_size >> SECTOR_SHIFT;
+
+ /*
+ * Offset of the start of the next table entry, mod logical_block_size.
+ */
+ unsigned short next_target_start = 0;
+
+ /*
+ * Given an aligned bio that extends beyond the end of a
+ * target, how many sectors must the next target handle?
+ */
+ unsigned short remaining = 0;
+
+ struct dm_target *uninitialized_var(ti);
+ struct queue_limits ti_limits;
+ unsigned i = 0;
+
+ /*
+ * Check each entry in the table in turn.
+ */
+ while (i < dm_table_get_num_targets(table)) {
+ ti = dm_table_get_target(table, i++);
+
+ blk_set_default_limits(&ti_limits);
+
+ /* combine all target devices' limits */
+ if (ti->type->iterate_devices)
+ ti->type->iterate_devices(ti, dm_set_device_limits,
+ &ti_limits);
+
+ /*
+ * If the remaining sectors fall entirely within this
+ * table entry, are they compatible with its logical_block_size?
+ */
+ if (remaining < ti->len &&
+ remaining & ((ti_limits.logical_block_size >>
+ SECTOR_SHIFT) - 1))
+ break; /* Error */
+
+ next_target_start =
+ (unsigned short) ((next_target_start + ti->len) &
+ (device_logical_block_size_sects - 1));
+ remaining = next_target_start ?
+ device_logical_block_size_sects - next_target_start : 0;
+ }
+
+ if (remaining) {
+ DMWARN("%s: table line %u (start sect %llu len %llu) "
+ "not aligned to h/w logical block size %hu",
+ dm_device_name(table->md), i,
+ (unsigned long long) ti->begin,
+ (unsigned long long) ti->len,
+ limits->logical_block_size);
+ return -EINVAL;
+ }
+
+ return 0;
}
int dm_table_add_target(struct dm_table *t, const char *type,
@@ -747,9 +761,6 @@ int dm_table_add_target(struct dm_table *t, const char *type,
t->highs[t->num_targets++] = tgt->begin + tgt->len - 1;
- /* FIXME: the plan is to combine high here and then have
- * the merge fn apply the target level restrictions. */
- combine_restrictions_low(&t->limits, &tgt->limits);
return 0;
bad:
@@ -758,6 +769,104 @@ int dm_table_add_target(struct dm_table *t, const char *type,
return r;
}
+int dm_table_set_type(struct dm_table *t)
+{
+ unsigned i;
+ unsigned bio_based = 0, request_based = 0;
+ struct dm_target *tgt;
+ struct dm_dev_internal *dd;
+ struct list_head *devices;
+
+ for (i = 0; i < t->num_targets; i++) {
+ tgt = t->targets + i;
+ if (dm_target_request_based(tgt))
+ request_based = 1;
+ else
+ bio_based = 1;
+
+ if (bio_based && request_based) {
+ DMWARN("Inconsistent table: different target types"
+ " can't be mixed up");
+ return -EINVAL;
+ }
+ }
+
+ if (bio_based) {
+ /* We must use this table as bio-based */
+ t->type = DM_TYPE_BIO_BASED;
+ return 0;
+ }
+
+ BUG_ON(!request_based); /* No targets in this table */
+
+ /* Non-request-stackable devices can't be used for request-based dm */
+ devices = dm_table_get_devices(t);
+ list_for_each_entry(dd, devices, list) {
+ if (!blk_queue_stackable(bdev_get_queue(dd->dm_dev.bdev))) {
+ DMWARN("table load rejected: including"
+ " non-request-stackable devices");
+ return -EINVAL;
+ }
+ }
+
+ /*
+ * Request-based dm supports only tables that have a single target now.
+ * To support multiple targets, request splitting support is needed,
+ * and that needs lots of changes in the block-layer.
+ * (e.g. request completion process for partial completion.)
+ */
+ if (t->num_targets > 1) {
+ DMWARN("Request-based dm doesn't support multiple targets yet");
+ return -EINVAL;
+ }
+
+ t->type = DM_TYPE_REQUEST_BASED;
+
+ return 0;
+}
+
+unsigned dm_table_get_type(struct dm_table *t)
+{
+ return t->type;
+}
+
+bool dm_table_bio_based(struct dm_table *t)
+{
+ return dm_table_get_type(t) == DM_TYPE_BIO_BASED;
+}
+
+bool dm_table_request_based(struct dm_table *t)
+{
+ return dm_table_get_type(t) == DM_TYPE_REQUEST_BASED;
+}
+
+int dm_table_alloc_md_mempools(struct dm_table *t)
+{
+ unsigned type = dm_table_get_type(t);
+
+ if (unlikely(type == DM_TYPE_NONE)) {
+ DMWARN("no table type is set, can't allocate mempools");
+ return -EINVAL;
+ }
+
+ t->mempools = dm_alloc_md_mempools(type);
+ if (!t->mempools)
+ return -ENOMEM;
+
+ return 0;
+}
+
+void dm_table_free_md_mempools(struct dm_table *t)
+{
+ dm_free_md_mempools(t->mempools);
+ t->mempools = NULL;
+}
+
+struct dm_md_mempools *dm_table_get_md_mempools(struct dm_table *t)
+{
+ return t->mempools;
+}
+
static int setup_indexes(struct dm_table *t)
{
int i;
@@ -792,8 +901,6 @@ int dm_table_complete(struct dm_table *t)
int r = 0;
unsigned int leaf_nodes;
- check_for_valid_limits(&t->limits);
-
/* how many indexes will the btree have ? */
leaf_nodes = dm_div_up(t->num_targets, KEYS_PER_NODE);
t->depth = 1 + int_log(leaf_nodes, CHILDREN_PER_NODE);
@@ -869,6 +976,57 @@ struct dm_target *dm_table_find_target(struct dm_table *t, sector_t sector)
}
/*
+ * Establish the new table's queue_limits and validate them.
+ */
+int dm_calculate_queue_limits(struct dm_table *table,
+ struct queue_limits *limits)
+{
+ struct dm_target *uninitialized_var(ti);
+ struct queue_limits ti_limits;
+ unsigned i = 0;
+
+ blk_set_default_limits(limits);
+
+ while (i < dm_table_get_num_targets(table)) {
+ blk_set_default_limits(&ti_limits);
+
+ ti = dm_table_get_target(table, i++);
+
+ if (!ti->type->iterate_devices)
+ goto combine_limits;
+
+ /*
+ * Combine queue limits of all the devices this target uses.
+ */
+ ti->type->iterate_devices(ti, dm_set_device_limits,
+ &ti_limits);
+
+ /*
+ * Check each device area is consistent with the target's
+ * overall queue limits.
+ */
+ if (!ti->type->iterate_devices(ti, device_area_is_valid,
+ &ti_limits))
+ return -EINVAL;
+
+combine_limits:
+ /*
+ * Merge this target's queue limits into the overall limits
+ * for the table.
+ */
+ if (blk_stack_limits(limits, &ti_limits, 0) < 0)
+ DMWARN("%s: target device "
+ "(start sect %llu len %llu) "
+ "is misaligned",
+ dm_device_name(table->md),
+ (unsigned long long) ti->begin,
+ (unsigned long long) ti->len);
+ }
+
+ return validate_hardware_logical_block_alignment(table, limits);
+}
+
+/*
* Set the integrity profile for this device if all devices used have
* matching profiles.
*/
@@ -907,27 +1065,42 @@ no_integrity:
return;
}
-void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q)
+void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
+ struct queue_limits *limits)
{
/*
- * Make sure we obey the optimistic sub devices
- * restrictions.
+ * Each target device in the table has a data area that should normally
+ * be aligned such that the DM device's alignment_offset is 0.
+ * FIXME: Propagate alignment_offsets up the stack and warn of
+ * sub-optimal or inconsistent settings.
+ */
+ limits->alignment_offset = 0;
+ limits->misaligned = 0;
+
+ /*
+ * Copy table's limits to the DM device's request_queue
*/
- blk_queue_max_sectors(q, t->limits.max_sectors);
- blk_queue_max_phys_segments(q, t->limits.max_phys_segments);
- blk_queue_max_hw_segments(q, t->limits.max_hw_segments);
- blk_queue_logical_block_size(q, t->limits.logical_block_size);
- blk_queue_max_segment_size(q, t->limits.max_segment_size);
- blk_queue_max_hw_sectors(q, t->limits.max_hw_sectors);
- blk_queue_segment_boundary(q, t->limits.seg_boundary_mask);
- blk_queue_bounce_limit(q, t->limits.bounce_pfn);
-
- if (t->limits.no_cluster)
+ q->limits = *limits;
+
+ if (limits->no_cluster)
queue_flag_clear_unlocked(QUEUE_FLAG_CLUSTER, q);
else
queue_flag_set_unlocked(QUEUE_FLAG_CLUSTER, q);
dm_table_set_integrity(t);
+
+ /*
+ * QUEUE_FLAG_STACKABLE must be set after all queue settings are
+ * visible to other CPUs because, once the flag is set, incoming bios
+ * are processed by request-based dm, which refers to the queue
+ * settings.
+ * Until the flag is set, bios are passed to bio-based dm and queued to
+ * md->deferred, where queue settings are not needed yet.
+ * Those bios are passed to request-based dm at resume time.
+ */
+ smp_mb();
+ if (dm_table_request_based(t))
+ queue_flag_set_unlocked(QUEUE_FLAG_STACKABLE, q);
}
unsigned int dm_table_get_num_targets(struct dm_table *t)
@@ -1023,6 +1196,20 @@ int dm_table_any_congested(struct dm_table *t, int bdi_bits)
return r;
}
+int dm_table_any_busy_target(struct dm_table *t)
+{
+ unsigned i;
+ struct dm_target *ti;
+
+ for (i = 0; i < t->num_targets; i++) {
+ ti = t->targets + i;
+ if (ti->type->busy && ti->type->busy(ti))
+ return 1;
+ }
+
+ return 0;
+}
+
void dm_table_unplug_all(struct dm_table *t)
{
struct dm_dev_internal *dd;
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 48db308fae6..9acd54a5cff 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -24,6 +24,13 @@
#define DM_MSG_PREFIX "core"
+/*
+ * Cookies are numeric values sent with CHANGE and REMOVE
+ * uevents while resuming, removing or renaming the device.
+ */
+#define DM_COOKIE_ENV_VAR_NAME "DM_COOKIE"
+#define DM_COOKIE_LENGTH 24
+
static const char *_name = DM_NAME;
static unsigned int major = 0;
@@ -71,7 +78,7 @@ struct dm_rq_target_io {
*/
struct dm_rq_clone_bio_info {
struct bio *orig;
- struct request *rq;
+ struct dm_rq_target_io *tio;
};
union map_info *dm_get_mapinfo(struct bio *bio)
@@ -81,6 +88,14 @@ union map_info *dm_get_mapinfo(struct bio *bio)
return NULL;
}
+union map_info *dm_get_rq_mapinfo(struct request *rq)
+{
+ if (rq && rq->end_io_data)
+ return &((struct dm_rq_target_io *)rq->end_io_data)->info;
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(dm_get_rq_mapinfo);
+
#define MINOR_ALLOCED ((void *)-1)
/*
@@ -157,13 +172,31 @@ struct mapped_device {
* freeze/thaw support require holding onto a super block
*/
struct super_block *frozen_sb;
- struct block_device *suspended_bdev;
+ struct block_device *bdev;
/* forced geometry settings */
struct hd_geometry geometry;
+ /* marker of flush suspend for request-based dm */
+ struct request suspend_rq;
+
+ /* For saving the address of __make_request for request-based dm */
+ make_request_fn *saved_make_request_fn;
+
/* sysfs handle */
struct kobject kobj;
+
+ /* zero-length barrier that will be cloned and submitted to targets */
+ struct bio barrier_bio;
+};
+
+/*
+ * For pre-allocation of mempools at table load time.
+ */
+struct dm_md_mempools {
+ mempool_t *io_pool;
+ mempool_t *tio_pool;
+ struct bio_set *bs;
};
#define MIN_IOS 256
@@ -391,14 +424,29 @@ static void free_io(struct mapped_device *md, struct dm_io *io)
mempool_free(io, md->io_pool);
}
-static struct dm_target_io *alloc_tio(struct mapped_device *md)
+static void free_tio(struct mapped_device *md, struct dm_target_io *tio)
{
- return mempool_alloc(md->tio_pool, GFP_NOIO);
+ mempool_free(tio, md->tio_pool);
}
-static void free_tio(struct mapped_device *md, struct dm_target_io *tio)
+static struct dm_rq_target_io *alloc_rq_tio(struct mapped_device *md)
{
- mempool_free(tio, md->tio_pool);
+ return mempool_alloc(md->tio_pool, GFP_ATOMIC);
+}
+
+static void free_rq_tio(struct dm_rq_target_io *tio)
+{
+ mempool_free(tio, tio->md->tio_pool);
+}
+
+static struct dm_rq_clone_bio_info *alloc_bio_info(struct mapped_device *md)
+{
+ return mempool_alloc(md->io_pool, GFP_ATOMIC);
+}
+
+static void free_bio_info(struct dm_rq_clone_bio_info *info)
+{
+ mempool_free(info, info->tio->md->io_pool);
}
static void start_io_acct(struct dm_io *io)
@@ -464,12 +512,13 @@ static void queue_io(struct mapped_device *md, struct bio *bio)
struct dm_table *dm_get_table(struct mapped_device *md)
{
struct dm_table *t;
+ unsigned long flags;
- read_lock(&md->map_lock);
+ read_lock_irqsave(&md->map_lock, flags);
t = md->map;
if (t)
dm_table_get(t);
- read_unlock(&md->map_lock);
+ read_unlock_irqrestore(&md->map_lock, flags);
return t;
}
@@ -536,9 +585,11 @@ static void dec_pending(struct dm_io *io, int error)
* Target requested pushing back the I/O.
*/
spin_lock_irqsave(&md->deferred_lock, flags);
- if (__noflush_suspending(md))
- bio_list_add_head(&md->deferred, io->bio);
- else
+ if (__noflush_suspending(md)) {
+ if (!bio_barrier(io->bio))
+ bio_list_add_head(&md->deferred,
+ io->bio);
+ } else
/* noflush suspend was interrupted. */
io->error = -EIO;
spin_unlock_irqrestore(&md->deferred_lock, flags);
@@ -553,7 +604,8 @@ static void dec_pending(struct dm_io *io, int error)
* a per-device variable for error reporting.
* Note that you can't touch the bio after end_io_acct
*/
- md->barrier_error = io_error;
+ if (!md->barrier_error && io_error != -EOPNOTSUPP)
+ md->barrier_error = io_error;
end_io_acct(io);
} else {
end_io_acct(io);
@@ -607,6 +659,262 @@ static void clone_endio(struct bio *bio, int error)
dec_pending(io, error);
}
+/*
+ * Partial completion handling for request-based dm
+ */
+static void end_clone_bio(struct bio *clone, int error)
+{
+ struct dm_rq_clone_bio_info *info = clone->bi_private;
+ struct dm_rq_target_io *tio = info->tio;
+ struct bio *bio = info->orig;
+ unsigned int nr_bytes = info->orig->bi_size;
+
+ bio_put(clone);
+
+ if (tio->error)
+ /*
+ * An error has already been detected on the request.
+ * Once an error has occurred, just let clone->end_io() handle
+ * the remainder.
+ */
+ return;
+ else if (error) {
+ /*
+ * Don't report the error to the upper layer yet.
+ * The error handling decision is made by the target driver
+ * when the request is completed.
+ */
+ tio->error = error;
+ return;
+ }
+
+ /*
+ * I/O for the bio successfully completed.
+ * Report the data completion to the upper layer.
+ */
+
+ /*
+ * bios are processed from the head of the list.
+ * So the completing bio should always be rq->bio.
+ * If it's not, something is wrong.
+ */
+ if (tio->orig->bio != bio)
+ DMERR("bio completion is going in the middle of the request");
+
+ /*
+ * Update the original request.
+ * Do not use blk_end_request() here, because it may complete
+ * the original request before the clone, and break the ordering.
+ */
+ blk_update_request(tio->orig, 0, nr_bytes);
+}
+
+/*
+ * Don't touch any member of the md after calling this function because
+ * the md may be freed in dm_put() at the end of this function.
+ * Or do dm_get() before calling this function and dm_put() later.
+ */
+static void rq_completed(struct mapped_device *md, int run_queue)
+{
+ int wakeup_waiters = 0;
+ struct request_queue *q = md->queue;
+ unsigned long flags;
+
+ spin_lock_irqsave(q->queue_lock, flags);
+ if (!queue_in_flight(q))
+ wakeup_waiters = 1;
+ spin_unlock_irqrestore(q->queue_lock, flags);
+
+ /* nudge anyone waiting on suspend queue */
+ if (wakeup_waiters)
+ wake_up(&md->wait);
+
+ if (run_queue)
+ blk_run_queue(q);
+
+ /*
+ * dm_put() must be at the end of this function. See the comment above
+ */
+ dm_put(md);
+}
+
+static void dm_unprep_request(struct request *rq)
+{
+ struct request *clone = rq->special;
+ struct dm_rq_target_io *tio = clone->end_io_data;
+
+ rq->special = NULL;
+ rq->cmd_flags &= ~REQ_DONTPREP;
+
+ blk_rq_unprep_clone(clone);
+ free_rq_tio(tio);
+}
+
+/*
+ * Requeue the original request of a clone.
+ */
+void dm_requeue_unmapped_request(struct request *clone)
+{
+ struct dm_rq_target_io *tio = clone->end_io_data;
+ struct mapped_device *md = tio->md;
+ struct request *rq = tio->orig;
+ struct request_queue *q = rq->q;
+ unsigned long flags;
+
+ dm_unprep_request(rq);
+
+ spin_lock_irqsave(q->queue_lock, flags);
+ if (elv_queue_empty(q))
+ blk_plug_device(q);
+ blk_requeue_request(q, rq);
+ spin_unlock_irqrestore(q->queue_lock, flags);
+
+ rq_completed(md, 0);
+}
+EXPORT_SYMBOL_GPL(dm_requeue_unmapped_request);
+
+static void __stop_queue(struct request_queue *q)
+{
+ blk_stop_queue(q);
+}
+
+static void stop_queue(struct request_queue *q)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(q->queue_lock, flags);
+ __stop_queue(q);
+ spin_unlock_irqrestore(q->queue_lock, flags);
+}
+
+static void __start_queue(struct request_queue *q)
+{
+ if (blk_queue_stopped(q))
+ blk_start_queue(q);
+}
+
+static void start_queue(struct request_queue *q)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(q->queue_lock, flags);
+ __start_queue(q);
+ spin_unlock_irqrestore(q->queue_lock, flags);
+}
+
+/*
+ * Complete the clone and the original request.
+ * Must be called without queue lock.
+ */
+static void dm_end_request(struct request *clone, int error)
+{
+ struct dm_rq_target_io *tio = clone->end_io_data;
+ struct mapped_device *md = tio->md;
+ struct request *rq = tio->orig;
+
+ if (blk_pc_request(rq)) {
+ rq->errors = clone->errors;
+ rq->resid_len = clone->resid_len;
+
+ if (rq->sense)
+ /*
+ * We are using the sense buffer of the original
+ * request.
+ * So setting the length of the sense data is enough.
+ */
+ rq->sense_len = clone->sense_len;
+ }
+
+ BUG_ON(clone->bio);
+ free_rq_tio(tio);
+
+ blk_end_request_all(rq, error);
+
+ rq_completed(md, 1);
+}
+
+/*
+ * Request completion handler for request-based dm
+ */
+static void dm_softirq_done(struct request *rq)
+{
+ struct request *clone = rq->completion_data;
+ struct dm_rq_target_io *tio = clone->end_io_data;
+ dm_request_endio_fn rq_end_io = tio->ti->type->rq_end_io;
+ int error = tio->error;
+
+ if (!(rq->cmd_flags & REQ_FAILED) && rq_end_io)
+ error = rq_end_io(tio->ti, clone, error, &tio->info);
+
+ if (error <= 0)
+ /* The target wants to complete the I/O */
+ dm_end_request(clone, error);
+ else if (error == DM_ENDIO_INCOMPLETE)
+ /* The target will handle the I/O */
+ return;
+ else if (error == DM_ENDIO_REQUEUE)
+ /* The target wants to requeue the I/O */
+ dm_requeue_unmapped_request(clone);
+ else {
+ DMWARN("unimplemented target endio return value: %d", error);
+ BUG();
+ }
+}
+
+/*
+ * Complete the clone and the original request with the error status
+ * through softirq context.
+ */
+static void dm_complete_request(struct request *clone, int error)
+{
+ struct dm_rq_target_io *tio = clone->end_io_data;
+ struct request *rq = tio->orig;
+
+ tio->error = error;
+ rq->completion_data = clone;
+ blk_complete_request(rq);
+}
+
+/*
+ * Complete the not-mapped clone and the original request with the error status
+ * through softirq context.
+ * Target's rq_end_io() function isn't called.
+ * This may be used when the target's map_rq() function fails.
+ */
+void dm_kill_unmapped_request(struct request *clone, int error)
+{
+ struct dm_rq_target_io *tio = clone->end_io_data;
+ struct request *rq = tio->orig;
+
+ rq->cmd_flags |= REQ_FAILED;
+ dm_complete_request(clone, error);
+}
+EXPORT_SYMBOL_GPL(dm_kill_unmapped_request);
+
+/*
+ * Called with the queue lock held
+ */
+static void end_clone_request(struct request *clone, int error)
+{
+ /*
+ * This only cleans up the accounting of the queue in which
+ * the clone was dispatched.
+ * The clone is *NOT* actually freed here because it was allocated from
+ * dm's own mempool and REQ_ALLOCED isn't set in clone->cmd_flags.
+ */
+ __blk_put_request(clone->q, clone);
+
+ /*
+ * Actual request completion is done in a softirq context which doesn't
+ * hold the queue lock. Otherwise, deadlock could occur because:
+ * - another request may be submitted by the upper-level driver
+ * in the stack during the completion
+ * - a submission which requires the queue lock may be made
+ * against this queue
+ */
+ dm_complete_request(clone, error);
+}
+
static sector_t max_io_len(struct mapped_device *md,
sector_t sector, struct dm_target *ti)
{
@@ -634,11 +942,6 @@ static void __map_bio(struct dm_target *ti, struct bio *clone,
sector_t sector;
struct mapped_device *md;
- /*
- * Sanity checks.
- */
- BUG_ON(!clone->bi_size);
-
clone->bi_end_io = clone_endio;
clone->bi_private = tio;
@@ -714,7 +1017,7 @@ static struct bio *split_bvec(struct bio *bio, sector_t sector,
clone->bi_flags |= 1 << BIO_CLONED;
if (bio_integrity(bio)) {
- bio_integrity_clone(clone, bio, GFP_NOIO);
+ bio_integrity_clone(clone, bio, GFP_NOIO, bs);
bio_integrity_trim(clone,
bio_sector_offset(bio, idx, offset), len);
}
@@ -742,7 +1045,7 @@ static struct bio *clone_bio(struct bio *bio, sector_t sector,
clone->bi_flags &= ~(1 << BIO_SEG_VALID);
if (bio_integrity(bio)) {
- bio_integrity_clone(clone, bio, GFP_NOIO);
+ bio_integrity_clone(clone, bio, GFP_NOIO, bs);
if (idx != bio->bi_idx || clone->bi_size < bio->bi_size)
bio_integrity_trim(clone,
@@ -752,6 +1055,48 @@ static struct bio *clone_bio(struct bio *bio, sector_t sector,
return clone;
}
+static struct dm_target_io *alloc_tio(struct clone_info *ci,
+ struct dm_target *ti)
+{
+ struct dm_target_io *tio = mempool_alloc(ci->md->tio_pool, GFP_NOIO);
+
+ tio->io = ci->io;
+ tio->ti = ti;
+ memset(&tio->info, 0, sizeof(tio->info));
+
+ return tio;
+}
+
+static void __flush_target(struct clone_info *ci, struct dm_target *ti,
+ unsigned flush_nr)
+{
+ struct dm_target_io *tio = alloc_tio(ci, ti);
+ struct bio *clone;
+
+ tio->info.flush_request = flush_nr;
+
+ clone = bio_alloc_bioset(GFP_NOIO, 0, ci->md->bs);
+ __bio_clone(clone, ci->bio);
+ clone->bi_destructor = dm_bio_destructor;
+
+ __map_bio(ti, clone, tio);
+}
+
+static int __clone_and_map_empty_barrier(struct clone_info *ci)
+{
+ unsigned target_nr = 0, flush_nr;
+ struct dm_target *ti;
+
+ while ((ti = dm_table_get_target(ci->map, target_nr++)))
+ for (flush_nr = 0; flush_nr < ti->num_flush_requests;
+ flush_nr++)
+ __flush_target(ci, ti, flush_nr);
+
+ ci->sector_count = 0;
+
+ return 0;
+}
+
static int __clone_and_map(struct clone_info *ci)
{
struct bio *clone, *bio = ci->bio;
@@ -759,6 +1104,9 @@ static int __clone_and_map(struct clone_info *ci)
sector_t len = 0, max;
struct dm_target_io *tio;
+ if (unlikely(bio_empty_barrier(bio)))
+ return __clone_and_map_empty_barrier(ci);
+
ti = dm_table_find_target(ci->map, ci->sector);
if (!dm_target_is_valid(ti))
return -EIO;
@@ -768,10 +1116,7 @@ static int __clone_and_map(struct clone_info *ci)
/*
* Allocate a target io object.
*/
- tio = alloc_tio(ci->md);
- tio->io = ci->io;
- tio->ti = ti;
- memset(&tio->info, 0, sizeof(tio->info));
+ tio = alloc_tio(ci, ti);
if (ci->sector_count <= max) {
/*
@@ -827,10 +1172,7 @@ static int __clone_and_map(struct clone_info *ci)
max = max_io_len(ci->md, ci->sector, ti);
- tio = alloc_tio(ci->md);
- tio->io = ci->io;
- tio->ti = ti;
- memset(&tio->info, 0, sizeof(tio->info));
+ tio = alloc_tio(ci, ti);
}
len = min(remaining, max);
@@ -865,7 +1207,8 @@ static void __split_and_process_bio(struct mapped_device *md, struct bio *bio)
if (!bio_barrier(bio))
bio_io_error(bio);
else
- md->barrier_error = -EIO;
+ if (!md->barrier_error)
+ md->barrier_error = -EIO;
return;
}
@@ -878,6 +1221,8 @@ static void __split_and_process_bio(struct mapped_device *md, struct bio *bio)
ci.io->md = md;
ci.sector = bio->bi_sector;
ci.sector_count = bio_sectors(bio);
+ if (unlikely(bio_empty_barrier(bio)))
+ ci.sector_count = 1;
ci.idx = bio->bi_idx;
start_io_acct(ci.io);
@@ -925,6 +1270,16 @@ static int dm_merge_bvec(struct request_queue *q,
*/
if (max_size && ti->type->merge)
max_size = ti->type->merge(ti, bvm, biovec, max_size);
+ /*
+ * If the target doesn't support merge method and some of the devices
+ * provided their merge_bvec method (we know this by looking at
+ * queue_max_hw_sectors), then we can't allow bios with multiple vector
+ * entries. So always set max_size to 0, and the code below allows
+ * just one page.
+ */
+ else if (queue_max_hw_sectors(q) <= PAGE_SIZE >> 9)
+ max_size = 0;
out_table:
dm_table_put(map);
@@ -943,7 +1298,7 @@ out:
* The request function that just remaps the bio built up by
* dm_merge_bvec.
*/
-static int dm_request(struct request_queue *q, struct bio *bio)
+static int _dm_request(struct request_queue *q, struct bio *bio)
{
int rw = bio_data_dir(bio);
struct mapped_device *md = q->queuedata;
@@ -980,12 +1335,274 @@ static int dm_request(struct request_queue *q, struct bio *bio)
return 0;
}
+static int dm_make_request(struct request_queue *q, struct bio *bio)
+{
+ struct mapped_device *md = q->queuedata;
+
+ if (unlikely(bio_barrier(bio))) {
+ bio_endio(bio, -EOPNOTSUPP);
+ return 0;
+ }
+
+ return md->saved_make_request_fn(q, bio); /* call __make_request() */
+}
+
+static int dm_request_based(struct mapped_device *md)
+{
+ return blk_queue_stackable(md->queue);
+}
+
+static int dm_request(struct request_queue *q, struct bio *bio)
+{
+ struct mapped_device *md = q->queuedata;
+
+ if (dm_request_based(md))
+ return dm_make_request(q, bio);
+
+ return _dm_request(q, bio);
+}
+
+void dm_dispatch_request(struct request *rq)
+{
+ int r;
+
+ if (blk_queue_io_stat(rq->q))
+ rq->cmd_flags |= REQ_IO_STAT;
+
+ rq->start_time = jiffies;
+ r = blk_insert_cloned_request(rq->q, rq);
+ if (r)
+ dm_complete_request(rq, r);
+}
+EXPORT_SYMBOL_GPL(dm_dispatch_request);
+
+static void dm_rq_bio_destructor(struct bio *bio)
+{
+ struct dm_rq_clone_bio_info *info = bio->bi_private;
+ struct mapped_device *md = info->tio->md;
+
+ free_bio_info(info);
+ bio_free(bio, md->bs);
+}
+
+static int dm_rq_bio_constructor(struct bio *bio, struct bio *bio_orig,
+ void *data)
+{
+ struct dm_rq_target_io *tio = data;
+ struct mapped_device *md = tio->md;
+ struct dm_rq_clone_bio_info *info = alloc_bio_info(md);
+
+ if (!info)
+ return -ENOMEM;
+
+ info->orig = bio_orig;
+ info->tio = tio;
+ bio->bi_end_io = end_clone_bio;
+ bio->bi_private = info;
+ bio->bi_destructor = dm_rq_bio_destructor;
+
+ return 0;
+}
+
+static int setup_clone(struct request *clone, struct request *rq,
+ struct dm_rq_target_io *tio)
+{
+ int r = blk_rq_prep_clone(clone, rq, tio->md->bs, GFP_ATOMIC,
+ dm_rq_bio_constructor, tio);
+
+ if (r)
+ return r;
+
+ clone->cmd = rq->cmd;
+ clone->cmd_len = rq->cmd_len;
+ clone->sense = rq->sense;
+ clone->buffer = rq->buffer;
+ clone->end_io = end_clone_request;
+ clone->end_io_data = tio;
+
+ return 0;
+}
+
+static int dm_rq_flush_suspending(struct mapped_device *md)
+{
+ return !md->suspend_rq.special;
+}
+
+/*
+ * Called with the queue lock held.
+ */
+static int dm_prep_fn(struct request_queue *q, struct request *rq)
+{
+ struct mapped_device *md = q->queuedata;
+ struct dm_rq_target_io *tio;
+ struct request *clone;
+
+ if (unlikely(rq == &md->suspend_rq)) {
+ if (dm_rq_flush_suspending(md))
+ return BLKPREP_OK;
+ else
+ /* The flush suspend was interrupted */
+ return BLKPREP_KILL;
+ }
+
+ if (unlikely(rq->special)) {
+ DMWARN("Already has something in rq->special.");
+ return BLKPREP_KILL;
+ }
+
+ tio = alloc_rq_tio(md); /* Only one for each original request */
+ if (!tio)
+ /* -ENOMEM */
+ return BLKPREP_DEFER;
+
+ tio->md = md;
+ tio->ti = NULL;
+ tio->orig = rq;
+ tio->error = 0;
+ memset(&tio->info, 0, sizeof(tio->info));
+
+ clone = &tio->clone;
+ if (setup_clone(clone, rq, tio)) {
+ /* -ENOMEM */
+ free_rq_tio(tio);
+ return BLKPREP_DEFER;
+ }
+
+ rq->special = clone;
+ rq->cmd_flags |= REQ_DONTPREP;
+
+ return BLKPREP_OK;
+}
+
+static void map_request(struct dm_target *ti, struct request *rq,
+ struct mapped_device *md)
+{
+ int r;
+ struct request *clone = rq->special;
+ struct dm_rq_target_io *tio = clone->end_io_data;
+
+ /*
+ * Hold the md reference here for the in-flight I/O.
+ * We can't rely on the reference count taken by the device opener,
+ * because the device may be closed during request completion
+ * when all bios have completed.
+ * See the comment in rq_completed() too.
+ */
+ dm_get(md);
+
+ tio->ti = ti;
+ r = ti->type->map_rq(ti, clone, &tio->info);
+ switch (r) {
+ case DM_MAPIO_SUBMITTED:
+ /* The target has taken the I/O to submit by itself later */
+ break;
+ case DM_MAPIO_REMAPPED:
+ /* The target has remapped the I/O so dispatch it */
+ dm_dispatch_request(clone);
+ break;
+ case DM_MAPIO_REQUEUE:
+ /* The target wants to requeue the I/O */
+ dm_requeue_unmapped_request(clone);
+ break;
+ default:
+ if (r > 0) {
+ DMWARN("unimplemented target map return value: %d", r);
+ BUG();
+ }
+
+ /* The target wants to complete the I/O */
+ dm_kill_unmapped_request(clone, r);
+ break;
+ }
+}
+
+/*
+ * q->request_fn for request-based dm.
+ * Called with the queue lock held.
+ */
+static void dm_request_fn(struct request_queue *q)
+{
+ struct mapped_device *md = q->queuedata;
+ struct dm_table *map = dm_get_table(md);
+ struct dm_target *ti;
+ struct request *rq;
+
+ /*
+ * For noflush suspend, check blk_queue_stopped() to immediately
+ * quit I/O dispatching.
+ */
+ while (!blk_queue_plugged(q) && !blk_queue_stopped(q)) {
+ rq = blk_peek_request(q);
+ if (!rq)
+ goto plug_and_out;
+
+ if (unlikely(rq == &md->suspend_rq)) { /* Flush suspend marker */
+ if (queue_in_flight(q))
+ /* Not quiet yet. Wait more */
+ goto plug_and_out;
+
+ /* This device should be quiet now */
+ __stop_queue(q);
+ blk_start_request(rq);
+ __blk_end_request_all(rq, 0);
+ wake_up(&md->wait);
+ goto out;
+ }
+
+ ti = dm_table_find_target(map, blk_rq_pos(rq));
+ if (ti->type->busy && ti->type->busy(ti))
+ goto plug_and_out;
+
+ blk_start_request(rq);
+ spin_unlock(q->queue_lock);
+ map_request(ti, rq, md);
+ spin_lock_irq(q->queue_lock);
+ }
+
+ goto out;
+
+plug_and_out:
+ if (!elv_queue_empty(q))
+ /* Some requests still remain, retry later */
+ blk_plug_device(q);
+
+out:
+ dm_table_put(map);
+
+ return;
+}
+
+int dm_underlying_device_busy(struct request_queue *q)
+{
+ return blk_lld_busy(q);
+}
+EXPORT_SYMBOL_GPL(dm_underlying_device_busy);
+
+static int dm_lld_busy(struct request_queue *q)
+{
+ int r;
+ struct mapped_device *md = q->queuedata;
+ struct dm_table *map = dm_get_table(md);
+
+ if (!map || test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags))
+ r = 1;
+ else
+ r = dm_table_any_busy_target(map);
+
+ dm_table_put(map);
+
+ return r;
+}
+
static void dm_unplug_all(struct request_queue *q)
{
struct mapped_device *md = q->queuedata;
struct dm_table *map = dm_get_table(md);
if (map) {
+ if (dm_request_based(md))
+ generic_unplug_device(q);
+
dm_table_unplug_all(map);
dm_table_put(map);
}
@@ -1000,7 +1617,16 @@ static int dm_any_congested(void *congested_data, int bdi_bits)
if (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) {
map = dm_get_table(md);
if (map) {
- r = dm_table_any_congested(map, bdi_bits);
+ /*
+ * Request-based dm cares only about its own queue when
+ * queried about the congestion status of the request_queue
+ */
+ if (dm_request_based(md))
+ r = md->queue->backing_dev_info.state &
+ bdi_bits;
+ else
+ r = dm_table_any_congested(map, bdi_bits);
+
dm_table_put(map);
}
}
@@ -1123,30 +1749,32 @@ static struct mapped_device *alloc_dev(int minor)
INIT_LIST_HEAD(&md->uevent_list);
spin_lock_init(&md->uevent_lock);
- md->queue = blk_alloc_queue(GFP_KERNEL);
+ md->queue = blk_init_queue(dm_request_fn, NULL);
if (!md->queue)
goto bad_queue;
+ /*
+ * Request-based dm devices cannot be stacked on top of bio-based dm
+ * devices. The type of this dm device has not been decided yet,
+ * although we initialized the queue using blk_init_queue().
+ * The type is decided at the first table loading time.
+ * To prevent problematic device stacking, clear the queue flag
+ * for request stacking support until then.
+ *
+ * This queue is new, so no concurrency on the queue_flags.
+ */
+ queue_flag_clear_unlocked(QUEUE_FLAG_STACKABLE, md->queue);
+ md->saved_make_request_fn = md->queue->make_request_fn;
md->queue->queuedata = md;
md->queue->backing_dev_info.congested_fn = dm_any_congested;
md->queue->backing_dev_info.congested_data = md;
blk_queue_make_request(md->queue, dm_request);
- blk_queue_ordered(md->queue, QUEUE_ORDERED_DRAIN, NULL);
blk_queue_bounce_limit(md->queue, BLK_BOUNCE_ANY);
md->queue->unplug_fn = dm_unplug_all;
blk_queue_merge_bvec(md->queue, dm_merge_bvec);
-
- md->io_pool = mempool_create_slab_pool(MIN_IOS, _io_cache);
- if (!md->io_pool)
- goto bad_io_pool;
-
- md->tio_pool = mempool_create_slab_pool(MIN_IOS, _tio_cache);
- if (!md->tio_pool)
- goto bad_tio_pool;
-
- md->bs = bioset_create(16, 0);
- if (!md->bs)
- goto bad_no_bioset;
+ blk_queue_softirq_done(md->queue, dm_softirq_done);
+ blk_queue_prep_rq(md->queue, dm_prep_fn);
+ blk_queue_lld_busy(md->queue, dm_lld_busy);
md->disk = alloc_disk(1);
if (!md->disk)
@@ -1170,6 +1798,10 @@ static struct mapped_device *alloc_dev(int minor)
if (!md->wq)
goto bad_thread;
+ md->bdev = bdget_disk(md->disk, 0);
+ if (!md->bdev)
+ goto bad_bdev;
+
/* Populate the mapping, nobody knows we exist yet */
spin_lock(&_minor_lock);
old_md = idr_replace(&_minor_idr, md, minor);
@@ -1179,15 +1811,11 @@ static struct mapped_device *alloc_dev(int minor)
return md;
+bad_bdev:
+ destroy_workqueue(md->wq);
bad_thread:
put_disk(md->disk);
bad_disk:
- bioset_free(md->bs);
-bad_no_bioset:
- mempool_destroy(md->tio_pool);
-bad_tio_pool:
- mempool_destroy(md->io_pool);
-bad_io_pool:
blk_cleanup_queue(md->queue);
bad_queue:
free_minor(minor);
@@ -1204,14 +1832,15 @@ static void free_dev(struct mapped_device *md)
{
int minor = MINOR(disk_devt(md->disk));
- if (md->suspended_bdev) {
- unlock_fs(md);
- bdput(md->suspended_bdev);
- }
+ unlock_fs(md);
+ bdput(md->bdev);
destroy_workqueue(md->wq);
- mempool_destroy(md->tio_pool);
- mempool_destroy(md->io_pool);
- bioset_free(md->bs);
+ if (md->tio_pool)
+ mempool_destroy(md->tio_pool);
+ if (md->io_pool)
+ mempool_destroy(md->io_pool);
+ if (md->bs)
+ bioset_free(md->bs);
blk_integrity_unregister(md->disk);
del_gendisk(md->disk);
free_minor(minor);
@@ -1226,6 +1855,29 @@ static void free_dev(struct mapped_device *md)
kfree(md);
}
+static void __bind_mempools(struct mapped_device *md, struct dm_table *t)
+{
+ struct dm_md_mempools *p;
+
+ if (md->io_pool && md->tio_pool && md->bs)
+ /* the md already has necessary mempools */
+ goto out;
+
+ p = dm_table_get_md_mempools(t);
+ BUG_ON(!p || md->io_pool || md->tio_pool || md->bs);
+
+ md->io_pool = p->io_pool;
+ p->io_pool = NULL;
+ md->tio_pool = p->tio_pool;
+ p->tio_pool = NULL;
+ md->bs = p->bs;
+ p->bs = NULL;
+
+out:
+ /* mempool bind completed, the table no longer needs its mempools */
+ dm_table_free_md_mempools(t);
+}
+
/*
* Bind a table to the device.
*/
@@ -1249,15 +1901,17 @@ static void __set_size(struct mapped_device *md, sector_t size)
{
set_capacity(md->disk, size);
- mutex_lock(&md->suspended_bdev->bd_inode->i_mutex);
- i_size_write(md->suspended_bdev->bd_inode, (loff_t)size << SECTOR_SHIFT);
- mutex_unlock(&md->suspended_bdev->bd_inode->i_mutex);
+ mutex_lock(&md->bdev->bd_inode->i_mutex);
+ i_size_write(md->bdev->bd_inode, (loff_t)size << SECTOR_SHIFT);
+ mutex_unlock(&md->bdev->bd_inode->i_mutex);
}
-static int __bind(struct mapped_device *md, struct dm_table *t)
+static int __bind(struct mapped_device *md, struct dm_table *t,
+ struct queue_limits *limits)
{
struct request_queue *q = md->queue;
sector_t size;
+ unsigned long flags;
size = dm_table_get_size(t);
@@ -1267,8 +1921,7 @@ static int __bind(struct mapped_device *md, struct dm_table *t)
if (size != get_capacity(md->disk))
memset(&md->geometry, 0, sizeof(md->geometry));
- if (md->suspended_bdev)
- __set_size(md, size);
+ __set_size(md, size);
if (!size) {
dm_table_destroy(t);
@@ -1277,10 +1930,22 @@ static int __bind(struct mapped_device *md, struct dm_table *t)
dm_table_event_callback(t, event_callback, md);
- write_lock(&md->map_lock);
+ /*
+ * If the old table wasn't request-based, the queue hasn't been
+ * stopped during suspension, so stop it now to prevent I/O from
+ * being mapped before resume.
+ * This must be done before setting the queue restrictions,
+ * because request-based dm may start running right after they are set.
+ */
+ if (dm_table_request_based(t) && !blk_queue_stopped(q))
+ stop_queue(q);
+
+ __bind_mempools(md, t);
+
+ write_lock_irqsave(&md->map_lock, flags);
md->map = t;
- dm_table_set_restrictions(t, q);
- write_unlock(&md->map_lock);
+ dm_table_set_restrictions(t, q, limits);
+ write_unlock_irqrestore(&md->map_lock, flags);
return 0;
}
@@ -1288,14 +1953,15 @@ static int __bind(struct mapped_device *md, struct dm_table *t)
static void __unbind(struct mapped_device *md)
{
struct dm_table *map = md->map;
+ unsigned long flags;
if (!map)
return;
dm_table_event_callback(map, NULL, NULL);
- write_lock(&md->map_lock);
+ write_lock_irqsave(&md->map_lock, flags);
md->map = NULL;
- write_unlock(&md->map_lock);
+ write_unlock_irqrestore(&md->map_lock, flags);
dm_table_destroy(map);
}
@@ -1399,6 +2065,8 @@ static int dm_wait_for_completion(struct mapped_device *md, int interruptible)
{
int r = 0;
DECLARE_WAITQUEUE(wait, current);
+ struct request_queue *q = md->queue;
+ unsigned long flags;
dm_unplug_all(md->queue);
@@ -1408,7 +2076,14 @@ static int dm_wait_for_completion(struct mapped_device *md, int interruptible)
set_current_state(interruptible);
smp_mb();
- if (!atomic_read(&md->pending))
+ if (dm_request_based(md)) {
+ spin_lock_irqsave(q->queue_lock, flags);
+ if (!queue_in_flight(q) && blk_queue_stopped(q)) {
+ spin_unlock_irqrestore(q->queue_lock, flags);
+ break;
+ }
+ spin_unlock_irqrestore(q->queue_lock, flags);
+ } else if (!atomic_read(&md->pending))
break;
if (interruptible == TASK_INTERRUPTIBLE &&
@@ -1426,34 +2101,36 @@ static int dm_wait_for_completion(struct mapped_device *md, int interruptible)
return r;
}
-static int dm_flush(struct mapped_device *md)
+static void dm_flush(struct mapped_device *md)
{
dm_wait_for_completion(md, TASK_UNINTERRUPTIBLE);
- return 0;
+
+ bio_init(&md->barrier_bio);
+ md->barrier_bio.bi_bdev = md->bdev;
+ md->barrier_bio.bi_rw = WRITE_BARRIER;
+ __split_and_process_bio(md, &md->barrier_bio);
+
+ dm_wait_for_completion(md, TASK_UNINTERRUPTIBLE);
}
static void process_barrier(struct mapped_device *md, struct bio *bio)
{
- int error = dm_flush(md);
-
- if (unlikely(error)) {
- bio_endio(bio, error);
- return;
- }
- if (bio_empty_barrier(bio)) {
- bio_endio(bio, 0);
- return;
- }
-
- __split_and_process_bio(md, bio);
+ md->barrier_error = 0;
- error = dm_flush(md);
+ dm_flush(md);
- if (!error && md->barrier_error)
- error = md->barrier_error;
+ if (!bio_empty_barrier(bio)) {
+ __split_and_process_bio(md, bio);
+ dm_flush(md);
+ }
if (md->barrier_error != DM_ENDIO_REQUEUE)
- bio_endio(bio, error);
+ bio_endio(bio, md->barrier_error);
+ else {
+ spin_lock_irq(&md->deferred_lock);
+ bio_list_add_head(&md->deferred, bio);
+ spin_unlock_irq(&md->deferred_lock);
+ }
}
/*
@@ -1479,10 +2156,14 @@ static void dm_wq_work(struct work_struct *work)
up_write(&md->io_lock);
- if (bio_barrier(c))
- process_barrier(md, c);
- else
- __split_and_process_bio(md, c);
+ if (dm_request_based(md))
+ generic_make_request(c);
+ else {
+ if (bio_barrier(c))
+ process_barrier(md, c);
+ else
+ __split_and_process_bio(md, c);
+ }
down_write(&md->io_lock);
}
@@ -1502,6 +2183,7 @@ static void dm_queue_flush(struct mapped_device *md)
*/
int dm_swap_table(struct mapped_device *md, struct dm_table *table)
{
+ struct queue_limits limits;
int r = -EINVAL;
mutex_lock(&md->suspend_lock);
@@ -1510,19 +2192,96 @@ int dm_swap_table(struct mapped_device *md, struct dm_table *table)
if (!dm_suspended(md))
goto out;
- /* without bdev, the device size cannot be changed */
- if (!md->suspended_bdev)
- if (get_capacity(md->disk) != dm_table_get_size(table))
- goto out;
+ r = dm_calculate_queue_limits(table, &limits);
+ if (r)
+ goto out;
+
+ /* cannot change the device type, once a table is bound */
+ if (md->map &&
+ (dm_table_get_type(md->map) != dm_table_get_type(table))) {
+ DMWARN("can't change the device type after a table is bound");
+ goto out;
+ }
+
+ /*
+ * It is enough to call blk_queue_ordered() only once, when the
+ * first bio-based table is bound.
+ *
+ * This setting should be moved to alloc_dev() when request-based dm
+ * supports barrier.
+ */
+ if (!md->map && dm_table_bio_based(table))
+ blk_queue_ordered(md->queue, QUEUE_ORDERED_DRAIN, NULL);
__unbind(md);
- r = __bind(md, table);
+ r = __bind(md, table, &limits);
out:
mutex_unlock(&md->suspend_lock);
return r;
}
+static void dm_rq_invalidate_suspend_marker(struct mapped_device *md)
+{
+ md->suspend_rq.special = (void *)0x1;
+}
+
+static void dm_rq_abort_suspend(struct mapped_device *md, int noflush)
+{
+ struct request_queue *q = md->queue;
+ unsigned long flags;
+
+ spin_lock_irqsave(q->queue_lock, flags);
+ if (!noflush)
+ dm_rq_invalidate_suspend_marker(md);
+ __start_queue(q);
+ spin_unlock_irqrestore(q->queue_lock, flags);
+}
+
+static void dm_rq_start_suspend(struct mapped_device *md, int noflush)
+{
+ struct request *rq = &md->suspend_rq;
+ struct request_queue *q = md->queue;
+
+ if (noflush)
+ stop_queue(q);
+ else {
+ blk_rq_init(q, rq);
+ blk_insert_request(q, rq, 0, NULL);
+ }
+}
+
+static int dm_rq_suspend_available(struct mapped_device *md, int noflush)
+{
+ int r = 1;
+ struct request *rq = &md->suspend_rq;
+ struct request_queue *q = md->queue;
+ unsigned long flags;
+
+ if (noflush)
+ return r;
+
+ /* The marker must be protected by queue lock if it is in use */
+ spin_lock_irqsave(q->queue_lock, flags);
+ if (unlikely(rq->ref_count)) {
+ /*
+ * This can happen when the previous flush suspend was interrupted
+ * and its marker is still in the queue when this flush suspend is
+ * invoked, because we don't remove the marker at the time of the
+ * interruption.
+ * We have only one marker per mapped_device, so we can't
+ * start another flush suspend while it is in use.
+ */
+ BUG_ON(!rq->special); /* The marker should be invalidated */
+ DMWARN("Invalidating the previous flush suspend is still in"
+ " progress. Please retry later.");
+ r = 0;
+ }
+ spin_unlock_irqrestore(q->queue_lock, flags);
+
+ return r;
+}
+
/*
* Functions to lock and unlock any filesystem running on the
* device.
@@ -1533,7 +2292,7 @@ static int lock_fs(struct mapped_device *md)
WARN_ON(md->frozen_sb);
- md->frozen_sb = freeze_bdev(md->suspended_bdev);
+ md->frozen_sb = freeze_bdev(md->bdev);
if (IS_ERR(md->frozen_sb)) {
r = PTR_ERR(md->frozen_sb);
md->frozen_sb = NULL;
@@ -1542,9 +2301,6 @@ static int lock_fs(struct mapped_device *md)
set_bit(DMF_FROZEN, &md->flags);
- /* don't bdput right now, we don't want the bdev
- * to go away while it is locked.
- */
return 0;
}
@@ -1553,7 +2309,7 @@ static void unlock_fs(struct mapped_device *md)
if (!test_bit(DMF_FROZEN, &md->flags))
return;
- thaw_bdev(md->suspended_bdev, md->frozen_sb);
+ thaw_bdev(md->bdev, md->frozen_sb);
md->frozen_sb = NULL;
clear_bit(DMF_FROZEN, &md->flags);
}
@@ -1565,6 +2321,53 @@ static void unlock_fs(struct mapped_device *md)
* dm_bind_table, dm_suspend must be called to flush any in
* flight bios and ensure that any further io gets deferred.
*/
+/*
+ * Suspend mechanism in request-based dm.
+ *
+ * After the suspend starts, further incoming requests are kept in
+ * the request_queue and deferred.
+ * Remaining requests in the request_queue at the start of suspend are flushed
+ * if it is flush suspend.
+ * The suspend completes when the following conditions have been satisfied,
+ * so wait for both:
+ * 1. q->in_flight is 0 (which means no in_flight request)
+ * 2. queue has been stopped (which means no request dispatching)
+ *
+ *
+ * Noflush suspend
+ * ---------------
+ * Noflush suspend doesn't need to dispatch remaining requests.
+ * So stop the queue immediately. Then, wait for all in_flight requests
+ * to be completed or requeued.
+ *
+ * To abort noflush suspend, start the queue.
+ *
+ *
+ * Flush suspend
+ * -------------
+ * Flush suspend needs to dispatch remaining requests. So stop the queue
+ * after the remaining requests are completed. (Requeued requests must also
+ * be re-dispatched and completed. Until then, we can't stop the queue.)
+ *
+ * During flushing the remaining requests, further incoming requests are also
+ * inserted into the same queue. To distinguish which requests are to be
+ * flushed, we insert a marker request to the queue at the time of starting
+ * flush suspend, like a barrier.
+ * The dispatching is blocked when the marker is found on the top of the queue.
+ * And the queue is stopped when all in_flight requests are completed, since
+ * that means the remaining requests are completely flushed.
+ * Then, the marker is removed from the queue.
+ *
+ * To abort flush suspend, we also need to take care of the marker, not only
+ * starting the queue.
+ * We don't remove the marker forcibly from the queue since it's against
+ * the block-layer manner. Instead, we put an invalidated mark on the marker.
+ * When the invalidated marker is found on the top of the queue, it is
+ * immediately removed from the queue, so it doesn't block dispatching.
+ * Because we have only one marker per mapped_device, we can't start another
+ * flush suspend until the invalidated marker is removed from the queue.
+ * So fail and return with -EBUSY in such a case.
+ */
int dm_suspend(struct mapped_device *md, unsigned suspend_flags)
{
struct dm_table *map = NULL;
@@ -1579,6 +2382,11 @@ int dm_suspend(struct mapped_device *md, unsigned suspend_flags)
goto out_unlock;
}
+ if (dm_request_based(md) && !dm_rq_suspend_available(md, noflush)) {
+ r = -EBUSY;
+ goto out_unlock;
+ }
+
map = dm_get_table(md);
/*
@@ -1591,24 +2399,14 @@ int dm_suspend(struct mapped_device *md, unsigned suspend_flags)
/* This does not get reverted if there's an error later. */
dm_table_presuspend_targets(map);
- /* bdget() can stall if the pending I/Os are not flushed */
- if (!noflush) {
- md->suspended_bdev = bdget_disk(md->disk, 0);
- if (!md->suspended_bdev) {
- DMWARN("bdget failed in dm_suspend");
- r = -ENOMEM;
+ /*
+ * Flush I/O to the device. noflush supersedes do_lockfs,
+ * because lock_fs() needs to flush I/Os.
+ */
+ if (!noflush && do_lockfs) {
+ r = lock_fs(md);
+ if (r)
goto out;
- }
-
- /*
- * Flush I/O to the device. noflush supersedes do_lockfs,
- * because lock_fs() needs to flush I/Os.
- */
- if (do_lockfs) {
- r = lock_fs(md);
- if (r)
- goto out;
- }
}
/*
@@ -1634,6 +2432,9 @@ int dm_suspend(struct mapped_device *md, unsigned suspend_flags)
flush_workqueue(md->wq);
+ if (dm_request_based(md))
+ dm_rq_start_suspend(md, noflush);
+
/*
* At this point no more requests are entering target request routines.
* We call dm_wait_for_completion to wait for all existing requests
@@ -1650,6 +2451,9 @@ int dm_suspend(struct mapped_device *md, unsigned suspend_flags)
if (r < 0) {
dm_queue_flush(md);
+ if (dm_request_based(md))
+ dm_rq_abort_suspend(md, noflush);
+
unlock_fs(md);
goto out; /* pushback list is already flushed, so skip flush */
}
@@ -1665,11 +2469,6 @@ int dm_suspend(struct mapped_device *md, unsigned suspend_flags)
set_bit(DMF_SUSPENDED, &md->flags);
out:
- if (r && md->suspended_bdev) {
- bdput(md->suspended_bdev);
- md->suspended_bdev = NULL;
- }
-
dm_table_put(map);
out_unlock:
@@ -1696,21 +2495,20 @@ int dm_resume(struct mapped_device *md)
dm_queue_flush(md);
- unlock_fs(md);
+ /*
+ * Flushing deferred I/Os must be done after targets are resumed
+ * so that mapping of targets can work correctly.
+ * Request-based dm queues the deferred I/Os in its request_queue.
+ */
+ if (dm_request_based(md))
+ start_queue(md->queue);
- if (md->suspended_bdev) {
- bdput(md->suspended_bdev);
- md->suspended_bdev = NULL;
- }
+ unlock_fs(md);
clear_bit(DMF_SUSPENDED, &md->flags);
dm_table_unplug_all(map);
-
- dm_kobject_uevent(md);
-
r = 0;
-
out:
dm_table_put(map);
mutex_unlock(&md->suspend_lock);
@@ -1721,9 +2519,19 @@ out:
/*-----------------------------------------------------------------
* Event notification.
*---------------------------------------------------------------*/
-void dm_kobject_uevent(struct mapped_device *md)
-{
- kobject_uevent(&disk_to_dev(md->disk)->kobj, KOBJ_CHANGE);
+void dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
+ unsigned cookie)
+{
+ char udev_cookie[DM_COOKIE_LENGTH];
+ char *envp[] = { udev_cookie, NULL };
+
+ if (!cookie)
+ kobject_uevent(&disk_to_dev(md->disk)->kobj, action);
+ else {
+ snprintf(udev_cookie, DM_COOKIE_LENGTH, "%s=%u",
+ DM_COOKIE_ENV_VAR_NAME, cookie);
+ kobject_uevent_env(&disk_to_dev(md->disk)->kobj, action, envp);
+ }
}
uint32_t dm_next_uevent_seq(struct mapped_device *md)
@@ -1777,6 +2585,10 @@ struct mapped_device *dm_get_from_kobject(struct kobject *kobj)
if (&md->kobj != kobj)
return NULL;
+ if (test_bit(DMF_FREEING, &md->flags) ||
+ test_bit(DMF_DELETING, &md->flags))
+ return NULL;
+
dm_get(md);
return md;
}
@@ -1797,6 +2609,61 @@ int dm_noflush_suspending(struct dm_target *ti)
}
EXPORT_SYMBOL_GPL(dm_noflush_suspending);
+struct dm_md_mempools *dm_alloc_md_mempools(unsigned type)
+{
+ struct dm_md_mempools *pools = kmalloc(sizeof(*pools), GFP_KERNEL);
+
+ if (!pools)
+ return NULL;
+
+ pools->io_pool = (type == DM_TYPE_BIO_BASED) ?
+ mempool_create_slab_pool(MIN_IOS, _io_cache) :
+ mempool_create_slab_pool(MIN_IOS, _rq_bio_info_cache);
+ if (!pools->io_pool)
+ goto free_pools_and_out;
+
+ pools->tio_pool = (type == DM_TYPE_BIO_BASED) ?
+ mempool_create_slab_pool(MIN_IOS, _tio_cache) :
+ mempool_create_slab_pool(MIN_IOS, _rq_tio_cache);
+ if (!pools->tio_pool)
+ goto free_io_pool_and_out;
+
+ pools->bs = (type == DM_TYPE_BIO_BASED) ?
+ bioset_create(16, 0) : bioset_create(MIN_IOS, 0);
+ if (!pools->bs)
+ goto free_tio_pool_and_out;
+
+ return pools;
+
+free_tio_pool_and_out:
+ mempool_destroy(pools->tio_pool);
+
+free_io_pool_and_out:
+ mempool_destroy(pools->io_pool);
+
+free_pools_and_out:
+ kfree(pools);
+
+ return NULL;
+}
+
+void dm_free_md_mempools(struct dm_md_mempools *pools)
+{
+ if (!pools)
+ return;
+
+ if (pools->io_pool)
+ mempool_destroy(pools->io_pool);
+
+ if (pools->tio_pool)
+ mempool_destroy(pools->tio_pool);
+
+ if (pools->bs)
+ bioset_free(pools->bs);
+
+ kfree(pools);
+}
+
static struct block_device_operations dm_blk_dops = {
.open = dm_blk_open,
.release = dm_blk_close,
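
A minimal, stand-alone sketch of the ownership-transfer pattern that __bind_mempools() and dm_table_free_md_mempools() above rely on: the table owns the mempools allocated for its type until bind time, at which point the mapped_device steals them and the table frees whatever it still holds. The struct and function names below are invented for illustration only and are not dm API.

#include <stdlib.h>

struct pools { void *io_pool; void *tio_pool; };
struct table { struct pools *pools; };            /* owner until bind */
struct device { void *io_pool; void *tio_pool; }; /* owner after bind */

static void bind_pools(struct device *dev, struct table *t)
{
	if (dev->io_pool && dev->tio_pool)
		goto out;                 /* device already has its pools */

	/* steal the pools, leaving NULL behind in the table */
	dev->io_pool = t->pools->io_pool;
	t->pools->io_pool = NULL;
	dev->tio_pool = t->pools->tio_pool;
	t->pools->tio_pool = NULL;
out:
	if (t->pools) {                   /* free whatever the table still owns */
		free(t->pools->io_pool);
		free(t->pools->tio_pool);
		free(t->pools);
		t->pools = NULL;
	}
}

Once bound, the device keeps its pools across table swaps of the same type, which is why __bind_mempools() takes the early exit when all three pools are already present.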
diff --git a/drivers/md/dm.h b/drivers/md/dm.h
index a31506d93e9..23278ae80f0 100644
--- a/drivers/md/dm.h
+++ b/drivers/md/dm.h
@@ -23,6 +23,13 @@
#define DM_SUSPEND_NOFLUSH_FLAG (1 << 1)
/*
+ * Type of table and mapped_device's mempool
+ */
+#define DM_TYPE_NONE 0
+#define DM_TYPE_BIO_BASED 1
+#define DM_TYPE_REQUEST_BASED 2
+
+/*
* List of devices that a metadevice uses and should open/close.
*/
struct dm_dev_internal {
@@ -32,6 +39,7 @@ struct dm_dev_internal {
};
struct dm_table;
+struct dm_md_mempools;
/*-----------------------------------------------------------------
* Internal table functions.
@@ -41,18 +49,34 @@ void dm_table_event_callback(struct dm_table *t,
void (*fn)(void *), void *context);
struct dm_target *dm_table_get_target(struct dm_table *t, unsigned int index);
struct dm_target *dm_table_find_target(struct dm_table *t, sector_t sector);
-void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q);
+int dm_calculate_queue_limits(struct dm_table *table,
+ struct queue_limits *limits);
+void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
+ struct queue_limits *limits);
struct list_head *dm_table_get_devices(struct dm_table *t);
void dm_table_presuspend_targets(struct dm_table *t);
void dm_table_postsuspend_targets(struct dm_table *t);
int dm_table_resume_targets(struct dm_table *t);
int dm_table_any_congested(struct dm_table *t, int bdi_bits);
+int dm_table_any_busy_target(struct dm_table *t);
+int dm_table_set_type(struct dm_table *t);
+unsigned dm_table_get_type(struct dm_table *t);
+bool dm_table_bio_based(struct dm_table *t);
+bool dm_table_request_based(struct dm_table *t);
+int dm_table_alloc_md_mempools(struct dm_table *t);
+void dm_table_free_md_mempools(struct dm_table *t);
+struct dm_md_mempools *dm_table_get_md_mempools(struct dm_table *t);
/*
* To check the return value from dm_table_find_target().
*/
#define dm_target_is_valid(t) ((t)->table)
+/*
+ * To check whether the target type is request-based or not (bio-based).
+ */
+#define dm_target_request_based(t) ((t)->type->map_rq != NULL)
+
/*-----------------------------------------------------------------
* A registry of target types.
*---------------------------------------------------------------*/
@@ -92,9 +116,16 @@ void dm_stripe_exit(void);
int dm_open_count(struct mapped_device *md);
int dm_lock_for_deletion(struct mapped_device *md);
-void dm_kobject_uevent(struct mapped_device *md);
+void dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
+ unsigned cookie);
int dm_kcopyd_init(void);
void dm_kcopyd_exit(void);
+/*
+ * Mempool operations
+ */
+struct dm_md_mempools *dm_alloc_md_mempools(unsigned type);
+void dm_free_md_mempools(struct dm_md_mempools *pools);
+
#endif
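
dm_table_set_type() is only declared in the hunk above, so the following stand-alone function is an assumption about its shape: walk the targets, classify each one with the same test dm_target_request_based() performs (a map_rq method present or not), and refuse hybrid tables.

enum { TYPE_NONE, TYPE_BIO_BASED, TYPE_REQUEST_BASED };

/* request_based[i] is non-zero when target i provides a map_rq method */
static int derive_table_type(const unsigned char *request_based,
			     unsigned int num_targets)
{
	unsigned int i, rq = 0, bio = 0;

	for (i = 0; i < num_targets; i++) {
		if (request_based[i])
			rq++;
		else
			bio++;
	}

	if (rq && bio)
		return -1;	/* mixing bio-based and request-based targets */

	return rq ? TYPE_REQUEST_BASED : TYPE_BIO_BASED;
}

Presumably the real implementation caches the result in the table so that dm_table_get_type(), dm_table_bio_based() and dm_table_request_based() are cheap at bind and I/O time.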
diff --git a/drivers/md/linear.c b/drivers/md/linear.c
index 15c8b7b25a9..5810fa906af 100644
--- a/drivers/md/linear.c
+++ b/drivers/md/linear.c
@@ -166,8 +166,8 @@ static linear_conf_t *linear_conf(mddev_t *mddev, int raid_disks)
rdev->sectors = sectors * mddev->chunk_sectors;
}
- blk_queue_stack_limits(mddev->queue,
- rdev->bdev->bd_disk->queue);
+ disk_stack_limits(mddev->gendisk, rdev->bdev,
+ rdev->data_offset << 9);
/* as we don't honour merge_bvec_fn, we must never risk
* violating it, so limit ->max_sector to one PAGE, as
* a one page request is never in violation.
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 09be637d52c..0f4a70c43ff 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -3573,7 +3573,8 @@ suspend_lo_store(mddev_t *mddev, const char *buf, size_t len)
char *e;
unsigned long long new = simple_strtoull(buf, &e, 10);
- if (mddev->pers->quiesce == NULL)
+ if (mddev->pers == NULL ||
+ mddev->pers->quiesce == NULL)
return -EINVAL;
if (buf == e || (*e && *e != '\n'))
return -EINVAL;
@@ -3601,7 +3602,8 @@ suspend_hi_store(mddev_t *mddev, const char *buf, size_t len)
char *e;
unsigned long long new = simple_strtoull(buf, &e, 10);
- if (mddev->pers->quiesce == NULL)
+ if (mddev->pers == NULL ||
+ mddev->pers->quiesce == NULL)
return -EINVAL;
if (buf == e || (*e && *e != '\n'))
return -EINVAL;
@@ -3844,11 +3846,9 @@ static int md_alloc(dev_t dev, char *name)
flush_scheduled_work();
mutex_lock(&disks_mutex);
- if (mddev->gendisk) {
- mutex_unlock(&disks_mutex);
- mddev_put(mddev);
- return -EEXIST;
- }
+ error = -EEXIST;
+ if (mddev->gendisk)
+ goto abort;
if (name) {
/* Need to ensure that 'name' is not a duplicate.
@@ -3860,17 +3860,15 @@ static int md_alloc(dev_t dev, char *name)
if (mddev2->gendisk &&
strcmp(mddev2->gendisk->disk_name, name) == 0) {
spin_unlock(&all_mddevs_lock);
- return -EEXIST;
+ goto abort;
}
spin_unlock(&all_mddevs_lock);
}
+ error = -ENOMEM;
mddev->queue = blk_alloc_queue(GFP_KERNEL);
- if (!mddev->queue) {
- mutex_unlock(&disks_mutex);
- mddev_put(mddev);
- return -ENOMEM;
- }
+ if (!mddev->queue)
+ goto abort;
mddev->queue->queuedata = mddev;
/* Can be unlocked because the queue is new: no concurrency */
@@ -3880,11 +3878,9 @@ static int md_alloc(dev_t dev, char *name)
disk = alloc_disk(1 << shift);
if (!disk) {
- mutex_unlock(&disks_mutex);
blk_cleanup_queue(mddev->queue);
mddev->queue = NULL;
- mddev_put(mddev);
- return -ENOMEM;
+ goto abort;
}
disk->major = MAJOR(mddev->unit);
disk->first_minor = unit << shift;
@@ -3906,16 +3902,22 @@ static int md_alloc(dev_t dev, char *name)
mddev->gendisk = disk;
error = kobject_init_and_add(&mddev->kobj, &md_ktype,
&disk_to_dev(disk)->kobj, "%s", "md");
- mutex_unlock(&disks_mutex);
- if (error)
+ if (error) {
+ /* This isn't possible, but as kobject_init_and_add is marked
+ * __must_check, we must do something with the result
+ */
printk(KERN_WARNING "md: cannot register %s/md - name in use\n",
disk->disk_name);
- else {
+ error = 0;
+ }
+ abort:
+ mutex_unlock(&disks_mutex);
+ if (!error) {
kobject_uevent(&mddev->kobj, KOBJ_ADD);
mddev->sysfs_state = sysfs_get_dirent(mddev->kobj.sd, "array_state");
}
mddev_put(mddev);
- return 0;
+ return error;
}
static struct kobject *md_probe(dev_t dev, int *part, void *data)
@@ -6334,10 +6336,16 @@ void md_do_sync(mddev_t *mddev)
sysfs_notify(&mddev->kobj, NULL, "sync_completed");
}
- if (j >= mddev->resync_max)
- wait_event(mddev->recovery_wait,
- mddev->resync_max > j
- || kthread_should_stop());
+ while (j >= mddev->resync_max && !kthread_should_stop()) {
+ /* As this condition is controlled by user-space,
+ * we can block indefinitely, so use '_interruptible'
+ * to avoid triggering warnings.
+ */
+ flush_signals(current); /* just in case */
+ wait_event_interruptible(mddev->recovery_wait,
+ mddev->resync_max > j
+ || kthread_should_stop());
+ }
if (kthread_should_stop())
goto interrupted;
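
Stated on its own, the pattern the md_do_sync() hunk above switches to: when the wake-up condition (resync_max here) is under user-space control the wait can be unbounded, so an uninterruptible wait would eventually trip the hung-task watchdog. Waiting interruptibly inside a loop, flushing stray signals first, avoids the warning while still re-checking the condition on every wake-up. The waitq and condition_met() below are placeholders for the example.

#include <linux/types.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/kthread.h>

static void wait_on_user_controlled_condition(wait_queue_head_t *waitq,
					      bool (*condition_met)(void))
{
	while (!condition_met() && !kthread_should_stop()) {
		/* discard pending signals; we only care about wake-ups */
		flush_signals(current);
		wait_event_interruptible(*waitq,
					 condition_met() ||
					 kthread_should_stop());
	}
}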
diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c
index cbe368fa659..237fe3fd235 100644
--- a/drivers/md/multipath.c
+++ b/drivers/md/multipath.c
@@ -294,7 +294,8 @@ static int multipath_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
for (path = first; path <= last; path++)
if ((p=conf->multipaths+path)->rdev == NULL) {
q = rdev->bdev->bd_disk->queue;
- blk_queue_stack_limits(mddev->queue, q);
+ disk_stack_limits(mddev->gendisk, rdev->bdev,
+ rdev->data_offset << 9);
/* as we don't honour merge_bvec_fn, we must never risk
* violating it, so limit ->max_sector to one PAGE, as
@@ -463,9 +464,9 @@ static int multipath_run (mddev_t *mddev)
disk = conf->multipaths + disk_idx;
disk->rdev = rdev;
+ disk_stack_limits(mddev->gendisk, rdev->bdev,
+ rdev->data_offset << 9);
- blk_queue_stack_limits(mddev->queue,
- rdev->bdev->bd_disk->queue);
/* as we don't honour merge_bvec_fn, we must never risk
* violating it, not that we ever expect a device with
* a merge_bvec_fn to be involved in multipath */
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index ab4a489d869..335f490dcad 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -170,8 +170,8 @@ static int create_strip_zones(mddev_t *mddev)
}
dev[j] = rdev1;
- blk_queue_stack_limits(mddev->queue,
- rdev1->bdev->bd_disk->queue);
+ disk_stack_limits(mddev->gendisk, rdev1->bdev,
+ rdev1->data_offset << 9);
/* as we don't honour merge_bvec_fn, we must never risk
* violating it, so limit ->max_sector to one PAGE, as
* a one page request is never in violation.
@@ -250,6 +250,11 @@ static int create_strip_zones(mddev_t *mddev)
mddev->chunk_sectors << 9);
goto abort;
}
+
+ blk_queue_io_min(mddev->queue, mddev->chunk_sectors << 9);
+ blk_queue_io_opt(mddev->queue,
+ (mddev->chunk_sectors << 9) * mddev->raid_disks);
+
printk(KERN_INFO "raid0: done.\n");
mddev->private = conf;
return 0;
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 89939a7aef5..0569efba0c0 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -1123,8 +1123,8 @@ static int raid1_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
for (mirror = first; mirror <= last; mirror++)
if ( !(p=conf->mirrors+mirror)->rdev) {
- blk_queue_stack_limits(mddev->queue,
- rdev->bdev->bd_disk->queue);
+ disk_stack_limits(mddev->gendisk, rdev->bdev,
+ rdev->data_offset << 9);
/* as we don't honour merge_bvec_fn, we must never risk
* violating it, so limit ->max_sector to one PAGE, as
* a one page request is never in violation.
@@ -1988,9 +1988,8 @@ static int run(mddev_t *mddev)
disk = conf->mirrors + disk_idx;
disk->rdev = rdev;
-
- blk_queue_stack_limits(mddev->queue,
- rdev->bdev->bd_disk->queue);
+ disk_stack_limits(mddev->gendisk, rdev->bdev,
+ rdev->data_offset << 9);
/* as we don't honour merge_bvec_fn, we must never risk
* violating it, so limit ->max_sector to one PAGE, as
* a one page request is never in violation.
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index ae12ceafe10..7298a5e5a18 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -1151,8 +1151,8 @@ static int raid10_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
for ( ; mirror <= last ; mirror++)
if ( !(p=conf->mirrors+mirror)->rdev) {
- blk_queue_stack_limits(mddev->queue,
- rdev->bdev->bd_disk->queue);
+ disk_stack_limits(mddev->gendisk, rdev->bdev,
+ rdev->data_offset << 9);
/* as we don't honour merge_bvec_fn, we must never risk
* violating it, so limit ->max_sector to one PAGE, as
* a one page request is never in violation.
@@ -2044,7 +2044,7 @@ raid10_size(mddev_t *mddev, sector_t sectors, int raid_disks)
static int run(mddev_t *mddev)
{
conf_t *conf;
- int i, disk_idx;
+ int i, disk_idx, chunk_size;
mirror_info_t *disk;
mdk_rdev_t *rdev;
int nc, fc, fo;
@@ -2130,6 +2130,14 @@ static int run(mddev_t *mddev)
spin_lock_init(&conf->device_lock);
mddev->queue->queue_lock = &conf->device_lock;
+ chunk_size = mddev->chunk_sectors << 9;
+ blk_queue_io_min(mddev->queue, chunk_size);
+ if (conf->raid_disks % conf->near_copies)
+ blk_queue_io_opt(mddev->queue, chunk_size * conf->raid_disks);
+ else
+ blk_queue_io_opt(mddev->queue, chunk_size *
+ (conf->raid_disks / conf->near_copies));
+
list_for_each_entry(rdev, &mddev->disks, same_set) {
disk_idx = rdev->raid_disk;
if (disk_idx >= mddev->raid_disks
@@ -2138,9 +2146,8 @@ static int run(mddev_t *mddev)
disk = conf->mirrors + disk_idx;
disk->rdev = rdev;
-
- blk_queue_stack_limits(mddev->queue,
- rdev->bdev->bd_disk->queue);
+ disk_stack_limits(mddev->gendisk, rdev->bdev,
+ rdev->data_offset << 9);
/* as we don't honour merge_bvec_fn, we must never risk
* violating it, so limit ->max_sector to one PAGE, as
* a one page request is never in violation.
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index f9f991e6e13..37835538b58 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -3699,13 +3699,21 @@ static int make_request(struct request_queue *q, struct bio * bi)
goto retry;
}
}
- /* FIXME what if we get a false positive because these
- * are being updated.
- */
- if (logical_sector >= mddev->suspend_lo &&
+
+ if (bio_data_dir(bi) == WRITE &&
+ logical_sector >= mddev->suspend_lo &&
logical_sector < mddev->suspend_hi) {
release_stripe(sh);
- schedule();
+ /* As the suspend_* range is controlled by
+ * userspace, we want an interruptible
+ * wait.
+ */
+ flush_signals(current);
+ prepare_to_wait(&conf->wait_for_overlap,
+ &w, TASK_INTERRUPTIBLE);
+ if (logical_sector >= mddev->suspend_lo &&
+ logical_sector < mddev->suspend_hi)
+ schedule();
goto retry;
}
@@ -4452,7 +4460,7 @@ static raid5_conf_t *setup_conf(mddev_t *mddev)
static int run(mddev_t *mddev)
{
raid5_conf_t *conf;
- int working_disks = 0;
+ int working_disks = 0, chunk_size;
mdk_rdev_t *rdev;
if (mddev->recovery_cp != MaxSector)
@@ -4607,6 +4615,14 @@ static int run(mddev_t *mddev)
md_set_array_sectors(mddev, raid5_size(mddev, 0, 0));
blk_queue_merge_bvec(mddev->queue, raid5_mergeable_bvec);
+ chunk_size = mddev->chunk_sectors << 9;
+ blk_queue_io_min(mddev->queue, chunk_size);
+ blk_queue_io_opt(mddev->queue, chunk_size *
+ (conf->raid_disks - conf->max_degraded));
+
+ list_for_each_entry(rdev, &mddev->disks, same_set)
+ disk_stack_limits(mddev->gendisk, rdev->bdev,
+ rdev->data_offset << 9);
return 0;
abort:
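
Across the raid0, raid10 and raid5 hunks above the same two topology hints are exported: io_min is the chunk size in bytes and io_opt is one full stripe. The helper below merely restates those formulas in one place; it is an illustration derived from the hunks, not md code, and the level-based dispatch is a simplification.

/* chunk_sectors is in 512-byte sectors; the return value is io_opt in bytes.
 * io_min for every level is simply chunk_sectors << 9. */
static unsigned int io_opt_bytes(int level, unsigned int chunk_sectors,
				 unsigned int raid_disks,
				 unsigned int near_copies,
				 unsigned int max_degraded)
{
	unsigned int chunk = chunk_sectors << 9;

	switch (level) {
	case 0:		/* raid0: a stripe spans every disk */
		return chunk * raid_disks;
	case 10:	/* raid10: account for near copies when they divide evenly */
		if (raid_disks % near_copies)
			return chunk * raid_disks;
		return chunk * (raid_disks / near_copies);
	default:	/* raid4/5/6: parity disks carry no user data */
		return chunk * (raid_disks - max_degraded);
	}
}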
diff --git a/drivers/mmc/host/mmc_spi.c b/drivers/mmc/host/mmc_spi.c
index 240608cc7ae..a461017ce5c 100644
--- a/drivers/mmc/host/mmc_spi.c
+++ b/drivers/mmc/host/mmc_spi.c
@@ -1313,6 +1313,12 @@ static int mmc_spi_probe(struct spi_device *spi)
struct mmc_spi_host *host;
int status;
+ /* We rely on full duplex transfers, mostly to reduce
+ * per-transfer overheads (by making fewer transfers).
+ */
+ if (spi->master->flags & SPI_MASTER_HALF_DUPLEX)
+ return -EINVAL;
+
/* MMC and SD specs only seem to care that sampling is on the
* rising edge ... meaning SPI modes 0 or 3. So either SPI mode
* should be legit. We'll use mode 0 since the steady state is 0,
diff --git a/drivers/mtd/cmdlinepart.c b/drivers/mtd/cmdlinepart.c
index 5011fa73f91..1479da6d3aa 100644
--- a/drivers/mtd/cmdlinepart.c
+++ b/drivers/mtd/cmdlinepart.c
@@ -194,7 +194,7 @@ static struct mtd_partition * newpart(char *s,
parts[this_part].name = extra_mem;
extra_mem += name_len + 1;
- dbg(("partition %d: name <%s>, offset %x, size %x, mask flags %x\n",
+ dbg(("partition %d: name <%s>, offset %llx, size %llx, mask flags %x\n",
this_part,
parts[this_part].name,
parts[this_part].offset,
diff --git a/drivers/mtd/devices/m25p80.c b/drivers/mtd/devices/m25p80.c
index 59c46126a5c..ae5fe91867e 100644
--- a/drivers/mtd/devices/m25p80.c
+++ b/drivers/mtd/devices/m25p80.c
@@ -54,7 +54,7 @@
#define SR_SRWD 0x80 /* SR write protect */
/* Define max times to check status register before we give up. */
-#define MAX_READY_WAIT_JIFFIES (10 * HZ) /* eg. M25P128 specs 6s max sector erase */
+#define MAX_READY_WAIT_JIFFIES (40 * HZ) /* M25P16 specs 40s max chip erase */
#define CMD_SIZE 4
#ifdef CONFIG_M25PXX_USE_FAST_READ
diff --git a/drivers/mtd/inftlcore.c b/drivers/mtd/inftlcore.c
index 73f05227dc8..d8cf29c01cc 100644
--- a/drivers/mtd/inftlcore.c
+++ b/drivers/mtd/inftlcore.c
@@ -226,7 +226,7 @@ static u16 INFTL_findfreeblock(struct INFTLrecord *inftl, int desperate)
if (!desperate && inftl->numfreeEUNs < 2) {
DEBUG(MTD_DEBUG_LEVEL1, "INFTL: there are too few free "
"EUNs (%d)\n", inftl->numfreeEUNs);
- return 0xffff;
+ return BLOCK_NIL;
}
/* Scan for a free block */
@@ -281,7 +281,8 @@ static u16 INFTL_foldchain(struct INFTLrecord *inftl, unsigned thisVUC, unsigned
silly = MAX_LOOPS;
while (thisEUN < inftl->nb_blocks) {
for (block = 0; block < inftl->EraseSize/SECTORSIZE; block ++) {
- if ((BlockMap[block] != 0xffff) || BlockDeleted[block])
+ if ((BlockMap[block] != BLOCK_NIL) ||
+ BlockDeleted[block])
continue;
if (inftl_read_oob(mtd, (thisEUN * inftl->EraseSize)
@@ -525,7 +526,7 @@ static inline u16 INFTL_findwriteunit(struct INFTLrecord *inftl, unsigned block)
if (!silly--) {
printk(KERN_WARNING "INFTL: infinite loop in "
"Virtual Unit Chain 0x%x\n", thisVUC);
- return 0xffff;
+ return BLOCK_NIL;
}
/* Skip to next block in chain */
@@ -549,7 +550,7 @@ hitused:
* waiting to be picked up. We're going to have to fold
* a chain to make room.
*/
- thisEUN = INFTL_makefreeblock(inftl, 0xffff);
+ thisEUN = INFTL_makefreeblock(inftl, BLOCK_NIL);
/*
* Hopefully we free something, lets try again.
@@ -631,7 +632,7 @@ hitused:
printk(KERN_WARNING "INFTL: error folding to make room for Virtual "
"Unit Chain 0x%x\n", thisVUC);
- return 0xffff;
+ return BLOCK_NIL;
}
/*
diff --git a/drivers/mtd/maps/integrator-flash.c b/drivers/mtd/maps/integrator-flash.c
index b08a798ee25..2aac41bde8b 100644
--- a/drivers/mtd/maps/integrator-flash.c
+++ b/drivers/mtd/maps/integrator-flash.c
@@ -42,10 +42,8 @@
#include <mach/hardware.h>
#include <asm/system.h>
-#define SUBDEV_NAME_SIZE (BUS_ID_SIZE + 2)
-
struct armflash_subdev_info {
- char name[SUBDEV_NAME_SIZE];
+ char *name;
struct mtd_info *mtd;
struct map_info map;
struct flash_platform_data *plat;
@@ -134,6 +132,8 @@ static void armflash_subdev_remove(struct armflash_subdev_info *subdev)
map_destroy(subdev->mtd);
if (subdev->map.virt)
iounmap(subdev->map.virt);
+ kfree(subdev->name);
+ subdev->name = NULL;
release_mem_region(subdev->map.phys, subdev->map.size);
}
@@ -177,16 +177,22 @@ static int armflash_probe(struct platform_device *dev)
if (nr == 1)
/* No MTD concatenation, just use the default name */
- snprintf(subdev->name, SUBDEV_NAME_SIZE, "%s",
- dev_name(&dev->dev));
+ subdev->name = kstrdup(dev_name(&dev->dev), GFP_KERNEL);
else
- snprintf(subdev->name, SUBDEV_NAME_SIZE, "%s-%d",
- dev_name(&dev->dev), i);
+ subdev->name = kasprintf(GFP_KERNEL, "%s-%d",
+ dev_name(&dev->dev), i);
+ if (!subdev->name) {
+ err = -ENOMEM;
+ break;
+ }
subdev->plat = plat;
err = armflash_subdev_probe(subdev, res);
- if (err)
+ if (err) {
+ kfree(subdev->name);
+ subdev->name = NULL;
break;
+ }
}
info->nr_subdev = i;
diff --git a/drivers/mtd/nand/atmel_nand.c b/drivers/mtd/nand/atmel_nand.c
index 2802992b39d..20c828ba940 100644
--- a/drivers/mtd/nand/atmel_nand.c
+++ b/drivers/mtd/nand/atmel_nand.c
@@ -534,7 +534,7 @@ static int __init atmel_nand_probe(struct platform_device *pdev)
&num_partitions);
if ((!partitions) || (num_partitions == 0)) {
- printk(KERN_ERR "atmel_nand: No parititions defined, or unsupported device.\n");
+ printk(KERN_ERR "atmel_nand: No partitions defined, or unsupported device.\n");
res = ENXIO;
goto err_no_partitions;
}
diff --git a/drivers/mtd/nand/omap2.c b/drivers/mtd/nand/omap2.c
index 0cd76f89f4b..ebd07e95b81 100644
--- a/drivers/mtd/nand/omap2.c
+++ b/drivers/mtd/nand/omap2.c
@@ -11,6 +11,8 @@
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
+#include <linux/jiffies.h>
+#include <linux/sched.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>
#include <linux/mtd/partitions.h>
@@ -541,7 +543,7 @@ static int omap_wait(struct mtd_info *mtd, struct nand_chip *chip)
struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
mtd);
unsigned long timeo = jiffies;
- int status, state = this->state;
+ int status = NAND_STATUS_FAIL, state = this->state;
if (state == FL_ERASING)
timeo += (HZ * 400) / 1000;
@@ -556,8 +558,9 @@ static int omap_wait(struct mtd_info *mtd, struct nand_chip *chip)
while (time_before(jiffies, timeo)) {
status = __raw_readb(this->IO_ADDR_R);
- if (!(status & 0x40))
+ if (status & NAND_STATUS_READY)
break;
+ cond_resched();
}
return status;
}
diff --git a/drivers/mtd/nftlcore.c b/drivers/mtd/nftlcore.c
index e3f8495a94c..fb86cacd5bd 100644
--- a/drivers/mtd/nftlcore.c
+++ b/drivers/mtd/nftlcore.c
@@ -208,7 +208,7 @@ static u16 NFTL_findfreeblock(struct NFTLrecord *nftl, int desperate )
/* Normally, we force a fold to happen before we run out of free blocks completely */
if (!desperate && nftl->numfreeEUNs < 2) {
DEBUG(MTD_DEBUG_LEVEL1, "NFTL_findfreeblock: there are too few free EUNs\n");
- return 0xffff;
+ return BLOCK_NIL;
}
/* Scan for a free block */
@@ -230,11 +230,11 @@ static u16 NFTL_findfreeblock(struct NFTLrecord *nftl, int desperate )
printk("Argh! No free blocks found! LastFreeEUN = %d, "
"FirstEUN = %d\n", nftl->LastFreeEUN,
le16_to_cpu(nftl->MediaHdr.FirstPhysicalEUN));
- return 0xffff;
+ return BLOCK_NIL;
}
} while (pot != nftl->LastFreeEUN);
- return 0xffff;
+ return BLOCK_NIL;
}
static u16 NFTL_foldchain (struct NFTLrecord *nftl, unsigned thisVUC, unsigned pendingblock )
@@ -431,7 +431,7 @@ static u16 NFTL_foldchain (struct NFTLrecord *nftl, unsigned thisVUC, unsigned p
/* add the header so that it is now a valid chain */
oob.u.a.VirtUnitNum = oob.u.a.SpareVirtUnitNum = cpu_to_le16(thisVUC);
- oob.u.a.ReplUnitNum = oob.u.a.SpareReplUnitNum = 0xffff;
+ oob.u.a.ReplUnitNum = oob.u.a.SpareReplUnitNum = BLOCK_NIL;
nftl_write_oob(mtd, (nftl->EraseSize * targetEUN) + 8,
8, &retlen, (char *)&oob.u);
@@ -515,7 +515,7 @@ static u16 NFTL_makefreeblock( struct NFTLrecord *nftl , unsigned pendingblock)
if (ChainLength < 2) {
printk(KERN_WARNING "No Virtual Unit Chains available for folding. "
"Failing request\n");
- return 0xffff;
+ return BLOCK_NIL;
}
return NFTL_foldchain (nftl, LongestChain, pendingblock);
@@ -578,7 +578,7 @@ static inline u16 NFTL_findwriteunit(struct NFTLrecord *nftl, unsigned block)
printk(KERN_WARNING
"Infinite loop in Virtual Unit Chain 0x%x\n",
thisVUC);
- return 0xffff;
+ return BLOCK_NIL;
}
/* Skip to next block in chain */
@@ -601,7 +601,7 @@ static inline u16 NFTL_findwriteunit(struct NFTLrecord *nftl, unsigned block)
//u16 startEUN = nftl->EUNtable[thisVUC];
//printk("Write to VirtualUnitChain %d, calling makefreeblock()\n", thisVUC);
- writeEUN = NFTL_makefreeblock(nftl, 0xffff);
+ writeEUN = NFTL_makefreeblock(nftl, BLOCK_NIL);
if (writeEUN == BLOCK_NIL) {
/* OK, we accept that the above comment is
@@ -673,7 +673,7 @@ static inline u16 NFTL_findwriteunit(struct NFTLrecord *nftl, unsigned block)
printk(KERN_WARNING "Error folding to make room for Virtual Unit Chain 0x%x\n",
thisVUC);
- return 0xffff;
+ return BLOCK_NIL;
}
static int nftl_writeblock(struct mtd_blktrans_dev *mbd, unsigned long block,
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 1dc721517e4..c155bd3ec9f 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -1725,6 +1725,7 @@ config TLAN
config KS8842
tristate "Micrel KSZ8842"
+ depends on HAS_IOMEM
help
This platform driver is for Micrel KSZ8842 chip.
diff --git a/drivers/net/atl1c/atl1c_ethtool.c b/drivers/net/atl1c/atl1c_ethtool.c
index e4afbd628c2..607007d75b6 100644
--- a/drivers/net/atl1c/atl1c_ethtool.c
+++ b/drivers/net/atl1c/atl1c_ethtool.c
@@ -281,6 +281,8 @@ static int atl1c_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
if (wol->wolopts & WAKE_PHY)
adapter->wol |= AT_WUFC_LNKC;
+ device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
+
return 0;
}
diff --git a/drivers/net/atl1e/atl1e_ethtool.c b/drivers/net/atl1e/atl1e_ethtool.c
index 619c6583e1a..4003955d7a9 100644
--- a/drivers/net/atl1e/atl1e_ethtool.c
+++ b/drivers/net/atl1e/atl1e_ethtool.c
@@ -365,6 +365,8 @@ static int atl1e_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
if (wol->wolopts & WAKE_PHY)
adapter->wol |= AT_WUFC_LNKC;
+ device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
+
return 0;
}
diff --git a/drivers/net/benet/be.h b/drivers/net/benet/be.h
index f703758f0a6..5b4bf3d2cdc 100644
--- a/drivers/net/benet/be.h
+++ b/drivers/net/benet/be.h
@@ -73,7 +73,7 @@ static inline char *nic_name(struct pci_dev *pdev)
#define RX_FRAGS_REFILL_WM (RX_Q_LEN - MAX_RX_POST)
#define BE_MAX_LRO_DESCRIPTORS 16
-#define BE_MAX_FRAGS_PER_FRAME 16
+#define BE_MAX_FRAGS_PER_FRAME (min((u32) 16, (u32) MAX_SKB_FRAGS))
struct be_dma_mem {
void *va;
diff --git a/drivers/net/benet/be_ethtool.c b/drivers/net/benet/be_ethtool.c
index 9592f22e4c8..cccc5419ad7 100644
--- a/drivers/net/benet/be_ethtool.c
+++ b/drivers/net/benet/be_ethtool.c
@@ -162,8 +162,8 @@ be_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *coalesce)
return -EINVAL;
adapter->max_rx_coal = coalesce->rx_max_coalesced_frames;
- if (adapter->max_rx_coal > MAX_SKB_FRAGS)
- adapter->max_rx_coal = MAX_SKB_FRAGS - 1;
+ if (adapter->max_rx_coal > BE_MAX_FRAGS_PER_FRAME)
+ adapter->max_rx_coal = BE_MAX_FRAGS_PER_FRAME;
/* if AIC is being turned on now, start with an EQD of 0 */
if (rx_eq->enable_aic == 0 &&
diff --git a/drivers/net/benet/be_main.c b/drivers/net/benet/be_main.c
index 66c10c87f51..308eb09ca56 100644
--- a/drivers/net/benet/be_main.c
+++ b/drivers/net/benet/be_main.c
@@ -666,7 +666,7 @@ static void skb_fill_rx_data(struct be_adapter *adapter,
{
struct be_queue_info *rxq = &adapter->rx_obj.q;
struct be_rx_page_info *page_info;
- u16 rxq_idx, i, num_rcvd;
+ u16 rxq_idx, i, num_rcvd, j;
u32 pktsize, hdr_len, curr_frag_len;
u8 *start;
@@ -709,22 +709,33 @@ static void skb_fill_rx_data(struct be_adapter *adapter,
/* More frags present for this completion */
pktsize -= curr_frag_len; /* account for above copied frag */
- for (i = 1; i < num_rcvd; i++) {
+ for (i = 1, j = 0; i < num_rcvd; i++) {
index_inc(&rxq_idx, rxq->len);
page_info = get_rx_page_info(adapter, rxq_idx);
curr_frag_len = min(pktsize, rx_frag_size);
- skb_shinfo(skb)->frags[i].page = page_info->page;
- skb_shinfo(skb)->frags[i].page_offset = page_info->page_offset;
- skb_shinfo(skb)->frags[i].size = curr_frag_len;
+ /* Coalesce all frags from the same physical page in one slot */
+ if (page_info->page_offset == 0) {
+ /* Fresh page */
+ j++;
+ skb_shinfo(skb)->frags[j].page = page_info->page;
+ skb_shinfo(skb)->frags[j].page_offset =
+ page_info->page_offset;
+ skb_shinfo(skb)->frags[j].size = 0;
+ skb_shinfo(skb)->nr_frags++;
+ } else {
+ put_page(page_info->page);
+ }
+
+ skb_shinfo(skb)->frags[j].size += curr_frag_len;
skb->len += curr_frag_len;
skb->data_len += curr_frag_len;
- skb_shinfo(skb)->nr_frags++;
pktsize -= curr_frag_len;
memset(page_info, 0, sizeof(*page_info));
}
+ BUG_ON(j > MAX_SKB_FRAGS);
done:
be_rx_stats_update(adapter, pktsize, num_rcvd);
@@ -786,7 +797,7 @@ static void be_rx_compl_process_lro(struct be_adapter *adapter,
struct skb_frag_struct rx_frags[BE_MAX_FRAGS_PER_FRAME];
struct be_queue_info *rxq = &adapter->rx_obj.q;
u32 num_rcvd, pkt_size, remaining, vlanf, curr_frag_len;
- u16 i, rxq_idx = 0, vid;
+ u16 i, rxq_idx = 0, vid, j;
num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);
pkt_size = AMAP_GET_BITS(struct amap_eth_rx_compl, pktsize, rxcp);
@@ -794,20 +805,28 @@ static void be_rx_compl_process_lro(struct be_adapter *adapter,
rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
remaining = pkt_size;
- for (i = 0; i < num_rcvd; i++) {
+ for (i = 0, j = -1; i < num_rcvd; i++) {
page_info = get_rx_page_info(adapter, rxq_idx);
curr_frag_len = min(remaining, rx_frag_size);
- rx_frags[i].page = page_info->page;
- rx_frags[i].page_offset = page_info->page_offset;
- rx_frags[i].size = curr_frag_len;
- remaining -= curr_frag_len;
+ /* Coalesce all frags from the same physical page in one slot */
+ if (i == 0 || page_info->page_offset == 0) {
+ /* First frag or Fresh page */
+ j++;
+ rx_frags[j].page = page_info->page;
+ rx_frags[j].page_offset = page_info->page_offset;
+ rx_frags[j].size = 0;
+ } else {
+ put_page(page_info->page);
+ }
+ rx_frags[j].size += curr_frag_len;
+ remaining -= curr_frag_len;
index_inc(&rxq_idx, rxq->len);
-
memset(page_info, 0, sizeof(*page_info));
}
+ BUG_ON(j > MAX_SKB_FRAGS);
if (likely(!vlanf)) {
lro_receive_frags(&adapter->rx_obj.lro_mgr, rx_frags, pkt_size,
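
The rule both be_main.c hunks above apply, isolated into a stand-alone sketch: successive receive fragments that land in the same physical page (recognisable by a non-zero page_offset) are merged into the previous fragment slot instead of consuming a new one, so the number of slots used can never exceed the number of distinct pages. The types and names below are simplified stand-ins, not driver or skb API.

struct frag { void *page; unsigned int offset; unsigned int size; };

/* Returns the number of output slots used; 'out' must have room for 'n'. */
static int coalesce_frags(const struct frag *in, unsigned int n,
			  struct frag *out)
{
	int j = -1;
	unsigned int i;

	for (i = 0; i < n; i++) {
		if (i == 0 || in[i].offset == 0) {
			/* first fragment, or a fresh page: open a new slot */
			j++;
			out[j].page = in[i].page;
			out[j].offset = in[i].offset;
			out[j].size = 0;
		}
		/* either way, account for this fragment in the current slot */
		out[j].size += in[i].size;
	}

	return j + 1;
}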
diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
index 38f1c3375d7..b70cc99962f 100644
--- a/drivers/net/bnx2.c
+++ b/drivers/net/bnx2.c
@@ -6825,6 +6825,14 @@ bnx2_nway_reset(struct net_device *dev)
return 0;
}
+static u32
+bnx2_get_link(struct net_device *dev)
+{
+ struct bnx2 *bp = netdev_priv(dev);
+
+ return bp->link_up;
+}
+
static int
bnx2_get_eeprom_len(struct net_device *dev)
{
@@ -7392,7 +7400,7 @@ static const struct ethtool_ops bnx2_ethtool_ops = {
.get_wol = bnx2_get_wol,
.set_wol = bnx2_set_wol,
.nway_reset = bnx2_nway_reset,
- .get_link = ethtool_op_get_link,
+ .get_link = bnx2_get_link,
.get_eeprom_len = bnx2_get_eeprom_len,
.get_eeprom = bnx2_get_eeprom,
.set_eeprom = bnx2_set_eeprom,
diff --git a/drivers/net/bnx2x_main.c b/drivers/net/bnx2x_main.c
index fbf1352e9c1..951714a7f90 100644
--- a/drivers/net/bnx2x_main.c
+++ b/drivers/net/bnx2x_main.c
@@ -8637,6 +8637,14 @@ static int bnx2x_nway_reset(struct net_device *dev)
return 0;
}
+static u32
+bnx2x_get_link(struct net_device *dev)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+
+ return bp->link_vars.link_up;
+}
+
static int bnx2x_get_eeprom_len(struct net_device *dev)
{
struct bnx2x *bp = netdev_priv(dev);
@@ -10034,7 +10042,7 @@ static struct ethtool_ops bnx2x_ethtool_ops = {
.get_msglevel = bnx2x_get_msglevel,
.set_msglevel = bnx2x_set_msglevel,
.nway_reset = bnx2x_nway_reset,
- .get_link = ethtool_op_get_link,
+ .get_link = bnx2x_get_link,
.get_eeprom_len = bnx2x_get_eeprom_len,
.get_eeprom = bnx2x_get_eeprom,
.set_eeprom = bnx2x_set_eeprom,
diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig
index d5e18812bf4..33821a81cbf 100644
--- a/drivers/net/can/Kconfig
+++ b/drivers/net/can/Kconfig
@@ -36,7 +36,7 @@ config CAN_CALC_BITTIMING
If unsure, say Y.
config CAN_SJA1000
- depends on CAN_DEV
+ depends on CAN_DEV && HAS_IOMEM
tristate "Philips SJA1000"
---help---
Driver for the SJA1000 CAN controllers from Philips or NXP
diff --git a/drivers/net/cpmac.c b/drivers/net/cpmac.c
index 58afafbd3b9..fd5e32cbcb8 100644
--- a/drivers/net/cpmac.c
+++ b/drivers/net/cpmac.c
@@ -1097,7 +1097,7 @@ static const struct net_device_ops cpmac_netdev_ops = {
.ndo_start_xmit = cpmac_start_xmit,
.ndo_tx_timeout = cpmac_tx_timeout,
.ndo_set_multicast_list = cpmac_set_multicast_list,
- .ndo_so_ioctl = cpmac_ioctl,
+ .ndo_do_ioctl = cpmac_ioctl,
.ndo_set_config = cpmac_config,
.ndo_change_mtu = eth_change_mtu,
.ndo_validate_addr = eth_validate_addr,
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index 5e3356f8eb5..5b8cbdb4b52 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -2185,12 +2185,16 @@ static void e1000_clean_rx_ring(struct e1000_adapter *adapter,
/* Free all the Rx ring sk_buffs */
for (i = 0; i < rx_ring->count; i++) {
buffer_info = &rx_ring->buffer_info[i];
- if (buffer_info->skb) {
+ if (buffer_info->dma) {
pci_unmap_single(pdev,
buffer_info->dma,
buffer_info->length,
PCI_DMA_FROMDEVICE);
+ }
+
+ buffer_info->dma = 0;
+ if (buffer_info->skb) {
dev_kfree_skb(buffer_info->skb);
buffer_info->skb = NULL;
}
@@ -4033,6 +4037,7 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
buffer_info->dma,
buffer_info->length,
PCI_DMA_FROMDEVICE);
+ buffer_info->dma = 0;
length = le16_to_cpu(rx_desc->length);
/* !EOP means multiple descriptors were used to store a single
@@ -4222,6 +4227,7 @@ map_skb:
pci_unmap_single(pdev, buffer_info->dma,
adapter->rx_buffer_len,
PCI_DMA_FROMDEVICE);
+ buffer_info->dma = 0;
break; /* while !buffer_info->skb */
}
@@ -4817,6 +4823,9 @@ static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
netif_device_detach(netdev);
+ if (state == pci_channel_io_perm_failure)
+ return PCI_ERS_RESULT_DISCONNECT;
+
if (netif_running(netdev))
e1000_down(adapter);
pci_disable_device(pdev);
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c
index 679885a122b..63415bb6f48 100644
--- a/drivers/net/e1000e/netdev.c
+++ b/drivers/net/e1000e/netdev.c
@@ -4785,6 +4785,9 @@ static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
netif_device_detach(netdev);
+ if (state == pci_channel_io_perm_failure)
+ return PCI_ERS_RESULT_DISCONNECT;
+
if (netif_running(netdev))
e1000e_down(adapter);
pci_disable_device(pdev);
diff --git a/drivers/net/fsl_pq_mdio.c b/drivers/net/fsl_pq_mdio.c
index 3af581303ca..d167090248e 100644
--- a/drivers/net/fsl_pq_mdio.c
+++ b/drivers/net/fsl_pq_mdio.c
@@ -188,7 +188,7 @@ static int fsl_pq_mdio_find_free(struct mii_bus *new_bus)
}
-#ifdef CONFIG_GIANFAR
+#if defined(CONFIG_GIANFAR) || defined(CONFIG_GIANFAR_MODULE)
static u32 __iomem *get_gfar_tbipa(struct fsl_pq_mdio __iomem *regs)
{
struct gfar __iomem *enet_regs;
@@ -206,7 +206,7 @@ static u32 __iomem *get_gfar_tbipa(struct fsl_pq_mdio __iomem *regs)
#endif
-#ifdef CONFIG_UCC_GETH
+#if defined(CONFIG_UCC_GETH) || defined(CONFIG_UCC_GETH_MODULE)
static int get_ucc_id_for_range(u64 start, u64 end, u32 *ucc_id)
{
struct device_node *np = NULL;
@@ -291,7 +291,7 @@ static int fsl_pq_mdio_probe(struct of_device *ofdev,
if (of_device_is_compatible(np, "fsl,gianfar-mdio") ||
of_device_is_compatible(np, "fsl,gianfar-tbi") ||
of_device_is_compatible(np, "gianfar")) {
-#ifdef CONFIG_GIANFAR
+#if defined(CONFIG_GIANFAR) || defined(CONFIG_GIANFAR_MODULE)
tbipa = get_gfar_tbipa(regs);
#else
err = -ENODEV;
@@ -299,7 +299,7 @@ static int fsl_pq_mdio_probe(struct of_device *ofdev,
#endif
} else if (of_device_is_compatible(np, "fsl,ucc-mdio") ||
of_device_is_compatible(np, "ucc_geth_phy")) {
-#ifdef CONFIG_UCC_GETH
+#if defined(CONFIG_UCC_GETH) || defined(CONFIG_UCC_GETH_MODULE)
u32 id;
static u32 mii_mng_master;
diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c
index ea17319624a..be480292aba 100644
--- a/drivers/net/igb/igb_main.c
+++ b/drivers/net/igb/igb_main.c
@@ -4549,11 +4549,12 @@ static bool igb_clean_rx_irq_adv(struct igb_ring *rx_ring,
cleaned = true;
cleaned_count++;
+ /* this is the fast path for the non-packet split case */
if (!adapter->rx_ps_hdr_size) {
pci_unmap_single(pdev, buffer_info->dma,
- adapter->rx_buffer_len +
- NET_IP_ALIGN,
+ adapter->rx_buffer_len,
PCI_DMA_FROMDEVICE);
+ buffer_info->dma = 0;
skb_put(skb, length);
goto send_up;
}
@@ -4570,8 +4571,9 @@ static bool igb_clean_rx_irq_adv(struct igb_ring *rx_ring,
if (!skb_shinfo(skb)->nr_frags) {
pci_unmap_single(pdev, buffer_info->dma,
- adapter->rx_ps_hdr_size + NET_IP_ALIGN,
+ adapter->rx_ps_hdr_size,
PCI_DMA_FROMDEVICE);
+ buffer_info->dma = 0;
skb_put(skb, hlen);
}
@@ -4713,7 +4715,6 @@ static void igb_alloc_rx_buffers_adv(struct igb_ring *rx_ring,
bufsz = adapter->rx_ps_hdr_size;
else
bufsz = adapter->rx_buffer_len;
- bufsz += NET_IP_ALIGN;
while (cleaned_count--) {
rx_desc = E1000_RX_DESC_ADV(*rx_ring, i);
@@ -4737,7 +4738,7 @@ static void igb_alloc_rx_buffers_adv(struct igb_ring *rx_ring,
}
if (!buffer_info->skb) {
- skb = netdev_alloc_skb(netdev, bufsz);
+ skb = netdev_alloc_skb(netdev, bufsz + NET_IP_ALIGN);
if (!skb) {
adapter->alloc_rx_buff_failed++;
goto no_buffers;
@@ -5338,6 +5339,9 @@ static pci_ers_result_t igb_io_error_detected(struct pci_dev *pdev,
netif_device_detach(netdev);
+ if (state == pci_channel_io_perm_failure)
+ return PCI_ERS_RESULT_DISCONNECT;
+
if (netif_running(netdev))
igb_down(adapter);
pci_disable_device(pdev);
diff --git a/drivers/net/irda/bfin_sir.c b/drivers/net/irda/bfin_sir.c
index f3eed6a8fba..911c082cee5 100644
--- a/drivers/net/irda/bfin_sir.c
+++ b/drivers/net/irda/bfin_sir.c
@@ -677,6 +677,14 @@ static int bfin_sir_init_iobuf(iobuff_t *io, int size)
return 0;
}
+static const struct net_device_ops bfin_sir_ndo = {
+ .ndo_open = bfin_sir_open,
+ .ndo_stop = bfin_sir_stop,
+ .ndo_start_xmit = bfin_sir_hard_xmit,
+ .ndo_do_ioctl = bfin_sir_ioctl,
+ .ndo_get_stats = bfin_sir_stats,
+};
+
static int __devinit bfin_sir_probe(struct platform_device *pdev)
{
struct net_device *dev;
@@ -718,12 +726,8 @@ static int __devinit bfin_sir_probe(struct platform_device *pdev)
if (err)
goto err_mem_3;
- dev->hard_start_xmit = bfin_sir_hard_xmit;
- dev->open = bfin_sir_open;
- dev->stop = bfin_sir_stop;
- dev->do_ioctl = bfin_sir_ioctl;
- dev->get_stats = bfin_sir_stats;
- dev->irq = sir_port->irq;
+ dev->netdev_ops = &bfin_sir_ndo;
+ dev->irq = sir_port->irq;
irda_init_max_qos_capabilies(&self->qos);
diff --git a/drivers/net/ixgbe/ixgbe_ethtool.c b/drivers/net/ixgbe/ixgbe_ethtool.c
index 86f4f3e36f2..0f7b6a3a2e6 100644
--- a/drivers/net/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ixgbe/ixgbe_ethtool.c
@@ -139,7 +139,7 @@ static int ixgbe_get_settings(struct net_device *netdev,
ecmd->autoneg = AUTONEG_ENABLE;
ecmd->transceiver = XCVR_EXTERNAL;
if ((hw->phy.media_type == ixgbe_media_type_copper) ||
- (hw->mac.type == ixgbe_mac_82599EB)) {
+ (hw->phy.multispeed_fiber)) {
ecmd->supported |= (SUPPORTED_1000baseT_Full |
SUPPORTED_Autoneg);
@@ -217,7 +217,7 @@ static int ixgbe_set_settings(struct net_device *netdev,
s32 err = 0;
if ((hw->phy.media_type == ixgbe_media_type_copper) ||
- (hw->mac.type == ixgbe_mac_82599EB)) {
+ (hw->phy.multispeed_fiber)) {
/* 10000/copper and 1000/copper must autoneg
* this function does not support any duplex forcing, but can
* limit the advertising of the adapter to only 10000 or 1000 */
@@ -245,6 +245,7 @@ static int ixgbe_set_settings(struct net_device *netdev,
} else {
/* in this case we currently only support 10Gb/FULL */
if ((ecmd->autoneg == AUTONEG_ENABLE) ||
+ (ecmd->advertising != ADVERTISED_10000baseT_Full) ||
(ecmd->speed + ecmd->duplex != SPEED_10000 + DUPLEX_FULL))
return -EINVAL;
}
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index e756e220db3..5588ef493a3 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -563,7 +563,6 @@ static void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter,
union ixgbe_adv_rx_desc *rx_desc;
struct ixgbe_rx_buffer *bi;
unsigned int i;
- unsigned int bufsz = rx_ring->rx_buf_len + NET_IP_ALIGN;
i = rx_ring->next_to_use;
bi = &rx_ring->rx_buffer_info[i];
@@ -593,7 +592,9 @@ static void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter,
if (!bi->skb) {
struct sk_buff *skb;
- skb = netdev_alloc_skb(adapter->netdev, bufsz);
+ skb = netdev_alloc_skb(adapter->netdev,
+ (rx_ring->rx_buf_len +
+ NET_IP_ALIGN));
if (!skb) {
adapter->alloc_rx_buff_failed++;
@@ -608,7 +609,8 @@ static void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter,
skb_reserve(skb, NET_IP_ALIGN);
bi->skb = skb;
- bi->dma = pci_map_single(pdev, skb->data, bufsz,
+ bi->dma = pci_map_single(pdev, skb->data,
+ rx_ring->rx_buf_len,
PCI_DMA_FROMDEVICE);
}
/* Refresh the desc even if buffer_addrs didn't change because
@@ -732,6 +734,7 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
pci_unmap_single(pdev, rx_buffer_info->dma,
rx_ring->rx_buf_len,
PCI_DMA_FROMDEVICE);
+ rx_buffer_info->dma = 0;
skb_put(skb, len);
}
@@ -2701,7 +2704,10 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
*/
err = hw->phy.ops.identify(hw);
if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
- DPRINTK(PROBE, ERR, "PHY not supported on this NIC %d\n", err);
+ dev_err(&adapter->pdev->dev, "failed to initialize because "
+ "an unsupported SFP+ module type was detected.\n"
+ "Reload the driver after installing a supported "
+ "module.\n");
ixgbe_down(adapter);
return err;
}
@@ -2812,9 +2818,11 @@ static void ixgbe_clean_rx_ring(struct ixgbe_adapter *adapter,
}
if (!rx_buffer_info->page)
continue;
- pci_unmap_page(pdev, rx_buffer_info->page_dma, PAGE_SIZE / 2,
- PCI_DMA_FROMDEVICE);
- rx_buffer_info->page_dma = 0;
+ if (rx_buffer_info->page_dma) {
+ pci_unmap_page(pdev, rx_buffer_info->page_dma,
+ PAGE_SIZE / 2, PCI_DMA_FROMDEVICE);
+ rx_buffer_info->page_dma = 0;
+ }
put_page(rx_buffer_info->page);
rx_buffer_info->page = NULL;
rx_buffer_info->page_offset = 0;
@@ -3720,10 +3728,11 @@ static void ixgbe_sfp_task(struct work_struct *work)
goto reschedule;
ret = hw->phy.ops.reset(hw);
if (ret == IXGBE_ERR_SFP_NOT_SUPPORTED) {
- DPRINTK(PROBE, ERR, "failed to initialize because an "
- "unsupported SFP+ module type was detected.\n"
- "Reload the driver after installing a "
- "supported module.\n");
+ dev_err(&adapter->pdev->dev, "failed to initialize "
+ "because an unsupported SFP+ module type "
+ "was detected.\n"
+ "Reload the driver after installing a "
+ "supported module.\n");
unregister_netdev(adapter->netdev);
} else {
DPRINTK(PROBE, INFO, "detected SFP+: %d\n",
@@ -4502,7 +4511,8 @@ static void ixgbe_multispeed_fiber_task(struct work_struct *work)
u32 autoneg;
adapter->flags |= IXGBE_FLAG_IN_SFP_LINK_TASK;
- if (hw->mac.ops.get_link_capabilities)
+ autoneg = hw->phy.autoneg_advertised;
+ if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
hw->mac.ops.get_link_capabilities(hw, &autoneg,
&hw->mac.autoneg);
if (hw->mac.ops.setup_link_speed)
@@ -4526,7 +4536,10 @@ static void ixgbe_sfp_config_module_task(struct work_struct *work)
adapter->flags |= IXGBE_FLAG_IN_SFP_MOD_TASK;
err = hw->phy.ops.identify_sfp(hw);
if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
- DPRINTK(PROBE, ERR, "PHY not supported on this NIC %d\n", err);
+ dev_err(&adapter->pdev->dev, "failed to initialize because "
+ "an unsupported SFP+ module type was detected.\n"
+ "Reload the driver after installing a supported "
+ "module.\n");
ixgbe_down(adapter);
return;
}
@@ -5513,8 +5526,10 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
round_jiffies(jiffies + (2 * HZ)));
err = 0;
} else if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
- dev_err(&adapter->pdev->dev, "failed to load because an "
- "unsupported SFP+ module type was detected.\n");
+ dev_err(&adapter->pdev->dev, "failed to initialize because "
+ "an unsupported SFP+ module type was detected.\n"
+ "Reload the driver after installing a supported "
+ "module.\n");
goto err_sw_init;
} else if (err) {
dev_err(&adapter->pdev->dev, "HW Init failed: %d\n", err);
diff --git a/drivers/net/mdio.c b/drivers/net/mdio.c
index dc45e9856c3..6851bdb2ce2 100644
--- a/drivers/net/mdio.c
+++ b/drivers/net/mdio.c
@@ -14,6 +14,10 @@
#include <linux/mdio.h>
#include <linux/module.h>
+MODULE_DESCRIPTION("Generic support for MDIO-compatible transceivers");
+MODULE_AUTHOR("Copyright 2006-2009 Solarflare Communications Inc.");
+MODULE_LICENSE("GPL");
+
/**
* mdio45_probe - probe for an MDIO (clause 45) device
* @mdio: MDIO interface
diff --git a/drivers/net/netxen/netxen_nic_init.c b/drivers/net/netxen/netxen_nic_init.c
index bdb143d2b5c..055bb61d6e7 100644
--- a/drivers/net/netxen/netxen_nic_init.c
+++ b/drivers/net/netxen/netxen_nic_init.c
@@ -944,28 +944,31 @@ int netxen_phantom_init(struct netxen_adapter *adapter, int pegtune_val)
u32 val = 0;
int retries = 60;
- if (!pegtune_val) {
- do {
- val = NXRD32(adapter, CRB_CMDPEG_STATE);
+ if (pegtune_val)
+ return 0;
- if (val == PHAN_INITIALIZE_COMPLETE ||
- val == PHAN_INITIALIZE_ACK)
- return 0;
+ do {
+ val = NXRD32(adapter, CRB_CMDPEG_STATE);
- msleep(500);
+ switch (val) {
+ case PHAN_INITIALIZE_COMPLETE:
+ case PHAN_INITIALIZE_ACK:
+ return 0;
+ case PHAN_INITIALIZE_FAILED:
+ goto out_err;
+ default:
+ break;
+ }
- } while (--retries);
+ msleep(500);
- if (!retries) {
- pegtune_val = NXRD32(adapter,
- NETXEN_ROMUSB_GLB_PEGTUNE_DONE);
- printk(KERN_WARNING "netxen_phantom_init: init failed, "
- "pegtune_val=%x\n", pegtune_val);
- return -1;
- }
- }
+ } while (--retries);
- return 0;
+ NXWR32(adapter, CRB_CMDPEG_STATE, PHAN_INITIALIZE_FAILED);
+
+out_err:
+ dev_warn(&adapter->pdev->dev, "firmware init failed\n");
+ return -EIO;
}
static int
diff --git a/drivers/net/netxen/netxen_nic_main.c b/drivers/net/netxen/netxen_nic_main.c
index 71daa3d5f11..2919a2d12bf 100644
--- a/drivers/net/netxen/netxen_nic_main.c
+++ b/drivers/net/netxen/netxen_nic_main.c
@@ -705,7 +705,7 @@ netxen_start_firmware(struct netxen_adapter *adapter, int request_fw)
first_driver = (adapter->ahw.pci_func == 0);
if (!first_driver)
- return 0;
+ goto wait_init;
first_boot = NXRD32(adapter, NETXEN_CAM_RAM(0x1fc));
@@ -752,6 +752,7 @@ netxen_start_firmware(struct netxen_adapter *adapter, int request_fw)
| (_NETXEN_NIC_LINUX_SUBVERSION);
NXWR32(adapter, CRB_DRIVER_VERSION, val);
+wait_init:
/* Handshake with the card before we register the devices. */
err = netxen_phantom_init(adapter, NETXEN_NIC_PEG_TUNE);
if (err) {
@@ -1178,6 +1179,7 @@ static void __devexit netxen_nic_remove(struct pci_dev *pdev)
free_netdev(netdev);
}
+#ifdef CONFIG_PM
static int
netxen_nic_suspend(struct pci_dev *pdev, pm_message_t state)
{
@@ -1242,6 +1244,7 @@ netxen_nic_resume(struct pci_dev *pdev)
return 0;
}
+#endif
static int netxen_nic_open(struct net_device *netdev)
{
@@ -1771,8 +1774,10 @@ static struct pci_driver netxen_driver = {
.id_table = netxen_pci_tbl,
.probe = netxen_nic_probe,
.remove = __devexit_p(netxen_nic_remove),
+#ifdef CONFIG_PM
.suspend = netxen_nic_suspend,
.resume = netxen_nic_resume
+#endif
};
/* Driver Registration on NetXen card */
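Guarding the suspend/resume callbacks and the matching pci_driver fields with #ifdef CONFIG_PM keeps them from becoming defined-but-unused when power management is compiled out. A hedged sketch of the same guard pattern for a made-up driver (the foo_* names are placeholders; probe/remove/id_table are omitted):

#include <linux/pci.h>

#ifdef CONFIG_PM
static int foo_suspend(struct pci_dev *pdev, pm_message_t state)
{
	/* quiesce the device and save any volatile state here */
	return 0;
}

static int foo_resume(struct pci_dev *pdev)
{
	/* restore state and restart the device here */
	return 0;
}
#endif

static struct pci_driver foo_driver = {
	.name    = "foo",
	/* .id_table, .probe and .remove omitted for brevity */
#ifdef CONFIG_PM
	.suspend = foo_suspend,
	.resume  = foo_resume,
#endif
};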
diff --git a/drivers/net/qla3xxx.c b/drivers/net/qla3xxx.c
index bbc6d4d3cc9..3e4b67aaa6e 100644
--- a/drivers/net/qla3xxx.c
+++ b/drivers/net/qla3xxx.c
@@ -3142,6 +3142,7 @@ static int ql_adapter_initialize(struct ql3_adapter *qdev)
(void __iomem *)port_regs;
u32 delay = 10;
int status = 0;
+ unsigned long hw_flags = 0;
if(ql_mii_setup(qdev))
return -1;
@@ -3150,7 +3151,8 @@ static int ql_adapter_initialize(struct ql3_adapter *qdev)
ql_write_common_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg,
(ISP_SERIAL_PORT_IF_WE |
(ISP_SERIAL_PORT_IF_WE << 16)));
-
+ /* Give the PHY time to come out of reset. */
+ mdelay(100);
qdev->port_link_state = LS_DOWN;
netif_carrier_off(qdev->ndev);
@@ -3350,7 +3352,9 @@ static int ql_adapter_initialize(struct ql3_adapter *qdev)
value = ql_read_page0_reg(qdev, &port_regs->portStatus);
if (value & PORT_STATUS_IC)
break;
+ spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
msleep(500);
+ spin_lock_irqsave(&qdev->hw_lock, hw_flags);
} while (--delay);
if (delay == 0) {
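The qla3xxx change above fixes a sleep-while-atomic problem: msleep() may schedule, so hw_lock is dropped around the sleep and retaken before the port status register is read again. The general drop-lock-to-sleep polling shape, as a sketch with made-up names (my_dev, read_status() and STATUS_READY are illustrative, not the qla3xxx API):

#include <linux/spinlock.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/io.h>

#define STATUS_READY 0x1		/* placeholder status bit */

struct my_dev {
	spinlock_t lock;
	void __iomem *regs;
};

static u32 read_status(struct my_dev *d)
{
	return readl(d->regs);		/* placeholder register read */
}

static int wait_ready(struct my_dev *d)
{
	unsigned long flags;
	int retries = 10;

	spin_lock_irqsave(&d->lock, flags);
	while (retries--) {
		if (read_status(d) & STATUS_READY)
			break;
		/* never sleep with a spinlock held: drop it for the msleep() */
		spin_unlock_irqrestore(&d->lock, flags);
		msleep(500);
		spin_lock_irqsave(&d->lock, flags);
	}
	spin_unlock_irqrestore(&d->lock, flags);

	return retries < 0 ? -ETIMEDOUT : 0;
}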
diff --git a/drivers/net/sh_eth.c b/drivers/net/sh_eth.c
index 341882f959f..a2d82ddb3b4 100644
--- a/drivers/net/sh_eth.c
+++ b/drivers/net/sh_eth.c
@@ -865,8 +865,7 @@ static irqreturn_t sh_eth_interrupt(int irq, void *netdev)
struct sh_eth_private *mdp = netdev_priv(ndev);
struct sh_eth_cpu_data *cd = mdp->cd;
irqreturn_t ret = IRQ_NONE;
- u32 ioaddr, boguscnt = RX_RING_SIZE;
- u32 intr_status = 0;
+ u32 ioaddr, intr_status = 0;
ioaddr = ndev->base_addr;
spin_lock(&mdp->lock);
@@ -901,12 +900,6 @@ static irqreturn_t sh_eth_interrupt(int irq, void *netdev)
if (intr_status & cd->eesr_err_check)
sh_eth_error(ndev, intr_status);
- if (--boguscnt < 0) {
- printk(KERN_WARNING
- "%s: Too much work at interrupt, status=0x%4.4x.\n",
- ndev->name, intr_status);
- }
-
other_irq:
spin_unlock(&mdp->lock);
diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c
index 7681d28c53d..daf961ab68b 100644
--- a/drivers/net/sky2.c
+++ b/drivers/net/sky2.c
@@ -2495,7 +2495,7 @@ static int sky2_status_intr(struct sky2_hw *hw, int to_do, u16 idx)
if (likely(status >> 16 == (status & 0xffff))) {
skb = sky2->rx_ring[sky2->rx_next].skb;
skb->ip_summed = CHECKSUM_COMPLETE;
- skb->csum = status & 0xffff;
+ skb->csum = le16_to_cpu(status);
} else {
printk(KERN_NOTICE PFX "%s: hardware receive "
"checksum problem (status = %#x)\n",
diff --git a/drivers/net/usb/cdc_eem.c b/drivers/net/usb/cdc_eem.c
index 80e01778dd3..cd35d50e46d 100644
--- a/drivers/net/usb/cdc_eem.c
+++ b/drivers/net/usb/cdc_eem.c
@@ -319,7 +319,7 @@ static int eem_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
return crc == crc2;
if (unlikely(crc != crc2)) {
- dev->stats.rx_errors++;
+ dev->net->stats.rx_errors++;
dev_kfree_skb_any(skb2);
} else
usbnet_skb_return(dev, skb2);
diff --git a/drivers/net/usb/dm9601.c b/drivers/net/usb/dm9601.c
index 7ae82446b93..1d3730d6690 100644
--- a/drivers/net/usb/dm9601.c
+++ b/drivers/net/usb/dm9601.c
@@ -513,11 +513,11 @@ static int dm9601_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
len = (skb->data[1] | (skb->data[2] << 8)) - 4;
if (unlikely(status & 0xbf)) {
- if (status & 0x01) dev->stats.rx_fifo_errors++;
- if (status & 0x02) dev->stats.rx_crc_errors++;
- if (status & 0x04) dev->stats.rx_frame_errors++;
- if (status & 0x20) dev->stats.rx_missed_errors++;
- if (status & 0x90) dev->stats.rx_length_errors++;
+ if (status & 0x01) dev->net->stats.rx_fifo_errors++;
+ if (status & 0x02) dev->net->stats.rx_crc_errors++;
+ if (status & 0x04) dev->net->stats.rx_frame_errors++;
+ if (status & 0x20) dev->net->stats.rx_missed_errors++;
+ if (status & 0x90) dev->net->stats.rx_length_errors++;
return 0;
}
diff --git a/drivers/net/usb/net1080.c b/drivers/net/usb/net1080.c
index 034e8a73ca6..aeb1ab03a9e 100644
--- a/drivers/net/usb/net1080.c
+++ b/drivers/net/usb/net1080.c
@@ -433,7 +433,7 @@ static int net1080_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
dbg("rx framesize %d range %d..%d mtu %d", skb->len,
net->hard_header_len, dev->hard_mtu, net->mtu);
#endif
- dev->stats.rx_frame_errors++;
+ dev->net->stats.rx_frame_errors++;
nc_ensure_sync(dev);
return 0;
}
@@ -442,12 +442,12 @@ static int net1080_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
hdr_len = le16_to_cpup(&header->hdr_len);
packet_len = le16_to_cpup(&header->packet_len);
if (FRAMED_SIZE(packet_len) > NC_MAX_PACKET) {
- dev->stats.rx_frame_errors++;
+ dev->net->stats.rx_frame_errors++;
dbg("packet too big, %d", packet_len);
nc_ensure_sync(dev);
return 0;
} else if (hdr_len < MIN_HEADER) {
- dev->stats.rx_frame_errors++;
+ dev->net->stats.rx_frame_errors++;
dbg("header too short, %d", hdr_len);
nc_ensure_sync(dev);
return 0;
@@ -465,21 +465,21 @@ static int net1080_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
if ((packet_len & 0x01) == 0) {
if (skb->data [packet_len] != PAD_BYTE) {
- dev->stats.rx_frame_errors++;
+ dev->net->stats.rx_frame_errors++;
dbg("bad pad");
return 0;
}
skb_trim(skb, skb->len - 1);
}
if (skb->len != packet_len) {
- dev->stats.rx_frame_errors++;
+ dev->net->stats.rx_frame_errors++;
dbg("bad packet len %d (expected %d)",
skb->len, packet_len);
nc_ensure_sync(dev);
return 0;
}
if (header->packet_id != get_unaligned(&trailer->packet_id)) {
- dev->stats.rx_fifo_errors++;
+ dev->net->stats.rx_fifo_errors++;
dbg("(2+ dropped) rx packet_id mismatch 0x%x 0x%x",
le16_to_cpu(header->packet_id),
le16_to_cpu(trailer->packet_id));
diff --git a/drivers/net/usb/rndis_host.c b/drivers/net/usb/rndis_host.c
index 1bf243ef950..2232232b798 100644
--- a/drivers/net/usb/rndis_host.c
+++ b/drivers/net/usb/rndis_host.c
@@ -487,7 +487,7 @@ int rndis_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
if (unlikely(hdr->msg_type != RNDIS_MSG_PACKET
|| skb->len < msg_len
|| (data_offset + data_len + 8) > msg_len)) {
- dev->stats.rx_frame_errors++;
+ dev->net->stats.rx_frame_errors++;
devdbg(dev, "bad rndis message %d/%d/%d/%d, len %d",
le32_to_cpu(hdr->msg_type),
msg_len, data_offset, data_len, skb->len);
diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c
index 89a91f8c22d..fe045896406 100644
--- a/drivers/net/usb/smsc95xx.c
+++ b/drivers/net/usb/smsc95xx.c
@@ -1108,18 +1108,18 @@ static int smsc95xx_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
if (unlikely(header & RX_STS_ES_)) {
if (netif_msg_rx_err(dev))
devdbg(dev, "Error header=0x%08x", header);
- dev->stats.rx_errors++;
- dev->stats.rx_dropped++;
+ dev->net->stats.rx_errors++;
+ dev->net->stats.rx_dropped++;
if (header & RX_STS_CRC_) {
- dev->stats.rx_crc_errors++;
+ dev->net->stats.rx_crc_errors++;
} else {
if (header & (RX_STS_TL_ | RX_STS_RF_))
- dev->stats.rx_frame_errors++;
+ dev->net->stats.rx_frame_errors++;
if ((header & RX_STS_LE_) &&
(!(header & RX_STS_FT_)))
- dev->stats.rx_length_errors++;
+ dev->net->stats.rx_length_errors++;
}
} else {
/* ETH_FRAME_LEN + 4(CRC) + 2(COE) + 4(Vlan) */
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index 22c0585a031..edfd9e10ceb 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -234,8 +234,8 @@ void usbnet_skb_return (struct usbnet *dev, struct sk_buff *skb)
int status;
skb->protocol = eth_type_trans (skb, dev->net);
- dev->stats.rx_packets++;
- dev->stats.rx_bytes += skb->len;
+ dev->net->stats.rx_packets++;
+ dev->net->stats.rx_bytes += skb->len;
if (netif_msg_rx_status (dev))
devdbg (dev, "< rx, len %zu, type 0x%x",
@@ -397,7 +397,7 @@ static inline void rx_process (struct usbnet *dev, struct sk_buff *skb)
if (netif_msg_rx_err (dev))
devdbg (dev, "drop");
error:
- dev->stats.rx_errors++;
+ dev->net->stats.rx_errors++;
skb_queue_tail (&dev->done, skb);
}
}
@@ -420,8 +420,8 @@ static void rx_complete (struct urb *urb)
case 0:
if (skb->len < dev->net->hard_header_len) {
entry->state = rx_cleanup;
- dev->stats.rx_errors++;
- dev->stats.rx_length_errors++;
+ dev->net->stats.rx_errors++;
+ dev->net->stats.rx_length_errors++;
if (netif_msg_rx_err (dev))
devdbg (dev, "rx length %d", skb->len);
}
@@ -433,7 +433,7 @@ static void rx_complete (struct urb *urb)
* storm, recovering as needed.
*/
case -EPIPE:
- dev->stats.rx_errors++;
+ dev->net->stats.rx_errors++;
usbnet_defer_kevent (dev, EVENT_RX_HALT);
// FALLTHROUGH
@@ -451,7 +451,7 @@ static void rx_complete (struct urb *urb)
case -EPROTO:
case -ETIME:
case -EILSEQ:
- dev->stats.rx_errors++;
+ dev->net->stats.rx_errors++;
if (!timer_pending (&dev->delay)) {
mod_timer (&dev->delay, jiffies + THROTTLE_JIFFIES);
if (netif_msg_link (dev))
@@ -465,12 +465,12 @@ block:
/* data overrun ... flush fifo? */
case -EOVERFLOW:
- dev->stats.rx_over_errors++;
+ dev->net->stats.rx_over_errors++;
// FALLTHROUGH
default:
entry->state = rx_cleanup;
- dev->stats.rx_errors++;
+ dev->net->stats.rx_errors++;
if (netif_msg_rx_err (dev))
devdbg (dev, "rx status %d", urb_status);
break;
@@ -583,8 +583,8 @@ int usbnet_stop (struct net_device *net)
if (netif_msg_ifdown (dev))
devinfo (dev, "stop stats: rx/tx %ld/%ld, errs %ld/%ld",
- dev->stats.rx_packets, dev->stats.tx_packets,
- dev->stats.rx_errors, dev->stats.tx_errors
+ net->stats.rx_packets, net->stats.tx_packets,
+ net->stats.rx_errors, net->stats.tx_errors
);
// ensure there are no more active urbs
@@ -891,10 +891,10 @@ static void tx_complete (struct urb *urb)
struct usbnet *dev = entry->dev;
if (urb->status == 0) {
- dev->stats.tx_packets++;
- dev->stats.tx_bytes += entry->length;
+ dev->net->stats.tx_packets++;
+ dev->net->stats.tx_bytes += entry->length;
} else {
- dev->stats.tx_errors++;
+ dev->net->stats.tx_errors++;
switch (urb->status) {
case -EPIPE:
@@ -1020,7 +1020,7 @@ int usbnet_start_xmit (struct sk_buff *skb, struct net_device *net)
devdbg (dev, "drop, code %d", retval);
drop:
retval = NET_XMIT_SUCCESS;
- dev->stats.tx_dropped++;
+ dev->net->stats.tx_dropped++;
if (skb)
dev_kfree_skb_any (skb);
usb_free_urb (urb);
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index 87197dd9c78..1097c72e44d 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -208,11 +208,14 @@ rx_drop:
static struct net_device_stats *veth_get_stats(struct net_device *dev)
{
- struct veth_priv *priv = netdev_priv(dev);
- struct net_device_stats *dev_stats = &dev->stats;
- unsigned int cpu;
+ struct veth_priv *priv;
+ struct net_device_stats *dev_stats;
+ int cpu;
struct veth_net_stats *stats;
+ priv = netdev_priv(dev);
+ dev_stats = &dev->stats;
+
dev_stats->rx_packets = 0;
dev_stats->tx_packets = 0;
dev_stats->rx_bytes = 0;
@@ -220,17 +223,16 @@ static struct net_device_stats *veth_get_stats(struct net_device *dev)
dev_stats->tx_dropped = 0;
dev_stats->rx_dropped = 0;
- if (priv->stats)
- for_each_online_cpu(cpu) {
- stats = per_cpu_ptr(priv->stats, cpu);
+ for_each_online_cpu(cpu) {
+ stats = per_cpu_ptr(priv->stats, cpu);
- dev_stats->rx_packets += stats->rx_packets;
- dev_stats->tx_packets += stats->tx_packets;
- dev_stats->rx_bytes += stats->rx_bytes;
- dev_stats->tx_bytes += stats->tx_bytes;
- dev_stats->tx_dropped += stats->tx_dropped;
- dev_stats->rx_dropped += stats->rx_dropped;
- }
+ dev_stats->rx_packets += stats->rx_packets;
+ dev_stats->tx_packets += stats->tx_packets;
+ dev_stats->rx_bytes += stats->rx_bytes;
+ dev_stats->tx_bytes += stats->tx_bytes;
+ dev_stats->tx_dropped += stats->tx_dropped;
+ dev_stats->rx_dropped += stats->rx_dropped;
+ }
return dev_stats;
}
@@ -257,8 +259,6 @@ static int veth_close(struct net_device *dev)
netif_carrier_off(dev);
netif_carrier_off(priv->peer);
- free_percpu(priv->stats);
- priv->stats = NULL;
return 0;
}
@@ -289,6 +289,15 @@ static int veth_dev_init(struct net_device *dev)
return 0;
}
+static void veth_dev_free(struct net_device *dev)
+{
+ struct veth_priv *priv;
+
+ priv = netdev_priv(dev);
+ free_percpu(priv->stats);
+ free_netdev(dev);
+}
+
static const struct net_device_ops veth_netdev_ops = {
.ndo_init = veth_dev_init,
.ndo_open = veth_open,
@@ -306,7 +315,7 @@ static void veth_setup(struct net_device *dev)
dev->netdev_ops = &veth_netdev_ops;
dev->ethtool_ops = &veth_ethtool_ops;
dev->features |= NETIF_F_LLTX;
- dev->destructor = free_netdev;
+ dev->destructor = veth_dev_free;
}
/*
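The veth changes above work as a pair: veth_get_stats() now sums the per-CPU counters unconditionally (priv->stats is allocated in ndo_init and stays valid for the device's lifetime), and the percpu area is freed from a dedicated destructor rather than from veth_close(), so counters survive an ifdown/ifup cycle and the memory is only released when the netdev itself goes away. The per-CPU aggregation idiom in isolation, as a sketch with illustrative struct names:

#include <linux/percpu.h>
#include <linux/cpumask.h>
#include <linux/string.h>

struct pkt_stats {
	unsigned long rx_packets;
	unsigned long tx_packets;
};

/* Sum per-CPU counters into one snapshot; 'percpu' came from alloc_percpu(). */
static void sum_stats(struct pkt_stats *percpu, struct pkt_stats *out)
{
	int cpu;

	memset(out, 0, sizeof(*out));
	for_each_online_cpu(cpu) {
		struct pkt_stats *s = per_cpu_ptr(percpu, cpu);

		out->rx_packets += s->rx_packets;
		out->tx_packets += s->tx_packets;
	}
}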
diff --git a/drivers/parport/parport_pc.c b/drivers/parport/parport_pc.c
index 1032d5fdbd4..2597145a066 100644
--- a/drivers/parport/parport_pc.c
+++ b/drivers/parport/parport_pc.c
@@ -2907,6 +2907,7 @@ enum parport_pc_pci_cards {
netmos_9755,
netmos_9805,
netmos_9815,
+ netmos_9901,
quatech_sppxp100,
};
@@ -2987,7 +2988,7 @@ static struct parport_pc_pci {
/* netmos_9755 */ { 2, { { 0, 1 }, { 2, 3 },} },
/* netmos_9805 */ { 1, { { 0, -1 }, } },
/* netmos_9815 */ { 2, { { 0, -1 }, { 2, -1 }, } },
-
+ /* netmos_9901 */ { 1, { { 0, -1 }, } },
/* quatech_sppxp100 */ { 1, { { 0, 1 }, } },
};
@@ -3089,6 +3090,8 @@ static const struct pci_device_id parport_pc_pci_tbl[] = {
PCI_ANY_ID, PCI_ANY_ID, 0, 0, netmos_9805 },
{ PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9815,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, netmos_9815 },
+ { PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9901,
+ 0xA000, 0x2000, 0, 0, netmos_9901 },
/* Quatech SPPXP-100 Parallel port PCI ExpressCard */
{ PCI_VENDOR_ID_QUATECH, PCI_DEVICE_ID_QUATECH_SPPXP_100,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, quatech_sppxp100 },
diff --git a/drivers/pci/hotplug/acpi_pcihp.c b/drivers/pci/hotplug/acpi_pcihp.c
index fbc63d5e459..eb159587d0b 100644
--- a/drivers/pci/hotplug/acpi_pcihp.c
+++ b/drivers/pci/hotplug/acpi_pcihp.c
@@ -354,7 +354,7 @@ acpi_status acpi_get_hp_params_from_firmware(struct pci_bus *bus,
status = acpi_run_hpp(handle, hpp);
if (ACPI_SUCCESS(status))
break;
- if (acpi_root_bridge(handle))
+ if (acpi_is_root_bridge(handle))
break;
status = acpi_get_parent(handle, &phandle);
if (ACPI_FAILURE(status))
@@ -428,7 +428,7 @@ int acpi_get_hp_hw_control_from_firmware(struct pci_dev *pdev, u32 flags)
status = acpi_run_oshp(handle);
if (ACPI_SUCCESS(status))
goto got_one;
- if (acpi_root_bridge(handle))
+ if (acpi_is_root_bridge(handle))
break;
chandle = handle;
status = acpi_get_parent(chandle, &handle);
@@ -449,42 +449,6 @@ got_one:
}
EXPORT_SYMBOL(acpi_get_hp_hw_control_from_firmware);
-/* acpi_root_bridge - check to see if this acpi object is a root bridge
- *
- * @handle - the acpi object in question.
- */
-int acpi_root_bridge(acpi_handle handle)
-{
- acpi_status status;
- struct acpi_device_info *info;
- struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
- int i;
-
- status = acpi_get_object_info(handle, &buffer);
- if (ACPI_SUCCESS(status)) {
- info = buffer.pointer;
- if ((info->valid & ACPI_VALID_HID) &&
- !strcmp(PCI_ROOT_HID_STRING,
- info->hardware_id.value)) {
- kfree(buffer.pointer);
- return 1;
- }
- if (info->valid & ACPI_VALID_CID) {
- for (i=0; i < info->compatibility_id.count; i++) {
- if (!strcmp(PCI_ROOT_HID_STRING,
- info->compatibility_id.id[i].value)) {
- kfree(buffer.pointer);
- return 1;
- }
- }
- }
- kfree(buffer.pointer);
- }
- return 0;
-}
-EXPORT_SYMBOL_GPL(acpi_root_bridge);
-
-
static int is_ejectable(acpi_handle handle)
{
acpi_status status;
diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c
index 3a6064bce56..0cb0f830a99 100644
--- a/drivers/pci/hotplug/acpiphp_glue.c
+++ b/drivers/pci/hotplug/acpiphp_glue.c
@@ -678,18 +678,9 @@ static void remove_bridge(acpi_handle handle)
static struct pci_dev * get_apic_pci_info(acpi_handle handle)
{
- struct acpi_pci_id id;
- struct pci_bus *bus;
struct pci_dev *dev;
- if (ACPI_FAILURE(acpi_get_pci_id(handle, &id)))
- return NULL;
-
- bus = pci_find_bus(id.segment, id.bus);
- if (!bus)
- return NULL;
-
- dev = pci_get_slot(bus, PCI_DEVFN(id.device, id.function));
+ dev = acpi_get_pci_dev(handle);
if (!dev)
return NULL;
@@ -1396,19 +1387,16 @@ static void acpiphp_sanitize_bus(struct pci_bus *bus)
/* Program resources in newly inserted bridge */
static int acpiphp_configure_bridge (acpi_handle handle)
{
- struct acpi_pci_id pci_id;
+ struct pci_dev *dev;
struct pci_bus *bus;
- if (ACPI_FAILURE(acpi_get_pci_id(handle, &pci_id))) {
+ dev = acpi_get_pci_dev(handle);
+ if (!dev) {
err("cannot get PCI domain and bus number for bridge\n");
return -EINVAL;
}
- bus = pci_find_bus(pci_id.segment, pci_id.bus);
- if (!bus) {
- err("cannot find bus %d:%d\n",
- pci_id.segment, pci_id.bus);
- return -EINVAL;
- }
+
+ bus = dev->bus;
pci_bus_size_bridges(bus);
pci_bus_assign_resources(bus);
@@ -1416,6 +1404,7 @@ static int acpiphp_configure_bridge (acpi_handle handle)
acpiphp_set_hpp_values(handle, bus);
pci_enable_bridges(bus);
acpiphp_configure_ioapics(handle);
+ pci_dev_put(dev);
return 0;
}
@@ -1631,7 +1620,7 @@ find_root_bridges(acpi_handle handle, u32 lvl, void *context, void **rv)
{
int *count = (int *)context;
- if (acpi_root_bridge(handle)) {
+ if (acpi_is_root_bridge(handle)) {
acpi_install_notify_handler(handle, ACPI_SYSTEM_NOTIFY,
handle_hotplug_event_bridge, NULL);
(*count)++;
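acpi_get_pci_dev() returns a referenced struct pci_dev, which is why acpiphp_configure_bridge() now ends with pci_dev_put(); the old acpi_get_pci_id()/pci_find_bus()/pci_get_slot() sequence collapses into a single lookup. The get/put discipline, sketched as a small wrapper (the function name and the "use dev->bus" placeholder are illustrative):

#include <linux/acpi.h>
#include <linux/pci.h>
#include <linux/errno.h>
/* acpi_get_pci_dev() itself is declared in the ACPI/PCI glue headers */

static int configure_from_handle(acpi_handle handle)
{
	struct pci_dev *dev;

	dev = acpi_get_pci_dev(handle);	/* takes a reference on success, NULL otherwise */
	if (!dev)
		return -ENODEV;

	/* ... work with dev->bus here ... */

	pci_dev_put(dev);		/* balance the reference taken above */
	return 0;
}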
diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
index 178853a0744..420afa88728 100644
--- a/drivers/pci/intel-iommu.c
+++ b/drivers/pci/intel-iommu.c
@@ -217,6 +217,14 @@ static inline bool dma_pte_present(struct dma_pte *pte)
return (pte->val & 3) != 0;
}
+/*
+ * This domain is a statically identity mapping domain.
+ * 1. This domain creates a static 1:1 mapping to all usable memory.
+ * 2. It maps to each iommu if successful.
+ * 3. Each iommu maps to this domain if successful.
+ */
+struct dmar_domain *si_domain;
+
/* devices under the same p2p bridge are owned in one domain */
#define DOMAIN_FLAG_P2P_MULTIPLE_DEVICES (1 << 0)
@@ -225,6 +233,9 @@ static inline bool dma_pte_present(struct dma_pte *pte)
*/
#define DOMAIN_FLAG_VIRTUAL_MACHINE (1 << 1)
+/* si_domain contains multiple devices */
+#define DOMAIN_FLAG_STATIC_IDENTITY (1 << 2)
+
struct dmar_domain {
int id; /* domain id */
unsigned long iommu_bmp; /* bitmap of iommus this domain uses*/
@@ -435,12 +446,14 @@ int iommu_calculate_agaw(struct intel_iommu *iommu)
return __iommu_calculate_agaw(iommu, DEFAULT_DOMAIN_ADDRESS_WIDTH);
}
-/* in native case, each domain is related to only one iommu */
+/* This function only returns a single iommu in a domain */
static struct intel_iommu *domain_get_iommu(struct dmar_domain *domain)
{
int iommu_id;
+ /* si_domain and vm domain should not get here. */
BUG_ON(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE);
+ BUG_ON(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY);
iommu_id = find_first_bit(&domain->iommu_bmp, g_num_of_iommus);
if (iommu_id < 0 || iommu_id >= g_num_of_iommus)
@@ -1189,48 +1202,71 @@ void free_dmar_iommu(struct intel_iommu *iommu)
free_context_table(iommu);
}
-static struct dmar_domain * iommu_alloc_domain(struct intel_iommu *iommu)
+static struct dmar_domain *alloc_domain(void)
{
- unsigned long num;
- unsigned long ndomains;
struct dmar_domain *domain;
- unsigned long flags;
domain = alloc_domain_mem();
if (!domain)
return NULL;
+ memset(&domain->iommu_bmp, 0, sizeof(unsigned long));
+ domain->flags = 0;
+
+ return domain;
+}
+
+static int iommu_attach_domain(struct dmar_domain *domain,
+ struct intel_iommu *iommu)
+{
+ int num;
+ unsigned long ndomains;
+ unsigned long flags;
+
ndomains = cap_ndoms(iommu->cap);
spin_lock_irqsave(&iommu->lock, flags);
+
num = find_first_zero_bit(iommu->domain_ids, ndomains);
if (num >= ndomains) {
spin_unlock_irqrestore(&iommu->lock, flags);
- free_domain_mem(domain);
printk(KERN_ERR "IOMMU: no free domain ids\n");
- return NULL;
+ return -ENOMEM;
}
- set_bit(num, iommu->domain_ids);
domain->id = num;
- memset(&domain->iommu_bmp, 0, sizeof(unsigned long));
+ set_bit(num, iommu->domain_ids);
set_bit(iommu->seq_id, &domain->iommu_bmp);
- domain->flags = 0;
iommu->domains[num] = domain;
spin_unlock_irqrestore(&iommu->lock, flags);
- return domain;
+ return 0;
}
-static void iommu_free_domain(struct dmar_domain *domain)
+static void iommu_detach_domain(struct dmar_domain *domain,
+ struct intel_iommu *iommu)
{
unsigned long flags;
- struct intel_iommu *iommu;
-
- iommu = domain_get_iommu(domain);
+ int num, ndomains;
+ int found = 0;
spin_lock_irqsave(&iommu->lock, flags);
- clear_bit(domain->id, iommu->domain_ids);
+ ndomains = cap_ndoms(iommu->cap);
+ num = find_first_bit(iommu->domain_ids, ndomains);
+ for (; num < ndomains; ) {
+ if (iommu->domains[num] == domain) {
+ found = 1;
+ break;
+ }
+ num = find_next_bit(iommu->domain_ids,
+ cap_ndoms(iommu->cap), num+1);
+ }
+
+ if (found) {
+ clear_bit(num, iommu->domain_ids);
+ clear_bit(iommu->seq_id, &domain->iommu_bmp);
+ iommu->domains[num] = NULL;
+ }
spin_unlock_irqrestore(&iommu->lock, flags);
}
@@ -1350,6 +1386,8 @@ static int domain_init(struct dmar_domain *domain, int guest_width)
static void domain_exit(struct dmar_domain *domain)
{
+ struct dmar_drhd_unit *drhd;
+ struct intel_iommu *iommu;
u64 end;
/* Domain 0 is reserved, so dont process it */
@@ -1368,7 +1406,10 @@ static void domain_exit(struct dmar_domain *domain)
/* free page tables */
dma_pte_free_pagetable(domain, 0, end);
- iommu_free_domain(domain);
+ for_each_active_iommu(iommu, drhd)
+ if (test_bit(iommu->seq_id, &domain->iommu_bmp))
+ iommu_detach_domain(domain, iommu);
+
free_domain_mem(domain);
}
@@ -1408,7 +1449,8 @@ static int domain_context_mapping_one(struct dmar_domain *domain, int segment,
id = domain->id;
pgd = domain->pgd;
- if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE) {
+ if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE ||
+ domain->flags & DOMAIN_FLAG_STATIC_IDENTITY) {
int found = 0;
/* find an available domain id for this device in iommu */
@@ -1433,6 +1475,7 @@ static int domain_context_mapping_one(struct dmar_domain *domain, int segment,
}
set_bit(num, iommu->domain_ids);
+ set_bit(iommu->seq_id, &domain->iommu_bmp);
iommu->domains[num] = domain;
id = num;
}
@@ -1675,6 +1718,7 @@ static struct dmar_domain *get_domain_for_dev(struct pci_dev *pdev, int gaw)
unsigned long flags;
int bus = 0, devfn = 0;
int segment;
+ int ret;
domain = find_domain(pdev);
if (domain)
@@ -1707,6 +1751,10 @@ static struct dmar_domain *get_domain_for_dev(struct pci_dev *pdev, int gaw)
}
}
+ domain = alloc_domain();
+ if (!domain)
+ goto error;
+
/* Allocate new domain for the device */
drhd = dmar_find_matched_drhd_unit(pdev);
if (!drhd) {
@@ -1716,9 +1764,11 @@ static struct dmar_domain *get_domain_for_dev(struct pci_dev *pdev, int gaw)
}
iommu = drhd->iommu;
- domain = iommu_alloc_domain(iommu);
- if (!domain)
+ ret = iommu_attach_domain(domain, iommu);
+ if (ret) {
+ domain_exit(domain);
goto error;
+ }
if (domain_init(domain, gaw)) {
domain_exit(domain);
@@ -1792,6 +1842,8 @@ error:
return find_domain(pdev);
}
+static int iommu_identity_mapping;
+
static int iommu_prepare_identity_map(struct pci_dev *pdev,
unsigned long long start,
unsigned long long end)
@@ -1804,8 +1856,11 @@ static int iommu_prepare_identity_map(struct pci_dev *pdev,
printk(KERN_INFO
"IOMMU: Setting identity map for device %s [0x%Lx - 0x%Lx]\n",
pci_name(pdev), start, end);
- /* page table init */
- domain = get_domain_for_dev(pdev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
+ if (iommu_identity_mapping)
+ domain = si_domain;
+ else
+ /* page table init */
+ domain = get_domain_for_dev(pdev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
if (!domain)
return -ENOMEM;
@@ -1852,7 +1907,6 @@ static inline int iommu_prepare_rmrr_dev(struct dmar_rmrr_unit *rmrr,
rmrr->end_address + 1);
}
-#ifdef CONFIG_DMAR_GFX_WA
struct iommu_prepare_data {
struct pci_dev *pdev;
int ret;
@@ -1887,6 +1941,7 @@ static int __init iommu_prepare_with_active_regions(struct pci_dev *pdev)
return data.ret;
}
+#ifdef CONFIG_DMAR_GFX_WA
static void __init iommu_prepare_gfx_mapping(void)
{
struct pci_dev *pdev = NULL;
@@ -1952,7 +2007,102 @@ static int __init init_context_pass_through(void)
return 0;
}
-static int __init init_dmars(void)
+static int md_domain_init(struct dmar_domain *domain, int guest_width);
+static int si_domain_init(void)
+{
+ struct dmar_drhd_unit *drhd;
+ struct intel_iommu *iommu;
+ int ret = 0;
+
+ si_domain = alloc_domain();
+ if (!si_domain)
+ return -EFAULT;
+
+
+ for_each_active_iommu(iommu, drhd) {
+ ret = iommu_attach_domain(si_domain, iommu);
+ if (ret) {
+ domain_exit(si_domain);
+ return -EFAULT;
+ }
+ }
+
+ if (md_domain_init(si_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
+ domain_exit(si_domain);
+ return -EFAULT;
+ }
+
+ si_domain->flags = DOMAIN_FLAG_STATIC_IDENTITY;
+
+ return 0;
+}
+
+static void domain_remove_one_dev_info(struct dmar_domain *domain,
+ struct pci_dev *pdev);
+static int identity_mapping(struct pci_dev *pdev)
+{
+ struct device_domain_info *info;
+
+ if (likely(!iommu_identity_mapping))
+ return 0;
+
+
+ list_for_each_entry(info, &si_domain->devices, link)
+ if (info->dev == pdev)
+ return 1;
+ return 0;
+}
+
+static int domain_add_dev_info(struct dmar_domain *domain,
+ struct pci_dev *pdev)
+{
+ struct device_domain_info *info;
+ unsigned long flags;
+
+ info = alloc_devinfo_mem();
+ if (!info)
+ return -ENOMEM;
+
+ info->segment = pci_domain_nr(pdev->bus);
+ info->bus = pdev->bus->number;
+ info->devfn = pdev->devfn;
+ info->dev = pdev;
+ info->domain = domain;
+
+ spin_lock_irqsave(&device_domain_lock, flags);
+ list_add(&info->link, &domain->devices);
+ list_add(&info->global, &device_domain_list);
+ pdev->dev.archdata.iommu = info;
+ spin_unlock_irqrestore(&device_domain_lock, flags);
+
+ return 0;
+}
+
+static int iommu_prepare_static_identity_mapping(void)
+{
+ struct pci_dev *pdev = NULL;
+ int ret;
+
+ ret = si_domain_init();
+ if (ret)
+ return -EFAULT;
+
+ printk(KERN_INFO "IOMMU: Setting identity map:\n");
+ for_each_pci_dev(pdev) {
+ ret = iommu_prepare_with_active_regions(pdev);
+ if (ret) {
+ printk(KERN_INFO "1:1 mapping to one domain failed.\n");
+ return -EFAULT;
+ }
+ ret = domain_add_dev_info(si_domain, pdev);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+int __init init_dmars(void)
{
struct dmar_drhd_unit *drhd;
struct dmar_rmrr_unit *rmrr;
@@ -1962,6 +2112,13 @@ static int __init init_dmars(void)
int pass_through = 1;
/*
+ * If pass through cannot be enabled, the iommu falls back to identity
+ * mapping.
+ */
+ if (iommu_pass_through)
+ iommu_identity_mapping = 1;
+
+ /*
* for each drhd
* allocate root
* initialize and program root entry to not present
@@ -2090,9 +2247,12 @@ static int __init init_dmars(void)
/*
* If pass through is not set or not enabled, setup context entries for
- * identity mappings for rmrr, gfx, and isa.
+ * identity mappings for rmrr, gfx, and isa and may fall back to static
+ * identity mapping if iommu_identity_mapping is set.
*/
if (!iommu_pass_through) {
+ if (iommu_identity_mapping)
+ iommu_prepare_static_identity_mapping();
/*
* For each rmrr
* for each dev attached to rmrr
@@ -2107,6 +2267,7 @@ static int __init init_dmars(void)
* endfor
* endfor
*/
+ printk(KERN_INFO "IOMMU: Setting RMRR:\n");
for_each_rmrr_units(rmrr) {
for (i = 0; i < rmrr->devices_cnt; i++) {
pdev = rmrr->devices[i];
@@ -2248,6 +2409,52 @@ get_valid_domain_for_dev(struct pci_dev *pdev)
return domain;
}
+static int iommu_dummy(struct pci_dev *pdev)
+{
+ return pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO;
+}
+
+/* Check if the pdev needs to go through non-identity map and unmap process.*/
+static int iommu_no_mapping(struct pci_dev *pdev)
+{
+ int found;
+
+ if (!iommu_identity_mapping)
+ return iommu_dummy(pdev);
+
+ found = identity_mapping(pdev);
+ if (found) {
+ if (pdev->dma_mask > DMA_BIT_MASK(32))
+ return 1;
+ else {
+ /*
+ * A 32 bit DMA device is removed from si_domain and falls back
+ * to non-identity mapping.
+ */
+ domain_remove_one_dev_info(si_domain, pdev);
+ printk(KERN_INFO "32bit %s uses non-identity mapping\n",
+ pci_name(pdev));
+ return 0;
+ }
+ } else {
+ /*
+ * A 64 bit DMA device detached from a VM domain is put back
+ * into si_domain for identity mapping.
+ */
+ if (pdev->dma_mask > DMA_BIT_MASK(32)) {
+ int ret;
+ ret = domain_add_dev_info(si_domain, pdev);
+ if (!ret) {
+ printk(KERN_INFO "64bit %s uses identity mapping\n",
+ pci_name(pdev));
+ return 1;
+ }
+ }
+ }
+
+ return iommu_dummy(pdev);
+}
+
static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr,
size_t size, int dir, u64 dma_mask)
{
@@ -2260,7 +2467,8 @@ static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr,
struct intel_iommu *iommu;
BUG_ON(dir == DMA_NONE);
- if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
+
+ if (iommu_no_mapping(pdev))
return paddr;
domain = get_valid_domain_for_dev(pdev);
@@ -2401,8 +2609,9 @@ static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
struct iova *iova;
struct intel_iommu *iommu;
- if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
+ if (iommu_no_mapping(pdev))
return;
+
domain = find_domain(pdev);
BUG_ON(!domain);
@@ -2492,7 +2701,7 @@ static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
struct scatterlist *sg;
struct intel_iommu *iommu;
- if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
+ if (iommu_no_mapping(pdev))
return;
domain = find_domain(pdev);
@@ -2553,7 +2762,7 @@ static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int ne
struct intel_iommu *iommu;
BUG_ON(dir == DMA_NONE);
- if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
+ if (iommu_no_mapping(pdev))
return intel_nontranslate_map_sg(hwdev, sglist, nelems, dir);
domain = get_valid_domain_for_dev(pdev);
@@ -2951,31 +3160,6 @@ int __init intel_iommu_init(void)
return 0;
}
-static int vm_domain_add_dev_info(struct dmar_domain *domain,
- struct pci_dev *pdev)
-{
- struct device_domain_info *info;
- unsigned long flags;
-
- info = alloc_devinfo_mem();
- if (!info)
- return -ENOMEM;
-
- info->segment = pci_domain_nr(pdev->bus);
- info->bus = pdev->bus->number;
- info->devfn = pdev->devfn;
- info->dev = pdev;
- info->domain = domain;
-
- spin_lock_irqsave(&device_domain_lock, flags);
- list_add(&info->link, &domain->devices);
- list_add(&info->global, &device_domain_list);
- pdev->dev.archdata.iommu = info;
- spin_unlock_irqrestore(&device_domain_lock, flags);
-
- return 0;
-}
-
static void iommu_detach_dependent_devices(struct intel_iommu *iommu,
struct pci_dev *pdev)
{
@@ -3003,7 +3187,7 @@ static void iommu_detach_dependent_devices(struct intel_iommu *iommu,
}
}
-static void vm_domain_remove_one_dev_info(struct dmar_domain *domain,
+static void domain_remove_one_dev_info(struct dmar_domain *domain,
struct pci_dev *pdev)
{
struct device_domain_info *info;
@@ -3136,7 +3320,7 @@ static struct dmar_domain *iommu_alloc_vm_domain(void)
return domain;
}
-static int vm_domain_init(struct dmar_domain *domain, int guest_width)
+static int md_domain_init(struct dmar_domain *domain, int guest_width)
{
int adjust_width;
@@ -3227,7 +3411,7 @@ static int intel_iommu_domain_init(struct iommu_domain *domain)
"intel_iommu_domain_init: dmar_domain == NULL\n");
return -ENOMEM;
}
- if (vm_domain_init(dmar_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
+ if (md_domain_init(dmar_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
printk(KERN_ERR
"intel_iommu_domain_init() failed\n");
vm_domain_exit(dmar_domain);
@@ -3262,8 +3446,9 @@ static int intel_iommu_attach_device(struct iommu_domain *domain,
old_domain = find_domain(pdev);
if (old_domain) {
- if (dmar_domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE)
- vm_domain_remove_one_dev_info(old_domain, pdev);
+ if (dmar_domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE ||
+ dmar_domain->flags & DOMAIN_FLAG_STATIC_IDENTITY)
+ domain_remove_one_dev_info(old_domain, pdev);
else
domain_remove_dev_info(old_domain);
}
@@ -3285,7 +3470,7 @@ static int intel_iommu_attach_device(struct iommu_domain *domain,
return -EFAULT;
}
- ret = vm_domain_add_dev_info(dmar_domain, pdev);
+ ret = domain_add_dev_info(dmar_domain, pdev);
if (ret)
return ret;
@@ -3299,7 +3484,7 @@ static void intel_iommu_detach_device(struct iommu_domain *domain,
struct dmar_domain *dmar_domain = domain->priv;
struct pci_dev *pdev = to_pci_dev(dev);
- vm_domain_remove_one_dev_info(dmar_domain, pdev);
+ domain_remove_one_dev_info(dmar_domain, pdev);
}
static int intel_iommu_map_range(struct iommu_domain *domain,
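The new iommu_no_mapping() above encodes the core policy of the static identity map: a device stays in si_domain only if its DMA mask reaches beyond 32 bits, otherwise it is moved to a private, translated domain; conversely, a 64-bit-capable device that is not currently in si_domain is added back to it. The same decision, restated as a hedged sketch in which the three domain helpers are stand-ins for the driver's internal bookkeeping, not real intel-iommu functions:

#include <linux/pci.h>
#include <linux/dma-mapping.h>

/* hypothetical helpers standing in for the driver's domain bookkeeping */
static bool device_in_identity_domain(struct pci_dev *pdev);
static void leave_identity_domain(struct pci_dev *pdev);
static void join_identity_domain(struct pci_dev *pdev);

/* Returns 1 if DMA for pdev must be translated, 0 if the 1:1 map can be used. */
static int needs_dma_translation(struct pci_dev *pdev)
{
	bool in_identity = device_in_identity_domain(pdev);

	if (in_identity && pdev->dma_mask <= DMA_BIT_MASK(32)) {
		leave_identity_domain(pdev);	/* 32-bit device cannot keep the 1:1 map */
		return 1;
	}
	if (!in_identity && pdev->dma_mask > DMA_BIT_MASK(32)) {
		join_identity_domain(pdev);	/* 64-bit device may rejoin the 1:1 map */
		return 0;
	}
	return in_identity ? 0 : 1;
}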
diff --git a/drivers/pci/intr_remapping.c b/drivers/pci/intr_remapping.c
index 1e83c8c5f98..4f5b8712931 100644
--- a/drivers/pci/intr_remapping.c
+++ b/drivers/pci/intr_remapping.c
@@ -10,6 +10,8 @@
#include <linux/intel-iommu.h>
#include "intr_remapping.h"
#include <acpi/acpi.h>
+#include <asm/pci-direct.h>
+#include "pci.h"
static struct ioapic_scope ir_ioapic[MAX_IO_APICS];
static int ir_ioapic_num;
@@ -314,7 +316,8 @@ int modify_irte(int irq, struct irte *irte_modified)
index = irq_iommu->irte_index + irq_iommu->sub_handle;
irte = &iommu->ir_table->base[index];
- set_64bit((unsigned long *)irte, irte_modified->low);
+ set_64bit((unsigned long *)&irte->low, irte_modified->low);
+ set_64bit((unsigned long *)&irte->high, irte_modified->high);
__iommu_flush_cache(iommu, irte, sizeof(*irte));
rc = qi_flush_iec(iommu, index, 0);
@@ -369,12 +372,32 @@ struct intel_iommu *map_dev_to_ir(struct pci_dev *dev)
return drhd->iommu;
}
+static int clear_entries(struct irq_2_iommu *irq_iommu)
+{
+ struct irte *start, *entry, *end;
+ struct intel_iommu *iommu;
+ int index;
+
+ if (irq_iommu->sub_handle)
+ return 0;
+
+ iommu = irq_iommu->iommu;
+ index = irq_iommu->irte_index + irq_iommu->sub_handle;
+
+ start = iommu->ir_table->base + index;
+ end = start + (1 << irq_iommu->irte_mask);
+
+ for (entry = start; entry < end; entry++) {
+ set_64bit((unsigned long *)&entry->low, 0);
+ set_64bit((unsigned long *)&entry->high, 0);
+ }
+
+ return qi_flush_iec(iommu, index, irq_iommu->irte_mask);
+}
+
int free_irte(int irq)
{
int rc = 0;
- int index, i;
- struct irte *irte;
- struct intel_iommu *iommu;
struct irq_2_iommu *irq_iommu;
unsigned long flags;
@@ -385,16 +408,7 @@ int free_irte(int irq)
return -1;
}
- iommu = irq_iommu->iommu;
-
- index = irq_iommu->irte_index + irq_iommu->sub_handle;
- irte = &iommu->ir_table->base[index];
-
- if (!irq_iommu->sub_handle) {
- for (i = 0; i < (1 << irq_iommu->irte_mask); i++)
- set_64bit((unsigned long *)(irte + i), 0);
- rc = qi_flush_iec(iommu, index, irq_iommu->irte_mask);
- }
+ rc = clear_entries(irq_iommu);
irq_iommu->iommu = NULL;
irq_iommu->irte_index = 0;
@@ -406,6 +420,91 @@ int free_irte(int irq)
return rc;
}
+/*
+ * source validation type
+ */
+#define SVT_NO_VERIFY 0x0 /* no verification is required */
+#define SVT_VERIFY_SID_SQ 0x1 /* verify using SID and SQ fields */
+#define SVT_VERIFY_BUS 0x2 /* verify bus of request-id */
+
+/*
+ * source-id qualifier
+ */
+#define SQ_ALL_16 0x0 /* verify all 16 bits of request-id */
+#define SQ_13_IGNORE_1 0x1 /* verify most significant 13 bits, ignore
+ * the third least significant bit
+ */
+#define SQ_13_IGNORE_2 0x2 /* verify most significant 13 bits, ignore
+ * the second and third least significant bits
+ */
+#define SQ_13_IGNORE_3 0x3 /* verify most significant 13 bits, ignore
+ * the three least significant bits
+ */
+
+/*
+ * set SVT, SQ and SID fields of irte to verify
+ * source ids of interrupt requests
+ */
+static void set_irte_sid(struct irte *irte, unsigned int svt,
+ unsigned int sq, unsigned int sid)
+{
+ irte->svt = svt;
+ irte->sq = sq;
+ irte->sid = sid;
+}
+
+int set_ioapic_sid(struct irte *irte, int apic)
+{
+ int i;
+ u16 sid = 0;
+
+ if (!irte)
+ return -1;
+
+ for (i = 0; i < MAX_IO_APICS; i++) {
+ if (ir_ioapic[i].id == apic) {
+ sid = (ir_ioapic[i].bus << 8) | ir_ioapic[i].devfn;
+ break;
+ }
+ }
+
+ if (sid == 0) {
+ pr_warning("Failed to set source-id of IOAPIC (%d)\n", apic);
+ return -1;
+ }
+
+ set_irte_sid(irte, 1, 0, sid);
+
+ return 0;
+}
+
+int set_msi_sid(struct irte *irte, struct pci_dev *dev)
+{
+ struct pci_dev *bridge;
+
+ if (!irte || !dev)
+ return -1;
+
+ /* PCIe device or Root Complex integrated PCI device */
+ if (dev->is_pcie || !dev->bus->parent) {
+ set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_ALL_16,
+ (dev->bus->number << 8) | dev->devfn);
+ return 0;
+ }
+
+ bridge = pci_find_upstream_pcie_bridge(dev);
+ if (bridge) {
+ if (bridge->is_pcie) /* this is a PCIE-to-PCI/PCIX bridge */
+ set_irte_sid(irte, SVT_VERIFY_BUS, SQ_ALL_16,
+ (bridge->bus->number << 8) | dev->bus->number);
+ else /* this is a legacy PCI bridge */
+ set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_ALL_16,
+ (bridge->bus->number << 8) | bridge->devfn);
+ }
+
+ return 0;
+}
+
static void iommu_set_intr_remapping(struct intel_iommu *iommu, int mode)
{
u64 addr;
@@ -612,6 +711,35 @@ error:
return -1;
}
+static void ir_parse_one_ioapic_scope(struct acpi_dmar_device_scope *scope,
+ struct intel_iommu *iommu)
+{
+ struct acpi_dmar_pci_path *path;
+ u8 bus;
+ int count;
+
+ bus = scope->bus;
+ path = (struct acpi_dmar_pci_path *)(scope + 1);
+ count = (scope->length - sizeof(struct acpi_dmar_device_scope))
+ / sizeof(struct acpi_dmar_pci_path);
+
+ while (--count > 0) {
+ /*
+ * Access PCI config space directly because the PCI
+ * subsystem isn't initialized yet.
+ */
+ bus = read_pci_config_byte(bus, path->dev, path->fn,
+ PCI_SECONDARY_BUS);
+ path++;
+ }
+
+ ir_ioapic[ir_ioapic_num].bus = bus;
+ ir_ioapic[ir_ioapic_num].devfn = PCI_DEVFN(path->dev, path->fn);
+ ir_ioapic[ir_ioapic_num].iommu = iommu;
+ ir_ioapic[ir_ioapic_num].id = scope->enumeration_id;
+ ir_ioapic_num++;
+}
+
static int ir_parse_ioapic_scope(struct acpi_dmar_header *header,
struct intel_iommu *iommu)
{
@@ -636,9 +764,7 @@ static int ir_parse_ioapic_scope(struct acpi_dmar_header *header,
" 0x%Lx\n", scope->enumeration_id,
drhd->address);
- ir_ioapic[ir_ioapic_num].iommu = iommu;
- ir_ioapic[ir_ioapic_num].id = scope->enumeration_id;
- ir_ioapic_num++;
+ ir_parse_one_ioapic_scope(scope, iommu);
}
start += scope->length;
}
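set_ioapic_sid() and set_msi_sid() above stamp each interrupt remap table entry with the expected originator, so the IOMMU can discard interrupt messages whose requester-id does not match; devices behind a PCIe-to-PCI(-X) bridge are verified by bus only, since the bridge rewrites the requester-id. The requester-id itself is just bus and devfn packed into 16 bits:

#include <linux/types.h>

/* PCI requester-id layout: bus[15:8] | device[7:3] | function[2:0] */
static inline u16 pci_requester_id(u8 bus, u8 devfn)
{
	return ((u16)bus << 8) | devfn;
}

/* e.g. bus 0x03, device 0x1c, function 2 -> devfn 0xe2 -> requester-id 0x03e2 */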
diff --git a/drivers/pci/intr_remapping.h b/drivers/pci/intr_remapping.h
index ca48f0df8ac..63a263c1841 100644
--- a/drivers/pci/intr_remapping.h
+++ b/drivers/pci/intr_remapping.h
@@ -3,6 +3,8 @@
struct ioapic_scope {
struct intel_iommu *iommu;
unsigned int id;
+ unsigned int bus; /* PCI bus number */
+ unsigned int devfn; /* PCI devfn number */
};
#define IR_X2APIC_MODE(mode) (mode ? (1 << 11) : 0)
diff --git a/drivers/pci/iova.c b/drivers/pci/iova.c
index 2287116e982..46dd440e231 100644
--- a/drivers/pci/iova.c
+++ b/drivers/pci/iova.c
@@ -1,9 +1,19 @@
/*
- * Copyright (c) 2006, Intel Corporation.
+ * Copyright © 2006-2009, Intel Corporation.
*
- * This file is released under the GPLv2.
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
+ * Place - Suite 330, Boston, MA 02111-1307 USA.
*
- * Copyright (C) 2006-2008 Intel Corporation
* Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
*/
@@ -123,7 +133,15 @@ move_left:
/* Insert the new_iova into domain rbtree by holding writer lock */
/* Add new node and rebalance tree. */
{
- struct rb_node **entry = &((prev)), *parent = NULL;
+ struct rb_node **entry, *parent = NULL;
+
+ /* If we have 'prev', it's a valid place to start the
+ insertion. Otherwise, start from the root. */
+ if (prev)
+ entry = &prev;
+ else
+ entry = &iovad->rbroot.rb_node;
+
/* Figure out where to put new node */
while (*entry) {
struct iova *this = container_of(*entry,
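The iova.c fix above handles the case where no cached predecessor exists: with prev == NULL the old initializer pointed 'entry' at garbage, so the walk now starts from the rbtree root instead. For reference, the standard descend-and-link insertion that the loop performs, sketched for a node keyed by a plain integer (struct and function names are illustrative; the rbtree API is the kernel's):

#include <linux/rbtree.h>

struct my_node {
	struct rb_node rb;
	unsigned long key;
};

static void my_insert(struct rb_root *root, struct my_node *new)
{
	struct rb_node **link = &root->rb_node, *parent = NULL;

	while (*link) {				/* descend to the insertion point */
		struct my_node *this = rb_entry(*link, struct my_node, rb);

		parent = *link;
		if (new->key < this->key)
			link = &(*link)->rb_left;
		else
			link = &(*link)->rb_right;
	}
	rb_link_node(&new->rb, parent, link);	/* hook the node in ... */
	rb_insert_color(&new->rb, root);	/* ... and rebalance */
}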
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
index c682ac53641..46dad12f952 100644
--- a/drivers/platform/x86/Kconfig
+++ b/drivers/platform/x86/Kconfig
@@ -34,10 +34,27 @@ config ACER_WMI
If you have an ACPI-WMI compatible Acer/ Wistron laptop, say Y or M
here.
+config ACERHDF
+ tristate "Acer Aspire One temperature and fan driver"
+ depends on THERMAL && THERMAL_HWMON && ACPI
+ ---help---
+ This is a driver for Acer Aspire One netbooks. It allows access to
+ the temperature sensor and control of the fan.
+
+ After loading this driver the BIOS is still in control of the fan.
+ To let the kernel handle the fan, do:
+ echo -n enabled > /sys/class/thermal/thermal_zone0/mode
+
+ For more information about this driver see
+ <http://piie.net/files/acerhdf_README.txt>
+
+ If you have an Acer Aspire One netbook, say Y or M
+ here.
+
config ASUS_LAPTOP
- tristate "Asus Laptop Extras (EXPERIMENTAL)"
+ tristate "Asus Laptop Extras"
depends on ACPI
- depends on EXPERIMENTAL && !ACPI_ASUS
+ depends on !ACPI_ASUS
select LEDS_CLASS
select NEW_LEDS
select BACKLIGHT_CLASS_DEVICE
@@ -45,12 +62,12 @@ config ASUS_LAPTOP
---help---
This is the new Linux driver for Asus laptops. It may also support some
MEDION, JVC or VICTOR laptops. It makes all the extra buttons generate
- standard ACPI events that go through /proc/acpi/events. It also adds
+ standard ACPI events and input events. It also adds
support for video output switching, LCD backlight control, Bluetooth and
Wlan control, and most importantly, allows you to blink those fancy LEDs.
For more information and a userspace daemon for handling the extra
- buttons see <http://acpi4asus.sf.net/>.
+ buttons see <http://acpi4asus.sf.net>.
If you have an ACPI-compatible ASUS laptop, say Y or M here.
@@ -338,11 +355,15 @@ config EEEPC_LAPTOP
depends on INPUT
depends on EXPERIMENTAL
depends on RFKILL || RFKILL = n
+ depends on HOTPLUG_PCI
select BACKLIGHT_CLASS_DEVICE
select HWMON
---help---
This driver supports the Fn-Fx keys on Eee PC laptops.
- It also adds the ability to switch camera/wlan on/off.
+
+ It also gives access to some extra laptop functionality, such as
+ Bluetooth and backlight control, and allows powering some other
+ devices on and off.
If you have an Eee PC laptop, say Y or M here.
@@ -369,7 +390,7 @@ config ACPI_WMI
any ACPI-WMI devices.
config ACPI_ASUS
- tristate "ASUS/Medion Laptop Extras"
+ tristate "ASUS/Medion Laptop Extras (DEPRECATED)"
depends on ACPI
select BACKLIGHT_CLASS_DEVICE
---help---
@@ -390,7 +411,7 @@ config ACPI_ASUS
parameters.
More information and a userspace daemon for handling the extra buttons
- at <http://sourceforge.net/projects/acpi4asus/>.
+ at <http://acpi4asus.sf.net>.
If you have an ACPI-compatible ASUS laptop, say Y or M here. This
driver is still under development, so if your laptop is unsupported or
diff --git a/drivers/platform/x86/Makefile b/drivers/platform/x86/Makefile
index e40c7bd1b87..641b8bfa553 100644
--- a/drivers/platform/x86/Makefile
+++ b/drivers/platform/x86/Makefile
@@ -9,6 +9,7 @@ obj-$(CONFIG_COMPAL_LAPTOP) += compal-laptop.o
obj-$(CONFIG_DELL_LAPTOP) += dell-laptop.o
obj-$(CONFIG_DELL_WMI) += dell-wmi.o
obj-$(CONFIG_ACER_WMI) += acer-wmi.o
+obj-$(CONFIG_ACERHDF) += acerhdf.o
obj-$(CONFIG_HP_WMI) += hp-wmi.o
obj-$(CONFIG_TC1100_WMI) += tc1100-wmi.o
obj-$(CONFIG_SONY_LAPTOP) += sony-laptop.o
diff --git a/drivers/platform/x86/acerhdf.c b/drivers/platform/x86/acerhdf.c
new file mode 100644
index 00000000000..bdfee177eef
--- /dev/null
+++ b/drivers/platform/x86/acerhdf.c
@@ -0,0 +1,602 @@
+/*
+ * acerhdf - A driver which monitors the temperature
+ * of the Aspire One netbook and turns the fan on/off
+ * as soon as the upper/lower threshold is reached.
+ *
+ * (C) 2009 - Peter Feuerer peter (a) piie.net
+ * http://piie.net
+ * 2009 Borislav Petkov <petkovbb@gmail.com>
+ *
+ * Inspired by and many thanks to:
+ * o acerfand - Rachel Greenham
+ * o acer_ec.pl - Michael Kurz michi.kurz (at) googlemail.com
+ * - Petr Tomasek tomasek (#) etf,cuni,cz
+ * - Carlos Corbacho cathectic (at) gmail.com
+ * o lkml - Matthew Garrett
+ * - Borislav Petkov
+ * - Andreas Mohr
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#define pr_fmt(fmt) "acerhdf: " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/dmi.h>
+#include <acpi/acpi_drivers.h>
+#include <linux/sched.h>
+#include <linux/thermal.h>
+#include <linux/platform_device.h>
+
+/*
+ * The driver is started with "kernel mode off" by default. That means the
+ * BIOS is still in control of the fan. In this mode the driver allows reading
+ * the CPU temperature, and a userspace tool may take over control of the fan.
+ * If the driver is switched to "kernel mode" (e.g. via module parameter), the
+ * driver is in full control of the fan. If you want the module to start in
+ * kernel mode by default, define the following:
+ */
+#undef START_IN_KERNEL_MODE
+
+#define DRV_VER "0.5.13"
+
+/*
+ * According to the Atom N270 datasheet,
+ * (http://download.intel.com/design/processor/datashts/320032.pdf) the
+ * CPU's optimal operating limits denoted in junction temperature as
+ * measured by the on-die thermal monitor are within 0 <= Tj <= 90. So,
+ * assume 89°C is critical temperature.
+ */
+#define ACERHDF_TEMP_CRIT 89
+#define ACERHDF_FAN_OFF 0
+#define ACERHDF_FAN_AUTO 1
+
+/*
+ * No matter what value the user puts into the fanon variable, turn on the fan
+ * at 80 degrees Celsius to prevent hardware damage
+ */
+#define ACERHDF_MAX_FANON 80
+
+/*
+ * Maximum interval between two temperature checks is 15 seconds, as the die
+ * can get hot really fast under heavy load (plus we shouldn't forget about
+ * possible impact of _external_ aggressive sources such as heaters, sun etc.)
+ */
+#define ACERHDF_MAX_INTERVAL 15
+
+#ifdef START_IN_KERNEL_MODE
+static int kernelmode = 1;
+#else
+static int kernelmode;
+#endif
+
+static unsigned int interval = 10;
+static unsigned int fanon = 63;
+static unsigned int fanoff = 58;
+static unsigned int verbose;
+static unsigned int fanstate = ACERHDF_FAN_AUTO;
+static char force_bios[16];
+static unsigned int prev_interval;
+struct thermal_zone_device *thz_dev;
+struct thermal_cooling_device *cl_dev;
+struct platform_device *acerhdf_dev;
+
+module_param(kernelmode, uint, 0);
+MODULE_PARM_DESC(kernelmode, "Kernel mode fan control on / off");
+module_param(interval, uint, 0600);
+MODULE_PARM_DESC(interval, "Polling interval of temperature check");
+module_param(fanon, uint, 0600);
+MODULE_PARM_DESC(fanon, "Turn the fan on above this temperature");
+module_param(fanoff, uint, 0600);
+MODULE_PARM_DESC(fanoff, "Turn the fan off below this temperature");
+module_param(verbose, uint, 0600);
+MODULE_PARM_DESC(verbose, "Enable verbose dmesg output");
+module_param_string(force_bios, force_bios, 16, 0);
+MODULE_PARM_DESC(force_bios, "Force BIOS version and omit BIOS check");
+
+/* BIOS settings */
+struct bios_settings_t {
+ const char *vendor;
+ const char *version;
+ unsigned char fanreg;
+ unsigned char tempreg;
+ unsigned char fancmd[2]; /* fan off and auto commands */
+};
+
+/* Register addresses and values for different BIOS versions */
+static const struct bios_settings_t bios_tbl[] = {
+ {"Acer", "v0.3109", 0x55, 0x58, {0x1f, 0x00} },
+ {"Acer", "v0.3114", 0x55, 0x58, {0x1f, 0x00} },
+ {"Acer", "v0.3301", 0x55, 0x58, {0xaf, 0x00} },
+ {"Acer", "v0.3304", 0x55, 0x58, {0xaf, 0x00} },
+ {"Acer", "v0.3305", 0x55, 0x58, {0xaf, 0x00} },
+ {"Acer", "v0.3308", 0x55, 0x58, {0x21, 0x00} },
+ {"Acer", "v0.3309", 0x55, 0x58, {0x21, 0x00} },
+ {"Acer", "v0.3310", 0x55, 0x58, {0x21, 0x00} },
+ {"Gateway", "v0.3103", 0x55, 0x58, {0x21, 0x00} },
+ {"Packard Bell", "v0.3105", 0x55, 0x58, {0x21, 0x00} },
+ {"", "", 0, 0, {0, 0} }
+};
+
+static const struct bios_settings_t *bios_cfg __read_mostly;
+
+
+static int acerhdf_get_temp(int *temp)
+{
+ u8 read_temp;
+
+ if (ec_read(bios_cfg->tempreg, &read_temp))
+ return -EINVAL;
+
+ *temp = read_temp;
+
+ return 0;
+}
+
+static int acerhdf_get_fanstate(int *state)
+{
+ u8 fan;
+ bool tmp;
+
+ if (ec_read(bios_cfg->fanreg, &fan))
+ return -EINVAL;
+
+ tmp = (fan == bios_cfg->fancmd[ACERHDF_FAN_OFF]);
+ *state = tmp ? ACERHDF_FAN_OFF : ACERHDF_FAN_AUTO;
+
+ return 0;
+}
+
+static void acerhdf_change_fanstate(int state)
+{
+ unsigned char cmd;
+
+ if (verbose)
+ pr_notice("fan %s\n", (state == ACERHDF_FAN_OFF) ?
+ "OFF" : "ON");
+
+ if ((state != ACERHDF_FAN_OFF) && (state != ACERHDF_FAN_AUTO)) {
+ pr_err("invalid fan state %d requested, setting to auto!\n",
+ state);
+ state = ACERHDF_FAN_AUTO;
+ }
+
+ cmd = bios_cfg->fancmd[state];
+ fanstate = state;
+
+ ec_write(bios_cfg->fanreg, cmd);
+}
+
+static void acerhdf_check_param(struct thermal_zone_device *thermal)
+{
+ if (fanon > ACERHDF_MAX_FANON) {
+ pr_err("fanon temperature too high, set to %d\n",
+ ACERHDF_MAX_FANON);
+ fanon = ACERHDF_MAX_FANON;
+ }
+
+ if (kernelmode && prev_interval != interval) {
+ if (interval > ACERHDF_MAX_INTERVAL) {
+ pr_err("interval too high, set to %d\n",
+ ACERHDF_MAX_INTERVAL);
+ interval = ACERHDF_MAX_INTERVAL;
+ }
+ if (verbose)
+ pr_notice("interval changed to: %d\n",
+ interval);
+ thermal->polling_delay = interval*1000;
+ prev_interval = interval;
+ }
+}
+
+/*
+ * This is the thermal zone callback which does the delayed polling of the fan
+ * state. We check sysfs-originating settings here in acerhdf_check_param(),
+ * at polling-interval granularity, since we can't do that in the respective
+ * accessors of the module parameters.
+ */
+static int acerhdf_get_ec_temp(struct thermal_zone_device *thermal,
+ unsigned long *t)
+{
+ int temp, err = 0;
+
+ acerhdf_check_param(thermal);
+
+ err = acerhdf_get_temp(&temp);
+ if (err)
+ return err;
+
+ if (verbose)
+ pr_notice("temp %d\n", temp);
+
+ *t = temp;
+ return 0;
+}
+
+static int acerhdf_bind(struct thermal_zone_device *thermal,
+ struct thermal_cooling_device *cdev)
+{
+ /* if the cooling device is the one from acerhdf bind it */
+ if (cdev != cl_dev)
+ return 0;
+
+ if (thermal_zone_bind_cooling_device(thermal, 0, cdev)) {
+ pr_err("error binding cooling dev\n");
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int acerhdf_unbind(struct thermal_zone_device *thermal,
+ struct thermal_cooling_device *cdev)
+{
+ if (cdev != cl_dev)
+ return 0;
+
+ if (thermal_zone_unbind_cooling_device(thermal, 0, cdev)) {
+ pr_err("error unbinding cooling dev\n");
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static inline void acerhdf_revert_to_bios_mode(void)
+{
+ acerhdf_change_fanstate(ACERHDF_FAN_AUTO);
+ kernelmode = 0;
+ if (thz_dev)
+ thz_dev->polling_delay = 0;
+ pr_notice("kernel mode fan control OFF\n");
+}
+static inline void acerhdf_enable_kernelmode(void)
+{
+ kernelmode = 1;
+
+ thz_dev->polling_delay = interval*1000;
+ thermal_zone_device_update(thz_dev);
+ pr_notice("kernel mode fan control ON\n");
+}
+
+static int acerhdf_get_mode(struct thermal_zone_device *thermal,
+ enum thermal_device_mode *mode)
+{
+ if (verbose)
+ pr_notice("kernel mode fan control %d\n", kernelmode);
+
+ *mode = (kernelmode) ? THERMAL_DEVICE_ENABLED
+ : THERMAL_DEVICE_DISABLED;
+
+ return 0;
+}
+
+/*
+ * set operation mode:
+ * enabled: the thermal layer of the kernel takes care of
+ * the temperature and the fan.
+ * disabled: the BIOS takes control of the fan.
+ */
+static int acerhdf_set_mode(struct thermal_zone_device *thermal,
+ enum thermal_device_mode mode)
+{
+ if (mode == THERMAL_DEVICE_DISABLED && kernelmode)
+ acerhdf_revert_to_bios_mode();
+ else if (mode == THERMAL_DEVICE_ENABLED && !kernelmode)
+ acerhdf_enable_kernelmode();
+
+ return 0;
+}
+
+static int acerhdf_get_trip_type(struct thermal_zone_device *thermal, int trip,
+ enum thermal_trip_type *type)
+{
+ if (trip == 0)
+ *type = THERMAL_TRIP_ACTIVE;
+
+ return 0;
+}
+
+static int acerhdf_get_trip_temp(struct thermal_zone_device *thermal, int trip,
+ unsigned long *temp)
+{
+ if (trip == 0)
+ *temp = fanon;
+
+ return 0;
+}
+
+static int acerhdf_get_crit_temp(struct thermal_zone_device *thermal,
+ unsigned long *temperature)
+{
+ *temperature = ACERHDF_TEMP_CRIT;
+ return 0;
+}
+
+/* bind callback functions to thermalzone */
+struct thermal_zone_device_ops acerhdf_dev_ops = {
+ .bind = acerhdf_bind,
+ .unbind = acerhdf_unbind,
+ .get_temp = acerhdf_get_ec_temp,
+ .get_mode = acerhdf_get_mode,
+ .set_mode = acerhdf_set_mode,
+ .get_trip_type = acerhdf_get_trip_type,
+ .get_trip_temp = acerhdf_get_trip_temp,
+ .get_crit_temp = acerhdf_get_crit_temp,
+};
+
+
+/*
+ * cooling device callback functions
+ * get maximal fan cooling state
+ */
+static int acerhdf_get_max_state(struct thermal_cooling_device *cdev,
+ unsigned long *state)
+{
+ *state = 1;
+
+ return 0;
+}
+
+static int acerhdf_get_cur_state(struct thermal_cooling_device *cdev,
+ unsigned long *state)
+{
+ int err = 0, tmp;
+
+ err = acerhdf_get_fanstate(&tmp);
+ if (err)
+ return err;
+
+ *state = (tmp == ACERHDF_FAN_AUTO) ? 1 : 0;
+ return 0;
+}
+
+/* change current fan state - is overwritten when running in kernel mode */
+static int acerhdf_set_cur_state(struct thermal_cooling_device *cdev,
+ unsigned long state)
+{
+ int cur_temp, cur_state, err = 0;
+
+ if (!kernelmode)
+ return 0;
+
+ err = acerhdf_get_temp(&cur_temp);
+ if (err) {
+ pr_err("error reading temperature, hand off control to BIOS\n");
+ goto err_out;
+ }
+
+ err = acerhdf_get_fanstate(&cur_state);
+ if (err) {
+ pr_err("error reading fan state, hand off control to BIOS\n");
+ goto err_out;
+ }
+
+ if (state == 0) {
+ /* turn fan off only if below fanoff temperature */
+ if ((cur_state == ACERHDF_FAN_AUTO) &&
+ (cur_temp < fanoff))
+ acerhdf_change_fanstate(ACERHDF_FAN_OFF);
+ } else {
+ if (cur_state == ACERHDF_FAN_OFF)
+ acerhdf_change_fanstate(ACERHDF_FAN_AUTO);
+ }
+ return 0;
+
+err_out:
+ acerhdf_revert_to_bios_mode();
+ return -EINVAL;
+}
+
+/* bind fan callbacks to fan device */
+struct thermal_cooling_device_ops acerhdf_cooling_ops = {
+ .get_max_state = acerhdf_get_max_state,
+ .get_cur_state = acerhdf_get_cur_state,
+ .set_cur_state = acerhdf_set_cur_state,
+};
+
+/* suspend / resume functionality */
+static int acerhdf_suspend(struct platform_device *dev, pm_message_t state)
+{
+ if (kernelmode)
+ acerhdf_change_fanstate(ACERHDF_FAN_AUTO);
+
+ if (verbose)
+ pr_notice("going suspend\n");
+
+ return 0;
+}
+
+static int acerhdf_resume(struct platform_device *device)
+{
+ if (verbose)
+ pr_notice("resuming\n");
+
+ return 0;
+}
+
+static int __devinit acerhdf_probe(struct platform_device *device)
+{
+ return 0;
+}
+
+static int acerhdf_remove(struct platform_device *device)
+{
+ return 0;
+}
+
+struct platform_driver acerhdf_drv = {
+ .driver = {
+ .name = "acerhdf",
+ .owner = THIS_MODULE,
+ },
+ .probe = acerhdf_probe,
+ .remove = acerhdf_remove,
+ .suspend = acerhdf_suspend,
+ .resume = acerhdf_resume,
+};
+
+
+/* check hardware */
+static int acerhdf_check_hardware(void)
+{
+ char const *vendor, *version, *product;
+ int i;
+
+ /* get BIOS data */
+ vendor = dmi_get_system_info(DMI_SYS_VENDOR);
+ version = dmi_get_system_info(DMI_BIOS_VERSION);
+ product = dmi_get_system_info(DMI_PRODUCT_NAME);
+
+ pr_info("Acer Aspire One Fan driver, v.%s\n", DRV_VER);
+
+ if (!force_bios[0]) {
+ if (strncmp(product, "AO", 2)) {
+ pr_err("no Aspire One hardware found\n");
+ return -EINVAL;
+ }
+ } else {
+ pr_info("forcing BIOS version: %s\n", version);
+ version = force_bios;
+ kernelmode = 0;
+ }
+
+ if (verbose)
+ pr_info("BIOS info: %s %s, product: %s\n",
+ vendor, version, product);
+
+ /* search BIOS version and vendor in BIOS settings table */
+ for (i = 0; bios_tbl[i].version[0]; i++) {
+ if (!strcmp(bios_tbl[i].vendor, vendor) &&
+ !strcmp(bios_tbl[i].version, version)) {
+ bios_cfg = &bios_tbl[i];
+ break;
+ }
+ }
+
+ if (!bios_cfg) {
+ pr_err("unknown (unsupported) BIOS version %s/%s, "
+ "please report, aborting!\n", vendor, version);
+ return -EINVAL;
+ }
+
+ /*
+ * if started with kernel mode off, prevent the kernel from switching
+ * off the fan
+ */
+ if (!kernelmode) {
+ pr_notice("Fan control off, to enable do:\n");
+ pr_notice("echo -n \"enabled\" > "
+ "/sys/class/thermal/thermal_zone0/mode\n");
+ }
+
+ return 0;
+}
+
+static int acerhdf_register_platform(void)
+{
+ int err = 0;
+
+ err = platform_driver_register(&acerhdf_drv);
+ if (err)
+ return err;
+
+ acerhdf_dev = platform_device_alloc("acerhdf", -1);
+ platform_device_add(acerhdf_dev);
+
+ return 0;
+}
+
+static void acerhdf_unregister_platform(void)
+{
+ if (!acerhdf_dev)
+ return;
+
+ platform_device_del(acerhdf_dev);
+ platform_driver_unregister(&acerhdf_drv);
+}
+
+static int acerhdf_register_thermal(void)
+{
+ cl_dev = thermal_cooling_device_register("acerhdf-fan", NULL,
+ &acerhdf_cooling_ops);
+
+ if (IS_ERR(cl_dev))
+ return -EINVAL;
+
+ thz_dev = thermal_zone_device_register("acerhdf", 1, NULL,
+ &acerhdf_dev_ops, 0, 0, 0,
+ (kernelmode) ? interval*1000 : 0);
+ if (IS_ERR(thz_dev))
+ return -EINVAL;
+
+ return 0;
+}
+
+static void acerhdf_unregister_thermal(void)
+{
+ if (cl_dev) {
+ thermal_cooling_device_unregister(cl_dev);
+ cl_dev = NULL;
+ }
+
+ if (thz_dev) {
+ thermal_zone_device_unregister(thz_dev);
+ thz_dev = NULL;
+ }
+}
+
+static int __init acerhdf_init(void)
+{
+ int err = 0;
+
+ err = acerhdf_check_hardware();
+ if (err)
+ goto out_err;
+
+ err = acerhdf_register_platform();
+ if (err)
+ goto err_unreg;
+
+ err = acerhdf_register_thermal();
+ if (err)
+ goto err_unreg;
+
+ return 0;
+
+err_unreg:
+ acerhdf_unregister_thermal();
+ acerhdf_unregister_platform();
+
+out_err:
+ return -ENODEV;
+}
+
+static void __exit acerhdf_exit(void)
+{
+ acerhdf_change_fanstate(ACERHDF_FAN_AUTO);
+ acerhdf_unregister_thermal();
+ acerhdf_unregister_platform();
+}
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Peter Feuerer");
+MODULE_DESCRIPTION("Aspire One temperature and fan driver");
+MODULE_ALIAS("dmi:*:*Acer*:*:");
+MODULE_ALIAS("dmi:*:*Gateway*:*:");
+MODULE_ALIAS("dmi:*:*Packard Bell*:*:");
+
+module_init(acerhdf_init);
+module_exit(acerhdf_exit);
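
The cooling-device callbacks in the acerhdf driver above follow the generic thermal API contract: the thermal core asks for the maximum and current state and hands set_cur_state() a target state, which the driver translates into a fan action. The following is a minimal, self-contained sketch of that contract only; the dummy_* names are hypothetical and the two-state model (0 = off, 1 = active) merely mirrors the driver above, it is not the patch itself.

#include <linux/thermal.h>

static unsigned long dummy_state;	/* 0 = off, 1 = active cooling */

static int dummy_get_max_state(struct thermal_cooling_device *cdev,
			       unsigned long *state)
{
	*state = 1;			/* only "off" and "on" are modelled */
	return 0;
}

static int dummy_get_cur_state(struct thermal_cooling_device *cdev,
			       unsigned long *state)
{
	*state = dummy_state;
	return 0;
}

static int dummy_set_cur_state(struct thermal_cooling_device *cdev,
			       unsigned long state)
{
	dummy_state = state;		/* a real driver would poke the EC here */
	return 0;
}

static struct thermal_cooling_device_ops dummy_cooling_ops = {
	.get_max_state = dummy_get_max_state,
	.get_cur_state = dummy_get_cur_state,
	.set_cur_state = dummy_set_cur_state,
};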
diff --git a/drivers/platform/x86/asus-laptop.c b/drivers/platform/x86/asus-laptop.c
index bfc1a8892a3..db657bbeec9 100644
--- a/drivers/platform/x86/asus-laptop.c
+++ b/drivers/platform/x86/asus-laptop.c
@@ -33,6 +33,8 @@
* Sam Lin - GPS support
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
@@ -53,9 +55,10 @@
#define ASUS_HOTK_NAME "Asus Laptop Support"
#define ASUS_HOTK_CLASS "hotkey"
#define ASUS_HOTK_DEVICE_NAME "Hotkey"
-#define ASUS_HOTK_FILE "asus-laptop"
+#define ASUS_HOTK_FILE KBUILD_MODNAME
#define ASUS_HOTK_PREFIX "\\_SB.ATKD."
+
/*
* Some events we use, same for all Asus
*/
@@ -207,13 +210,17 @@ MODULE_DEVICE_TABLE(acpi, asus_device_ids);
static int asus_hotk_add(struct acpi_device *device);
static int asus_hotk_remove(struct acpi_device *device, int type);
+static void asus_hotk_notify(struct acpi_device *device, u32 event);
+
static struct acpi_driver asus_hotk_driver = {
.name = ASUS_HOTK_NAME,
.class = ASUS_HOTK_CLASS,
.ids = asus_device_ids,
+ .flags = ACPI_DRIVER_ALL_NOTIFY_EVENTS,
.ops = {
.add = asus_hotk_add,
.remove = asus_hotk_remove,
+ .notify = asus_hotk_notify,
},
};
@@ -323,7 +330,7 @@ static int read_wireless_status(int mask)
rv = acpi_evaluate_integer(wireless_status_handle, NULL, NULL, &status);
if (ACPI_FAILURE(rv))
- printk(ASUS_WARNING "Error reading Wireless status\n");
+ pr_warning("Error reading Wireless status\n");
else
return (status & mask) ? 1 : 0;
@@ -337,7 +344,7 @@ static int read_gps_status(void)
rv = acpi_evaluate_integer(gps_status_handle, NULL, NULL, &status);
if (ACPI_FAILURE(rv))
- printk(ASUS_WARNING "Error reading GPS status\n");
+ pr_warning("Error reading GPS status\n");
else
return status ? 1 : 0;
@@ -377,7 +384,7 @@ static void write_status(acpi_handle handle, int out, int mask)
}
if (write_acpi_int(handle, NULL, out, NULL))
- printk(ASUS_WARNING " write failed %x\n", mask);
+ pr_warning(" write failed %x\n", mask);
}
/* /sys/class/led handlers */
@@ -420,7 +427,7 @@ static int set_lcd_state(int value)
NULL, NULL, NULL);
if (ACPI_FAILURE(status))
- printk(ASUS_WARNING "Error switching LCD\n");
+ pr_warning("Error switching LCD\n");
}
write_status(NULL, lcd, LCD_ON);
@@ -444,7 +451,7 @@ static int read_brightness(struct backlight_device *bd)
rv = acpi_evaluate_integer(brightness_get_handle, NULL, NULL, &value);
if (ACPI_FAILURE(rv))
- printk(ASUS_WARNING "Error reading brightness\n");
+ pr_warning("Error reading brightness\n");
return value;
}
@@ -457,7 +464,7 @@ static int set_brightness(struct backlight_device *bd, int value)
/* 0 <= value <= 15 */
if (write_acpi_int(brightness_set_handle, NULL, value, NULL)) {
- printk(ASUS_WARNING "Error changing brightness\n");
+ pr_warning("Error changing brightness\n");
ret = -EIO;
}
@@ -587,7 +594,7 @@ static ssize_t store_ledd(struct device *dev, struct device_attribute *attr,
rv = parse_arg(buf, count, &value);
if (rv > 0) {
if (write_acpi_int(ledd_set_handle, NULL, value, NULL))
- printk(ASUS_WARNING "LED display write failed\n");
+ pr_warning("LED display write failed\n");
else
hotk->ledd_status = (u32) value;
}
@@ -632,7 +639,7 @@ static void set_display(int value)
{
/* no sanity check needed for now */
if (write_acpi_int(display_set_handle, NULL, value, NULL))
- printk(ASUS_WARNING "Error setting display\n");
+ pr_warning("Error setting display\n");
return;
}
@@ -647,7 +654,7 @@ static int read_display(void)
rv = acpi_evaluate_integer(display_get_handle, NULL,
NULL, &value);
if (ACPI_FAILURE(rv))
- printk(ASUS_WARNING "Error reading display status\n");
+ pr_warning("Error reading display status\n");
}
value &= 0x0F; /* needed for some models, shouldn't hurt others */
@@ -689,7 +696,7 @@ static ssize_t store_disp(struct device *dev, struct device_attribute *attr,
static void set_light_sens_switch(int value)
{
if (write_acpi_int(ls_switch_handle, NULL, value, NULL))
- printk(ASUS_WARNING "Error setting light sensor switch\n");
+ pr_warning("Error setting light sensor switch\n");
hotk->light_switch = value;
}
@@ -714,7 +721,7 @@ static ssize_t store_lssw(struct device *dev, struct device_attribute *attr,
static void set_light_sens_level(int value)
{
if (write_acpi_int(ls_level_handle, NULL, value, NULL))
- printk(ASUS_WARNING "Error setting light sensor level\n");
+ pr_warning("Error setting light sensor level\n");
hotk->light_level = value;
}
@@ -812,7 +819,7 @@ static int asus_setkeycode(struct input_dev *dev, int scancode, int keycode)
return -EINVAL;
}
-static void asus_hotk_notify(acpi_handle handle, u32 event, void *data)
+static void asus_hotk_notify(struct acpi_device *device, u32 event)
{
static struct key_entry *key;
u16 count;
@@ -975,11 +982,11 @@ static int asus_hotk_get_info(void)
*/
status = acpi_get_table(ACPI_SIG_DSDT, 1, &asus_info);
if (ACPI_FAILURE(status))
- printk(ASUS_WARNING "Couldn't get the DSDT table header\n");
+ pr_warning("Couldn't get the DSDT table header\n");
/* We have to write 0 on init this far for all ASUS models */
if (write_acpi_int(hotk->handle, "INIT", 0, &buffer)) {
- printk(ASUS_ERR "Hotkey initialization failed\n");
+ pr_err("Hotkey initialization failed\n");
return -ENODEV;
}
@@ -987,9 +994,9 @@ static int asus_hotk_get_info(void)
status =
acpi_evaluate_integer(hotk->handle, "BSTS", NULL, &bsts_result);
if (ACPI_FAILURE(status))
- printk(ASUS_WARNING "Error calling BSTS\n");
+ pr_warning("Error calling BSTS\n");
else if (bsts_result)
- printk(ASUS_NOTICE "BSTS called, 0x%02x returned\n",
+ pr_notice("BSTS called, 0x%02x returned\n",
(uint) bsts_result);
/* This too ... */
@@ -1020,7 +1027,7 @@ static int asus_hotk_get_info(void)
return -ENOMEM;
if (*string)
- printk(ASUS_NOTICE " %s model detected\n", string);
+ pr_notice(" %s model detected\n", string);
ASUS_HANDLE_INIT(mled_set);
ASUS_HANDLE_INIT(tled_set);
@@ -1077,7 +1084,7 @@ static int asus_input_init(void)
hotk->inputdev = input_allocate_device();
if (!hotk->inputdev) {
- printk(ASUS_INFO "Unable to allocate input device\n");
+ pr_info("Unable to allocate input device\n");
return 0;
}
hotk->inputdev->name = "Asus Laptop extra buttons";
@@ -1096,7 +1103,7 @@ static int asus_input_init(void)
}
result = input_register_device(hotk->inputdev);
if (result) {
- printk(ASUS_INFO "Unable to register input device\n");
+ pr_info("Unable to register input device\n");
input_free_device(hotk->inputdev);
}
return result;
@@ -1113,7 +1120,7 @@ static int asus_hotk_check(void)
if (hotk->device->status.present) {
result = asus_hotk_get_info();
} else {
- printk(ASUS_ERR "Hotkey device not present, aborting\n");
+ pr_err("Hotkey device not present, aborting\n");
return -EINVAL;
}
@@ -1124,13 +1131,12 @@ static int asus_hotk_found;
static int asus_hotk_add(struct acpi_device *device)
{
- acpi_status status = AE_OK;
int result;
if (!device)
return -EINVAL;
- printk(ASUS_NOTICE "Asus Laptop Support version %s\n",
+ pr_notice("Asus Laptop Support version %s\n",
ASUS_LAPTOP_VERSION);
hotk = kzalloc(sizeof(struct asus_hotk), GFP_KERNEL);
@@ -1149,15 +1155,6 @@ static int asus_hotk_add(struct acpi_device *device)
asus_hotk_add_fs();
- /*
- * We install the handler, it will receive the hotk in parameter, so, we
- * could add other data to the hotk struct
- */
- status = acpi_install_notify_handler(hotk->handle, ACPI_ALL_NOTIFY,
- asus_hotk_notify, hotk);
- if (ACPI_FAILURE(status))
- printk(ASUS_ERR "Error installing notify handler\n");
-
asus_hotk_found = 1;
/* WLED and BLED are on by default */
@@ -1198,16 +1195,9 @@ end:
static int asus_hotk_remove(struct acpi_device *device, int type)
{
- acpi_status status = 0;
-
if (!device || !acpi_driver_data(device))
return -EINVAL;
- status = acpi_remove_notify_handler(hotk->handle, ACPI_ALL_NOTIFY,
- asus_hotk_notify);
- if (ACPI_FAILURE(status))
- printk(ASUS_ERR "Error removing notify handler\n");
-
kfree(hotk->name);
kfree(hotk);
@@ -1260,8 +1250,7 @@ static int asus_backlight_init(struct device *dev)
bd = backlight_device_register(ASUS_HOTK_FILE, dev,
NULL, &asusbl_ops);
if (IS_ERR(bd)) {
- printk(ASUS_ERR
- "Could not register asus backlight device\n");
+ pr_err("Could not register asus backlight device\n");
asus_backlight_device = NULL;
return PTR_ERR(bd);
}
@@ -1334,7 +1323,6 @@ out:
static int __init asus_laptop_init(void)
{
- struct device *dev;
int result;
if (acpi_disabled)
@@ -1356,24 +1344,10 @@ static int __init asus_laptop_init(void)
return -ENODEV;
}
- dev = acpi_get_physical_device(hotk->device->handle);
-
- if (!acpi_video_backlight_support()) {
- result = asus_backlight_init(dev);
- if (result)
- goto fail_backlight;
- } else
- printk(ASUS_INFO "Brightness ignored, must be controlled by "
- "ACPI video driver\n");
-
result = asus_input_init();
if (result)
goto fail_input;
- result = asus_led_init(dev);
- if (result)
- goto fail_led;
-
/* Register platform stuff */
result = platform_driver_register(&asuspf_driver);
if (result)
@@ -1394,8 +1368,27 @@ static int __init asus_laptop_init(void)
if (result)
goto fail_sysfs;
+ result = asus_led_init(&asuspf_device->dev);
+ if (result)
+ goto fail_led;
+
+ if (!acpi_video_backlight_support()) {
+ result = asus_backlight_init(&asuspf_device->dev);
+ if (result)
+ goto fail_backlight;
+ } else
+ pr_info("Brightness ignored, must be controlled by "
+ "ACPI video driver\n");
+
return 0;
+fail_backlight:
+ asus_led_exit();
+
+fail_led:
+ sysfs_remove_group(&asuspf_device->dev.kobj,
+ &asuspf_attribute_group);
+
fail_sysfs:
platform_device_del(asuspf_device);
@@ -1406,15 +1399,9 @@ fail_platform_device1:
platform_driver_unregister(&asuspf_driver);
fail_platform_driver:
- asus_led_exit();
-
-fail_led:
asus_input_exit();
fail_input:
- asus_backlight_exit();
-
-fail_backlight:
return result;
}
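
The asus-laptop conversion above replaces a hand-installed ACPI notify handler with the acpi_driver .notify callback; setting ACPI_DRIVER_ALL_NOTIFY_EVENTS in .flags asks the ACPI core to route both system and device notifications to that callback. A minimal sketch of the pattern, assuming the acpi_driver layout used by this kernel series (the example_* names and the "ABCD0001" ID are placeholders, not part of the patch):

#include <linux/acpi.h>

static const struct acpi_device_id example_ids[] = {
	{"ABCD0001", 0},
	{"", 0},
};

static void example_notify(struct acpi_device *device, u32 event)
{
	/* with ACPI_DRIVER_ALL_NOTIFY_EVENTS, system and device
	 * notifications both arrive here as the raw event value */
	dev_info(&device->dev, "ACPI event 0x%x\n", event);
}

static int example_add(struct acpi_device *device)
{
	return 0;
}

static int example_remove(struct acpi_device *device, int type)
{
	return 0;
}

static struct acpi_driver example_driver = {
	.name = "example",
	.class = "hotkey",
	.ids = example_ids,
	.flags = ACPI_DRIVER_ALL_NOTIFY_EVENTS,
	.ops = {
		.add = example_add,
		.remove = example_remove,
		.notify = example_notify,
	},
};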
diff --git a/drivers/platform/x86/asus_acpi.c b/drivers/platform/x86/asus_acpi.c
index ba1f7497e4b..ddf5240ade8 100644
--- a/drivers/platform/x86/asus_acpi.c
+++ b/drivers/platform/x86/asus_acpi.c
@@ -455,6 +455,8 @@ static struct asus_hotk *hotk;
*/
static int asus_hotk_add(struct acpi_device *device);
static int asus_hotk_remove(struct acpi_device *device, int type);
+static void asus_hotk_notify(struct acpi_device *device, u32 event);
+
static const struct acpi_device_id asus_device_ids[] = {
{"ATK0100", 0},
{"", 0},
@@ -465,9 +467,11 @@ static struct acpi_driver asus_hotk_driver = {
.name = "asus_acpi",
.class = ACPI_HOTK_CLASS,
.ids = asus_device_ids,
+ .flags = ACPI_DRIVER_ALL_NOTIFY_EVENTS,
.ops = {
.add = asus_hotk_add,
.remove = asus_hotk_remove,
+ .notify = asus_hotk_notify,
},
};
@@ -1101,12 +1105,20 @@ static int asus_hotk_remove_fs(struct acpi_device *device)
return 0;
}
-static void asus_hotk_notify(acpi_handle handle, u32 event, void *data)
+static void asus_hotk_notify(struct acpi_device *device, u32 event)
{
/* TODO Find a better way to handle events count. */
if (!hotk)
return;
+ /*
+ * The BIOS *should* be sending us device events, but apparently
+ * Asus uses system events instead, so just ignore any device
+ * events we get.
+ */
+ if (event > ACPI_MAX_SYS_NOTIFY)
+ return;
+
if ((event & ~((u32) BR_UP)) < 16)
hotk->brightness = (event & ~((u32) BR_UP));
else if ((event & ~((u32) BR_DOWN)) < 16)
@@ -1346,15 +1358,6 @@ static int asus_hotk_add(struct acpi_device *device)
if (result)
goto end;
- /*
- * We install the handler, it will receive the hotk in parameter, so, we
- * could add other data to the hotk struct
- */
- status = acpi_install_notify_handler(hotk->handle, ACPI_SYSTEM_NOTIFY,
- asus_hotk_notify, hotk);
- if (ACPI_FAILURE(status))
- printk(KERN_ERR " Error installing notify handler\n");
-
/* For laptops without GPLV: init the hotk->brightness value */
if ((!hotk->methods->brightness_get)
&& (!hotk->methods->brightness_status)
@@ -1389,16 +1392,9 @@ end:
static int asus_hotk_remove(struct acpi_device *device, int type)
{
- acpi_status status = 0;
-
if (!device || !acpi_driver_data(device))
return -EINVAL;
- status = acpi_remove_notify_handler(hotk->handle, ACPI_SYSTEM_NOTIFY,
- asus_hotk_notify);
- if (ACPI_FAILURE(status))
- printk(KERN_ERR "Asus ACPI: Error removing notify handler\n");
-
asus_hotk_remove_fs(device);
kfree(hotk);
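
The added check in asus_hotk_notify() relies on the split between ACPI system notifications and device-specific ones: device notify values live above ACPI_MAX_SYS_NOTIFY, so the handler simply drops them. A one-line illustration of that filter; the 0x7f boundary is an assumption about the value of ACPI_MAX_SYS_NOTIFY, not something stated in the patch:

static int is_system_notify(u32 event)
{
	return event <= 0x7f;	/* assumed value of ACPI_MAX_SYS_NOTIFY */
}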
diff --git a/drivers/platform/x86/dell-wmi.c b/drivers/platform/x86/dell-wmi.c
index 2fab9416214..0f900cc9fa7 100644
--- a/drivers/platform/x86/dell-wmi.c
+++ b/drivers/platform/x86/dell-wmi.c
@@ -46,10 +46,53 @@ struct key_entry {
u16 keycode;
};
-enum { KE_KEY, KE_SW, KE_END };
+enum { KE_KEY, KE_SW, KE_IGNORE, KE_END };
+
+/*
+ * Certain keys are flagged as KE_IGNORE. All of these are either
+ * notifications (rather than requests for change) or are also sent
+ * via the keyboard controller so should not be sent again.
+ */
static struct key_entry dell_wmi_keymap[] = {
{KE_KEY, 0xe045, KEY_PROG1},
+ {KE_KEY, 0xe009, KEY_EJECTCD},
+
+ /* These also contain the brightness level at offset 6 */
+ {KE_KEY, 0xe006, KEY_BRIGHTNESSUP},
+ {KE_KEY, 0xe005, KEY_BRIGHTNESSDOWN},
+
+ /* Battery health status button */
+ {KE_KEY, 0xe007, KEY_BATTERY},
+
+ /* This is actually for all radios. Although physically a
+ * switch, the notification does not provide an indication of
+ * state and so it should be reported as a key */
+ {KE_KEY, 0xe008, KEY_WLAN},
+
+ /* The next device is at offset 6, the active devices are at
+ offset 8 and the attached devices at offset 10 */
+ {KE_KEY, 0xe00b, KEY_DISPLAYTOGGLE},
+
+ {KE_IGNORE, 0xe00c, KEY_KBDILLUMTOGGLE},
+
+ /* BIOS error detected */
+ {KE_IGNORE, 0xe00d, KEY_RESERVED},
+
+ /* Wifi Catcher */
+ {KE_KEY, 0xe011, KEY_PROG2},
+
+ /* Ambient light sensor toggle */
+ {KE_IGNORE, 0xe013, KEY_RESERVED},
+
+ {KE_IGNORE, 0xe020, KEY_MUTE},
+ {KE_IGNORE, 0xe02e, KEY_VOLUMEDOWN},
+ {KE_IGNORE, 0xe030, KEY_VOLUMEUP},
+ {KE_IGNORE, 0xe033, KEY_KBDILLUMUP},
+ {KE_IGNORE, 0xe034, KEY_KBDILLUMDOWN},
+ {KE_IGNORE, 0xe03a, KEY_CAPSLOCK},
+ {KE_IGNORE, 0xe045, KEY_NUMLOCK},
+ {KE_IGNORE, 0xe046, KEY_SCROLLLOCK},
{KE_END, 0}
};
@@ -122,15 +165,20 @@ static void dell_wmi_notify(u32 value, void *context)
if (obj && obj->type == ACPI_TYPE_BUFFER) {
int *buffer = (int *)obj->buffer.pointer;
- key = dell_wmi_get_entry_by_scancode(buffer[1]);
+ /*
+ * The upper bytes of the event may contain
+ * additional information, so mask them off for the
+ * scancode lookup
+ */
+ key = dell_wmi_get_entry_by_scancode(buffer[1] & 0xFFFF);
if (key) {
input_report_key(dell_wmi_input_dev, key->keycode, 1);
input_sync(dell_wmi_input_dev);
input_report_key(dell_wmi_input_dev, key->keycode, 0);
input_sync(dell_wmi_input_dev);
- } else
+ } else if (buffer[1] & 0xFFFF)
printk(KERN_INFO "dell-wmi: Unknown key %x pressed\n",
- buffer[1]);
+ buffer[1] & 0xFFFF);
}
}
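
Two details of the dell-wmi change are worth making concrete: the scancode lookup uses only the low 16 bits of the event word (the upper bytes may carry extra data), and KE_IGNORE entries match the table but are deliberately not reported, which also keeps them out of the "Unknown key" message. A stand-alone sketch of that flow; the demo_* names, the keycode numbers and the 0x0005e006 event word are illustrative assumptions, while the scancodes come from the keymap above:

#include <stdio.h>

enum { KE_KEY, KE_SW, KE_IGNORE, KE_END };

struct key_entry {
	int type;
	unsigned short code;
	unsigned short keycode;
};

static struct key_entry demo_keymap[] = {
	{KE_KEY, 0xe006, 225 /* KEY_BRIGHTNESSUP */},
	{KE_IGNORE, 0xe020, 113 /* KEY_MUTE */},
	{KE_END, 0, 0}
};

static struct key_entry *demo_lookup(unsigned int event_word)
{
	unsigned short scancode = event_word & 0xFFFF;
	struct key_entry *key;

	for (key = demo_keymap; key->type != KE_END; key++)
		if (key->code == scancode)
			return key;	/* KE_IGNORE matches are found but not reported */
	return NULL;
}

int main(void)
{
	/* an event word with additional data in the upper bytes */
	struct key_entry *key = demo_lookup(0x0005e006);

	printf("type=%d keycode=%u\n", key ? key->type : -1,
	       key ? key->keycode : 0);
	return 0;
}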
diff --git a/drivers/platform/x86/eeepc-laptop.c b/drivers/platform/x86/eeepc-laptop.c
index 8153b3e5918..ec560f16d72 100644
--- a/drivers/platform/x86/eeepc-laptop.c
+++ b/drivers/platform/x86/eeepc-laptop.c
@@ -16,6 +16,8 @@
* GNU General Public License for more details.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
@@ -31,6 +33,7 @@
#include <linux/input.h>
#include <linux/rfkill.h>
#include <linux/pci.h>
+#include <linux/pci_hotplug.h>
#define EEEPC_LAPTOP_VERSION "0.1"
@@ -40,11 +43,6 @@
#define EEEPC_HOTK_DEVICE_NAME "Hotkey"
#define EEEPC_HOTK_HID "ASUS010"
-#define EEEPC_LOG EEEPC_HOTK_FILE ": "
-#define EEEPC_ERR KERN_ERR EEEPC_LOG
-#define EEEPC_WARNING KERN_WARNING EEEPC_LOG
-#define EEEPC_NOTICE KERN_NOTICE EEEPC_LOG
-#define EEEPC_INFO KERN_INFO EEEPC_LOG
/*
* Definitions for Asus EeePC
@@ -62,7 +60,10 @@ enum {
DISABLE_ASL_GPS = 0x0020,
DISABLE_ASL_DISPLAYSWITCH = 0x0040,
DISABLE_ASL_MODEM = 0x0080,
- DISABLE_ASL_CARDREADER = 0x0100
+ DISABLE_ASL_CARDREADER = 0x0100,
+ DISABLE_ASL_3G = 0x0200,
+ DISABLE_ASL_WIMAX = 0x0400,
+ DISABLE_ASL_HWCF = 0x0800
};
enum {
@@ -87,7 +88,13 @@ enum {
CM_ASL_USBPORT3,
CM_ASL_MODEM,
CM_ASL_CARDREADER,
- CM_ASL_LID
+ CM_ASL_3G,
+ CM_ASL_WIMAX,
+ CM_ASL_HWCF,
+ CM_ASL_LID,
+ CM_ASL_TYPE,
+ CM_ASL_PANELPOWER, /*P901*/
+ CM_ASL_TPD
};
static const char *cm_getv[] = {
@@ -96,7 +103,8 @@ static const char *cm_getv[] = {
NULL, "PBLG", NULL, NULL,
"CFVG", NULL, NULL, NULL,
"USBG", NULL, NULL, "MODG",
- "CRDG", "LIDG"
+ "CRDG", "M3GG", "WIMG", "HWCF",
+ "LIDG", "TYPE", "PBPG", "TPDG"
};
static const char *cm_setv[] = {
@@ -105,7 +113,8 @@ static const char *cm_setv[] = {
"SDSP", "PBLS", "HDPS", NULL,
"CFVS", NULL, NULL, NULL,
"USBG", NULL, NULL, "MODS",
- "CRDS", NULL
+ "CRDS", "M3GS", "WIMS", NULL,
+ NULL, NULL, "PBPS", "TPDS"
};
#define EEEPC_EC "\\_SB.PCI0.SBRG.EC0."
@@ -130,8 +139,10 @@ struct eeepc_hotk {
u16 event_count[128]; /* count for each event */
struct input_dev *inputdev;
u16 *keycode_map;
- struct rfkill *eeepc_wlan_rfkill;
- struct rfkill *eeepc_bluetooth_rfkill;
+ struct rfkill *wlan_rfkill;
+ struct rfkill *bluetooth_rfkill;
+ struct rfkill *wwan3g_rfkill;
+ struct hotplug_slot *hotplug_slot;
};
/* The actual device the driver binds to */
@@ -181,6 +192,7 @@ static struct key_entry eeepc_keymap[] = {
static int eeepc_hotk_add(struct acpi_device *device);
static int eeepc_hotk_remove(struct acpi_device *device, int type);
static int eeepc_hotk_resume(struct acpi_device *device);
+static void eeepc_hotk_notify(struct acpi_device *device, u32 event);
static const struct acpi_device_id eeepc_device_ids[] = {
{EEEPC_HOTK_HID, 0},
@@ -192,13 +204,24 @@ static struct acpi_driver eeepc_hotk_driver = {
.name = EEEPC_HOTK_NAME,
.class = EEEPC_HOTK_CLASS,
.ids = eeepc_device_ids,
+ .flags = ACPI_DRIVER_ALL_NOTIFY_EVENTS,
.ops = {
.add = eeepc_hotk_add,
.remove = eeepc_hotk_remove,
.resume = eeepc_hotk_resume,
+ .notify = eeepc_hotk_notify,
},
};
+/* PCI hotplug ops */
+static int eeepc_get_adapter_status(struct hotplug_slot *slot, u8 *value);
+
+static struct hotplug_slot_ops eeepc_hotplug_slot_ops = {
+ .owner = THIS_MODULE,
+ .get_adapter_status = eeepc_get_adapter_status,
+ .get_power_status = eeepc_get_adapter_status,
+};
+
/* The backlight device /sys/class/backlight */
static struct backlight_device *eeepc_backlight_device;
@@ -260,20 +283,20 @@ static int set_acpi(int cm, int value)
if (method == NULL)
return -ENODEV;
if (write_acpi_int(ehotk->handle, method, value, NULL))
- printk(EEEPC_WARNING "Error writing %s\n", method);
+ pr_warning("Error writing %s\n", method);
}
return 0;
}
static int get_acpi(int cm)
{
- int value = -1;
+ int value = -ENODEV;
if ((ehotk->cm_supported & (0x1 << cm))) {
const char *method = cm_getv[cm];
if (method == NULL)
return -ENODEV;
if (read_acpi_int(ehotk->handle, method, &value))
- printk(EEEPC_WARNING "Error reading %s\n", method);
+ pr_warning("Error reading %s\n", method);
}
return value;
}
@@ -318,6 +341,15 @@ static const struct rfkill_ops eeepc_rfkill_ops = {
.set_block = eeepc_rfkill_set,
};
+static void __init eeepc_enable_camera(void)
+{
+ /*
+ * If the following call to set_acpi() fails, it's because there's no
+ * camera so we can ignore the error.
+ */
+ set_acpi(CM_ASL_CAMERA, 1);
+}
+
/*
* Sys helpers
*/
@@ -336,13 +368,19 @@ static ssize_t store_sys_acpi(int cm, const char *buf, size_t count)
rv = parse_arg(buf, count, &value);
if (rv > 0)
- set_acpi(cm, value);
+ value = set_acpi(cm, value);
+ if (value < 0)
+ return value;
return rv;
}
static ssize_t show_sys_acpi(int cm, char *buf)
{
- return sprintf(buf, "%d\n", get_acpi(cm));
+ int value = get_acpi(cm);
+
+ if (value < 0)
+ return value;
+ return sprintf(buf, "%d\n", value);
}
#define EEEPC_CREATE_DEVICE_ATTR(_name, _cm) \
@@ -369,13 +407,88 @@ static ssize_t show_sys_acpi(int cm, char *buf)
EEEPC_CREATE_DEVICE_ATTR(camera, CM_ASL_CAMERA);
EEEPC_CREATE_DEVICE_ATTR(cardr, CM_ASL_CARDREADER);
EEEPC_CREATE_DEVICE_ATTR(disp, CM_ASL_DISPLAYSWITCH);
-EEEPC_CREATE_DEVICE_ATTR(cpufv, CM_ASL_CPUFV);
+
+struct eeepc_cpufv {
+ int num;
+ int cur;
+};
+
+static int get_cpufv(struct eeepc_cpufv *c)
+{
+ c->cur = get_acpi(CM_ASL_CPUFV);
+ c->num = (c->cur >> 8) & 0xff;
+ c->cur &= 0xff;
+ if (c->cur < 0 || c->num <= 0 || c->num > 12)
+ return -ENODEV;
+ return 0;
+}
+
+static ssize_t show_available_cpufv(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct eeepc_cpufv c;
+ int i;
+ ssize_t len = 0;
+
+ if (get_cpufv(&c))
+ return -ENODEV;
+ for (i = 0; i < c.num; i++)
+ len += sprintf(buf + len, "%d ", i);
+ len += sprintf(buf + len, "\n");
+ return len;
+}
+
+static ssize_t show_cpufv(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct eeepc_cpufv c;
+
+ if (get_cpufv(&c))
+ return -ENODEV;
+ return sprintf(buf, "%#x\n", (c.num << 8) | c.cur);
+}
+
+static ssize_t store_cpufv(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct eeepc_cpufv c;
+ int rv, value;
+
+ if (get_cpufv(&c))
+ return -ENODEV;
+ rv = parse_arg(buf, count, &value);
+ if (rv < 0)
+ return rv;
+ if (!rv || value < 0 || value >= c.num)
+ return -EINVAL;
+ set_acpi(CM_ASL_CPUFV, value);
+ return rv;
+}
+
+static struct device_attribute dev_attr_cpufv = {
+ .attr = {
+ .name = "cpufv",
+ .mode = 0644 },
+ .show = show_cpufv,
+ .store = store_cpufv
+};
+
+static struct device_attribute dev_attr_available_cpufv = {
+ .attr = {
+ .name = "available_cpufv",
+ .mode = 0444 },
+ .show = show_available_cpufv
+};
static struct attribute *platform_attributes[] = {
&dev_attr_camera.attr,
&dev_attr_cardr.attr,
&dev_attr_disp.attr,
&dev_attr_cpufv.attr,
+ &dev_attr_available_cpufv.attr,
NULL
};
@@ -441,6 +554,28 @@ static int eeepc_setkeycode(struct input_dev *dev, int scancode, int keycode)
return -EINVAL;
}
+static void cmsg_quirk(int cm, const char *name)
+{
+ int dummy;
+
+ /* Some BIOSes do not report cm although it is available.

+ Check if cm_getv[cm] works and, if yes, assume cm should be set. */
+ if (!(ehotk->cm_supported & (1 << cm))
+ && !read_acpi_int(ehotk->handle, cm_getv[cm], &dummy)) {
+ pr_info("%s (%x) not reported by BIOS,"
+ " enabling anyway\n", name, 1 << cm);
+ ehotk->cm_supported |= 1 << cm;
+ }
+}
+
+static void cmsg_quirks(void)
+{
+ cmsg_quirk(CM_ASL_LID, "LID");
+ cmsg_quirk(CM_ASL_TYPE, "TYPE");
+ cmsg_quirk(CM_ASL_PANELPOWER, "PANELPOWER");
+ cmsg_quirk(CM_ASL_TPD, "TPD");
+}
+
static int eeepc_hotk_check(void)
{
const struct key_entry *key;
@@ -453,26 +588,24 @@ static int eeepc_hotk_check(void)
if (ehotk->device->status.present) {
if (write_acpi_int(ehotk->handle, "INIT", ehotk->init_flag,
&buffer)) {
- printk(EEEPC_ERR "Hotkey initialization failed\n");
+ pr_err("Hotkey initialization failed\n");
return -ENODEV;
} else {
- printk(EEEPC_NOTICE "Hotkey init flags 0x%x\n",
- ehotk->init_flag);
+ pr_notice("Hotkey init flags 0x%x\n", ehotk->init_flag);
}
/* get control methods supported */
if (read_acpi_int(ehotk->handle, "CMSG"
, &ehotk->cm_supported)) {
- printk(EEEPC_ERR
- "Get control methods supported failed\n");
+ pr_err("Get control methods supported failed\n");
return -ENODEV;
} else {
- printk(EEEPC_INFO
- "Get control methods supported: 0x%x\n",
- ehotk->cm_supported);
+ cmsg_quirks();
+ pr_info("Get control methods supported: 0x%x\n",
+ ehotk->cm_supported);
}
ehotk->inputdev = input_allocate_device();
if (!ehotk->inputdev) {
- printk(EEEPC_INFO "Unable to allocate input device\n");
+ pr_info("Unable to allocate input device\n");
return 0;
}
ehotk->inputdev->name = "Asus EeePC extra buttons";
@@ -491,12 +624,12 @@ static int eeepc_hotk_check(void)
}
result = input_register_device(ehotk->inputdev);
if (result) {
- printk(EEEPC_INFO "Unable to register input device\n");
+ pr_info("Unable to register input device\n");
input_free_device(ehotk->inputdev);
return 0;
}
} else {
- printk(EEEPC_ERR "Hotkey device not present, aborting\n");
+ pr_err("Hotkey device not present, aborting\n");
return -EINVAL;
}
return 0;
@@ -514,6 +647,19 @@ static int notify_brn(void)
return -1;
}
+static int eeepc_get_adapter_status(struct hotplug_slot *hotplug_slot,
+ u8 *value)
+{
+ int val = get_acpi(CM_ASL_WLAN);
+
+ if (val == 1 || val == 0)
+ *value = val;
+ else
+ return -EINVAL;
+
+ return 0;
+}
+
static void eeepc_rfkill_hotplug(void)
{
struct pci_dev *dev;
@@ -521,7 +667,7 @@ static void eeepc_rfkill_hotplug(void)
bool blocked;
if (!bus) {
- printk(EEEPC_WARNING "Unable to find PCI bus 1?\n");
+ pr_warning("Unable to find PCI bus 1?\n");
return;
}
@@ -537,7 +683,7 @@ static void eeepc_rfkill_hotplug(void)
if (dev) {
pci_bus_assign_resources(bus);
if (pci_bus_add_device(dev))
- printk(EEEPC_ERR "Unable to hotplug wifi\n");
+ pr_err("Unable to hotplug wifi\n");
}
} else {
dev = pci_get_slot(bus, 0);
@@ -547,7 +693,7 @@ static void eeepc_rfkill_hotplug(void)
}
}
- rfkill_set_sw_state(ehotk->eeepc_wlan_rfkill, blocked);
+ rfkill_set_sw_state(ehotk->wlan_rfkill, blocked);
}
static void eeepc_rfkill_notify(acpi_handle handle, u32 event, void *data)
@@ -558,7 +704,7 @@ static void eeepc_rfkill_notify(acpi_handle handle, u32 event, void *data)
eeepc_rfkill_hotplug();
}
-static void eeepc_hotk_notify(acpi_handle handle, u32 event, void *data)
+static void eeepc_hotk_notify(struct acpi_device *device, u32 event)
{
static struct key_entry *key;
u16 count;
@@ -566,6 +712,8 @@ static void eeepc_hotk_notify(acpi_handle handle, u32 event, void *data)
if (!ehotk)
return;
+ if (event > ACPI_MAX_SYS_NOTIFY)
+ return;
if (event >= NOTIFY_BRN_MIN && event <= NOTIFY_BRN_MAX)
brn = notify_brn();
count = ehotk->event_count[event % 128]++;
@@ -618,8 +766,7 @@ static int eeepc_register_rfkill_notifier(char *node)
eeepc_rfkill_notify,
NULL);
if (ACPI_FAILURE(status))
- printk(EEEPC_WARNING
- "Failed to register notify on %s\n", node);
+ pr_warning("Failed to register notify on %s\n", node);
} else
return -ENODEV;
@@ -638,20 +785,66 @@ static void eeepc_unregister_rfkill_notifier(char *node)
ACPI_SYSTEM_NOTIFY,
eeepc_rfkill_notify);
if (ACPI_FAILURE(status))
- printk(EEEPC_ERR
- "Error removing rfkill notify handler %s\n",
+ pr_err("Error removing rfkill notify handler %s\n",
node);
}
}
+static void eeepc_cleanup_pci_hotplug(struct hotplug_slot *hotplug_slot)
+{
+ kfree(hotplug_slot->info);
+ kfree(hotplug_slot);
+}
+
+static int eeepc_setup_pci_hotplug(void)
+{
+ int ret = -ENOMEM;
+ struct pci_bus *bus = pci_find_bus(0, 1);
+
+ if (!bus) {
+ pr_err("Unable to find wifi PCI bus\n");
+ return -ENODEV;
+ }
+
+ ehotk->hotplug_slot = kzalloc(sizeof(struct hotplug_slot), GFP_KERNEL);
+ if (!ehotk->hotplug_slot)
+ goto error_slot;
+
+ ehotk->hotplug_slot->info = kzalloc(sizeof(struct hotplug_slot_info),
+ GFP_KERNEL);
+ if (!ehotk->hotplug_slot->info)
+ goto error_info;
+
+ ehotk->hotplug_slot->private = ehotk;
+ ehotk->hotplug_slot->release = &eeepc_cleanup_pci_hotplug;
+ ehotk->hotplug_slot->ops = &eeepc_hotplug_slot_ops;
+ eeepc_get_adapter_status(ehotk->hotplug_slot,
+ &ehotk->hotplug_slot->info->adapter_status);
+
+ ret = pci_hp_register(ehotk->hotplug_slot, bus, 0, "eeepc-wifi");
+ if (ret) {
+ pr_err("Unable to register hotplug slot - %d\n", ret);
+ goto error_register;
+ }
+
+ return 0;
+
+error_register:
+ kfree(ehotk->hotplug_slot->info);
+error_info:
+ kfree(ehotk->hotplug_slot);
+ ehotk->hotplug_slot = NULL;
+error_slot:
+ return ret;
+}
+
static int eeepc_hotk_add(struct acpi_device *device)
{
- acpi_status status = AE_OK;
int result;
if (!device)
return -EINVAL;
- printk(EEEPC_NOTICE EEEPC_HOTK_NAME "\n");
+ pr_notice(EEEPC_HOTK_NAME "\n");
ehotk = kzalloc(sizeof(struct eeepc_hotk), GFP_KERNEL);
if (!ehotk)
return -ENOMEM;
@@ -664,58 +857,9 @@ static int eeepc_hotk_add(struct acpi_device *device)
result = eeepc_hotk_check();
if (result)
goto ehotk_fail;
- status = acpi_install_notify_handler(ehotk->handle, ACPI_SYSTEM_NOTIFY,
- eeepc_hotk_notify, ehotk);
- if (ACPI_FAILURE(status))
- printk(EEEPC_ERR "Error installing notify handler\n");
-
- eeepc_register_rfkill_notifier("\\_SB.PCI0.P0P6");
- eeepc_register_rfkill_notifier("\\_SB.PCI0.P0P7");
-
- if (get_acpi(CM_ASL_WLAN) != -1) {
- ehotk->eeepc_wlan_rfkill = rfkill_alloc("eeepc-wlan",
- &device->dev,
- RFKILL_TYPE_WLAN,
- &eeepc_rfkill_ops,
- (void *)CM_ASL_WLAN);
-
- if (!ehotk->eeepc_wlan_rfkill)
- goto wlan_fail;
-
- rfkill_init_sw_state(ehotk->eeepc_wlan_rfkill,
- get_acpi(CM_ASL_WLAN) != 1);
- result = rfkill_register(ehotk->eeepc_wlan_rfkill);
- if (result)
- goto wlan_fail;
- }
-
- if (get_acpi(CM_ASL_BLUETOOTH) != -1) {
- ehotk->eeepc_bluetooth_rfkill =
- rfkill_alloc("eeepc-bluetooth",
- &device->dev,
- RFKILL_TYPE_BLUETOOTH,
- &eeepc_rfkill_ops,
- (void *)CM_ASL_BLUETOOTH);
-
- if (!ehotk->eeepc_bluetooth_rfkill)
- goto bluetooth_fail;
-
- rfkill_init_sw_state(ehotk->eeepc_bluetooth_rfkill,
- get_acpi(CM_ASL_BLUETOOTH) != 1);
- result = rfkill_register(ehotk->eeepc_bluetooth_rfkill);
- if (result)
- goto bluetooth_fail;
- }
return 0;
- bluetooth_fail:
- rfkill_destroy(ehotk->eeepc_bluetooth_rfkill);
- rfkill_unregister(ehotk->eeepc_wlan_rfkill);
- wlan_fail:
- rfkill_destroy(ehotk->eeepc_wlan_rfkill);
- eeepc_unregister_rfkill_notifier("\\_SB.PCI0.P0P6");
- eeepc_unregister_rfkill_notifier("\\_SB.PCI0.P0P7");
ehotk_fail:
kfree(ehotk);
ehotk = NULL;
@@ -725,17 +869,8 @@ static int eeepc_hotk_add(struct acpi_device *device)
static int eeepc_hotk_remove(struct acpi_device *device, int type)
{
- acpi_status status = 0;
-
if (!device || !acpi_driver_data(device))
return -EINVAL;
- status = acpi_remove_notify_handler(ehotk->handle, ACPI_SYSTEM_NOTIFY,
- eeepc_hotk_notify);
- if (ACPI_FAILURE(status))
- printk(EEEPC_ERR "Error removing notify handler\n");
-
- eeepc_unregister_rfkill_notifier("\\_SB.PCI0.P0P6");
- eeepc_unregister_rfkill_notifier("\\_SB.PCI0.P0P7");
kfree(ehotk);
return 0;
@@ -743,7 +878,7 @@ static int eeepc_hotk_remove(struct acpi_device *device, int type)
static int eeepc_hotk_resume(struct acpi_device *device)
{
- if (ehotk->eeepc_wlan_rfkill) {
+ if (ehotk->wlan_rfkill) {
bool wlan;
/* Workaround - it seems that _PTS disables the wireless
@@ -755,14 +890,13 @@ static int eeepc_hotk_resume(struct acpi_device *device)
wlan = get_acpi(CM_ASL_WLAN);
set_acpi(CM_ASL_WLAN, wlan);
- rfkill_set_sw_state(ehotk->eeepc_wlan_rfkill,
- wlan != 1);
+ rfkill_set_sw_state(ehotk->wlan_rfkill, wlan != 1);
eeepc_rfkill_hotplug();
}
- if (ehotk->eeepc_bluetooth_rfkill)
- rfkill_set_sw_state(ehotk->eeepc_bluetooth_rfkill,
+ if (ehotk->bluetooth_rfkill)
+ rfkill_set_sw_state(ehotk->bluetooth_rfkill,
get_acpi(CM_ASL_BLUETOOTH) != 1);
return 0;
@@ -884,10 +1018,16 @@ static void eeepc_backlight_exit(void)
static void eeepc_rfkill_exit(void)
{
- if (ehotk->eeepc_wlan_rfkill)
- rfkill_unregister(ehotk->eeepc_wlan_rfkill);
- if (ehotk->eeepc_bluetooth_rfkill)
- rfkill_unregister(ehotk->eeepc_bluetooth_rfkill);
+ eeepc_unregister_rfkill_notifier("\\_SB.PCI0.P0P6");
+ eeepc_unregister_rfkill_notifier("\\_SB.PCI0.P0P7");
+ if (ehotk->wlan_rfkill)
+ rfkill_unregister(ehotk->wlan_rfkill);
+ if (ehotk->bluetooth_rfkill)
+ rfkill_unregister(ehotk->bluetooth_rfkill);
+ if (ehotk->wwan3g_rfkill)
+ rfkill_unregister(ehotk->wwan3g_rfkill);
+ if (ehotk->hotplug_slot)
+ pci_hp_deregister(ehotk->hotplug_slot);
}
static void eeepc_input_exit(void)
@@ -922,6 +1062,75 @@ static void __exit eeepc_laptop_exit(void)
platform_driver_unregister(&platform_driver);
}
+static int eeepc_new_rfkill(struct rfkill **rfkill,
+ const char *name, struct device *dev,
+ enum rfkill_type type, int cm)
+{
+ int result;
+
+ result = get_acpi(cm);
+ if (result < 0)
+ return result;
+
+ *rfkill = rfkill_alloc(name, dev, type,
+ &eeepc_rfkill_ops, (void *)(unsigned long)cm);
+
+ if (!*rfkill)
+ return -EINVAL;
+
+ rfkill_init_sw_state(*rfkill, get_acpi(cm) != 1);
+ result = rfkill_register(*rfkill);
+ if (result) {
+ rfkill_destroy(*rfkill);
+ *rfkill = NULL;
+ return result;
+ }
+ return 0;
+}
+
+
+static int eeepc_rfkill_init(struct device *dev)
+{
+ int result = 0;
+
+ eeepc_register_rfkill_notifier("\\_SB.PCI0.P0P6");
+ eeepc_register_rfkill_notifier("\\_SB.PCI0.P0P7");
+
+ result = eeepc_new_rfkill(&ehotk->wlan_rfkill,
+ "eeepc-wlan", dev,
+ RFKILL_TYPE_WLAN, CM_ASL_WLAN);
+
+ if (result && result != -ENODEV)
+ goto exit;
+
+ result = eeepc_new_rfkill(&ehotk->bluetooth_rfkill,
+ "eeepc-bluetooth", dev,
+ RFKILL_TYPE_BLUETOOTH, CM_ASL_BLUETOOTH);
+
+ if (result && result != -ENODEV)
+ goto exit;
+
+ result = eeepc_new_rfkill(&ehotk->wwan3g_rfkill,
+ "eeepc-wwan3g", dev,
+ RFKILL_TYPE_WWAN, CM_ASL_3G);
+
+ if (result && result != -ENODEV)
+ goto exit;
+
+ result = eeepc_setup_pci_hotplug();
+ /*
+ * If we get -EBUSY then something else is handling the PCI hotplug -
+ * don't fail in this case
+ */
+ if (result == -EBUSY)
+ result = 0;
+
+exit:
+ if (result && result != -ENODEV)
+ eeepc_rfkill_exit();
+ return result;
+}
+
static int eeepc_backlight_init(struct device *dev)
{
struct backlight_device *bd;
@@ -929,8 +1138,7 @@ static int eeepc_backlight_init(struct device *dev)
bd = backlight_device_register(EEEPC_HOTK_FILE, dev,
NULL, &eeepcbl_ops);
if (IS_ERR(bd)) {
- printk(EEEPC_ERR
- "Could not register eeepc backlight device\n");
+ pr_err("Could not register eeepc backlight device\n");
eeepc_backlight_device = NULL;
return PTR_ERR(bd);
}
@@ -949,8 +1157,7 @@ static int eeepc_hwmon_init(struct device *dev)
hwmon = hwmon_device_register(dev);
if (IS_ERR(hwmon)) {
- printk(EEEPC_ERR
- "Could not register eeepc hwmon device\n");
+ pr_err("Could not register eeepc hwmon device\n");
eeepc_hwmon_device = NULL;
return PTR_ERR(hwmon);
}
@@ -976,19 +1183,9 @@ static int __init eeepc_laptop_init(void)
acpi_bus_unregister_driver(&eeepc_hotk_driver);
return -ENODEV;
}
- dev = acpi_get_physical_device(ehotk->device->handle);
- if (!acpi_video_backlight_support()) {
- result = eeepc_backlight_init(dev);
- if (result)
- goto fail_backlight;
- } else
- printk(EEEPC_INFO "Backlight controlled by ACPI video "
- "driver\n");
+ eeepc_enable_camera();
- result = eeepc_hwmon_init(dev);
- if (result)
- goto fail_hwmon;
/* Register platform stuff */
result = platform_driver_register(&platform_driver);
if (result)
@@ -1005,7 +1202,33 @@ static int __init eeepc_laptop_init(void)
&platform_attribute_group);
if (result)
goto fail_sysfs;
+
+ dev = &platform_device->dev;
+
+ if (!acpi_video_backlight_support()) {
+ result = eeepc_backlight_init(dev);
+ if (result)
+ goto fail_backlight;
+ } else
+ pr_info("Backlight controlled by ACPI video "
+ "driver\n");
+
+ result = eeepc_hwmon_init(dev);
+ if (result)
+ goto fail_hwmon;
+
+ result = eeepc_rfkill_init(dev);
+ if (result)
+ goto fail_rfkill;
+
return 0;
+fail_rfkill:
+ eeepc_hwmon_exit();
+fail_hwmon:
+ eeepc_backlight_exit();
+fail_backlight:
+ sysfs_remove_group(&platform_device->dev.kobj,
+ &platform_attribute_group);
fail_sysfs:
platform_device_del(platform_device);
fail_platform_device2:
@@ -1013,12 +1236,7 @@ fail_platform_device2:
fail_platform_device1:
platform_driver_unregister(&platform_driver);
fail_platform_driver:
- eeepc_hwmon_exit();
-fail_hwmon:
- eeepc_backlight_exit();
-fail_backlight:
eeepc_input_exit();
- eeepc_rfkill_exit();
return result;
}
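
The new cpufv handling in eeepc-laptop packs two values into one ACPI integer: get_cpufv() reads CM_ASL_CPUFV and splits it into the number of available levels (bits 15..8) and the currently selected level (bits 7..0). A stand-alone decode of a made-up raw value, mirroring the shifts in get_cpufv():

#include <stdio.h>

int main(void)
{
	int raw = 0x0301;		/* illustrative value only */
	int num = (raw >> 8) & 0xff;	/* 3 levels available */
	int cur = raw & 0xff;		/* level 1 currently selected */

	printf("num=%d cur=%d\n", num, cur);
	return 0;
}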
diff --git a/drivers/platform/x86/hp-wmi.c b/drivers/platform/x86/hp-wmi.c
index 16fffe44e33..4ac2311c00a 100644
--- a/drivers/platform/x86/hp-wmi.c
+++ b/drivers/platform/x86/hp-wmi.c
@@ -47,7 +47,7 @@ MODULE_ALIAS("wmi:5FB7F034-2C63-45e9-BE91-3D44E2C707E4");
#define HPWMI_DISPLAY_QUERY 0x1
#define HPWMI_HDDTEMP_QUERY 0x2
#define HPWMI_ALS_QUERY 0x3
-#define HPWMI_DOCK_QUERY 0x4
+#define HPWMI_HARDWARE_QUERY 0x4
#define HPWMI_WIRELESS_QUERY 0x5
#define HPWMI_HOTKEY_QUERY 0xc
@@ -75,10 +75,9 @@ struct key_entry {
u16 keycode;
};
-enum { KE_KEY, KE_SW, KE_END };
+enum { KE_KEY, KE_END };
static struct key_entry hp_wmi_keymap[] = {
- {KE_SW, 0x01, SW_DOCK},
{KE_KEY, 0x02, KEY_BRIGHTNESSUP},
{KE_KEY, 0x03, KEY_BRIGHTNESSDOWN},
{KE_KEY, 0x20e6, KEY_PROG1},
@@ -151,7 +150,22 @@ static int hp_wmi_als_state(void)
static int hp_wmi_dock_state(void)
{
- return hp_wmi_perform_query(HPWMI_DOCK_QUERY, 0, 0);
+ int ret = hp_wmi_perform_query(HPWMI_HARDWARE_QUERY, 0, 0);
+
+ if (ret < 0)
+ return ret;
+
+ return ret & 0x1;
+}
+
+static int hp_wmi_tablet_state(void)
+{
+ int ret = hp_wmi_perform_query(HPWMI_HARDWARE_QUERY, 0, 0);
+
+ if (ret < 0)
+ return ret;
+
+ return (ret & 0x4) ? 1 : 0;
}
static int hp_wmi_set_block(void *data, bool blocked)
@@ -232,6 +246,15 @@ static ssize_t show_dock(struct device *dev, struct device_attribute *attr,
return sprintf(buf, "%d\n", value);
}
+static ssize_t show_tablet(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ int value = hp_wmi_tablet_state();
+ if (value < 0)
+ return -EINVAL;
+ return sprintf(buf, "%d\n", value);
+}
+
static ssize_t set_als(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
@@ -244,6 +267,7 @@ static DEVICE_ATTR(display, S_IRUGO, show_display, NULL);
static DEVICE_ATTR(hddtemp, S_IRUGO, show_hddtemp, NULL);
static DEVICE_ATTR(als, S_IRUGO | S_IWUSR, show_als, set_als);
static DEVICE_ATTR(dock, S_IRUGO, show_dock, NULL);
+static DEVICE_ATTR(tablet, S_IRUGO, show_tablet, NULL);
static struct key_entry *hp_wmi_get_entry_by_scancode(int code)
{
@@ -326,13 +350,13 @@ static void hp_wmi_notify(u32 value, void *context)
key->keycode, 0);
input_sync(hp_wmi_input_dev);
break;
- case KE_SW:
- input_report_switch(hp_wmi_input_dev,
- key->keycode,
- hp_wmi_dock_state());
- input_sync(hp_wmi_input_dev);
- break;
}
+ } else if (eventcode == 0x1) {
+ input_report_switch(hp_wmi_input_dev, SW_DOCK,
+ hp_wmi_dock_state());
+ input_report_switch(hp_wmi_input_dev, SW_TABLET_MODE,
+ hp_wmi_tablet_state());
+ input_sync(hp_wmi_input_dev);
} else if (eventcode == 0x5) {
if (wifi_rfkill)
rfkill_set_sw_state(wifi_rfkill,
@@ -369,18 +393,19 @@ static int __init hp_wmi_input_setup(void)
set_bit(EV_KEY, hp_wmi_input_dev->evbit);
set_bit(key->keycode, hp_wmi_input_dev->keybit);
break;
- case KE_SW:
- set_bit(EV_SW, hp_wmi_input_dev->evbit);
- set_bit(key->keycode, hp_wmi_input_dev->swbit);
-
- /* Set initial dock state */
- input_report_switch(hp_wmi_input_dev, key->keycode,
- hp_wmi_dock_state());
- input_sync(hp_wmi_input_dev);
- break;
}
}
+ set_bit(EV_SW, hp_wmi_input_dev->evbit);
+ set_bit(SW_DOCK, hp_wmi_input_dev->swbit);
+ set_bit(SW_TABLET_MODE, hp_wmi_input_dev->swbit);
+
+ /* Set initial hardware state */
+ input_report_switch(hp_wmi_input_dev, SW_DOCK, hp_wmi_dock_state());
+ input_report_switch(hp_wmi_input_dev, SW_TABLET_MODE,
+ hp_wmi_tablet_state());
+ input_sync(hp_wmi_input_dev);
+
err = input_register_device(hp_wmi_input_dev);
if (err) {
@@ -397,6 +422,7 @@ static void cleanup_sysfs(struct platform_device *device)
device_remove_file(&device->dev, &dev_attr_hddtemp);
device_remove_file(&device->dev, &dev_attr_als);
device_remove_file(&device->dev, &dev_attr_dock);
+ device_remove_file(&device->dev, &dev_attr_tablet);
}
static int __init hp_wmi_bios_setup(struct platform_device *device)
@@ -416,6 +442,9 @@ static int __init hp_wmi_bios_setup(struct platform_device *device)
err = device_create_file(&device->dev, &dev_attr_dock);
if (err)
goto add_sysfs_error;
+ err = device_create_file(&device->dev, &dev_attr_tablet);
+ if (err)
+ goto add_sysfs_error;
if (wireless & 0x1) {
wifi_rfkill = rfkill_alloc("hp-wifi", &device->dev,
@@ -485,23 +514,17 @@ static int __exit hp_wmi_bios_remove(struct platform_device *device)
static int hp_wmi_resume_handler(struct platform_device *device)
{
- struct key_entry *key;
-
/*
- * Docking state may have changed while suspended, so trigger
- * an input event for the current state. As this is a switch,
+ * Hardware state may have changed while suspended, so trigger
+ * input events for the current state. As this is a switch,
* the input layer will only actually pass it on if the state
* changed.
*/
- for (key = hp_wmi_keymap; key->type != KE_END; key++) {
- switch (key->type) {
- case KE_SW:
- input_report_switch(hp_wmi_input_dev, key->keycode,
- hp_wmi_dock_state());
- input_sync(hp_wmi_input_dev);
- break;
- }
- }
+
+ input_report_switch(hp_wmi_input_dev, SW_DOCK, hp_wmi_dock_state());
+ input_report_switch(hp_wmi_input_dev, SW_TABLET_MODE,
+ hp_wmi_tablet_state());
+ input_sync(hp_wmi_input_dev);
return 0;
}
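
HPWMI_HARDWARE_QUERY now returns a small bitfield rather than a plain dock flag: bit 0 is the dock state and bit 2 the tablet-mode switch, as decoded by hp_wmi_dock_state() and hp_wmi_tablet_state() above. A stand-alone decode of an example value (the raw value 0x5 is made up for illustration):

#include <stdio.h>

int main(void)
{
	int ret = 0x5;			/* example raw query result */
	int docked = ret & 0x1;
	int tablet = (ret & 0x4) ? 1 : 0;

	printf("docked=%d tablet=%d\n", docked, tablet);
	return 0;
}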
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
index 40d64c03278..a463fd72c49 100644
--- a/drivers/platform/x86/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -22,7 +22,7 @@
*/
#define TPACPI_VERSION "0.23"
-#define TPACPI_SYSFS_VERSION 0x020300
+#define TPACPI_SYSFS_VERSION 0x020400
/*
* Changelog:
@@ -257,6 +257,8 @@ static struct {
u32 wan:1;
u32 uwb:1;
u32 fan_ctrl_status_undef:1;
+ u32 second_fan:1;
+ u32 beep_needs_two_args:1;
u32 input_device_registered:1;
u32 platform_drv_registered:1;
u32 platform_drv_attrs_registered:1;
@@ -277,8 +279,10 @@ struct thinkpad_id_data {
char *bios_version_str; /* Something like 1ZET51WW (1.03z) */
char *ec_version_str; /* Something like 1ZHT51WW-1.04a */
- u16 bios_model; /* Big Endian, TP-1Y = 0x5931, 0 = unknown */
+ u16 bios_model; /* 1Y = 0x5931, 0 = unknown */
u16 ec_model;
+ u16 bios_release; /* 1ZETK1WW = 0x314b, 0 = unknown */
+ u16 ec_release;
char *model_str; /* ThinkPad T43 */
char *nummodel_str; /* 9384A9C for a 9384-A9C model */
@@ -355,6 +359,73 @@ static void tpacpi_log_usertask(const char * const what)
} \
} while (0)
+/*
+ * Quirk handling helpers
+ *
+ * ThinkPad IDs and versions seen in the field so far
+ * are two-characters from the set [0-9A-Z], i.e. base 36.
+ *
+ * We use values well outside that range as specials.
+ */
+
+#define TPACPI_MATCH_ANY 0xffffU
+#define TPACPI_MATCH_UNKNOWN 0U
+
+/* TPID('1', 'Y') == 0x5931 */
+#define TPID(__c1, __c2) (((__c2) << 8) | (__c1))
+
+#define TPACPI_Q_IBM(__id1, __id2, __quirk) \
+ { .vendor = PCI_VENDOR_ID_IBM, \
+ .bios = TPID(__id1, __id2), \
+ .ec = TPACPI_MATCH_ANY, \
+ .quirks = (__quirk) }
+
+#define TPACPI_Q_LNV(__id1, __id2, __quirk) \
+ { .vendor = PCI_VENDOR_ID_LENOVO, \
+ .bios = TPID(__id1, __id2), \
+ .ec = TPACPI_MATCH_ANY, \
+ .quirks = (__quirk) }
+
+struct tpacpi_quirk {
+ unsigned int vendor;
+ u16 bios;
+ u16 ec;
+ unsigned long quirks;
+};
+
+/**
+ * tpacpi_check_quirks() - search BIOS/EC version on a list
+ * @qlist: array of &struct tpacpi_quirk
+ * @qlist_size: number of elements in @qlist
+ *
+ * Iterates over a quirks list until one is found that matches the
+ * ThinkPad's vendor, BIOS and EC model.
+ *
+ * Returns 0 if nothing matches, otherwise returns the quirks field of
+ * the matching &struct tpacpi_quirk entry.
+ *
+ * The match criterion is: vendor, ec and bios must all match.
+ */
+static unsigned long __init tpacpi_check_quirks(
+ const struct tpacpi_quirk *qlist,
+ unsigned int qlist_size)
+{
+ while (qlist_size) {
+ if ((qlist->vendor == thinkpad_id.vendor ||
+ qlist->vendor == TPACPI_MATCH_ANY) &&
+ (qlist->bios == thinkpad_id.bios_model ||
+ qlist->bios == TPACPI_MATCH_ANY) &&
+ (qlist->ec == thinkpad_id.ec_model ||
+ qlist->ec == TPACPI_MATCH_ANY))
+ return qlist->quirks;
+
+ qlist_size--;
+ qlist++;
+ }
+ return 0;
+}
+
+
/****************************************************************************
****************************************************************************
*
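
The quirk helpers above key off the two-character ThinkPad model IDs: TPID() packs the two ID characters little-endian into a u16, and tpacpi_check_quirks() walks a table comparing vendor, BIOS and EC model against it. A stand-alone illustration of the packing, using the "1Y" (T43) pairing taken from the comment in the patch:

#include <stdio.h>

#define TPID(c1, c2) (((c2) << 8) | (c1))

int main(void)
{
	/* a BIOS ID beginning with "1Y" packs to 0x5931 and therefore
	 * matches a TPACPI_Q_IBM('1', 'Y', ...) quirk table entry */
	printf("TPID('1','Y') = 0x%04x\n", TPID('1', 'Y'));
	return 0;
}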
@@ -2880,7 +2951,7 @@ static int __init hotkey_init(struct ibm_init_struct *iibm)
/* update bright_acpimode... */
tpacpi_check_std_acpi_brightness_support();
- if (tp_features.bright_acpimode) {
+ if (tp_features.bright_acpimode && acpi_video_backlight_support()) {
printk(TPACPI_INFO
"This ThinkPad has standard ACPI backlight "
"brightness control, supported by the ACPI "
@@ -4773,7 +4844,7 @@ TPACPI_HANDLE(led, ec, "SLED", /* 570 */
"LED", /* all others */
); /* R30, R31 */
-#define TPACPI_LED_NUMLEDS 8
+#define TPACPI_LED_NUMLEDS 16
static struct tpacpi_led_classdev *tpacpi_leds;
static enum led_status_t tpacpi_led_state_cache[TPACPI_LED_NUMLEDS];
static const char * const tpacpi_led_names[TPACPI_LED_NUMLEDS] = {
@@ -4786,15 +4857,20 @@ static const char * const tpacpi_led_names[TPACPI_LED_NUMLEDS] = {
"tpacpi::dock_batt",
"tpacpi::unknown_led",
"tpacpi::standby",
+ "tpacpi::dock_status1",
+ "tpacpi::dock_status2",
+ "tpacpi::unknown_led2",
+ "tpacpi::unknown_led3",
+ "tpacpi::thinkvantage",
};
-#define TPACPI_SAFE_LEDS 0x0081U
+#define TPACPI_SAFE_LEDS 0x1081U
static inline bool tpacpi_is_led_restricted(const unsigned int led)
{
#ifdef CONFIG_THINKPAD_ACPI_UNSAFE_LEDS
return false;
#else
- return (TPACPI_SAFE_LEDS & (1 << led)) == 0;
+ return (1U & (TPACPI_SAFE_LEDS >> led)) == 0;
#endif
}
@@ -4956,6 +5032,10 @@ static int __init tpacpi_init_led(unsigned int led)
tpacpi_leds[led].led = led;
+ /* LEDs with no name don't get registered */
+ if (!tpacpi_led_names[led])
+ return 0;
+
tpacpi_leds[led].led_classdev.brightness_set = &led_sysfs_set;
tpacpi_leds[led].led_classdev.blink_set = &led_sysfs_blink_set;
if (led_supported == TPACPI_LED_570)
@@ -4974,10 +5054,59 @@ static int __init tpacpi_init_led(unsigned int led)
return rc;
}
+static const struct tpacpi_quirk led_useful_qtable[] __initconst = {
+ TPACPI_Q_IBM('1', 'E', 0x009f), /* A30 */
+ TPACPI_Q_IBM('1', 'N', 0x009f), /* A31 */
+ TPACPI_Q_IBM('1', 'G', 0x009f), /* A31 */
+
+ TPACPI_Q_IBM('1', 'I', 0x0097), /* T30 */
+ TPACPI_Q_IBM('1', 'R', 0x0097), /* T40, T41, T42, R50, R51 */
+ TPACPI_Q_IBM('7', '0', 0x0097), /* T43, R52 */
+ TPACPI_Q_IBM('1', 'Y', 0x0097), /* T43 */
+ TPACPI_Q_IBM('1', 'W', 0x0097), /* R50e */
+ TPACPI_Q_IBM('1', 'V', 0x0097), /* R51 */
+ TPACPI_Q_IBM('7', '8', 0x0097), /* R51e */
+ TPACPI_Q_IBM('7', '6', 0x0097), /* R52 */
+
+ TPACPI_Q_IBM('1', 'K', 0x00bf), /* X30 */
+ TPACPI_Q_IBM('1', 'Q', 0x00bf), /* X31, X32 */
+ TPACPI_Q_IBM('1', 'U', 0x00bf), /* X40 */
+ TPACPI_Q_IBM('7', '4', 0x00bf), /* X41 */
+ TPACPI_Q_IBM('7', '5', 0x00bf), /* X41t */
+
+ TPACPI_Q_IBM('7', '9', 0x1f97), /* T60 (1) */
+ TPACPI_Q_IBM('7', '7', 0x1f97), /* Z60* (1) */
+ TPACPI_Q_IBM('7', 'F', 0x1f97), /* Z61* (1) */
+ TPACPI_Q_IBM('7', 'B', 0x1fb7), /* X60 (1) */
+
+ /* (1) - may have excess leds enabled on MSB */
+
+ /* Defaults (order matters, keep last, don't reorder!) */
+ { /* Lenovo */
+ .vendor = PCI_VENDOR_ID_LENOVO,
+ .bios = TPACPI_MATCH_ANY, .ec = TPACPI_MATCH_ANY,
+ .quirks = 0x1fffU,
+ },
+ { /* IBM ThinkPads with no EC version string */
+ .vendor = PCI_VENDOR_ID_IBM,
+ .bios = TPACPI_MATCH_ANY, .ec = TPACPI_MATCH_UNKNOWN,
+ .quirks = 0x00ffU,
+ },
+ { /* IBM ThinkPads with EC version string */
+ .vendor = PCI_VENDOR_ID_IBM,
+ .bios = TPACPI_MATCH_ANY, .ec = TPACPI_MATCH_ANY,
+ .quirks = 0x00bfU,
+ },
+};
+
+#undef TPACPI_LEDQ_IBM
+#undef TPACPI_LEDQ_LNV
+
static int __init led_init(struct ibm_init_struct *iibm)
{
unsigned int i;
int rc;
+ unsigned long useful_leds;
vdbg_printk(TPACPI_DBG_INIT, "initializing LED subdriver\n");
@@ -4999,6 +5128,9 @@ static int __init led_init(struct ibm_init_struct *iibm)
vdbg_printk(TPACPI_DBG_INIT, "LED commands are %s, mode %d\n",
str_supported(led_supported), led_supported);
+ if (led_supported == TPACPI_LED_NONE)
+ return 1;
+
tpacpi_leds = kzalloc(sizeof(*tpacpi_leds) * TPACPI_LED_NUMLEDS,
GFP_KERNEL);
if (!tpacpi_leds) {
@@ -5006,8 +5138,12 @@ static int __init led_init(struct ibm_init_struct *iibm)
return -ENOMEM;
}
+ useful_leds = tpacpi_check_quirks(led_useful_qtable,
+ ARRAY_SIZE(led_useful_qtable));
+
for (i = 0; i < TPACPI_LED_NUMLEDS; i++) {
- if (!tpacpi_is_led_restricted(i)) {
+ if (!tpacpi_is_led_restricted(i) &&
+ test_bit(i, &useful_leds)) {
rc = tpacpi_init_led(i);
if (rc < 0) {
led_exit();
@@ -5017,12 +5153,11 @@ static int __init led_init(struct ibm_init_struct *iibm)
}
#ifdef CONFIG_THINKPAD_ACPI_UNSAFE_LEDS
- if (led_supported != TPACPI_LED_NONE)
- printk(TPACPI_NOTICE
- "warning: userspace override of important "
- "firmware LEDs is enabled\n");
+ printk(TPACPI_NOTICE
+ "warning: userspace override of important "
+ "firmware LEDs is enabled\n");
#endif
- return (led_supported != TPACPI_LED_NONE)? 0 : 1;
+ return 0;
}
#define str_led_status(s) \
@@ -5052,7 +5187,7 @@ static int led_read(char *p)
}
len += sprintf(p + len, "commands:\t"
- "<led> on, <led> off, <led> blink (<led> is 0-7)\n");
+ "<led> on, <led> off, <led> blink (<led> is 0-15)\n");
return len;
}
@@ -5067,7 +5202,7 @@ static int led_write(char *buf)
return -ENODEV;
while ((cmd = next_cmd(&buf))) {
- if (sscanf(cmd, "%d", &led) != 1 || led < 0 || led > 7)
+ if (sscanf(cmd, "%d", &led) != 1 || led < 0 || led > 15)
return -EINVAL;
if (strstr(cmd, "off")) {
@@ -5101,8 +5236,17 @@ static struct ibm_struct led_driver_data = {
TPACPI_HANDLE(beep, ec, "BEEP"); /* all except R30, R31 */
+#define TPACPI_BEEP_Q1 0x0001
+
+static const struct tpacpi_quirk beep_quirk_table[] __initconst = {
+ TPACPI_Q_IBM('I', 'M', TPACPI_BEEP_Q1), /* 570 */
+ TPACPI_Q_IBM('I', 'U', TPACPI_BEEP_Q1), /* 570E - unverified */
+};
+
static int __init beep_init(struct ibm_init_struct *iibm)
{
+ unsigned long quirks;
+
vdbg_printk(TPACPI_DBG_INIT, "initializing beep subdriver\n");
TPACPI_ACPIHANDLE_INIT(beep);
@@ -5110,6 +5254,11 @@ static int __init beep_init(struct ibm_init_struct *iibm)
vdbg_printk(TPACPI_DBG_INIT, "beep is %s\n",
str_supported(beep_handle != NULL));
+ quirks = tpacpi_check_quirks(beep_quirk_table,
+ ARRAY_SIZE(beep_quirk_table));
+
+ tp_features.beep_needs_two_args = !!(quirks & TPACPI_BEEP_Q1);
+
return (beep_handle)? 0 : 1;
}
@@ -5141,8 +5290,15 @@ static int beep_write(char *buf)
/* beep_cmd set */
} else
return -EINVAL;
- if (!acpi_evalf(beep_handle, NULL, NULL, "vdd", beep_cmd, 0))
- return -EIO;
+ if (tp_features.beep_needs_two_args) {
+ if (!acpi_evalf(beep_handle, NULL, NULL, "vdd",
+ beep_cmd, 0))
+ return -EIO;
+ } else {
+ if (!acpi_evalf(beep_handle, NULL, NULL, "vd",
+ beep_cmd))
+ return -EIO;
+ }
}
return 0;
@@ -5569,6 +5725,10 @@ static struct ibm_struct ecdump_driver_data = {
* Bit 3-0: backlight brightness level
*
* brightness_get_raw returns status data in the HBRV layout
+ *
+ * WARNING: The X61 has been verified to use HBRV for something else, so
+ * this should be used _only_ on IBM ThinkPads, and maybe with some careful
+ * testing on the very early *60 Lenovo models...
*/
enum {
@@ -5869,6 +6029,12 @@ static int __init brightness_init(struct ibm_init_struct *iibm)
brightness_mode);
}
+ /* Safety */
+ if (thinkpad_id.vendor != PCI_VENDOR_ID_IBM &&
+ (brightness_mode == TPACPI_BRGHT_MODE_ECNVRAM ||
+ brightness_mode == TPACPI_BRGHT_MODE_EC))
+ return -EINVAL;
+
if (tpacpi_brightness_get_raw(&b) < 0)
return 1;
@@ -6161,6 +6327,21 @@ static struct ibm_struct volume_driver_data = {
* For firmware bugs, refer to:
* http://thinkwiki.org/wiki/Embedded_Controller_Firmware#Firmware_Issues
*
+ * ----
+ *
+ * ThinkPad EC register 0x31 bit 0 (only on select models)
+ *
+ * When bit 0 of EC register 0x31 is zero, the tachometer registers
+ * show the speed of the main fan. When bit 0 of EC register 0x31
+ * is one, the tachometer registers show the speed of the auxiliary
+ * fan.
+ *
+ * Fan control seems to affect both fans, regardless of the state
+ * of this bit.
+ *
+ * So far, only the firmware for the X60/X61 non-tablet versions
+ * seems to support this (firmware TP-7M).
+ *
* TPACPI_FAN_WR_ACPI_FANS:
* ThinkPad X31, X40, X41. Not available in the X60.
*
@@ -6187,6 +6368,8 @@ enum { /* Fan control constants */
fan_status_offset = 0x2f, /* EC register 0x2f */
fan_rpm_offset = 0x84, /* EC register 0x84: LSB, 0x85 MSB (RPM)
* 0x84 must be read before 0x85 */
+ fan_select_offset = 0x31, /* EC register 0x31 (Firmware 7M)
+ bit 0 selects which fan is active */
TP_EC_FAN_FULLSPEED = 0x40, /* EC fan mode: full speed */
TP_EC_FAN_AUTO = 0x80, /* EC fan mode: auto fan control */
@@ -6249,30 +6432,18 @@ TPACPI_HANDLE(sfan, ec, "SFAN", /* 570 */
* We assume 0x07 really means auto mode while this quirk is active,
* as this is far more likely than the ThinkPad being in level 7,
* which is only used by the firmware during thermal emergencies.
+ *
+ * Enable for TP-1Y (T43), TP-78 (R51e), TP-76 (R52),
+ * TP-70 (T43, R52), which are known to be buggy.
*/
-static void fan_quirk1_detect(void)
+static void fan_quirk1_setup(void)
{
- /* In some ThinkPads, neither the EC nor the ACPI
- * DSDT initialize the HFSP register, and it ends up
- * being initially set to 0x07 when it *could* be
- * either 0x07 or 0x80.
- *
- * Enable for TP-1Y (T43), TP-78 (R51e),
- * TP-76 (R52), TP-70 (T43, R52), which are known
- * to be buggy. */
if (fan_control_initial_status == 0x07) {
- switch (thinkpad_id.ec_model) {
- case 0x5931: /* TP-1Y */
- case 0x3837: /* TP-78 */
- case 0x3637: /* TP-76 */
- case 0x3037: /* TP-70 */
- printk(TPACPI_NOTICE
- "fan_init: initial fan status is unknown, "
- "assuming it is in auto mode\n");
- tp_features.fan_ctrl_status_undef = 1;
- ;;
- }
+ printk(TPACPI_NOTICE
+ "fan_init: initial fan status is unknown, "
+ "assuming it is in auto mode\n");
+ tp_features.fan_ctrl_status_undef = 1;
}
}
@@ -6292,6 +6463,38 @@ static void fan_quirk1_handle(u8 *fan_status)
}
}
+/* Select main fan on X60/X61, NOOP on others */
+static bool fan_select_fan1(void)
+{
+ if (tp_features.second_fan) {
+ u8 val;
+
+ if (ec_read(fan_select_offset, &val) < 0)
+ return false;
+ val &= 0xFEU;
+ if (ec_write(fan_select_offset, val) < 0)
+ return false;
+ }
+ return true;
+}
+
+/* Select secondary fan on X60/X61 */
+static bool fan_select_fan2(void)
+{
+ u8 val;
+
+ if (!tp_features.second_fan)
+ return false;
+
+ if (ec_read(fan_select_offset, &val) < 0)
+ return false;
+ val |= 0x01U;
+ if (ec_write(fan_select_offset, val) < 0)
+ return false;
+
+ return true;
+}
+
/*
* Call with fan_mutex held
*/
@@ -6369,6 +6572,8 @@ static int fan_get_speed(unsigned int *speed)
switch (fan_status_access_mode) {
case TPACPI_FAN_RD_TPEC:
/* all except 570, 600e/x, 770e, 770x */
+ if (unlikely(!fan_select_fan1()))
+ return -EIO;
if (unlikely(!acpi_ec_read(fan_rpm_offset, &lo) ||
!acpi_ec_read(fan_rpm_offset + 1, &hi)))
return -EIO;
@@ -6385,6 +6590,34 @@ static int fan_get_speed(unsigned int *speed)
return 0;
}
+static int fan2_get_speed(unsigned int *speed)
+{
+ u8 hi, lo;
+ bool rc;
+
+ switch (fan_status_access_mode) {
+ case TPACPI_FAN_RD_TPEC:
+ /* all except 570, 600e/x, 770e, 770x */
+ if (unlikely(!fan_select_fan2()))
+ return -EIO;
+ rc = !acpi_ec_read(fan_rpm_offset, &lo) ||
+ !acpi_ec_read(fan_rpm_offset + 1, &hi);
+ fan_select_fan1(); /* play it safe */
+ if (rc)
+ return -EIO;
+
+ if (likely(speed))
+ *speed = (hi << 8) | lo;
+
+ break;
+
+ default:
+ return -ENXIO;
+ }
+
+ return 0;
+}
+
static int fan_set_level(int level)
{
if (!fan_control_allowed)
@@ -6790,6 +7023,25 @@ static struct device_attribute dev_attr_fan_fan1_input =
__ATTR(fan1_input, S_IRUGO,
fan_fan1_input_show, NULL);
+/* sysfs fan fan2_input ------------------------------------------------ */
+static ssize_t fan_fan2_input_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ int res;
+ unsigned int speed;
+
+ res = fan2_get_speed(&speed);
+ if (res < 0)
+ return res;
+
+ return snprintf(buf, PAGE_SIZE, "%u\n", speed);
+}
+
+static struct device_attribute dev_attr_fan_fan2_input =
+ __ATTR(fan2_input, S_IRUGO,
+ fan_fan2_input_show, NULL);
+
/* sysfs fan fan_watchdog (hwmon driver) ------------------------------- */
static ssize_t fan_fan_watchdog_show(struct device_driver *drv,
char *buf)
@@ -6823,6 +7075,7 @@ static DRIVER_ATTR(fan_watchdog, S_IWUSR | S_IRUGO,
static struct attribute *fan_attributes[] = {
&dev_attr_fan_pwm1_enable.attr, &dev_attr_fan_pwm1.attr,
&dev_attr_fan_fan1_input.attr,
+ NULL, /* for fan2_input */
NULL
};
@@ -6830,9 +7083,36 @@ static const struct attribute_group fan_attr_group = {
.attrs = fan_attributes,
};
+#define TPACPI_FAN_Q1 0x0001 /* Uninitialized HFSP */
+#define TPACPI_FAN_2FAN 0x0002 /* EC 0x31 bit 0 selects fan2 */
+
+#define TPACPI_FAN_QI(__id1, __id2, __quirks) \
+ { .vendor = PCI_VENDOR_ID_IBM, \
+ .bios = TPACPI_MATCH_ANY, \
+ .ec = TPID(__id1, __id2), \
+ .quirks = __quirks }
+
+#define TPACPI_FAN_QL(__id1, __id2, __quirks) \
+ { .vendor = PCI_VENDOR_ID_LENOVO, \
+ .bios = TPACPI_MATCH_ANY, \
+ .ec = TPID(__id1, __id2), \
+ .quirks = __quirks }
+
+static const struct tpacpi_quirk fan_quirk_table[] __initconst = {
+ TPACPI_FAN_QI('1', 'Y', TPACPI_FAN_Q1),
+ TPACPI_FAN_QI('7', '8', TPACPI_FAN_Q1),
+ TPACPI_FAN_QI('7', '6', TPACPI_FAN_Q1),
+ TPACPI_FAN_QI('7', '0', TPACPI_FAN_Q1),
+ TPACPI_FAN_QL('7', 'M', TPACPI_FAN_2FAN),
+};
+
+#undef TPACPI_FAN_QL
+#undef TPACPI_FAN_QI
+
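As a sanity check on the table above (not part of the patch): these entries replace the removed switch on thinkpad_id.ec_model, which only lines up if TPID() packs the two EC id characters with the second character in the high byte, the same layout get_thinkpad_model_data() uses further down. A one-line sketch of that assumed packing:

	/* the removed "case 0x5931" (TP-1Y) corresponds to TPACPI_FAN_QI('1', 'Y', ...) */
	unsigned int ec_model = '1' | ('Y' << 8);	/* == 0x5931, assuming TPID(c1, c2) == c1 | (c2 << 8) */
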
static int __init fan_init(struct ibm_init_struct *iibm)
{
int rc;
+ unsigned long quirks;
vdbg_printk(TPACPI_DBG_INIT | TPACPI_DBG_FAN,
"initializing fan subdriver\n");
@@ -6843,12 +7123,16 @@ static int __init fan_init(struct ibm_init_struct *iibm)
fan_control_commands = 0;
fan_watchdog_maxinterval = 0;
tp_features.fan_ctrl_status_undef = 0;
+ tp_features.second_fan = 0;
fan_control_desired_level = 7;
TPACPI_ACPIHANDLE_INIT(fans);
TPACPI_ACPIHANDLE_INIT(gfan);
TPACPI_ACPIHANDLE_INIT(sfan);
+ quirks = tpacpi_check_quirks(fan_quirk_table,
+ ARRAY_SIZE(fan_quirk_table));
+
if (gfan_handle) {
/* 570, 600e/x, 770e, 770x */
fan_status_access_mode = TPACPI_FAN_RD_ACPI_GFAN;
@@ -6858,7 +7142,13 @@ static int __init fan_init(struct ibm_init_struct *iibm)
if (likely(acpi_ec_read(fan_status_offset,
&fan_control_initial_status))) {
fan_status_access_mode = TPACPI_FAN_RD_TPEC;
- fan_quirk1_detect();
+ if (quirks & TPACPI_FAN_Q1)
+ fan_quirk1_setup();
+ if (quirks & TPACPI_FAN_2FAN) {
+ tp_features.second_fan = 1;
+ dbg_printk(TPACPI_DBG_INIT | TPACPI_DBG_FAN,
+ "secondary fan support enabled\n");
+ }
} else {
printk(TPACPI_ERR
"ThinkPad ACPI EC access misbehaving, "
@@ -6914,6 +7204,11 @@ static int __init fan_init(struct ibm_init_struct *iibm)
if (fan_status_access_mode != TPACPI_FAN_NONE ||
fan_control_access_mode != TPACPI_FAN_WR_NONE) {
+ if (tp_features.second_fan) {
+ /* attach second fan tachometer */
+ fan_attributes[ARRAY_SIZE(fan_attributes)-2] =
+ &dev_attr_fan_fan2_input.attr;
+ }
rc = sysfs_create_group(&tpacpi_sensors_pdev->dev.kobj,
&fan_attr_group);
if (rc < 0)
@@ -7385,6 +7680,24 @@ err_out:
/* Probing */
+static bool __pure __init tpacpi_is_fw_digit(const char c)
+{
+ return (c >= '0' && c <= '9') || (c >= 'A' && c <= 'Z');
+}
+
+/* Most models: xxyTkkWW (#.##c); ancient 570/600 and -SL models lack the (#.##c) */
+static bool __pure __init tpacpi_is_valid_fw_id(const char* const s,
+ const char t)
+{
+ return s && strlen(s) >= 8 &&
+ tpacpi_is_fw_digit(s[0]) &&
+ tpacpi_is_fw_digit(s[1]) &&
+ s[2] == t && s[3] == 'T' &&
+ tpacpi_is_fw_digit(s[4]) &&
+ tpacpi_is_fw_digit(s[5]) &&
+ s[6] == 'W' && s[7] == 'W';
+}
+
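To make the "xxyTkkWW" pattern concrete (not part of the patch), take a hypothetical BIOS id string and apply the packing that get_thinkpad_model_data() uses below; the string itself is made up:

	#include <stdint.h>

	/* hypothetical firmware id; "xx" = "12", "kk" = "34" */
	static const char s[] = "12ET34WW (1.23 )";

	/* tpacpi_is_valid_fw_id(s, 'E') accepts it: digits, 'E', 'T', digits, "WW" */
	static const uint16_t bios_model   = '1' | ('2' << 8);	/* 0x3231 */
	static const uint16_t bios_release = ('3' << 8) | '4';	/* 0x3334 */
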
/* returns 0 - probe ok, or < 0 - probe error.
* Probe ok doesn't mean thinkpad found.
* On error, kfree() cleanup on tp->* is not performed, caller must do it */
@@ -7411,10 +7724,15 @@ static int __must_check __init get_thinkpad_model_data(
tp->bios_version_str = kstrdup(s, GFP_KERNEL);
if (s && !tp->bios_version_str)
return -ENOMEM;
- if (!tp->bios_version_str)
+
+ /* Really ancient ThinkPad 240X will fail this, which is fine */
+ if (!tpacpi_is_valid_fw_id(tp->bios_version_str, 'E'))
return 0;
+
tp->bios_model = tp->bios_version_str[0]
| (tp->bios_version_str[1] << 8);
+ tp->bios_release = (tp->bios_version_str[4] << 8)
+ | tp->bios_version_str[5];
/*
* ThinkPad T23 or newer, A31 or newer, R50e or newer,
@@ -7433,8 +7751,21 @@ static int __must_check __init get_thinkpad_model_data(
tp->ec_version_str = kstrdup(ec_fw_string, GFP_KERNEL);
if (!tp->ec_version_str)
return -ENOMEM;
- tp->ec_model = ec_fw_string[0]
- | (ec_fw_string[1] << 8);
+
+ if (tpacpi_is_valid_fw_id(ec_fw_string, 'H')) {
+ tp->ec_model = ec_fw_string[0]
+ | (ec_fw_string[1] << 8);
+ tp->ec_release = (ec_fw_string[4] << 8)
+ | ec_fw_string[5];
+ } else {
+ printk(TPACPI_NOTICE
+ "ThinkPad firmware release %s "
+ "doesn't match the known patterns\n",
+ ec_fw_string);
+ printk(TPACPI_NOTICE
+ "please report this to %s\n",
+ TPACPI_MAIL);
+ }
break;
}
}
diff --git a/drivers/pnp/pnpacpi/rsparser.c b/drivers/pnp/pnpacpi/rsparser.c
index 7f207f335be..ef3a2cd3a7a 100644
--- a/drivers/pnp/pnpacpi/rsparser.c
+++ b/drivers/pnp/pnpacpi/rsparser.c
@@ -287,6 +287,25 @@ static void pnpacpi_parse_allocated_address_space(struct pnp_dev *dev,
ACPI_DECODE_16);
}
+static void pnpacpi_parse_allocated_ext_address_space(struct pnp_dev *dev,
+ struct acpi_resource *res)
+{
+ struct acpi_resource_extended_address64 *p = &res->data.ext_address64;
+
+ if (p->producer_consumer == ACPI_PRODUCER)
+ return;
+
+ if (p->resource_type == ACPI_MEMORY_RANGE)
+ pnpacpi_parse_allocated_memresource(dev,
+ p->minimum, p->address_length,
+ p->info.mem.write_protect);
+ else if (p->resource_type == ACPI_IO_RANGE)
+ pnpacpi_parse_allocated_ioresource(dev,
+ p->minimum, p->address_length,
+ p->granularity == 0xfff ? ACPI_DECODE_10 :
+ ACPI_DECODE_16);
+}
+
static acpi_status pnpacpi_allocated_resource(struct acpi_resource *res,
void *data)
{
@@ -400,8 +419,7 @@ static acpi_status pnpacpi_allocated_resource(struct acpi_resource *res,
break;
case ACPI_RESOURCE_TYPE_EXTENDED_ADDRESS64:
- if (res->data.ext_address64.producer_consumer == ACPI_PRODUCER)
- return AE_OK;
+ pnpacpi_parse_allocated_ext_address_space(dev, res);
break;
case ACPI_RESOURCE_TYPE_EXTENDED_IRQ:
@@ -630,6 +648,28 @@ static __init void pnpacpi_parse_address_option(struct pnp_dev *dev,
IORESOURCE_IO_FIXED);
}
+static __init void pnpacpi_parse_ext_address_option(struct pnp_dev *dev,
+ unsigned int option_flags,
+ struct acpi_resource *r)
+{
+ struct acpi_resource_extended_address64 *p = &r->data.ext_address64;
+ unsigned char flags = 0;
+
+ if (p->address_length == 0)
+ return;
+
+ if (p->resource_type == ACPI_MEMORY_RANGE) {
+ if (p->info.mem.write_protect == ACPI_READ_WRITE_MEMORY)
+ flags = IORESOURCE_MEM_WRITEABLE;
+ pnp_register_mem_resource(dev, option_flags, p->minimum,
+ p->minimum, 0, p->address_length,
+ flags);
+ } else if (p->resource_type == ACPI_IO_RANGE)
+ pnp_register_port_resource(dev, option_flags, p->minimum,
+ p->minimum, 0, p->address_length,
+ IORESOURCE_IO_FIXED);
+}
+
struct acpipnp_parse_option_s {
struct pnp_dev *dev;
unsigned int option_flags;
@@ -711,6 +751,7 @@ static __init acpi_status pnpacpi_option_resource(struct acpi_resource *res,
break;
case ACPI_RESOURCE_TYPE_EXTENDED_ADDRESS64:
+ pnpacpi_parse_ext_address_option(dev, option_flags, res);
break;
case ACPI_RESOURCE_TYPE_EXTENDED_IRQ:
@@ -765,6 +806,7 @@ static int pnpacpi_supported_resource(struct acpi_resource *res)
case ACPI_RESOURCE_TYPE_ADDRESS16:
case ACPI_RESOURCE_TYPE_ADDRESS32:
case ACPI_RESOURCE_TYPE_ADDRESS64:
+ case ACPI_RESOURCE_TYPE_EXTENDED_ADDRESS64:
case ACPI_RESOURCE_TYPE_EXTENDED_IRQ:
return 1;
}
diff --git a/drivers/power/Kconfig b/drivers/power/Kconfig
index 33da1127992..7eda34838bf 100644
--- a/drivers/power/Kconfig
+++ b/drivers/power/Kconfig
@@ -82,6 +82,14 @@ config BATTERY_DA9030
Say Y here to enable support for batteries charger integrated into
DA9030 PMIC.
+config BATTERY_MAX17040
+ tristate "Maxim MAX17040 Fuel Gauge"
+ depends on I2C
+ help
+	  The MAX17040 is a fuel-gauge system for lithium-ion (Li+) batteries
+	  used in handheld and portable equipment. It is configured to
+	  operate with a single lithium cell.
+
config CHARGER_PCF50633
tristate "NXP PCF50633 MBC"
depends on MFD_PCF50633
diff --git a/drivers/power/Makefile b/drivers/power/Makefile
index 2fcf41d13e5..daf3179689a 100644
--- a/drivers/power/Makefile
+++ b/drivers/power/Makefile
@@ -25,4 +25,5 @@ obj-$(CONFIG_BATTERY_TOSA) += tosa_battery.o
obj-$(CONFIG_BATTERY_WM97XX) += wm97xx_battery.o
obj-$(CONFIG_BATTERY_BQ27x00) += bq27x00_battery.o
obj-$(CONFIG_BATTERY_DA9030) += da9030_battery.o
-obj-$(CONFIG_CHARGER_PCF50633)	+= pcf50633-charger.o
\ No newline at end of file
+obj-$(CONFIG_BATTERY_MAX17040) += max17040_battery.o
+obj-$(CONFIG_CHARGER_PCF50633) += pcf50633-charger.o
diff --git a/drivers/power/da9030_battery.c b/drivers/power/da9030_battery.c
index 1662bb0f23a..3364198134a 100644
--- a/drivers/power/da9030_battery.c
+++ b/drivers/power/da9030_battery.c
@@ -22,8 +22,6 @@
#include <linux/debugfs.h>
#include <linux/seq_file.h>
-#define DA9030_STATUS_CHDET (1 << 3)
-
#define DA9030_FAULT_LOG 0x0a
#define DA9030_FAULT_LOG_OVER_TEMP (1 << 7)
#define DA9030_FAULT_LOG_VBAT_OVER (1 << 4)
@@ -244,6 +242,8 @@ static void da9030_set_charge(struct da9030_charger *charger, int on)
}
da903x_write(charger->master, DA9030_CHARGE_CONTROL, val);
+
+ power_supply_changed(&charger->psy);
}
static void da9030_charger_check_state(struct da9030_charger *charger)
@@ -258,6 +258,12 @@ static void da9030_charger_check_state(struct da9030_charger *charger)
da9030_set_charge(charger, 1);
}
} else {
+ /* Charger has been pulled out */
+ if (!charger->chdet) {
+ da9030_set_charge(charger, 0);
+ return;
+ }
+
if (charger->adc.vbat_res >=
charger->thresholds.vbat_charge_stop) {
da9030_set_charge(charger, 0);
@@ -395,13 +401,11 @@ static int da9030_battery_event(struct notifier_block *nb, unsigned long event,
{
struct da9030_charger *charger =
container_of(nb, struct da9030_charger, nb);
- int status;
switch (event) {
case DA9030_EVENT_CHDET:
- status = da903x_query_status(charger->master,
- DA9030_STATUS_CHDET);
- da9030_set_charge(charger, status);
+ cancel_delayed_work_sync(&charger->work);
+ schedule_work(&charger->work.work);
break;
case DA9030_EVENT_VBATMON:
da9030_battery_vbat_event(charger);
@@ -565,7 +569,8 @@ static int da9030_battery_remove(struct platform_device *dev)
da903x_unregister_notifier(charger->master, &charger->nb,
DA9030_EVENT_CHDET | DA9030_EVENT_VBATMON |
DA9030_EVENT_CHIOVER | DA9030_EVENT_TBAT);
- cancel_delayed_work(&charger->work);
+ cancel_delayed_work_sync(&charger->work);
+ da9030_set_charge(charger, 0);
power_supply_unregister(&charger->psy);
kfree(charger);
diff --git a/drivers/power/ds2760_battery.c b/drivers/power/ds2760_battery.c
index a52d4a11652..520b5c49ff3 100644
--- a/drivers/power/ds2760_battery.c
+++ b/drivers/power/ds2760_battery.c
@@ -62,6 +62,10 @@ static unsigned int cache_time = 1000;
module_param(cache_time, uint, 0644);
MODULE_PARM_DESC(cache_time, "cache time in milliseconds");
+static unsigned int pmod_enabled;
+module_param(pmod_enabled, bool, 0644);
+MODULE_PARM_DESC(pmod_enabled, "PMOD enable bit");
+
/* Some batteries have their rated capacity stored a N * 10 mAh, while
* others use an index into this table. */
static int rated_capacities[] = {
@@ -259,6 +263,17 @@ static void ds2760_battery_update_status(struct ds2760_device_info *di)
power_supply_changed(&di->bat);
}
+static void ds2760_battery_write_status(struct ds2760_device_info *di,
+ char status)
+{
+ if (status == di->raw[DS2760_STATUS_REG])
+ return;
+
+ w1_ds2760_write(di->w1_dev, &status, DS2760_STATUS_WRITE_REG, 1);
+ w1_ds2760_store_eeprom(di->w1_dev, DS2760_EEPROM_BLOCK1);
+ w1_ds2760_recall_eeprom(di->w1_dev, DS2760_EEPROM_BLOCK1);
+}
+
static void ds2760_battery_work(struct work_struct *work)
{
struct ds2760_device_info *di = container_of(work,
@@ -342,9 +357,9 @@ static enum power_supply_property ds2760_battery_props[] = {
static int ds2760_battery_probe(struct platform_device *pdev)
{
+ char status;
int retval = 0;
struct ds2760_device_info *di;
- struct ds2760_platform_data *pdata;
di = kzalloc(sizeof(*di), GFP_KERNEL);
if (!di) {
@@ -354,14 +369,13 @@ static int ds2760_battery_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, di);
- pdata = pdev->dev.platform_data;
- di->dev = &pdev->dev;
- di->w1_dev = pdev->dev.parent;
- di->bat.name = dev_name(&pdev->dev);
- di->bat.type = POWER_SUPPLY_TYPE_BATTERY;
- di->bat.properties = ds2760_battery_props;
- di->bat.num_properties = ARRAY_SIZE(ds2760_battery_props);
- di->bat.get_property = ds2760_battery_get_property;
+ di->dev = &pdev->dev;
+ di->w1_dev = pdev->dev.parent;
+ di->bat.name = dev_name(&pdev->dev);
+ di->bat.type = POWER_SUPPLY_TYPE_BATTERY;
+ di->bat.properties = ds2760_battery_props;
+ di->bat.num_properties = ARRAY_SIZE(ds2760_battery_props);
+ di->bat.get_property = ds2760_battery_get_property;
di->bat.external_power_changed =
ds2760_battery_external_power_changed;
@@ -373,6 +387,16 @@ static int ds2760_battery_probe(struct platform_device *pdev)
goto batt_failed;
}
+ /* enable sleep mode feature */
+ ds2760_battery_read_status(di);
+ status = di->raw[DS2760_STATUS_REG];
+ if (pmod_enabled)
+ status |= DS2760_STATUS_PMOD;
+ else
+ status &= ~DS2760_STATUS_PMOD;
+
+ ds2760_battery_write_status(di, status);
+
INIT_DELAYED_WORK(&di->monitor_work, ds2760_battery_work);
di->monitor_wqueue = create_singlethread_workqueue(dev_name(&pdev->dev));
if (!di->monitor_wqueue) {
diff --git a/drivers/power/max17040_battery.c b/drivers/power/max17040_battery.c
new file mode 100644
index 00000000000..87b98bf27ae
--- /dev/null
+++ b/drivers/power/max17040_battery.c
@@ -0,0 +1,309 @@
+/*
+ * max17040_battery.c
+ * fuel-gauge systems for lithium-ion (Li+) batteries
+ *
+ * Copyright (C) 2009 Samsung Electronics
+ * Minkyu Kang <mk7.kang@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/mutex.h>
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/delay.h>
+#include <linux/power_supply.h>
+#include <linux/max17040_battery.h>
+
+#define MAX17040_VCELL_MSB 0x02
+#define MAX17040_VCELL_LSB 0x03
+#define MAX17040_SOC_MSB 0x04
+#define MAX17040_SOC_LSB 0x05
+#define MAX17040_MODE_MSB 0x06
+#define MAX17040_MODE_LSB 0x07
+#define MAX17040_VER_MSB 0x08
+#define MAX17040_VER_LSB 0x09
+#define MAX17040_RCOMP_MSB 0x0C
+#define MAX17040_RCOMP_LSB 0x0D
+#define MAX17040_CMD_MSB 0xFE
+#define MAX17040_CMD_LSB 0xFF
+
+#define MAX17040_DELAY 1000
+#define MAX17040_BATTERY_FULL 95
+
+struct max17040_chip {
+ struct i2c_client *client;
+ struct delayed_work work;
+ struct power_supply battery;
+ struct max17040_platform_data *pdata;
+
+ /* State Of Connect */
+ int online;
+ /* battery voltage */
+ int vcell;
+ /* battery capacity */
+ int soc;
+	/* charging status (POWER_SUPPLY_STATUS_*) */
+ int status;
+};
+
+static int max17040_get_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ struct max17040_chip *chip = container_of(psy,
+ struct max17040_chip, battery);
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_STATUS:
+ val->intval = chip->status;
+ break;
+ case POWER_SUPPLY_PROP_ONLINE:
+ val->intval = chip->online;
+ break;
+ case POWER_SUPPLY_PROP_VOLTAGE_NOW:
+ val->intval = chip->vcell;
+ break;
+ case POWER_SUPPLY_PROP_CAPACITY:
+ val->intval = chip->soc;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int max17040_write_reg(struct i2c_client *client, int reg, u8 value)
+{
+ int ret;
+
+ ret = i2c_smbus_write_byte_data(client, reg, value);
+
+ if (ret < 0)
+ dev_err(&client->dev, "%s: err %d\n", __func__, ret);
+
+ return ret;
+}
+
+static int max17040_read_reg(struct i2c_client *client, int reg)
+{
+ int ret;
+
+ ret = i2c_smbus_read_byte_data(client, reg);
+
+ if (ret < 0)
+ dev_err(&client->dev, "%s: err %d\n", __func__, ret);
+
+ return ret;
+}
+
+static void max17040_reset(struct i2c_client *client)
+{
+ max17040_write_reg(client, MAX17040_CMD_MSB, 0x54);
+ max17040_write_reg(client, MAX17040_CMD_LSB, 0x00);
+}
+
+static void max17040_get_vcell(struct i2c_client *client)
+{
+ struct max17040_chip *chip = i2c_get_clientdata(client);
+ u8 msb;
+ u8 lsb;
+
+ msb = max17040_read_reg(client, MAX17040_VCELL_MSB);
+ lsb = max17040_read_reg(client, MAX17040_VCELL_LSB);
+
+ chip->vcell = (msb << 4) + (lsb >> 4);
+}
+
+static void max17040_get_soc(struct i2c_client *client)
+{
+ struct max17040_chip *chip = i2c_get_clientdata(client);
+ u8 msb;
+ u8 lsb;
+
+ msb = max17040_read_reg(client, MAX17040_SOC_MSB);
+ lsb = max17040_read_reg(client, MAX17040_SOC_LSB);
+
+ chip->soc = msb;
+}
+
+static void max17040_get_version(struct i2c_client *client)
+{
+ u8 msb;
+ u8 lsb;
+
+ msb = max17040_read_reg(client, MAX17040_VER_MSB);
+ lsb = max17040_read_reg(client, MAX17040_VER_LSB);
+
+ dev_info(&client->dev, "MAX17040 Fuel-Gauge Ver %d%d\n", msb, lsb);
+}
+
+static void max17040_get_online(struct i2c_client *client)
+{
+ struct max17040_chip *chip = i2c_get_clientdata(client);
+
+ if (chip->pdata->battery_online)
+ chip->online = chip->pdata->battery_online();
+ else
+ chip->online = 1;
+}
+
+static void max17040_get_status(struct i2c_client *client)
+{
+ struct max17040_chip *chip = i2c_get_clientdata(client);
+
+ if (!chip->pdata->charger_online || !chip->pdata->charger_enable) {
+ chip->status = POWER_SUPPLY_STATUS_UNKNOWN;
+ return;
+ }
+
+ if (chip->pdata->charger_online()) {
+ if (chip->pdata->charger_enable())
+ chip->status = POWER_SUPPLY_STATUS_CHARGING;
+ else
+ chip->status = POWER_SUPPLY_STATUS_NOT_CHARGING;
+ } else {
+ chip->status = POWER_SUPPLY_STATUS_DISCHARGING;
+ }
+
+ if (chip->soc > MAX17040_BATTERY_FULL)
+ chip->status = POWER_SUPPLY_STATUS_FULL;
+}
+
+static void max17040_work(struct work_struct *work)
+{
+ struct max17040_chip *chip;
+
+ chip = container_of(work, struct max17040_chip, work.work);
+
+ max17040_get_vcell(chip->client);
+ max17040_get_soc(chip->client);
+ max17040_get_online(chip->client);
+ max17040_get_status(chip->client);
+
+ schedule_delayed_work(&chip->work, MAX17040_DELAY);
+}
+
+static enum power_supply_property max17040_battery_props[] = {
+ POWER_SUPPLY_PROP_STATUS,
+ POWER_SUPPLY_PROP_ONLINE,
+ POWER_SUPPLY_PROP_VOLTAGE_NOW,
+ POWER_SUPPLY_PROP_CAPACITY,
+};
+
+static int __devinit max17040_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent);
+ struct max17040_chip *chip;
+ int ret;
+
+ if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE))
+ return -EIO;
+
+ chip = kzalloc(sizeof(*chip), GFP_KERNEL);
+ if (!chip)
+ return -ENOMEM;
+
+ chip->client = client;
+ chip->pdata = client->dev.platform_data;
+
+ i2c_set_clientdata(client, chip);
+
+ chip->battery.name = "battery";
+ chip->battery.type = POWER_SUPPLY_TYPE_BATTERY;
+ chip->battery.get_property = max17040_get_property;
+ chip->battery.properties = max17040_battery_props;
+ chip->battery.num_properties = ARRAY_SIZE(max17040_battery_props);
+
+ ret = power_supply_register(&client->dev, &chip->battery);
+ if (ret) {
+ dev_err(&client->dev, "failed: power supply register\n");
+ i2c_set_clientdata(client, NULL);
+ kfree(chip);
+ return ret;
+ }
+
+ max17040_reset(client);
+ max17040_get_version(client);
+
+ INIT_DELAYED_WORK_DEFERRABLE(&chip->work, max17040_work);
+ schedule_delayed_work(&chip->work, MAX17040_DELAY);
+
+ return 0;
+}
+
+static int __devexit max17040_remove(struct i2c_client *client)
+{
+ struct max17040_chip *chip = i2c_get_clientdata(client);
+
+ power_supply_unregister(&chip->battery);
+ cancel_delayed_work(&chip->work);
+ i2c_set_clientdata(client, NULL);
+ kfree(chip);
+ return 0;
+}
+
+#ifdef CONFIG_PM
+
+static int max17040_suspend(struct i2c_client *client,
+ pm_message_t state)
+{
+ struct max17040_chip *chip = i2c_get_clientdata(client);
+
+ cancel_delayed_work(&chip->work);
+ return 0;
+}
+
+static int max17040_resume(struct i2c_client *client)
+{
+ struct max17040_chip *chip = i2c_get_clientdata(client);
+
+ schedule_delayed_work(&chip->work, MAX17040_DELAY);
+ return 0;
+}
+
+#else
+
+#define max17040_suspend NULL
+#define max17040_resume NULL
+
+#endif /* CONFIG_PM */
+
+static const struct i2c_device_id max17040_id[] = {
+ { "max17040", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, max17040_id);
+
+static struct i2c_driver max17040_i2c_driver = {
+ .driver = {
+ .name = "max17040",
+ },
+ .probe = max17040_probe,
+ .remove = __devexit_p(max17040_remove),
+ .suspend = max17040_suspend,
+ .resume = max17040_resume,
+ .id_table = max17040_id,
+};
+
+static int __init max17040_init(void)
+{
+ return i2c_add_driver(&max17040_i2c_driver);
+}
+module_init(max17040_init);
+
+static void __exit max17040_exit(void)
+{
+ i2c_del_driver(&max17040_i2c_driver);
+}
+module_exit(max17040_exit);
+
+MODULE_AUTHOR("Minkyu Kang <mk7.kang@samsung.com>");
+MODULE_DESCRIPTION("MAX17040 Fuel Gauge");
+MODULE_LICENSE("GPL");
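A hedged board-level usage sketch for the new driver (not part of the patch): the probe path dereferences client->dev.platform_data, so board code is expected to supply a struct max17040_platform_data. The three callbacks shown are inferred from the driver's max17040_get_online()/max17040_get_status() calls, and the 0x36 I2C address is an assumption used for illustration:

	/* hypothetical board file fragment */
	#include <linux/i2c.h>
	#include <linux/max17040_battery.h>

	static int board_charger_online(void)
	{
		return 0;	/* wire up to the real charger-detect signal */
	}

	static struct max17040_platform_data board_max17040_pdata = {
		.battery_online	= NULL,			/* NULL: driver always reports online */
		.charger_online	= board_charger_online,
		.charger_enable	= NULL,			/* NULL: status is reported as UNKNOWN */
	};

	static struct i2c_board_info board_i2c_devs[] __initdata = {
		{
			I2C_BOARD_INFO("max17040", 0x36),	/* address assumed */
			.platform_data = &board_max17040_pdata,
		},
	};

	/* registered from machine init, e.g.:
	 *	i2c_register_board_info(0, board_i2c_devs, ARRAY_SIZE(board_i2c_devs));
	 */

Separately, note that max17040_get_vcell() assembles a 12-bit raw reading ((msb << 4) | (lsb >> 4)) and max17040_get_soc() reports the register MSB directly as the capacity percentage; converting the raw voltage to millivolts is left to the datasheet scale factor and is not done by this driver.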
diff --git a/drivers/rtc/rtc-bfin.c b/drivers/rtc/rtc-bfin.c
index aafd3e6ebb0..a118eb0f1e6 100644
--- a/drivers/rtc/rtc-bfin.c
+++ b/drivers/rtc/rtc-bfin.c
@@ -1,8 +1,8 @@
/*
* Blackfin On-Chip Real Time Clock Driver
- * Supports BF52[257]/BF53[123]/BF53[467]/BF54[24789]
+ * Supports BF51x/BF52x/BF53[123]/BF53[467]/BF54x
*
- * Copyright 2004-2008 Analog Devices Inc.
+ * Copyright 2004-2009 Analog Devices Inc.
*
* Enter bugs at http://blackfin.uclinux.org/
*
@@ -363,7 +363,7 @@ static int __devinit bfin_rtc_probe(struct platform_device *pdev)
struct bfin_rtc *rtc;
struct device *dev = &pdev->dev;
int ret = 0;
- unsigned long timeout;
+ unsigned long timeout = jiffies + HZ;
dev_dbg_stamp(dev);
@@ -374,32 +374,32 @@ static int __devinit bfin_rtc_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, rtc);
device_init_wakeup(dev, 1);
+ /* Register our RTC with the RTC framework */
+ rtc->rtc_dev = rtc_device_register(pdev->name, dev, &bfin_rtc_ops,
+ THIS_MODULE);
+ if (unlikely(IS_ERR(rtc->rtc_dev))) {
+ ret = PTR_ERR(rtc->rtc_dev);
+ goto err;
+ }
+
/* Grab the IRQ and init the hardware */
ret = request_irq(IRQ_RTC, bfin_rtc_interrupt, IRQF_SHARED, pdev->name, dev);
if (unlikely(ret))
- goto err;
+ goto err_reg;
/* sometimes the bootloader touched things, but the write complete was not
* enabled, so let's just do a quick timeout here since the IRQ will not fire ...
*/
- timeout = jiffies + HZ;
while (bfin_read_RTC_ISTAT() & RTC_ISTAT_WRITE_PENDING)
if (time_after(jiffies, timeout))
break;
bfin_rtc_reset(dev, RTC_ISTAT_WRITE_COMPLETE);
bfin_write_RTC_SWCNT(0);
- /* Register our RTC with the RTC framework */
- rtc->rtc_dev = rtc_device_register(pdev->name, dev, &bfin_rtc_ops, THIS_MODULE);
- if (unlikely(IS_ERR(rtc->rtc_dev))) {
- ret = PTR_ERR(rtc->rtc_dev);
- goto err_irq;
- }
-
return 0;
- err_irq:
- free_irq(IRQ_RTC, dev);
- err:
+err_reg:
+ rtc_device_unregister(rtc->rtc_dev);
+err:
kfree(rtc);
return ret;
}
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index 8201387b4da..ef142fd47a8 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -210,13 +210,11 @@ static void sg_put_dev(Sg_device *sdp);
static int sg_allow_access(struct file *filp, unsigned char *cmd)
{
struct sg_fd *sfp = (struct sg_fd *)filp->private_data;
- struct request_queue *q = sfp->parentdp->device->request_queue;
if (sfp->parentdp->device->type == TYPE_SCANNER)
return 0;
- return blk_verify_command(&q->cmd_filter,
- cmd, filp->f_mode & FMODE_WRITE);
+ return blk_verify_command(cmd, filp->f_mode & FMODE_WRITE);
}
static int
diff --git a/drivers/serial/8250_pci.c b/drivers/serial/8250_pci.c
index e371a9c1534..6160e03f410 100644
--- a/drivers/serial/8250_pci.c
+++ b/drivers/serial/8250_pci.c
@@ -398,8 +398,7 @@ static int sbs_init(struct pci_dev *dev)
{
u8 __iomem *p;
- p = ioremap_nocache(pci_resource_start(dev, 0),
- pci_resource_len(dev, 0));
+ p = pci_ioremap_bar(dev, 0);
if (p == NULL)
return -ENOMEM;
@@ -423,8 +422,7 @@ static void __devexit sbs_exit(struct pci_dev *dev)
{
u8 __iomem *p;
- p = ioremap_nocache(pci_resource_start(dev, 0),
- pci_resource_len(dev, 0));
+ p = pci_ioremap_bar(dev, 0);
/* FIXME: What if resource_len < OCT_REG_CR_OFF */
if (p != NULL)
writeb(0, p + OCT_REG_CR_OFF);
@@ -761,6 +759,8 @@ static int pci_netmos_init(struct pci_dev *dev)
/* subdevice 0x00PS means <P> parallel, <S> serial */
unsigned int num_serial = dev->subsystem_device & 0xf;
+ if (dev->device == PCI_DEVICE_ID_NETMOS_9901)
+ return 0;
if (dev->subsystem_vendor == PCI_VENDOR_ID_IBM &&
dev->subsystem_device == 0x0299)
return 0;
@@ -3559,6 +3559,10 @@ static struct pci_device_id serial_pci_tbl[] = {
PCI_VENDOR_ID_IBM, 0x0299,
0, 0, pbn_b0_bt_2_115200 },
+ { PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9901,
+ 0xA000, 0x1000,
+ 0, 0, pbn_b0_1_115200 },
+
/*
* These entries match devices with class COMMUNICATION_SERIAL,
* COMMUNICATION_MODEM or COMMUNICATION_MULTISERIAL
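For context, pci_ioremap_bar() as used in the two hunks above (and in the icom change below) maps an entire BAR and is roughly the open-coded sequence it replaces plus a check that the BAR is a memory resource; this is a hedged sketch of the equivalent, not the authoritative implementation:

	static void __iomem *pci_ioremap_bar_sketch(struct pci_dev *pdev, int bar)
	{
		if (!(pci_resource_flags(pdev, bar) & IORESOURCE_MEM))
			return NULL;	/* refuse to map I/O-port BARs */

		return ioremap_nocache(pci_resource_start(pdev, bar),
				       pci_resource_len(pdev, bar));
	}
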
diff --git a/drivers/serial/icom.c b/drivers/serial/icom.c
index 9f2891c2c4a..cd1b6a45bb8 100644
--- a/drivers/serial/icom.c
+++ b/drivers/serial/icom.c
@@ -1548,8 +1548,7 @@ static int __devinit icom_probe(struct pci_dev *dev,
goto probe_exit1;
}
- icom_adapter->base_addr = ioremap(icom_adapter->base_addr_pci,
- pci_resource_len(dev, 0));
+ icom_adapter->base_addr = pci_ioremap_bar(dev, 0);
if (!icom_adapter->base_addr)
goto probe_exit1;
diff --git a/drivers/serial/jsm/jsm_tty.c b/drivers/serial/jsm/jsm_tty.c
index 107ce2e187b..00f4577d2f7 100644
--- a/drivers/serial/jsm/jsm_tty.c
+++ b/drivers/serial/jsm/jsm_tty.c
@@ -467,7 +467,7 @@ int __devinit jsm_uart_port_init(struct jsm_board *brd)
printk(KERN_INFO "jsm: linemap is full, added device failed\n");
continue;
} else
- set_bit((int)line, linemap);
+ set_bit(line, linemap);
brd->channels[i]->uart_port.line = line;
if (uart_add_one_port (&jsm_uart_driver, &brd->channels[i]->uart_port))
printk(KERN_INFO "jsm: add device failed\n");
@@ -503,7 +503,7 @@ int jsm_remove_uart_port(struct jsm_board *brd)
ch = brd->channels[i];
- clear_bit((int)(ch->uart_port.line), linemap);
+ clear_bit(ch->uart_port.line, linemap);
uart_remove_one_port(&jsm_uart_driver, &brd->channels[i]->uart_port);
}
diff --git a/drivers/serial/serial_txx9.c b/drivers/serial/serial_txx9.c
index 7313c2edcb8..54dd16d66a4 100644
--- a/drivers/serial/serial_txx9.c
+++ b/drivers/serial/serial_txx9.c
@@ -461,6 +461,94 @@ static void serial_txx9_break_ctl(struct uart_port *port, int break_state)
spin_unlock_irqrestore(&up->port.lock, flags);
}
+#if defined(CONFIG_SERIAL_TXX9_CONSOLE) || defined(CONFIG_CONSOLE_POLL)
+/*
+ * Wait for transmitter & holding register to empty
+ */
+static void wait_for_xmitr(struct uart_txx9_port *up)
+{
+ unsigned int tmout = 10000;
+
+ /* Wait up to 10ms for the character(s) to be sent. */
+ while (--tmout &&
+ !(sio_in(up, TXX9_SICISR) & TXX9_SICISR_TXALS))
+ udelay(1);
+
+ /* Wait up to 1s for flow control if necessary */
+ if (up->port.flags & UPF_CONS_FLOW) {
+ tmout = 1000000;
+ while (--tmout &&
+ (sio_in(up, TXX9_SICISR) & TXX9_SICISR_CTSS))
+ udelay(1);
+ }
+}
+#endif
+
+#ifdef CONFIG_CONSOLE_POLL
+/*
+ * Console polling routines for writing and reading from the uart while
+ * in an interrupt or debug context.
+ */
+
+static int serial_txx9_get_poll_char(struct uart_port *port)
+{
+ unsigned int ier;
+ unsigned char c;
+ struct uart_txx9_port *up = (struct uart_txx9_port *)port;
+
+ /*
+ * First save the IER then disable the interrupts
+ */
+ ier = sio_in(up, TXX9_SIDICR);
+ sio_out(up, TXX9_SIDICR, 0);
+
+ while (sio_in(up, TXX9_SIDISR) & TXX9_SIDISR_UVALID)
+ ;
+
+ c = sio_in(up, TXX9_SIRFIFO);
+
+ /*
+ * Finally, clear RX interrupt status
+ * and restore the IER
+ */
+ sio_mask(up, TXX9_SIDISR, TXX9_SIDISR_RDIS);
+ sio_out(up, TXX9_SIDICR, ier);
+ return c;
+}
+
+
+static void serial_txx9_put_poll_char(struct uart_port *port, unsigned char c)
+{
+ unsigned int ier;
+ struct uart_txx9_port *up = (struct uart_txx9_port *)port;
+
+ /*
+ * First save the IER then disable the interrupts
+ */
+ ier = sio_in(up, TXX9_SIDICR);
+ sio_out(up, TXX9_SIDICR, 0);
+
+ wait_for_xmitr(up);
+ /*
+ * Send the character out.
+ * If a LF, also do CR...
+ */
+ sio_out(up, TXX9_SITFIFO, c);
+ if (c == 10) {
+ wait_for_xmitr(up);
+ sio_out(up, TXX9_SITFIFO, 13);
+ }
+
+ /*
+ * Finally, wait for transmitter to become empty
+ * and restore the IER
+ */
+ wait_for_xmitr(up);
+ sio_out(up, TXX9_SIDICR, ier);
+}
+
+#endif /* CONFIG_CONSOLE_POLL */
+
static int serial_txx9_startup(struct uart_port *port)
{
struct uart_txx9_port *up = (struct uart_txx9_port *)port;
@@ -781,6 +869,10 @@ static struct uart_ops serial_txx9_pops = {
.release_port = serial_txx9_release_port,
.request_port = serial_txx9_request_port,
.config_port = serial_txx9_config_port,
+#ifdef CONFIG_CONSOLE_POLL
+ .poll_get_char = serial_txx9_get_poll_char,
+ .poll_put_char = serial_txx9_put_poll_char,
+#endif
};
static struct uart_txx9_port serial_txx9_ports[UART_NR];
@@ -803,27 +895,6 @@ static void __init serial_txx9_register_ports(struct uart_driver *drv,
#ifdef CONFIG_SERIAL_TXX9_CONSOLE
-/*
- * Wait for transmitter & holding register to empty
- */
-static inline void wait_for_xmitr(struct uart_txx9_port *up)
-{
- unsigned int tmout = 10000;
-
- /* Wait up to 10ms for the character(s) to be sent. */
- while (--tmout &&
- !(sio_in(up, TXX9_SICISR) & TXX9_SICISR_TXALS))
- udelay(1);
-
- /* Wait up to 1s for flow control if necessary */
- if (up->port.flags & UPF_CONS_FLOW) {
- tmout = 1000000;
- while (--tmout &&
- (sio_in(up, TXX9_SICISR) & TXX9_SICISR_CTSS))
- udelay(1);
- }
-}
-
static void serial_txx9_console_putchar(struct uart_port *port, int ch)
{
struct uart_txx9_port *up = (struct uart_txx9_port *)port;
diff --git a/drivers/spi/omap_uwire.c b/drivers/spi/omap_uwire.c
index aa90ddb3706..8980a5640bd 100644
--- a/drivers/spi/omap_uwire.c
+++ b/drivers/spi/omap_uwire.c
@@ -514,6 +514,8 @@ static int __init uwire_probe(struct platform_device *pdev)
/* the spi->mode bits understood by this driver: */
master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
+ master->flags = SPI_MASTER_HALF_DUPLEX;
+
master->bus_num = 2; /* "official" */
master->num_chipselect = 4;
master->setup = uwire_setup;
diff --git a/drivers/spi/spi_bitbang.c b/drivers/spi/spi_bitbang.c
index 2a5abc08e85..f1db395dd88 100644
--- a/drivers/spi/spi_bitbang.c
+++ b/drivers/spi/spi_bitbang.c
@@ -258,6 +258,11 @@ static void bitbang_work(struct work_struct *work)
struct spi_bitbang *bitbang =
container_of(work, struct spi_bitbang, work);
unsigned long flags;
+ int do_setup = -1;
+ int (*setup_transfer)(struct spi_device *,
+ struct spi_transfer *);
+
+ setup_transfer = bitbang->setup_transfer;
spin_lock_irqsave(&bitbang->lock, flags);
bitbang->busy = 1;
@@ -269,8 +274,6 @@ static void bitbang_work(struct work_struct *work)
unsigned tmp;
unsigned cs_change;
int status;
- int (*setup_transfer)(struct spi_device *,
- struct spi_transfer *);
m = container_of(bitbang->queue.next, struct spi_message,
queue);
@@ -287,19 +290,19 @@ static void bitbang_work(struct work_struct *work)
tmp = 0;
cs_change = 1;
status = 0;
- setup_transfer = NULL;
list_for_each_entry (t, &m->transfers, transfer_list) {
- /* override or restore speed and wordsize */
- if (t->speed_hz || t->bits_per_word) {
- setup_transfer = bitbang->setup_transfer;
+ /* override speed or wordsize? */
+ if (t->speed_hz || t->bits_per_word)
+ do_setup = 1;
+
+ /* init (-1) or override (1) transfer params */
+ if (do_setup != 0) {
if (!setup_transfer) {
status = -ENOPROTOOPT;
break;
}
- }
- if (setup_transfer) {
status = setup_transfer(spi, t);
if (status < 0)
break;
@@ -363,9 +366,10 @@ static void bitbang_work(struct work_struct *work)
m->status = status;
m->complete(m->context);
- /* restore speed and wordsize */
- if (setup_transfer)
+ /* restore speed and wordsize if it was overridden */
+ if (do_setup == 1)
setup_transfer(spi, NULL);
+ do_setup = 0;
/* normally deactivate chipselect ... unless no error and
* cs_change has hinted that the next message will probably
diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c
index 5d869c4d3eb..606e7a40a8d 100644
--- a/drivers/spi/spidev.c
+++ b/drivers/spi/spidev.c
@@ -58,15 +58,20 @@ static unsigned long minors[N_SPI_MINORS / BITS_PER_LONG];
/* Bit masks for spi_device.mode management. Note that incorrect
- * settings for CS_HIGH and 3WIRE can cause *lots* of trouble for other
- * devices on a shared bus: CS_HIGH, because this device will be
- * active when it shouldn't be; 3WIRE, because when active it won't
- * behave as it should.
+ * settings for some settings can cause *lots* of trouble for other
+ * devices on a shared bus:
*
- * REVISIT should changing those two modes be privileged?
+ * - CS_HIGH ... this device will be active when it shouldn't be
+ * - 3WIRE ... when active, it won't behave as it should
+ * - NO_CS ... there will be no explicit message boundaries; this
+ * is completely incompatible with the shared bus model
+ * - READY ... transfers may proceed when they shouldn't.
+ *
+ * REVISIT should changing those flags be privileged?
*/
#define SPI_MODE_MASK (SPI_CPHA | SPI_CPOL | SPI_CS_HIGH \
- | SPI_LSB_FIRST | SPI_3WIRE | SPI_LOOP)
+ | SPI_LSB_FIRST | SPI_3WIRE | SPI_LOOP \
+ | SPI_NO_CS | SPI_READY)
struct spidev_data {
dev_t devt;
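For context, spidev validates mode bits requested from userspace against SPI_MODE_MASK in its ioctl handler (outside this hunk), so widening the mask is what lets applications ask for the new NO_CS and READY modes. A hedged userspace sketch, assuming the usual spidev device node and that the installed headers already define SPI_NO_CS:

	#include <stdint.h>
	#include <fcntl.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <linux/spi/spidev.h>

	static int request_no_cs(const char *node)	/* e.g. "/dev/spidev1.0" */
	{
		uint8_t mode = SPI_MODE_0 | SPI_NO_CS;
		int ret, fd = open(node, O_RDWR);

		if (fd < 0)
			return -1;
		/* kernels whose mask lacks SPI_NO_CS reject this with EINVAL */
		ret = ioctl(fd, SPI_IOC_WR_MODE, &mode);
		close(fd);
		return ret;
	}
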
diff --git a/drivers/staging/octeon/Makefile b/drivers/staging/octeon/Makefile
index 3c839e37d37..c0a583cc222 100644
--- a/drivers/staging/octeon/Makefile
+++ b/drivers/staging/octeon/Makefile
@@ -12,7 +12,6 @@
obj-${CONFIG_OCTEON_ETHERNET} := octeon-ethernet.o
octeon-ethernet-objs := ethernet.o
-octeon-ethernet-objs += ethernet-common.o
octeon-ethernet-objs += ethernet-mdio.o
octeon-ethernet-objs += ethernet-mem.o
octeon-ethernet-objs += ethernet-proc.o
diff --git a/drivers/staging/octeon/ethernet-common.c b/drivers/staging/octeon/ethernet-common.c
deleted file mode 100644
index 3e6f5b8cc63..00000000000
--- a/drivers/staging/octeon/ethernet-common.c
+++ /dev/null
@@ -1,328 +0,0 @@
-/**********************************************************************
- * Author: Cavium Networks
- *
- * Contact: support@caviumnetworks.com
- * This file is part of the OCTEON SDK
- *
- * Copyright (c) 2003-2007 Cavium Networks
- *
- * This file is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, Version 2, as
- * published by the Free Software Foundation.
- *
- * This file is distributed in the hope that it will be useful, but
- * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
- * NONINFRINGEMENT. See the GNU General Public License for more
- * details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this file; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- * or visit http://www.gnu.org/licenses/.
- *
- * This file may also be available under a different license from Cavium.
- * Contact Cavium Networks for more information
-**********************************************************************/
-#include <linux/kernel.h>
-#include <linux/mii.h>
-#include <net/dst.h>
-
-#include <asm/atomic.h>
-#include <asm/octeon/octeon.h>
-
-#include "ethernet-defines.h"
-#include "ethernet-tx.h"
-#include "ethernet-mdio.h"
-#include "ethernet-util.h"
-#include "octeon-ethernet.h"
-#include "ethernet-common.h"
-
-#include "cvmx-pip.h"
-#include "cvmx-pko.h"
-#include "cvmx-fau.h"
-#include "cvmx-helper.h"
-
-#include "cvmx-gmxx-defs.h"
-
-/**
- * Get the low level ethernet statistics
- *
- * @dev: Device to get the statistics from
- * Returns Pointer to the statistics
- */
-static struct net_device_stats *cvm_oct_common_get_stats(struct net_device *dev)
-{
- cvmx_pip_port_status_t rx_status;
- cvmx_pko_port_status_t tx_status;
- struct octeon_ethernet *priv = netdev_priv(dev);
-
- if (priv->port < CVMX_PIP_NUM_INPUT_PORTS) {
- if (octeon_is_simulation()) {
- /* The simulator doesn't support statistics */
- memset(&rx_status, 0, sizeof(rx_status));
- memset(&tx_status, 0, sizeof(tx_status));
- } else {
- cvmx_pip_get_port_status(priv->port, 1, &rx_status);
- cvmx_pko_get_port_status(priv->port, 1, &tx_status);
- }
-
- priv->stats.rx_packets += rx_status.inb_packets;
- priv->stats.tx_packets += tx_status.packets;
- priv->stats.rx_bytes += rx_status.inb_octets;
- priv->stats.tx_bytes += tx_status.octets;
- priv->stats.multicast += rx_status.multicast_packets;
- priv->stats.rx_crc_errors += rx_status.inb_errors;
- priv->stats.rx_frame_errors += rx_status.fcs_align_err_packets;
-
- /*
- * The drop counter must be incremented atomically
- * since the RX tasklet also increments it.
- */
-#ifdef CONFIG_64BIT
- atomic64_add(rx_status.dropped_packets,
- (atomic64_t *)&priv->stats.rx_dropped);
-#else
- atomic_add(rx_status.dropped_packets,
- (atomic_t *)&priv->stats.rx_dropped);
-#endif
- }
-
- return &priv->stats;
-}
-
-/**
- * Set the multicast list. Currently unimplemented.
- *
- * @dev: Device to work on
- */
-static void cvm_oct_common_set_multicast_list(struct net_device *dev)
-{
- union cvmx_gmxx_prtx_cfg gmx_cfg;
- struct octeon_ethernet *priv = netdev_priv(dev);
- int interface = INTERFACE(priv->port);
- int index = INDEX(priv->port);
-
- if ((interface < 2)
- && (cvmx_helper_interface_get_mode(interface) !=
- CVMX_HELPER_INTERFACE_MODE_SPI)) {
- union cvmx_gmxx_rxx_adr_ctl control;
- control.u64 = 0;
- control.s.bcst = 1; /* Allow broadcast MAC addresses */
-
- if (dev->mc_list || (dev->flags & IFF_ALLMULTI) ||
- (dev->flags & IFF_PROMISC))
- /* Force accept multicast packets */
- control.s.mcst = 2;
- else
- /* Force reject multicat packets */
- control.s.mcst = 1;
-
- if (dev->flags & IFF_PROMISC)
- /*
- * Reject matches if promisc. Since CAM is
- * shut off, should accept everything.
- */
- control.s.cam_mode = 0;
- else
- /* Filter packets based on the CAM */
- control.s.cam_mode = 1;
-
- gmx_cfg.u64 =
- cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
- cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface),
- gmx_cfg.u64 & ~1ull);
-
- cvmx_write_csr(CVMX_GMXX_RXX_ADR_CTL(index, interface),
- control.u64);
- if (dev->flags & IFF_PROMISC)
- cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM_EN
- (index, interface), 0);
- else
- cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM_EN
- (index, interface), 1);
-
- cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface),
- gmx_cfg.u64);
- }
-}
-
-/**
- * Set the hardware MAC address for a device
- *
- * @dev: Device to change the MAC address for
- * @addr: Address structure to change it too. MAC address is addr + 2.
- * Returns Zero on success
- */
-static int cvm_oct_common_set_mac_address(struct net_device *dev, void *addr)
-{
- struct octeon_ethernet *priv = netdev_priv(dev);
- union cvmx_gmxx_prtx_cfg gmx_cfg;
- int interface = INTERFACE(priv->port);
- int index = INDEX(priv->port);
-
- memcpy(dev->dev_addr, addr + 2, 6);
-
- if ((interface < 2)
- && (cvmx_helper_interface_get_mode(interface) !=
- CVMX_HELPER_INTERFACE_MODE_SPI)) {
- int i;
- uint8_t *ptr = addr;
- uint64_t mac = 0;
- for (i = 0; i < 6; i++)
- mac = (mac << 8) | (uint64_t) (ptr[i + 2]);
-
- gmx_cfg.u64 =
- cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
- cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface),
- gmx_cfg.u64 & ~1ull);
-
- cvmx_write_csr(CVMX_GMXX_SMACX(index, interface), mac);
- cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM0(index, interface),
- ptr[2]);
- cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM1(index, interface),
- ptr[3]);
- cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM2(index, interface),
- ptr[4]);
- cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM3(index, interface),
- ptr[5]);
- cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM4(index, interface),
- ptr[6]);
- cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM5(index, interface),
- ptr[7]);
- cvm_oct_common_set_multicast_list(dev);
- cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface),
- gmx_cfg.u64);
- }
- return 0;
-}
-
-/**
- * Change the link MTU. Unimplemented
- *
- * @dev: Device to change
- * @new_mtu: The new MTU
- *
- * Returns Zero on success
- */
-static int cvm_oct_common_change_mtu(struct net_device *dev, int new_mtu)
-{
- struct octeon_ethernet *priv = netdev_priv(dev);
- int interface = INTERFACE(priv->port);
- int index = INDEX(priv->port);
-#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
- int vlan_bytes = 4;
-#else
- int vlan_bytes = 0;
-#endif
-
- /*
- * Limit the MTU to make sure the ethernet packets are between
- * 64 bytes and 65535 bytes.
- */
- if ((new_mtu + 14 + 4 + vlan_bytes < 64)
- || (new_mtu + 14 + 4 + vlan_bytes > 65392)) {
- pr_err("MTU must be between %d and %d.\n",
- 64 - 14 - 4 - vlan_bytes, 65392 - 14 - 4 - vlan_bytes);
- return -EINVAL;
- }
- dev->mtu = new_mtu;
-
- if ((interface < 2)
- && (cvmx_helper_interface_get_mode(interface) !=
- CVMX_HELPER_INTERFACE_MODE_SPI)) {
- /* Add ethernet header and FCS, and VLAN if configured. */
- int max_packet = new_mtu + 14 + 4 + vlan_bytes;
-
- if (OCTEON_IS_MODEL(OCTEON_CN3XXX)
- || OCTEON_IS_MODEL(OCTEON_CN58XX)) {
- /* Signal errors on packets larger than the MTU */
- cvmx_write_csr(CVMX_GMXX_RXX_FRM_MAX(index, interface),
- max_packet);
- } else {
- /*
- * Set the hardware to truncate packets larger
- * than the MTU and smaller the 64 bytes.
- */
- union cvmx_pip_frm_len_chkx frm_len_chk;
- frm_len_chk.u64 = 0;
- frm_len_chk.s.minlen = 64;
- frm_len_chk.s.maxlen = max_packet;
- cvmx_write_csr(CVMX_PIP_FRM_LEN_CHKX(interface),
- frm_len_chk.u64);
- }
- /*
- * Set the hardware to truncate packets larger than
- * the MTU. The jabber register must be set to a
- * multiple of 8 bytes, so round up.
- */
- cvmx_write_csr(CVMX_GMXX_RXX_JABBER(index, interface),
- (max_packet + 7) & ~7u);
- }
- return 0;
-}
-
-/**
- * Per network device initialization
- *
- * @dev: Device to initialize
- * Returns Zero on success
- */
-int cvm_oct_common_init(struct net_device *dev)
-{
- static int count;
- char mac[8] = { 0x00, 0x00,
- octeon_bootinfo->mac_addr_base[0],
- octeon_bootinfo->mac_addr_base[1],
- octeon_bootinfo->mac_addr_base[2],
- octeon_bootinfo->mac_addr_base[3],
- octeon_bootinfo->mac_addr_base[4],
- octeon_bootinfo->mac_addr_base[5] + count
- };
- struct octeon_ethernet *priv = netdev_priv(dev);
-
- /*
- * Force the interface to use the POW send if always_use_pow
- * was specified or it is in the pow send list.
- */
- if ((pow_send_group != -1)
- && (always_use_pow || strstr(pow_send_list, dev->name)))
- priv->queue = -1;
-
- if (priv->queue != -1) {
- dev->hard_start_xmit = cvm_oct_xmit;
- if (USE_HW_TCPUDP_CHECKSUM)
- dev->features |= NETIF_F_IP_CSUM;
- } else
- dev->hard_start_xmit = cvm_oct_xmit_pow;
- count++;
-
- dev->get_stats = cvm_oct_common_get_stats;
- dev->set_mac_address = cvm_oct_common_set_mac_address;
- dev->set_multicast_list = cvm_oct_common_set_multicast_list;
- dev->change_mtu = cvm_oct_common_change_mtu;
- dev->do_ioctl = cvm_oct_ioctl;
- /* We do our own locking, Linux doesn't need to */
- dev->features |= NETIF_F_LLTX;
- SET_ETHTOOL_OPS(dev, &cvm_oct_ethtool_ops);
-#ifdef CONFIG_NET_POLL_CONTROLLER
- dev->poll_controller = cvm_oct_poll_controller;
-#endif
-
- cvm_oct_mdio_setup_device(dev);
- dev->set_mac_address(dev, mac);
- dev->change_mtu(dev, dev->mtu);
-
- /*
- * Zero out stats for port so we won't mistakenly show
- * counters from the bootloader.
- */
- memset(dev->get_stats(dev), 0, sizeof(struct net_device_stats));
-
- return 0;
-}
-
-void cvm_oct_common_uninit(struct net_device *dev)
-{
- /* Currently nothing to do */
-}
diff --git a/drivers/staging/octeon/ethernet-common.h b/drivers/staging/octeon/ethernet-common.h
deleted file mode 100644
index 2bd9cd76a39..00000000000
--- a/drivers/staging/octeon/ethernet-common.h
+++ /dev/null
@@ -1,29 +0,0 @@
-/*********************************************************************
- * Author: Cavium Networks
- *
- * Contact: support@caviumnetworks.com
- * This file is part of the OCTEON SDK
- *
- * Copyright (c) 2003-2007 Cavium Networks
- *
- * This file is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, Version 2, as
- * published by the Free Software Foundation.
- *
- * This file is distributed in the hope that it will be useful, but
- * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
- * NONINFRINGEMENT. See the GNU General Public License for more
- * details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this file; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- * or visit http://www.gnu.org/licenses/.
- *
- * This file may also be available under a different license from Cavium.
- * Contact Cavium Networks for more information
-*********************************************************************/
-
-int cvm_oct_common_init(struct net_device *dev);
-void cvm_oct_common_uninit(struct net_device *dev);
diff --git a/drivers/staging/octeon/ethernet-defines.h b/drivers/staging/octeon/ethernet-defines.h
index 8f7374e7664..f13131b03c3 100644
--- a/drivers/staging/octeon/ethernet-defines.h
+++ b/drivers/staging/octeon/ethernet-defines.h
@@ -117,6 +117,8 @@
/* Maximum number of packets to process per interrupt. */
#define MAX_RX_PACKETS 120
+/* Maximum number of SKBs to try to free per xmit packet. */
+#define MAX_SKB_TO_FREE 10
#define MAX_OUT_QUEUE_DEPTH 1000
#ifndef CONFIG_SMP
diff --git a/drivers/staging/octeon/ethernet-rgmii.c b/drivers/staging/octeon/ethernet-rgmii.c
index 8579f1670d1..8704133fe12 100644
--- a/drivers/staging/octeon/ethernet-rgmii.c
+++ b/drivers/staging/octeon/ethernet-rgmii.c
@@ -33,7 +33,6 @@
#include "ethernet-defines.h"
#include "octeon-ethernet.h"
-#include "ethernet-common.h"
#include "ethernet-util.h"
#include "cvmx-helper.h"
@@ -265,7 +264,7 @@ static irqreturn_t cvm_oct_rgmii_rml_interrupt(int cpl, void *dev_id)
return return_status;
}
-static int cvm_oct_rgmii_open(struct net_device *dev)
+int cvm_oct_rgmii_open(struct net_device *dev)
{
union cvmx_gmxx_prtx_cfg gmx_cfg;
struct octeon_ethernet *priv = netdev_priv(dev);
@@ -286,7 +285,7 @@ static int cvm_oct_rgmii_open(struct net_device *dev)
return 0;
}
-static int cvm_oct_rgmii_stop(struct net_device *dev)
+int cvm_oct_rgmii_stop(struct net_device *dev)
{
union cvmx_gmxx_prtx_cfg gmx_cfg;
struct octeon_ethernet *priv = netdev_priv(dev);
@@ -305,9 +304,7 @@ int cvm_oct_rgmii_init(struct net_device *dev)
int r;
cvm_oct_common_init(dev);
- dev->open = cvm_oct_rgmii_open;
- dev->stop = cvm_oct_rgmii_stop;
- dev->stop(dev);
+ dev->netdev_ops->ndo_stop(dev);
/*
* Due to GMX errata in CN3XXX series chips, it is necessary
diff --git a/drivers/staging/octeon/ethernet-sgmii.c b/drivers/staging/octeon/ethernet-sgmii.c
index 58fa39c1d67..2b54996bd85 100644
--- a/drivers/staging/octeon/ethernet-sgmii.c
+++ b/drivers/staging/octeon/ethernet-sgmii.c
@@ -34,13 +34,12 @@
#include "ethernet-defines.h"
#include "octeon-ethernet.h"
#include "ethernet-util.h"
-#include "ethernet-common.h"
#include "cvmx-helper.h"
#include "cvmx-gmxx-defs.h"
-static int cvm_oct_sgmii_open(struct net_device *dev)
+int cvm_oct_sgmii_open(struct net_device *dev)
{
union cvmx_gmxx_prtx_cfg gmx_cfg;
struct octeon_ethernet *priv = netdev_priv(dev);
@@ -61,7 +60,7 @@ static int cvm_oct_sgmii_open(struct net_device *dev)
return 0;
}
-static int cvm_oct_sgmii_stop(struct net_device *dev)
+int cvm_oct_sgmii_stop(struct net_device *dev)
{
union cvmx_gmxx_prtx_cfg gmx_cfg;
struct octeon_ethernet *priv = netdev_priv(dev);
@@ -113,9 +112,7 @@ int cvm_oct_sgmii_init(struct net_device *dev)
{
struct octeon_ethernet *priv = netdev_priv(dev);
cvm_oct_common_init(dev);
- dev->open = cvm_oct_sgmii_open;
- dev->stop = cvm_oct_sgmii_stop;
- dev->stop(dev);
+ dev->netdev_ops->ndo_stop(dev);
if (!octeon_is_simulation())
priv->poll = cvm_oct_sgmii_poll;
diff --git a/drivers/staging/octeon/ethernet-spi.c b/drivers/staging/octeon/ethernet-spi.c
index e0971bbe4dd..66190b0cb68 100644
--- a/drivers/staging/octeon/ethernet-spi.c
+++ b/drivers/staging/octeon/ethernet-spi.c
@@ -33,7 +33,6 @@
#include "ethernet-defines.h"
#include "octeon-ethernet.h"
-#include "ethernet-common.h"
#include "ethernet-util.h"
#include "cvmx-spi.h"
diff --git a/drivers/staging/octeon/ethernet-tx.c b/drivers/staging/octeon/ethernet-tx.c
index 77b7122c8fd..81a851390f1 100644
--- a/drivers/staging/octeon/ethernet-tx.c
+++ b/drivers/staging/octeon/ethernet-tx.c
@@ -47,6 +47,7 @@
#include "ethernet-defines.h"
#include "octeon-ethernet.h"
+#include "ethernet-tx.h"
#include "ethernet-util.h"
#include "cvmx-wqe.h"
@@ -82,8 +83,10 @@ int cvm_oct_xmit(struct sk_buff *skb, struct net_device *dev)
uint64_t old_scratch2;
int dropped;
int qos;
+ int queue_it_up;
struct octeon_ethernet *priv = netdev_priv(dev);
- int32_t in_use;
+ int32_t skb_to_free;
+ int32_t undo;
int32_t buffers_to_free;
#if REUSE_SKBUFFS_WITHOUT_FREE
unsigned char *fpa_head;
@@ -120,15 +123,15 @@ int cvm_oct_xmit(struct sk_buff *skb, struct net_device *dev)
old_scratch2 = cvmx_scratch_read64(CVMX_SCR_SCRATCH + 8);
/*
- * Assume we're going to be able t osend this
- * packet. Fetch and increment the number of pending
- * packets for output.
+ * Fetch and increment the number of packets to be
+ * freed.
*/
cvmx_fau_async_fetch_and_add32(CVMX_SCR_SCRATCH + 8,
FAU_NUM_PACKET_BUFFERS_TO_FREE,
0);
cvmx_fau_async_fetch_and_add32(CVMX_SCR_SCRATCH,
- priv->fau + qos * 4, 1);
+ priv->fau + qos * 4,
+ MAX_SKB_TO_FREE);
}
/*
@@ -253,10 +256,10 @@ int cvm_oct_xmit(struct sk_buff *skb, struct net_device *dev)
/*
* The skbuff will be reused without ever being freed. We must
- * cleanup a bunch of Linux stuff.
+ * cleanup a bunch of core things.
*/
- dst_release(skb->dst);
- skb->dst = NULL;
+ dst_release(skb_dst(skb));
+ skb_dst_set(skb, NULL);
#ifdef CONFIG_XFRM
secpath_put(skb->sp);
skb->sp = NULL;
@@ -286,16 +289,30 @@ dont_put_skbuff_in_hw:
if (USE_ASYNC_IOBDMA) {
/* Get the number of skbuffs in use by the hardware */
CVMX_SYNCIOBDMA;
- in_use = cvmx_scratch_read64(CVMX_SCR_SCRATCH);
+ skb_to_free = cvmx_scratch_read64(CVMX_SCR_SCRATCH);
buffers_to_free = cvmx_scratch_read64(CVMX_SCR_SCRATCH + 8);
} else {
/* Get the number of skbuffs in use by the hardware */
- in_use = cvmx_fau_fetch_and_add32(priv->fau + qos * 4, 1);
+ skb_to_free = cvmx_fau_fetch_and_add32(priv->fau + qos * 4,
+ MAX_SKB_TO_FREE);
buffers_to_free =
cvmx_fau_fetch_and_add32(FAU_NUM_PACKET_BUFFERS_TO_FREE, 0);
}
/*
+ * We try to claim MAX_SKB_TO_FREE buffers. If there were not
+ * that many available, we have to un-claim (undo) any that
+ * were in excess. If skb_to_free is positive we will free
+ * that many buffers.
+ */
+ undo = skb_to_free > 0 ?
+ MAX_SKB_TO_FREE : skb_to_free + MAX_SKB_TO_FREE;
+ if (undo > 0)
+ cvmx_fau_atomic_add32(priv->fau+qos*4, -undo);
+ skb_to_free = -skb_to_free > MAX_SKB_TO_FREE ?
+ MAX_SKB_TO_FREE : -skb_to_free;
+
+ /*
* If we're sending faster than the receive can free them then
* don't do the HW free.
*/
@@ -330,38 +347,31 @@ dont_put_skbuff_in_hw:
cvmx_scratch_write64(CVMX_SCR_SCRATCH + 8, old_scratch2);
}
+ queue_it_up = 0;
if (unlikely(dropped)) {
dev_kfree_skb_any(skb);
- cvmx_fau_atomic_add32(priv->fau + qos * 4, -1);
priv->stats.tx_dropped++;
} else {
if (USE_SKBUFFS_IN_HW) {
/* Put this packet on the queue to be freed later */
if (pko_command.s.dontfree)
- skb_queue_tail(&priv->tx_free_list[qos], skb);
- else {
+ queue_it_up = 1;
+ else
cvmx_fau_atomic_add32
(FAU_NUM_PACKET_BUFFERS_TO_FREE, -1);
- cvmx_fau_atomic_add32(priv->fau + qos * 4, -1);
- }
} else {
/* Put this packet on the queue to be freed later */
- skb_queue_tail(&priv->tx_free_list[qos], skb);
+ queue_it_up = 1;
}
}
- /* Free skbuffs not in use by the hardware, possibly two at a time */
- if (skb_queue_len(&priv->tx_free_list[qos]) > in_use) {
+ if (queue_it_up) {
spin_lock(&priv->tx_free_list[qos].lock);
- /*
- * Check again now that we have the lock. It might
- * have changed.
- */
- if (skb_queue_len(&priv->tx_free_list[qos]) > in_use)
- dev_kfree_skb(__skb_dequeue(&priv->tx_free_list[qos]));
- if (skb_queue_len(&priv->tx_free_list[qos]) > in_use)
- dev_kfree_skb(__skb_dequeue(&priv->tx_free_list[qos]));
+ __skb_queue_tail(&priv->tx_free_list[qos], skb);
+ cvm_oct_free_tx_skbs(priv, skb_to_free, qos, 0);
spin_unlock(&priv->tx_free_list[qos].lock);
+ } else {
+ cvm_oct_free_tx_skbs(priv, skb_to_free, qos, 1);
}
return 0;
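The claim/undo arithmetic above is easier to see with numbers. Assuming cvmx_fau_fetch_and_add32() returns the counter value from before the addition, and that the hardware drives the per-queue counter negative as it finishes with buffers, a stand-alone model (MAX_SKB_TO_FREE taken as 10):

	#include <stdio.h>

	int main(void)
	{
		int samples[] = { 2, -3, -25 };	/* counter value before the +10 fetch-and-add */
		int i;

		for (i = 0; i < 3; i++) {
			int skb_to_free = samples[i];
			int undo = skb_to_free > 0 ? 10 : skb_to_free + 10;
			int give_back = undo > 0 ? undo : 0;	/* added back to the FAU counter */

			skb_to_free = -skb_to_free > 10 ? 10 : -skb_to_free;
			printf("old %3d: give back %2d, free %2d\n", samples[i],
			       give_back, skb_to_free > 0 ? skb_to_free : 0);
		}
		return 0;	/* old 2: free 0;  old -3: free 3;  old -25: free 10 */
	}
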
diff --git a/drivers/staging/octeon/ethernet-tx.h b/drivers/staging/octeon/ethernet-tx.h
index 5106236fe98..c0bebf750bc 100644
--- a/drivers/staging/octeon/ethernet-tx.h
+++ b/drivers/staging/octeon/ethernet-tx.h
@@ -30,3 +30,28 @@ int cvm_oct_xmit_pow(struct sk_buff *skb, struct net_device *dev);
int cvm_oct_transmit_qos(struct net_device *dev, void *work_queue_entry,
int do_free, int qos);
void cvm_oct_tx_shutdown(struct net_device *dev);
+
+/**
+ * Free dead transmit skbs.
+ *
+ * @priv: The driver data
+ * @skb_to_free: The number of SKBs to free (free none if negative).
+ * @qos: The queue to free from.
+ * @take_lock: If true, acquire the skb list lock.
+ */
+static inline void cvm_oct_free_tx_skbs(struct octeon_ethernet *priv,
+ int skb_to_free,
+ int qos, int take_lock)
+{
+ /* Free skbuffs not in use by the hardware. */
+ if (skb_to_free > 0) {
+ if (take_lock)
+ spin_lock(&priv->tx_free_list[qos].lock);
+ while (skb_to_free > 0) {
+ dev_kfree_skb(__skb_dequeue(&priv->tx_free_list[qos]));
+ skb_to_free--;
+ }
+ if (take_lock)
+ spin_unlock(&priv->tx_free_list[qos].lock);
+ }
+}
diff --git a/drivers/staging/octeon/ethernet-xaui.c b/drivers/staging/octeon/ethernet-xaui.c
index f08eb32e04f..0c2e7cc40f3 100644
--- a/drivers/staging/octeon/ethernet-xaui.c
+++ b/drivers/staging/octeon/ethernet-xaui.c
@@ -33,14 +33,13 @@
#include "ethernet-defines.h"
#include "octeon-ethernet.h"
-#include "ethernet-common.h"
#include "ethernet-util.h"
#include "cvmx-helper.h"
#include "cvmx-gmxx-defs.h"
-static int cvm_oct_xaui_open(struct net_device *dev)
+int cvm_oct_xaui_open(struct net_device *dev)
{
union cvmx_gmxx_prtx_cfg gmx_cfg;
struct octeon_ethernet *priv = netdev_priv(dev);
@@ -60,7 +59,7 @@ static int cvm_oct_xaui_open(struct net_device *dev)
return 0;
}
-static int cvm_oct_xaui_stop(struct net_device *dev)
+int cvm_oct_xaui_stop(struct net_device *dev)
{
union cvmx_gmxx_prtx_cfg gmx_cfg;
struct octeon_ethernet *priv = netdev_priv(dev);
@@ -112,9 +111,7 @@ int cvm_oct_xaui_init(struct net_device *dev)
{
struct octeon_ethernet *priv = netdev_priv(dev);
cvm_oct_common_init(dev);
- dev->open = cvm_oct_xaui_open;
- dev->stop = cvm_oct_xaui_stop;
- dev->stop(dev);
+ dev->netdev_ops->ndo_stop(dev);
if (!octeon_is_simulation())
priv->poll = cvm_oct_xaui_poll;
diff --git a/drivers/staging/octeon/ethernet.c b/drivers/staging/octeon/ethernet.c
index e8ef9e0b791..b8479517dce 100644
--- a/drivers/staging/octeon/ethernet.c
+++ b/drivers/staging/octeon/ethernet.c
@@ -37,13 +37,14 @@
#include <asm/octeon/octeon.h>
#include "ethernet-defines.h"
+#include "octeon-ethernet.h"
#include "ethernet-mem.h"
#include "ethernet-rx.h"
#include "ethernet-tx.h"
+#include "ethernet-mdio.h"
#include "ethernet-util.h"
#include "ethernet-proc.h"
-#include "ethernet-common.h"
-#include "octeon-ethernet.h"
+
#include "cvmx-pip.h"
#include "cvmx-pko.h"
@@ -51,6 +52,7 @@
#include "cvmx-ipd.h"
#include "cvmx-helper.h"
+#include "cvmx-gmxx-defs.h"
#include "cvmx-smix-defs.h"
#if defined(CONFIG_CAVIUM_OCTEON_NUM_PACKET_BUFFERS) \
@@ -129,53 +131,55 @@ extern struct semaphore mdio_sem;
*/
static void cvm_do_timer(unsigned long arg)
{
+ int32_t skb_to_free, undo;
+ int queues_per_port;
+ int qos;
+ struct octeon_ethernet *priv;
static int port;
- if (port < CVMX_PIP_NUM_INPUT_PORTS) {
- if (cvm_oct_device[port]) {
- int queues_per_port;
- int qos;
- struct octeon_ethernet *priv =
- netdev_priv(cvm_oct_device[port]);
- if (priv->poll) {
- /* skip polling if we don't get the lock */
- if (!down_trylock(&mdio_sem)) {
- priv->poll(cvm_oct_device[port]);
- up(&mdio_sem);
- }
- }
- queues_per_port = cvmx_pko_get_num_queues(port);
- /* Drain any pending packets in the free list */
- for (qos = 0; qos < queues_per_port; qos++) {
- if (skb_queue_len(&priv->tx_free_list[qos])) {
- spin_lock(&priv->tx_free_list[qos].
- lock);
- while (skb_queue_len
- (&priv->tx_free_list[qos]) >
- cvmx_fau_fetch_and_add32(priv->
- fau +
- qos * 4,
- 0))
- dev_kfree_skb(__skb_dequeue
- (&priv->
- tx_free_list
- [qos]));
- spin_unlock(&priv->tx_free_list[qos].
- lock);
- }
- }
- cvm_oct_device[port]->get_stats(cvm_oct_device[port]);
- }
- port++;
- /* Poll the next port in a 50th of a second.
- This spreads the polling of ports out a little bit */
- mod_timer(&cvm_oct_poll_timer, jiffies + HZ / 50);
- } else {
+ if (port >= CVMX_PIP_NUM_INPUT_PORTS) {
+ /*
+ * All ports have been polled. Start the next
+ * iteration through the ports in one second.
+ */
port = 0;
- /* All ports have been polled. Start the next iteration through
- the ports in one second */
mod_timer(&cvm_oct_poll_timer, jiffies + HZ);
+ return;
+ }
+ if (!cvm_oct_device[port])
+ goto out;
+
+ priv = netdev_priv(cvm_oct_device[port]);
+ if (priv->poll) {
+ /* skip polling if we don't get the lock */
+ if (!down_trylock(&mdio_sem)) {
+ priv->poll(cvm_oct_device[port]);
+ up(&mdio_sem);
+ }
}
+
+ queues_per_port = cvmx_pko_get_num_queues(port);
+ /* Drain any pending packets in the free list */
+ for (qos = 0; qos < queues_per_port; qos++) {
+ if (skb_queue_len(&priv->tx_free_list[qos]) == 0)
+ continue;
+ skb_to_free = cvmx_fau_fetch_and_add32(priv->fau + qos * 4,
+ MAX_SKB_TO_FREE);
+ undo = skb_to_free > 0 ?
+ MAX_SKB_TO_FREE : skb_to_free + MAX_SKB_TO_FREE;
+ if (undo > 0)
+ cvmx_fau_atomic_add32(priv->fau+qos*4, -undo);
+ skb_to_free = -skb_to_free > MAX_SKB_TO_FREE ?
+ MAX_SKB_TO_FREE : -skb_to_free;
+ cvm_oct_free_tx_skbs(priv, skb_to_free, qos, 1);
+ }
+ cvm_oct_device[port]->netdev_ops->ndo_get_stats(cvm_oct_device[port]);
+
+out:
+ port++;
+ /* Poll the next port in a 50th of a second.
+ This spreads the polling of ports out a little bit */
+ mod_timer(&cvm_oct_poll_timer, jiffies + HZ / 50);
}
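The rewritten timer above drains each transmit queue with a bounded fetch-and-add: it atomically adds MAX_SKB_TO_FREE to the per-queue counter, gives back any overshoot, and caps the number of buffers freed in one pass. A standalone sketch of just that clamp/undo arithmetic, assuming an illustrative MAX_SKB_TO_FREE and treating the counter as a plain int (the FAU register semantics themselves are the driver's):

#include <stdio.h>

#define MAX_SKB_TO_FREE 10	/* illustrative value */

/*
 * `fetched` stands for the value returned by the fetch-and-add of
 * MAX_SKB_TO_FREE; *undo is how much of that add must be given back
 * (only applied when positive), and the return value is the number of
 * buffers that may actually be freed, capped at MAX_SKB_TO_FREE.
 */
static int clamp_skb_to_free(int fetched, int *undo)
{
	*undo = fetched > 0 ? MAX_SKB_TO_FREE : fetched + MAX_SKB_TO_FREE;
	return -fetched > MAX_SKB_TO_FREE ? MAX_SKB_TO_FREE : -fetched;
}

int main(void)
{
	int undo, to_free;

	to_free = clamp_skb_to_free(-3, &undo);
	printf("free %d, undo %d\n", to_free, undo);	/* free 3, undo 7 */

	to_free = clamp_skb_to_free(-25, &undo);
	printf("free %d, undo %d\n", to_free, undo);	/* free 10, undo -15 (no undo applied) */
	return 0;
}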
/**
@@ -246,6 +250,362 @@ int cvm_oct_free_work(void *work_queue_entry)
EXPORT_SYMBOL(cvm_oct_free_work);
/**
+ * Get the low level ethernet statistics
+ *
+ * @dev: Device to get the statistics from
+ * Returns Pointer to the statistics
+ */
+static struct net_device_stats *cvm_oct_common_get_stats(struct net_device *dev)
+{
+ cvmx_pip_port_status_t rx_status;
+ cvmx_pko_port_status_t tx_status;
+ struct octeon_ethernet *priv = netdev_priv(dev);
+
+ if (priv->port < CVMX_PIP_NUM_INPUT_PORTS) {
+ if (octeon_is_simulation()) {
+ /* The simulator doesn't support statistics */
+ memset(&rx_status, 0, sizeof(rx_status));
+ memset(&tx_status, 0, sizeof(tx_status));
+ } else {
+ cvmx_pip_get_port_status(priv->port, 1, &rx_status);
+ cvmx_pko_get_port_status(priv->port, 1, &tx_status);
+ }
+
+ priv->stats.rx_packets += rx_status.inb_packets;
+ priv->stats.tx_packets += tx_status.packets;
+ priv->stats.rx_bytes += rx_status.inb_octets;
+ priv->stats.tx_bytes += tx_status.octets;
+ priv->stats.multicast += rx_status.multicast_packets;
+ priv->stats.rx_crc_errors += rx_status.inb_errors;
+ priv->stats.rx_frame_errors += rx_status.fcs_align_err_packets;
+
+ /*
+ * The drop counter must be incremented atomically
+ * since the RX tasklet also increments it.
+ */
+#ifdef CONFIG_64BIT
+ atomic64_add(rx_status.dropped_packets,
+ (atomic64_t *)&priv->stats.rx_dropped);
+#else
+ atomic_add(rx_status.dropped_packets,
+ (atomic_t *)&priv->stats.rx_dropped);
+#endif
+ }
+
+ return &priv->stats;
+}
+
+/**
+ * Change the link MTU. Unimplemented
+ *
+ * @dev: Device to change
+ * @new_mtu: The new MTU
+ *
+ * Returns Zero on success
+ */
+static int cvm_oct_common_change_mtu(struct net_device *dev, int new_mtu)
+{
+ struct octeon_ethernet *priv = netdev_priv(dev);
+ int interface = INTERFACE(priv->port);
+ int index = INDEX(priv->port);
+#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
+ int vlan_bytes = 4;
+#else
+ int vlan_bytes = 0;
+#endif
+
+ /*
+ * Limit the MTU to make sure the ethernet packets are between
+ * 64 bytes and 65535 bytes.
+ */
+ if ((new_mtu + 14 + 4 + vlan_bytes < 64)
+ || (new_mtu + 14 + 4 + vlan_bytes > 65392)) {
+ pr_err("MTU must be between %d and %d.\n",
+ 64 - 14 - 4 - vlan_bytes, 65392 - 14 - 4 - vlan_bytes);
+ return -EINVAL;
+ }
+ dev->mtu = new_mtu;
+
+ if ((interface < 2)
+ && (cvmx_helper_interface_get_mode(interface) !=
+ CVMX_HELPER_INTERFACE_MODE_SPI)) {
+ /* Add ethernet header and FCS, and VLAN if configured. */
+ int max_packet = new_mtu + 14 + 4 + vlan_bytes;
+
+ if (OCTEON_IS_MODEL(OCTEON_CN3XXX)
+ || OCTEON_IS_MODEL(OCTEON_CN58XX)) {
+ /* Signal errors on packets larger than the MTU */
+ cvmx_write_csr(CVMX_GMXX_RXX_FRM_MAX(index, interface),
+ max_packet);
+ } else {
+ /*
+ * Set the hardware to truncate packets larger
+ * than the MTU and smaller than 64 bytes.
+ */
+ union cvmx_pip_frm_len_chkx frm_len_chk;
+ frm_len_chk.u64 = 0;
+ frm_len_chk.s.minlen = 64;
+ frm_len_chk.s.maxlen = max_packet;
+ cvmx_write_csr(CVMX_PIP_FRM_LEN_CHKX(interface),
+ frm_len_chk.u64);
+ }
+ /*
+ * Set the hardware to truncate packets larger than
+ * the MTU. The jabber register must be set to a
+ * multiple of 8 bytes, so round up.
+ */
+ cvmx_write_csr(CVMX_GMXX_RXX_JABBER(index, interface),
+ (max_packet + 7) & ~7u);
+ }
+ return 0;
+}
+
+/**
+ * Set the multicast list. Currently unimplemented.
+ *
+ * @dev: Device to work on
+ */
+static void cvm_oct_common_set_multicast_list(struct net_device *dev)
+{
+ union cvmx_gmxx_prtx_cfg gmx_cfg;
+ struct octeon_ethernet *priv = netdev_priv(dev);
+ int interface = INTERFACE(priv->port);
+ int index = INDEX(priv->port);
+
+ if ((interface < 2)
+ && (cvmx_helper_interface_get_mode(interface) !=
+ CVMX_HELPER_INTERFACE_MODE_SPI)) {
+ union cvmx_gmxx_rxx_adr_ctl control;
+ control.u64 = 0;
+ control.s.bcst = 1; /* Allow broadcast MAC addresses */
+
+ if (dev->mc_list || (dev->flags & IFF_ALLMULTI) ||
+ (dev->flags & IFF_PROMISC))
+ /* Force accept multicast packets */
+ control.s.mcst = 2;
+ else
+ /* Force reject multicast packets */
+ control.s.mcst = 1;
+
+ if (dev->flags & IFF_PROMISC)
+ /*
+ * Reject matches if promisc. Since CAM is
+ * shut off, should accept everything.
+ */
+ control.s.cam_mode = 0;
+ else
+ /* Filter packets based on the CAM */
+ control.s.cam_mode = 1;
+
+ gmx_cfg.u64 =
+ cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
+ cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface),
+ gmx_cfg.u64 & ~1ull);
+
+ cvmx_write_csr(CVMX_GMXX_RXX_ADR_CTL(index, interface),
+ control.u64);
+ if (dev->flags & IFF_PROMISC)
+ cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM_EN
+ (index, interface), 0);
+ else
+ cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM_EN
+ (index, interface), 1);
+
+ cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface),
+ gmx_cfg.u64);
+ }
+}
+
+/**
+ * Set the hardware MAC address for a device
+ *
+ * @dev: Device to change the MAC address for
+ * @addr: Address structure to change it to. MAC address is addr + 2.
+ * Returns Zero on success
+ */
+static int cvm_oct_common_set_mac_address(struct net_device *dev, void *addr)
+{
+ struct octeon_ethernet *priv = netdev_priv(dev);
+ union cvmx_gmxx_prtx_cfg gmx_cfg;
+ int interface = INTERFACE(priv->port);
+ int index = INDEX(priv->port);
+
+ memcpy(dev->dev_addr, addr + 2, 6);
+
+ if ((interface < 2)
+ && (cvmx_helper_interface_get_mode(interface) !=
+ CVMX_HELPER_INTERFACE_MODE_SPI)) {
+ int i;
+ uint8_t *ptr = addr;
+ uint64_t mac = 0;
+ for (i = 0; i < 6; i++)
+ mac = (mac << 8) | (uint64_t) (ptr[i + 2]);
+
+ gmx_cfg.u64 =
+ cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
+ cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface),
+ gmx_cfg.u64 & ~1ull);
+
+ cvmx_write_csr(CVMX_GMXX_SMACX(index, interface), mac);
+ cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM0(index, interface),
+ ptr[2]);
+ cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM1(index, interface),
+ ptr[3]);
+ cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM2(index, interface),
+ ptr[4]);
+ cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM3(index, interface),
+ ptr[5]);
+ cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM4(index, interface),
+ ptr[6]);
+ cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM5(index, interface),
+ ptr[7]);
+ cvm_oct_common_set_multicast_list(dev);
+ cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface),
+ gmx_cfg.u64);
+ }
+ return 0;
+}
+
+/**
+ * Per network device initialization
+ *
+ * @dev: Device to initialize
+ * Returns Zero on success
+ */
+int cvm_oct_common_init(struct net_device *dev)
+{
+ static int count;
+ char mac[8] = { 0x00, 0x00,
+ octeon_bootinfo->mac_addr_base[0],
+ octeon_bootinfo->mac_addr_base[1],
+ octeon_bootinfo->mac_addr_base[2],
+ octeon_bootinfo->mac_addr_base[3],
+ octeon_bootinfo->mac_addr_base[4],
+ octeon_bootinfo->mac_addr_base[5] + count
+ };
+ struct octeon_ethernet *priv = netdev_priv(dev);
+
+ /*
+ * Force the interface to use the POW send if always_use_pow
+ * was specified or it is in the pow send list.
+ */
+ if ((pow_send_group != -1)
+ && (always_use_pow || strstr(pow_send_list, dev->name)))
+ priv->queue = -1;
+
+ if (priv->queue != -1 && USE_HW_TCPUDP_CHECKSUM)
+ dev->features |= NETIF_F_IP_CSUM;
+
+ count++;
+
+ /* We do our own locking, Linux doesn't need to */
+ dev->features |= NETIF_F_LLTX;
+ SET_ETHTOOL_OPS(dev, &cvm_oct_ethtool_ops);
+
+ cvm_oct_mdio_setup_device(dev);
+ dev->netdev_ops->ndo_set_mac_address(dev, mac);
+ dev->netdev_ops->ndo_change_mtu(dev, dev->mtu);
+
+ /*
+ * Zero out stats for port so we won't mistakenly show
+ * counters from the bootloader.
+ */
+ memset(dev->netdev_ops->ndo_get_stats(dev), 0,
+ sizeof(struct net_device_stats));
+
+ return 0;
+}
+
+void cvm_oct_common_uninit(struct net_device *dev)
+{
+ /* Currently nothing to do */
+}
+
+static const struct net_device_ops cvm_oct_npi_netdev_ops = {
+ .ndo_init = cvm_oct_common_init,
+ .ndo_uninit = cvm_oct_common_uninit,
+ .ndo_start_xmit = cvm_oct_xmit,
+ .ndo_set_multicast_list = cvm_oct_common_set_multicast_list,
+ .ndo_set_mac_address = cvm_oct_common_set_mac_address,
+ .ndo_do_ioctl = cvm_oct_ioctl,
+ .ndo_change_mtu = cvm_oct_common_change_mtu,
+ .ndo_get_stats = cvm_oct_common_get_stats,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = cvm_oct_poll_controller,
+#endif
+};
+static const struct net_device_ops cvm_oct_xaui_netdev_ops = {
+ .ndo_init = cvm_oct_xaui_init,
+ .ndo_uninit = cvm_oct_xaui_uninit,
+ .ndo_open = cvm_oct_xaui_open,
+ .ndo_stop = cvm_oct_xaui_stop,
+ .ndo_start_xmit = cvm_oct_xmit,
+ .ndo_set_multicast_list = cvm_oct_common_set_multicast_list,
+ .ndo_set_mac_address = cvm_oct_common_set_mac_address,
+ .ndo_do_ioctl = cvm_oct_ioctl,
+ .ndo_change_mtu = cvm_oct_common_change_mtu,
+ .ndo_get_stats = cvm_oct_common_get_stats,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = cvm_oct_poll_controller,
+#endif
+};
+static const struct net_device_ops cvm_oct_sgmii_netdev_ops = {
+ .ndo_init = cvm_oct_sgmii_init,
+ .ndo_uninit = cvm_oct_sgmii_uninit,
+ .ndo_open = cvm_oct_sgmii_open,
+ .ndo_stop = cvm_oct_sgmii_stop,
+ .ndo_start_xmit = cvm_oct_xmit,
+ .ndo_set_multicast_list = cvm_oct_common_set_multicast_list,
+ .ndo_set_mac_address = cvm_oct_common_set_mac_address,
+ .ndo_do_ioctl = cvm_oct_ioctl,
+ .ndo_change_mtu = cvm_oct_common_change_mtu,
+ .ndo_get_stats = cvm_oct_common_get_stats,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = cvm_oct_poll_controller,
+#endif
+};
+static const struct net_device_ops cvm_oct_spi_netdev_ops = {
+ .ndo_init = cvm_oct_spi_init,
+ .ndo_uninit = cvm_oct_spi_uninit,
+ .ndo_start_xmit = cvm_oct_xmit,
+ .ndo_set_multicast_list = cvm_oct_common_set_multicast_list,
+ .ndo_set_mac_address = cvm_oct_common_set_mac_address,
+ .ndo_do_ioctl = cvm_oct_ioctl,
+ .ndo_change_mtu = cvm_oct_common_change_mtu,
+ .ndo_get_stats = cvm_oct_common_get_stats,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = cvm_oct_poll_controller,
+#endif
+};
+static const struct net_device_ops cvm_oct_rgmii_netdev_ops = {
+ .ndo_init = cvm_oct_rgmii_init,
+ .ndo_uninit = cvm_oct_rgmii_uninit,
+ .ndo_open = cvm_oct_rgmii_open,
+ .ndo_stop = cvm_oct_rgmii_stop,
+ .ndo_start_xmit = cvm_oct_xmit,
+ .ndo_set_multicast_list = cvm_oct_common_set_multicast_list,
+ .ndo_set_mac_address = cvm_oct_common_set_mac_address,
+ .ndo_do_ioctl = cvm_oct_ioctl,
+ .ndo_change_mtu = cvm_oct_common_change_mtu,
+ .ndo_get_stats = cvm_oct_common_get_stats,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = cvm_oct_poll_controller,
+#endif
+};
+static const struct net_device_ops cvm_oct_pow_netdev_ops = {
+ .ndo_init = cvm_oct_common_init,
+ .ndo_start_xmit = cvm_oct_xmit_pow,
+ .ndo_set_multicast_list = cvm_oct_common_set_multicast_list,
+ .ndo_set_mac_address = cvm_oct_common_set_mac_address,
+ .ndo_do_ioctl = cvm_oct_ioctl,
+ .ndo_change_mtu = cvm_oct_common_change_mtu,
+ .ndo_get_stats = cvm_oct_common_get_stats,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = cvm_oct_poll_controller,
+#endif
+};
+
+/**
* Module/ driver initialization. Creates the linux network
* devices.
*
@@ -303,7 +663,7 @@ static int __init cvm_oct_init_module(void)
struct octeon_ethernet *priv = netdev_priv(dev);
memset(priv, 0, sizeof(struct octeon_ethernet));
- dev->init = cvm_oct_common_init;
+ dev->netdev_ops = &cvm_oct_pow_netdev_ops;
priv->imode = CVMX_HELPER_INTERFACE_MODE_DISABLED;
priv->port = CVMX_PIP_NUM_INPUT_PORTS;
priv->queue = -1;
@@ -372,44 +732,38 @@ static int __init cvm_oct_init_module(void)
break;
case CVMX_HELPER_INTERFACE_MODE_NPI:
- dev->init = cvm_oct_common_init;
- dev->uninit = cvm_oct_common_uninit;
+ dev->netdev_ops = &cvm_oct_npi_netdev_ops;
strcpy(dev->name, "npi%d");
break;
case CVMX_HELPER_INTERFACE_MODE_XAUI:
- dev->init = cvm_oct_xaui_init;
- dev->uninit = cvm_oct_xaui_uninit;
+ dev->netdev_ops = &cvm_oct_xaui_netdev_ops;
strcpy(dev->name, "xaui%d");
break;
case CVMX_HELPER_INTERFACE_MODE_LOOP:
- dev->init = cvm_oct_common_init;
- dev->uninit = cvm_oct_common_uninit;
+ dev->netdev_ops = &cvm_oct_npi_netdev_ops;
strcpy(dev->name, "loop%d");
break;
case CVMX_HELPER_INTERFACE_MODE_SGMII:
- dev->init = cvm_oct_sgmii_init;
- dev->uninit = cvm_oct_sgmii_uninit;
+ dev->netdev_ops = &cvm_oct_sgmii_netdev_ops;
strcpy(dev->name, "eth%d");
break;
case CVMX_HELPER_INTERFACE_MODE_SPI:
- dev->init = cvm_oct_spi_init;
- dev->uninit = cvm_oct_spi_uninit;
+ dev->netdev_ops = &cvm_oct_spi_netdev_ops;
strcpy(dev->name, "spi%d");
break;
case CVMX_HELPER_INTERFACE_MODE_RGMII:
case CVMX_HELPER_INTERFACE_MODE_GMII:
- dev->init = cvm_oct_rgmii_init;
- dev->uninit = cvm_oct_rgmii_uninit;
+ dev->netdev_ops = &cvm_oct_rgmii_netdev_ops;
strcpy(dev->name, "eth%d");
break;
}
- if (!dev->init) {
+ if (!dev->netdev_ops) {
kfree(dev);
} else if (register_netdev(dev) < 0) {
pr_err("Failed to register ethernet device "
diff --git a/drivers/staging/octeon/octeon-ethernet.h b/drivers/staging/octeon/octeon-ethernet.h
index b3199076ef5..3aef9878fc0 100644
--- a/drivers/staging/octeon/octeon-ethernet.h
+++ b/drivers/staging/octeon/octeon-ethernet.h
@@ -111,12 +111,23 @@ static inline int cvm_oct_transmit(struct net_device *dev,
extern int cvm_oct_rgmii_init(struct net_device *dev);
extern void cvm_oct_rgmii_uninit(struct net_device *dev);
+extern int cvm_oct_rgmii_open(struct net_device *dev);
+extern int cvm_oct_rgmii_stop(struct net_device *dev);
+
extern int cvm_oct_sgmii_init(struct net_device *dev);
extern void cvm_oct_sgmii_uninit(struct net_device *dev);
+extern int cvm_oct_sgmii_open(struct net_device *dev);
+extern int cvm_oct_sgmii_stop(struct net_device *dev);
+
extern int cvm_oct_spi_init(struct net_device *dev);
extern void cvm_oct_spi_uninit(struct net_device *dev);
extern int cvm_oct_xaui_init(struct net_device *dev);
extern void cvm_oct_xaui_uninit(struct net_device *dev);
+extern int cvm_oct_xaui_open(struct net_device *dev);
+extern int cvm_oct_xaui_stop(struct net_device *dev);
+
+extern int cvm_oct_common_init(struct net_device *dev);
+extern void cvm_oct_common_uninit(struct net_device *dev);
extern int always_use_pow;
extern int pow_send_group;
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index 38bfdb0f666..3f104599347 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -550,7 +550,7 @@ static void acm_waker(struct work_struct *waker)
static int acm_tty_open(struct tty_struct *tty, struct file *filp)
{
struct acm *acm;
- int rv = -EINVAL;
+ int rv = -ENODEV;
int i;
dbg("Entering acm_tty_open.");
@@ -677,7 +677,7 @@ static void acm_tty_close(struct tty_struct *tty, struct file *filp)
/* Perform the closing process and see if we need to do the hardware
shutdown */
- if (tty_port_close_start(&acm->port, tty, filp) == 0)
+ if (!acm || tty_port_close_start(&acm->port, tty, filp) == 0)
return;
acm_port_down(acm, 0);
tty_port_close_end(&acm->port, tty);
diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c
index d595aa5586a..a84216464ca 100644
--- a/drivers/usb/serial/usb-serial.c
+++ b/drivers/usb/serial/usb-serial.c
@@ -333,6 +333,9 @@ static void serial_close(struct tty_struct *tty, struct file *filp)
{
struct usb_serial_port *port = tty->driver_data;
+ if (!port)
+ return;
+
dbg("%s - port %d", __func__, port->number);
diff --git a/drivers/uwb/hwa-rc.c b/drivers/uwb/hwa-rc.c
index 559f8784acf..9052bcb4f52 100644
--- a/drivers/uwb/hwa-rc.c
+++ b/drivers/uwb/hwa-rc.c
@@ -501,7 +501,7 @@ int hwarc_filter_event_WUSB_0100(struct uwb_rc *rc, struct uwb_rceb **header,
int result = -ENOANO;
struct uwb_rceb *rceb = *header;
int event = le16_to_cpu(rceb->wEvent);
- size_t event_size;
+ ssize_t event_size;
size_t core_size, offset;
if (rceb->bEventType != UWB_RC_CET_GENERAL)
diff --git a/drivers/uwb/wlp/txrx.c b/drivers/uwb/wlp/txrx.c
index cd2035768b4..86a853b8411 100644
--- a/drivers/uwb/wlp/txrx.c
+++ b/drivers/uwb/wlp/txrx.c
@@ -326,7 +326,7 @@ int wlp_prepare_tx_frame(struct device *dev, struct wlp *wlp,
int result = -EINVAL;
struct ethhdr *eth_hdr = (void *) skb->data;
- if (is_broadcast_ether_addr(eth_hdr->h_dest)) {
+ if (is_multicast_ether_addr(eth_hdr->h_dest)) {
result = wlp_eda_for_each(&wlp->eda, wlp_wss_send_copy, skb);
if (result < 0) {
if (printk_ratelimit())
diff --git a/drivers/video/atafb.c b/drivers/video/atafb.c
index 018850c116c..497ff8af03e 100644
--- a/drivers/video/atafb.c
+++ b/drivers/video/atafb.c
@@ -2414,7 +2414,10 @@ static int atafb_get_fix(struct fb_fix_screeninfo *fix, struct fb_info *info)
if (err)
return err;
memset(fix, 0, sizeof(struct fb_fix_screeninfo));
- return fbhw->encode_fix(fix, &par);
+ mutex_lock(&info->mm_lock);
+ err = fbhw->encode_fix(fix, &par);
+ mutex_unlock(&info->mm_lock);
+ return err;
}
static int atafb_get_var(struct fb_var_screeninfo *var, struct fb_info *info)
@@ -2743,7 +2746,9 @@ static int atafb_set_par(struct fb_info *info)
/* Decode wanted screen parameters */
fbhw->decode_var(&info->var, par);
+ mutex_lock(&info->mm_lock);
fbhw->encode_fix(&info->fix, par);
+ mutex_unlock(&info->mm_lock);
/* Set new videomode */
ata_set_par(par);
diff --git a/drivers/video/atmel_lcdfb.c b/drivers/video/atmel_lcdfb.c
index 5afd64482f5..cb88394ba99 100644
--- a/drivers/video/atmel_lcdfb.c
+++ b/drivers/video/atmel_lcdfb.c
@@ -270,7 +270,9 @@ static int atmel_lcdfb_alloc_video_memory(struct atmel_lcdfb_info *sinfo)
smem_len = (var->xres_virtual * var->yres_virtual
* ((var->bits_per_pixel + 7) / 8));
+ mutex_lock(&info->mm_lock);
info->fix.smem_len = max(smem_len, sinfo->smem_len);
+ mutex_unlock(&info->mm_lock);
info->screen_base = dma_alloc_writecombine(info->device, info->fix.smem_len,
(dma_addr_t *)&info->fix.smem_start, GFP_KERNEL);
diff --git a/drivers/video/aty/atyfb.h b/drivers/video/aty/atyfb.h
index 7691e73823d..1f39a62f899 100644
--- a/drivers/video/aty/atyfb.h
+++ b/drivers/video/aty/atyfb.h
@@ -187,6 +187,8 @@ struct atyfb_par {
int mtrr_reg;
#endif
u32 mem_cntl;
+ struct crtc saved_crtc;
+ union aty_pll saved_pll;
};
/*
@@ -217,6 +219,7 @@ struct atyfb_par {
#define M64F_XL_DLL 0x00080000
#define M64F_MFB_FORCE_4 0x00100000
#define M64F_HW_TRIPLE 0x00200000
+#define M64F_XL_MEM 0x00400000
/*
* Register access
*/
diff --git a/drivers/video/aty/atyfb_base.c b/drivers/video/aty/atyfb_base.c
index 1207c208a30..63d3739d43a 100644
--- a/drivers/video/aty/atyfb_base.c
+++ b/drivers/video/aty/atyfb_base.c
@@ -66,6 +66,8 @@
#include <linux/spinlock.h>
#include <linux/wait.h>
#include <linux/backlight.h>
+#include <linux/reboot.h>
+#include <linux/dmi.h>
#include <asm/io.h>
#include <linux/uaccess.h>
@@ -249,8 +251,6 @@ static int aty_init(struct fb_info *info);
static int store_video_par(char *videopar, unsigned char m64_num);
#endif
-static struct crtc saved_crtc;
-static union aty_pll saved_pll;
static void aty_get_crtc(const struct atyfb_par *par, struct crtc *crtc);
static void aty_set_crtc(const struct atyfb_par *par, const struct crtc *crtc);
@@ -261,6 +261,8 @@ static void set_off_pitch(struct atyfb_par *par, const struct fb_info *info);
static int read_aty_sense(const struct atyfb_par *par);
#endif
+static DEFINE_MUTEX(reboot_lock);
+static struct fb_info *reboot_info;
/*
* Interface used by the world
@@ -361,8 +363,8 @@ static unsigned long phys_guiregbase[FB_MAX] __devinitdata = { 0, };
#define ATI_CHIP_264GTPRO (ATI_MODERN_SET | M64F_SDRAM_MAGIC_PLL | M64F_HW_TRIPLE | M64F_FIFO_32 | M64F_RESET_3D)
#define ATI_CHIP_264LTPRO (ATI_MODERN_SET | M64F_HW_TRIPLE | M64F_FIFO_32 | M64F_RESET_3D)
-#define ATI_CHIP_264XL (ATI_MODERN_SET | M64F_HW_TRIPLE | M64F_FIFO_32 | M64F_RESET_3D | M64F_XL_DLL | M64F_MFB_FORCE_4)
-#define ATI_CHIP_MOBILITY (ATI_MODERN_SET | M64F_HW_TRIPLE | M64F_FIFO_32 | M64F_RESET_3D | M64F_XL_DLL | M64F_MFB_FORCE_4 | M64F_MOBIL_BUS)
+#define ATI_CHIP_264XL (ATI_MODERN_SET | M64F_HW_TRIPLE | M64F_FIFO_32 | M64F_RESET_3D | M64F_XL_DLL | M64F_MFB_FORCE_4 | M64F_XL_MEM)
+#define ATI_CHIP_MOBILITY (ATI_MODERN_SET | M64F_HW_TRIPLE | M64F_FIFO_32 | M64F_RESET_3D | M64F_XL_DLL | M64F_MFB_FORCE_4 | M64F_XL_MEM | M64F_MOBIL_BUS)
static struct {
u16 pci_id;
@@ -539,6 +541,7 @@ static char ram_edo[] __devinitdata = "EDO";
static char ram_sdram[] __devinitdata = "SDRAM (1:1)";
static char ram_sgram[] __devinitdata = "SGRAM (1:1)";
static char ram_sdram32[] __devinitdata = "SDRAM (2:1) (32-bit)";
+static char ram_wram[] __devinitdata = "WRAM";
static char ram_off[] __devinitdata = "OFF";
#endif /* CONFIG_FB_ATY_CT */
@@ -553,6 +556,10 @@ static char *aty_gx_ram[8] __devinitdata = {
#ifdef CONFIG_FB_ATY_CT
static char *aty_ct_ram[8] __devinitdata = {
ram_off, ram_dram, ram_edo, ram_edo,
+ ram_sdram, ram_sgram, ram_wram, ram_resv
+};
+static char *aty_xl_ram[8] __devinitdata = {
+ ram_off, ram_dram, ram_edo, ram_edo,
ram_sdram, ram_sgram, ram_sdram32, ram_resv
};
#endif /* CONFIG_FB_ATY_CT */
@@ -760,6 +767,17 @@ static void aty_set_crtc(const struct atyfb_par *par, const struct crtc *crtc)
#endif /* CONFIG_FB_ATY_GENERIC_LCD */
}
+static u32 calc_line_length(struct atyfb_par *par, u32 vxres, u32 bpp)
+{
+ u32 line_length = vxres * bpp / 8;
+
+ if (par->ram_type == SGRAM ||
+ (!M64_HAS(XL_MEM) && par->ram_type == WRAM))
+ line_length = (line_length + 63) & ~63;
+
+ return line_length;
+}
+
static int aty_var_to_crtc(const struct fb_info *info,
const struct fb_var_screeninfo *var, struct crtc *crtc)
{
@@ -769,13 +787,14 @@ static int aty_var_to_crtc(const struct fb_info *info,
u32 h_total, h_disp, h_sync_strt, h_sync_end, h_sync_dly, h_sync_wid, h_sync_pol;
u32 v_total, v_disp, v_sync_strt, v_sync_end, v_sync_wid, v_sync_pol, c_sync;
u32 pix_width, dp_pix_width, dp_chain_mask;
+ u32 line_length;
/* input */
- xres = var->xres;
+ xres = (var->xres + 7) & ~7;
yres = var->yres;
- vxres = var->xres_virtual;
+ vxres = (var->xres_virtual + 7) & ~7;
vyres = var->yres_virtual;
- xoffset = var->xoffset;
+ xoffset = (var->xoffset + 7) & ~7;
yoffset = var->yoffset;
bpp = var->bits_per_pixel;
if (bpp == 16)
@@ -827,7 +846,9 @@ static int aty_var_to_crtc(const struct fb_info *info,
} else
FAIL("invalid bpp");
- if (vxres * vyres * bpp / 8 > info->fix.smem_len)
+ line_length = calc_line_length(par, vxres, bpp);
+
+ if (vyres * line_length > info->fix.smem_len)
FAIL("not enough video RAM");
h_sync_pol = sync & FB_SYNC_HOR_HIGH_ACT ? 0 : 1;
@@ -969,7 +990,9 @@ static int aty_var_to_crtc(const struct fb_info *info,
crtc->xoffset = xoffset;
crtc->yoffset = yoffset;
crtc->bpp = bpp;
- crtc->off_pitch = ((yoffset*vxres+xoffset)*bpp/64) | (vxres<<19);
+ crtc->off_pitch =
+ ((yoffset * line_length + xoffset * bpp / 8) / 8) |
+ ((line_length / bpp) << 22);
crtc->vline_crnt_vline = 0;
crtc->h_tot_disp = h_total | (h_disp<<16);
@@ -1394,7 +1417,9 @@ static int atyfb_set_par(struct fb_info *info)
}
aty_st_8(DAC_MASK, 0xff, par);
- info->fix.line_length = var->xres_virtual * var->bits_per_pixel/8;
+ info->fix.line_length = calc_line_length(par, var->xres_virtual,
+ var->bits_per_pixel);
+
info->fix.visual = var->bits_per_pixel <= 8 ?
FB_VISUAL_PSEUDOCOLOR : FB_VISUAL_DIRECTCOLOR;
@@ -1505,10 +1530,12 @@ static void set_off_pitch(struct atyfb_par *par, const struct fb_info *info)
{
u32 xoffset = info->var.xoffset;
u32 yoffset = info->var.yoffset;
- u32 vxres = par->crtc.vxres;
+ u32 line_length = info->fix.line_length;
u32 bpp = info->var.bits_per_pixel;
- par->crtc.off_pitch = ((yoffset * vxres + xoffset) * bpp / 64) | (vxres << 19);
+ par->crtc.off_pitch =
+ ((yoffset * line_length + xoffset * bpp / 8) / 8) |
+ ((line_length / bpp) << 22);
}
@@ -2201,7 +2228,7 @@ static void __devinit aty_calc_mem_refresh(struct atyfb_par *par, int xclk)
const int *refresh_tbl;
int i, size;
- if (IS_XL(par->pci_id) || IS_MOBILITY(par->pci_id)) {
+ if (M64_HAS(XL_MEM)) {
refresh_tbl = ragexl_tbl;
size = ARRAY_SIZE(ragexl_tbl);
} else {
@@ -2335,7 +2362,10 @@ static int __devinit aty_init(struct fb_info *info)
par->pll_ops = &aty_pll_ct;
par->bus_type = PCI;
par->ram_type = (aty_ld_le32(CNFG_STAT0, par) & 0x07);
- ramname = aty_ct_ram[par->ram_type];
+ if (M64_HAS(XL_MEM))
+ ramname = aty_xl_ram[par->ram_type];
+ else
+ ramname = aty_ct_ram[par->ram_type];
/* for many chips, the mclk is 67 MHz for SDRAM, 63 MHz otherwise */
if (par->pll_limits.mclk == 67 && par->ram_type < SDRAM)
par->pll_limits.mclk = 63;
@@ -2390,9 +2420,9 @@ static int __devinit aty_init(struct fb_info *info)
#endif /* CONFIG_FB_ATY_CT */
/* save previous video mode */
- aty_get_crtc(par, &saved_crtc);
+ aty_get_crtc(par, &par->saved_crtc);
if(par->pll_ops->get_pll)
- par->pll_ops->get_pll(info, &saved_pll);
+ par->pll_ops->get_pll(info, &par->saved_pll);
par->mem_cntl = aty_ld_le32(MEM_CNTL, par);
gtb_memsize = M64_HAS(GTB_DSP);
@@ -2667,8 +2697,8 @@ static int __devinit aty_init(struct fb_info *info)
aty_init_exit:
/* restore video mode */
- aty_set_crtc(par, &saved_crtc);
- par->pll_ops->set_pll(info, &saved_pll);
+ aty_set_crtc(par, &par->saved_crtc);
+ par->pll_ops->set_pll(info, &par->saved_pll);
#ifdef CONFIG_MTRR
if (par->mtrr_reg >= 0) {
@@ -3502,6 +3532,11 @@ static int __devinit atyfb_pci_probe(struct pci_dev *pdev, const struct pci_devi
par->mmap_map[1].prot_flag = _PAGE_E;
#endif /* __sparc__ */
+ mutex_lock(&reboot_lock);
+ if (!reboot_info)
+ reboot_info = info;
+ mutex_unlock(&reboot_lock);
+
return 0;
err_release_io:
@@ -3614,8 +3649,8 @@ static void __devexit atyfb_remove(struct fb_info *info)
struct atyfb_par *par = (struct atyfb_par *) info->par;
/* restore video mode */
- aty_set_crtc(par, &saved_crtc);
- par->pll_ops->set_pll(info, &saved_pll);
+ aty_set_crtc(par, &par->saved_crtc);
+ par->pll_ops->set_pll(info, &par->saved_pll);
unregister_framebuffer(info);
@@ -3661,6 +3696,11 @@ static void __devexit atyfb_pci_remove(struct pci_dev *pdev)
{
struct fb_info *info = pci_get_drvdata(pdev);
+ mutex_lock(&reboot_lock);
+ if (reboot_info == info)
+ reboot_info = NULL;
+ mutex_unlock(&reboot_lock);
+
atyfb_remove(info);
}
@@ -3808,6 +3848,56 @@ static int __init atyfb_setup(char *options)
}
#endif /* MODULE */
+static int atyfb_reboot_notify(struct notifier_block *nb,
+ unsigned long code, void *unused)
+{
+ struct atyfb_par *par;
+
+ if (code != SYS_RESTART)
+ return NOTIFY_DONE;
+
+ mutex_lock(&reboot_lock);
+
+ if (!reboot_info)
+ goto out;
+
+ if (!lock_fb_info(reboot_info))
+ goto out;
+
+ par = reboot_info->par;
+
+ /*
+ * HP OmniBook 500's BIOS doesn't like the state of the
+ * hardware after atyfb has been used. Restore the hardware
+ * to the original state to allow successful reboots.
+ */
+ aty_set_crtc(par, &par->saved_crtc);
+ par->pll_ops->set_pll(reboot_info, &par->saved_pll);
+
+ unlock_fb_info(reboot_info);
+ out:
+ mutex_unlock(&reboot_lock);
+
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block atyfb_reboot_notifier = {
+ .notifier_call = atyfb_reboot_notify,
+};
+
+static const struct dmi_system_id atyfb_reboot_ids[] = {
+ {
+ .ident = "HP OmniBook 500",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "HP OmniBook PC"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "HP OmniBook 500 FA"),
+ },
+ },
+
+ { }
+};
+
static int __init atyfb_init(void)
{
int err1 = 1, err2 = 1;
@@ -3826,11 +3916,20 @@ static int __init atyfb_init(void)
err2 = atyfb_atari_probe();
#endif
- return (err1 && err2) ? -ENODEV : 0;
+ if (err1 && err2)
+ return -ENODEV;
+
+ if (dmi_check_system(atyfb_reboot_ids))
+ register_reboot_notifier(&atyfb_reboot_notifier);
+
+ return 0;
}
static void __exit atyfb_exit(void)
{
+ if (dmi_check_system(atyfb_reboot_ids))
+ unregister_reboot_notifier(&atyfb_reboot_notifier);
+
#ifdef CONFIG_PCI
pci_unregister_driver(&atyfb_driver);
#endif
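The atyfb changes above route every pitch computation through calc_line_length() so the framebuffer pitch, the video-RAM size check and the off_pitch/panning maths all use the same value, 64-byte aligned on SGRAM (and on WRAM for non-XL parts). A minimal sketch of that alignment rule, with needs_64byte_align standing in for the ram_type/M64_HAS(XL_MEM) test:

/* Round the pitch up to a 64-byte multiple when the RAM type requires it. */
static unsigned int example_line_length(unsigned int vxres, unsigned int bpp,
					int needs_64byte_align)
{
	unsigned int line_length = vxres * bpp / 8;

	if (needs_64byte_align)
		line_length = (line_length + 63) & ~63u;
	return line_length;
}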
diff --git a/drivers/video/aty/mach64_accel.c b/drivers/video/aty/mach64_accel.c
index 0cc9724e61a..51fcc0a2c94 100644
--- a/drivers/video/aty/mach64_accel.c
+++ b/drivers/video/aty/mach64_accel.c
@@ -63,14 +63,17 @@ static void reset_GTC_3D_engine(const struct atyfb_par *par)
void aty_init_engine(struct atyfb_par *par, struct fb_info *info)
{
u32 pitch_value;
+ u32 vxres;
/* determine modal information from global mode structure */
- pitch_value = info->var.xres_virtual;
+ pitch_value = info->fix.line_length / (info->var.bits_per_pixel / 8);
+ vxres = info->var.xres_virtual;
if (info->var.bits_per_pixel == 24) {
/* In 24 bpp, the engine is in 8 bpp - this requires that all */
/* horizontal coordinates and widths must be adjusted */
pitch_value *= 3;
+ vxres *= 3;
}
/* On GTC (RagePro), we need to reset the 3D engine before */
@@ -133,7 +136,7 @@ void aty_init_engine(struct atyfb_par *par, struct fb_info *info)
aty_st_le32(SC_LEFT, 0, par);
aty_st_le32(SC_TOP, 0, par);
aty_st_le32(SC_BOTTOM, par->crtc.vyres - 1, par);
- aty_st_le32(SC_RIGHT, pitch_value - 1, par);
+ aty_st_le32(SC_RIGHT, vxres - 1, par);
/* set background color to minimum value (usually BLACK) */
aty_st_le32(DP_BKGD_CLR, 0, par);
diff --git a/drivers/video/backlight/tdo24m.c b/drivers/video/backlight/tdo24m.c
index 1dae7f8f3c6..51422fc4f60 100644
--- a/drivers/video/backlight/tdo24m.c
+++ b/drivers/video/backlight/tdo24m.c
@@ -356,7 +356,7 @@ static int __devinit tdo24m_probe(struct spi_device *spi)
lcd->power = FB_BLANK_POWERDOWN;
lcd->mode = MODE_VGA; /* default to VGA */
- lcd->buf = kmalloc(TDO24M_SPI_BUFF_SIZE, sizeof(GFP_KERNEL));
+ lcd->buf = kmalloc(TDO24M_SPI_BUFF_SIZE, GFP_KERNEL);
if (lcd->buf == NULL) {
kfree(lcd);
return -ENOMEM;
diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c
index f8a09bf8d0c..53ea05645ff 100644
--- a/drivers/video/fbmem.c
+++ b/drivers/video/fbmem.c
@@ -1310,8 +1310,6 @@ static long fb_compat_ioctl(struct file *file, unsigned int cmd,
static int
fb_mmap(struct file *file, struct vm_area_struct * vma)
-__acquires(&info->lock)
-__releases(&info->lock)
{
int fbidx = iminor(file->f_path.dentry->d_inode);
struct fb_info *info = registered_fb[fbidx];
@@ -1325,16 +1323,14 @@ __releases(&info->lock)
off = vma->vm_pgoff << PAGE_SHIFT;
if (!fb)
return -ENODEV;
+ mutex_lock(&info->mm_lock);
if (fb->fb_mmap) {
int res;
- mutex_lock(&info->lock);
res = fb->fb_mmap(info, vma);
- mutex_unlock(&info->lock);
+ mutex_unlock(&info->mm_lock);
return res;
}
- mutex_lock(&info->lock);
-
/* frame buffer memory */
start = info->fix.smem_start;
len = PAGE_ALIGN((start & ~PAGE_MASK) + info->fix.smem_len);
@@ -1342,13 +1338,13 @@ __releases(&info->lock)
/* memory mapped io */
off -= len;
if (info->var.accel_flags) {
- mutex_unlock(&info->lock);
+ mutex_unlock(&info->mm_lock);
return -EINVAL;
}
start = info->fix.mmio_start;
len = PAGE_ALIGN((start & ~PAGE_MASK) + info->fix.mmio_len);
}
- mutex_unlock(&info->lock);
+ mutex_unlock(&info->mm_lock);
start &= PAGE_MASK;
if ((vma->vm_end - vma->vm_start + off) > len)
return -EINVAL;
@@ -1518,6 +1514,7 @@ register_framebuffer(struct fb_info *fb_info)
break;
fb_info->node = i;
mutex_init(&fb_info->lock);
+ mutex_init(&fb_info->mm_lock);
fb_info->dev = device_create(fb_class, fb_info->device,
MKDEV(FB_MAJOR, i), NULL, "fb%d", i);
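The fbmem.c hunks above add fb_info->mm_lock and make fb_mmap() sample smem_start/smem_len under it instead of the heavyweight info->lock; the driver-side half of the convention, applied one by one in the framebuffer hunks that follow, is to take the same mutex whenever those fields are rewritten. A minimal sketch of that driver-side pattern (the helper name and parameters are illustrative):

#include <linux/fb.h>
#include <linux/mutex.h>

/* Publish a new aperture so a concurrent fb_mmap() never sees a torn update. */
static void example_update_aperture(struct fb_info *info,
				    unsigned long phys, u32 len)
{
	mutex_lock(&info->mm_lock);
	info->fix.smem_start = phys;
	info->fix.smem_len = len;
	mutex_unlock(&info->mm_lock);
}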
diff --git a/drivers/video/fsl-diu-fb.c b/drivers/video/fsl-diu-fb.c
index f153c581cbd..0bf2190928d 100644
--- a/drivers/video/fsl-diu-fb.c
+++ b/drivers/video/fsl-diu-fb.c
@@ -750,24 +750,26 @@ static void update_lcdc(struct fb_info *info)
static int map_video_memory(struct fb_info *info)
{
phys_addr_t phys;
+ u32 smem_len = info->fix.line_length * info->var.yres_virtual;
pr_debug("info->var.xres_virtual = %d\n", info->var.xres_virtual);
pr_debug("info->var.yres_virtual = %d\n", info->var.yres_virtual);
pr_debug("info->fix.line_length = %d\n", info->fix.line_length);
+ pr_debug("MAP_VIDEO_MEMORY: smem_len = %u\n", smem_len);
- info->fix.smem_len = info->fix.line_length * info->var.yres_virtual;
- pr_debug("MAP_VIDEO_MEMORY: smem_len = %d\n", info->fix.smem_len);
- info->screen_base = fsl_diu_alloc(info->fix.smem_len, &phys);
+ info->screen_base = fsl_diu_alloc(smem_len, &phys);
if (info->screen_base == NULL) {
printk(KERN_ERR "Unable to allocate fb memory\n");
return -ENOMEM;
}
+ mutex_lock(&info->mm_lock);
info->fix.smem_start = (unsigned long) phys;
+ info->fix.smem_len = smem_len;
+ mutex_unlock(&info->mm_lock);
info->screen_size = info->fix.smem_len;
pr_debug("Allocated fb @ paddr=0x%08lx, size=%d.\n",
- info->fix.smem_start,
- info->fix.smem_len);
+ info->fix.smem_start, info->fix.smem_len);
pr_debug("screen base %p\n", info->screen_base);
return 0;
@@ -776,9 +778,11 @@ static int map_video_memory(struct fb_info *info)
static void unmap_video_memory(struct fb_info *info)
{
fsl_diu_free(info->screen_base, info->fix.smem_len);
+ mutex_lock(&info->mm_lock);
info->screen_base = NULL;
info->fix.smem_start = 0;
info->fix.smem_len = 0;
+ mutex_unlock(&info->mm_lock);
}
/*
diff --git a/drivers/video/i810/i810_main.c b/drivers/video/i810/i810_main.c
index 2e940199fc8..71960672d72 100644
--- a/drivers/video/i810/i810_main.c
+++ b/drivers/video/i810/i810_main.c
@@ -1090,8 +1090,10 @@ static int encode_fix(struct fb_fix_screeninfo *fix, struct fb_info *info)
memset(fix, 0, sizeof(struct fb_fix_screeninfo));
strcpy(fix->id, "I810");
+ mutex_lock(&info->mm_lock);
fix->smem_start = par->fb.physical;
fix->smem_len = par->fb.size;
+ mutex_unlock(&info->mm_lock);
fix->type = FB_TYPE_PACKED_PIXELS;
fix->type_aux = 0;
fix->xpanstep = 8;
diff --git a/drivers/video/matrox/matroxfb_base.c b/drivers/video/matrox/matroxfb_base.c
index 8e7a275df50..59c3a2e1491 100644
--- a/drivers/video/matrox/matroxfb_base.c
+++ b/drivers/video/matrox/matroxfb_base.c
@@ -724,8 +724,10 @@ static void matroxfb_update_fix(WPMINFO2)
struct fb_fix_screeninfo *fix = &ACCESS_FBINFO(fbcon).fix;
DBG(__func__)
+ mutex_lock(&ACCESS_FBINFO(fbcon).mm_lock);
fix->smem_start = ACCESS_FBINFO(video.base) + ACCESS_FBINFO(curr.ydstorg.bytes);
fix->smem_len = ACCESS_FBINFO(video.len_usable) - ACCESS_FBINFO(curr.ydstorg.bytes);
+ mutex_unlock(&ACCESS_FBINFO(fbcon).mm_lock);
}
static int matroxfb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
@@ -2081,6 +2083,7 @@ static int matroxfb_probe(struct pci_dev* pdev, const struct pci_device_id* dumm
spin_lock_init(&ACCESS_FBINFO(lock.accel));
init_rwsem(&ACCESS_FBINFO(crtc2.lock));
init_rwsem(&ACCESS_FBINFO(altout.lock));
+ mutex_init(&ACCESS_FBINFO(fbcon).mm_lock);
ACCESS_FBINFO(irq_flags) = 0;
init_waitqueue_head(&ACCESS_FBINFO(crtc1.vsync.wait));
init_waitqueue_head(&ACCESS_FBINFO(crtc2.vsync.wait));
diff --git a/drivers/video/matrox/matroxfb_crtc2.c b/drivers/video/matrox/matroxfb_crtc2.c
index 7ac4c5f6145..909e10a1189 100644
--- a/drivers/video/matrox/matroxfb_crtc2.c
+++ b/drivers/video/matrox/matroxfb_crtc2.c
@@ -289,13 +289,16 @@ static int matroxfb_dh_release(struct fb_info* info, int user) {
#undef m2info
}
-static void matroxfb_dh_init_fix(struct matroxfb_dh_fb_info *m2info) {
+static void matroxfb_dh_init_fix(struct matroxfb_dh_fb_info *m2info)
+{
struct fb_fix_screeninfo *fix = &m2info->fbcon.fix;
strcpy(fix->id, "MATROX DH");
+ mutex_lock(&m2info->fbcon.mm_lock);
fix->smem_start = m2info->video.base;
fix->smem_len = m2info->video.len_usable;
+ mutex_unlock(&m2info->fbcon.mm_lock);
fix->ypanstep = 1;
fix->ywrapstep = 0;
fix->xpanstep = 8; /* TBD */
diff --git a/drivers/video/mx3fb.c b/drivers/video/mx3fb.c
index b7af5256e88..567fb944bd2 100644
--- a/drivers/video/mx3fb.c
+++ b/drivers/video/mx3fb.c
@@ -669,7 +669,7 @@ static uint32_t bpp_to_pixfmt(int bpp)
}
static int mx3fb_blank(int blank, struct fb_info *fbi);
-static int mx3fb_map_video_memory(struct fb_info *fbi);
+static int mx3fb_map_video_memory(struct fb_info *fbi, unsigned int mem_len);
static int mx3fb_unmap_video_memory(struct fb_info *fbi);
/**
@@ -742,8 +742,7 @@ static int mx3fb_set_par(struct fb_info *fbi)
if (fbi->fix.smem_start)
mx3fb_unmap_video_memory(fbi);
- fbi->fix.smem_len = mem_len;
- if (mx3fb_map_video_memory(fbi) < 0) {
+ if (mx3fb_map_video_memory(fbi, mem_len) < 0) {
mutex_unlock(&mx3_fbi->mutex);
return -ENOMEM;
}
@@ -1198,6 +1197,7 @@ static int mx3fb_resume(struct platform_device *pdev)
/**
* mx3fb_map_video_memory() - allocates the DRAM memory for the frame buffer.
* @fbi: framebuffer information pointer
+ * @mem_len: length of mapped memory
* @return: Error code indicating success or failure
*
* This buffer is remapped into a non-cached, non-buffered, memory region to
@@ -1205,23 +1205,26 @@ static int mx3fb_resume(struct platform_device *pdev)
* area is remapped, all virtual memory access to the video memory should occur
* at the new region.
*/
-static int mx3fb_map_video_memory(struct fb_info *fbi)
+static int mx3fb_map_video_memory(struct fb_info *fbi, unsigned int mem_len)
{
int retval = 0;
dma_addr_t addr;
fbi->screen_base = dma_alloc_writecombine(fbi->device,
- fbi->fix.smem_len,
+ mem_len,
&addr, GFP_DMA);
if (!fbi->screen_base) {
dev_err(fbi->device, "Cannot allocate %u bytes framebuffer memory\n",
- fbi->fix.smem_len);
+ mem_len);
retval = -EBUSY;
goto err0;
}
+ mutex_lock(&fbi->mm_lock);
fbi->fix.smem_start = addr;
+ fbi->fix.smem_len = mem_len;
+ mutex_unlock(&fbi->mm_lock);
dev_dbg(fbi->device, "allocated fb @ p=0x%08x, v=0x%p, size=%d.\n",
(uint32_t) fbi->fix.smem_start, fbi->screen_base, fbi->fix.smem_len);
@@ -1251,8 +1254,10 @@ static int mx3fb_unmap_video_memory(struct fb_info *fbi)
fbi->screen_base, fbi->fix.smem_start);
fbi->screen_base = 0;
+ mutex_lock(&fbi->mm_lock);
fbi->fix.smem_start = 0;
fbi->fix.smem_len = 0;
+ mutex_unlock(&fbi->mm_lock);
return 0;
}
diff --git a/drivers/video/omap/omapfb_main.c b/drivers/video/omap/omapfb_main.c
index 060d72fe57c..4ea99bfc37b 100644
--- a/drivers/video/omap/omapfb_main.c
+++ b/drivers/video/omap/omapfb_main.c
@@ -393,8 +393,10 @@ static void set_fb_fix(struct fb_info *fbi)
rg = &plane->fbdev->mem_desc.region[plane->idx];
fbi->screen_base = rg->vaddr;
+ mutex_lock(&fbi->mm_lock);
fix->smem_start = rg->paddr;
fix->smem_len = rg->size;
+ mutex_unlock(&fbi->mm_lock);
fix->type = FB_TYPE_PACKED_PIXELS;
bpp = var->bits_per_pixel;
@@ -886,8 +888,10 @@ static int omapfb_setup_mem(struct fb_info *fbi, struct omapfb_mem_info *mi)
* plane memory is deallocated, the other
* screen parameters in var / fix are invalid.
*/
+ mutex_lock(&fbi->mm_lock);
fbi->fix.smem_start = 0;
fbi->fix.smem_len = 0;
+ mutex_unlock(&fbi->mm_lock);
}
}
}
diff --git a/drivers/video/platinumfb.c b/drivers/video/platinumfb.c
index 03b3670130a..bacfabd9ce1 100644
--- a/drivers/video/platinumfb.c
+++ b/drivers/video/platinumfb.c
@@ -141,7 +141,9 @@ static int platinumfb_set_par (struct fb_info *info)
offset = 0x10;
info->screen_base = pinfo->frame_buffer + init->fb_offset + offset;
+ mutex_lock(&info->mm_lock);
info->fix.smem_start = (pinfo->frame_buffer_phys) + init->fb_offset + offset;
+ mutex_unlock(&info->mm_lock);
info->fix.visual = (pinfo->cmode == CMODE_8) ?
FB_VISUAL_PSEUDOCOLOR : FB_VISUAL_DIRECTCOLOR;
info->fix.line_length = vmode_attrs[pinfo->vmode-1].hres * (1<<pinfo->cmode)
diff --git a/drivers/video/pxafb.c b/drivers/video/pxafb.c
index 0889d50c328..6506117c134 100644
--- a/drivers/video/pxafb.c
+++ b/drivers/video/pxafb.c
@@ -815,8 +815,10 @@ static int overlayfb_map_video_memory(struct pxafb_layer *ofb)
ofb->video_mem_phys = virt_to_phys(ofb->video_mem);
ofb->video_mem_size = size;
+ mutex_lock(&ofb->fb.mm_lock);
ofb->fb.fix.smem_start = ofb->video_mem_phys;
ofb->fb.fix.smem_len = ofb->fb.fix.line_length * var->yres_virtual;
+ mutex_unlock(&ofb->fb.mm_lock);
ofb->fb.screen_base = ofb->video_mem;
return 0;
}
diff --git a/drivers/video/sh7760fb.c b/drivers/video/sh7760fb.c
index 653bdfee305..9f6d6e61f0c 100644
--- a/drivers/video/sh7760fb.c
+++ b/drivers/video/sh7760fb.c
@@ -120,18 +120,6 @@ static int sh7760_setcolreg (u_int regno,
return 0;
}
-static void encode_fix(struct fb_fix_screeninfo *fix, struct fb_info *info,
- unsigned long stride)
-{
- memset(fix, 0, sizeof(struct fb_fix_screeninfo));
- strcpy(fix->id, "sh7760-lcdc");
-
- fix->smem_start = (unsigned long)info->screen_base;
- fix->smem_len = info->screen_size;
-
- fix->line_length = stride;
-}
-
static int sh7760fb_get_color_info(struct device *dev,
u16 lddfr, int *bpp, int *gray)
{
@@ -334,7 +322,8 @@ static int sh7760fb_set_par(struct fb_info *info)
iowrite32(ldsarl, par->base + LDSARL); /* mem for lower half of DSTN */
- encode_fix(&info->fix, info, stride);
+ info->fix.line_length = stride;
+
sh7760fb_check_var(&info->var, info);
sh7760fb_blank(FB_BLANK_UNBLANK, info); /* panel on! */
@@ -435,6 +424,8 @@ static int sh7760fb_alloc_mem(struct fb_info *info)
info->screen_base = fbmem;
info->screen_size = vram;
+ info->fix.smem_start = (unsigned long)info->screen_base;
+ info->fix.smem_len = info->screen_size;
return 0;
}
@@ -520,6 +511,8 @@ static int __devinit sh7760fb_probe(struct platform_device *pdev)
info->var.transp.length = 0;
info->var.transp.msb_right = 0;
+ strcpy(info->fix.id, "sh7760-lcdc");
+
/* set the DON2 bit now, before cmap allocation, as it will randomize
* palette memory.
*/
diff --git a/drivers/video/sis/sis_main.c b/drivers/video/sis/sis_main.c
index 7072d19080d..fd33455389b 100644
--- a/drivers/video/sis/sis_main.c
+++ b/drivers/video/sis/sis_main.c
@@ -1847,8 +1847,10 @@ sisfb_get_fix(struct fb_fix_screeninfo *fix, int con, struct fb_info *info)
strcpy(fix->id, ivideo->myid);
+ mutex_lock(&info->mm_lock);
fix->smem_start = ivideo->video_base + ivideo->video_offset;
fix->smem_len = ivideo->sisfb_mem;
+ mutex_unlock(&info->mm_lock);
fix->type = FB_TYPE_PACKED_PIXELS;
fix->type_aux = 0;
fix->visual = (ivideo->video_bpp == 8) ? FB_VISUAL_PSEUDOCOLOR : FB_VISUAL_TRUECOLOR;
diff --git a/drivers/video/sm501fb.c b/drivers/video/sm501fb.c
index eb5d73a0670..98f24f0ec00 100644
--- a/drivers/video/sm501fb.c
+++ b/drivers/video/sm501fb.c
@@ -145,7 +145,7 @@ static inline void sm501fb_sync_regs(struct sm501fb_info *info)
#define SM501_MEMF_ACCEL (8)
static int sm501_alloc_mem(struct sm501fb_info *inf, struct sm501_mem *mem,
- unsigned int why, size_t size)
+ unsigned int why, size_t size, u32 smem_len)
{
struct sm501fb_par *par;
struct fb_info *fbi;
@@ -172,7 +172,7 @@ static int sm501_alloc_mem(struct sm501fb_info *inf, struct sm501_mem *mem,
if (ptr > 0)
ptr &= ~(PAGE_SIZE - 1);
- if (fbi && ptr < fbi->fix.smem_len)
+ if (fbi && ptr < smem_len)
return -ENOMEM;
break;
@@ -197,7 +197,7 @@ static int sm501_alloc_mem(struct sm501fb_info *inf, struct sm501_mem *mem,
case SM501_MEMF_ACCEL:
fbi = inf->fb[HEAD_CRT];
- ptr = fbi ? fbi->fix.smem_len : 0;
+ ptr = fbi ? smem_len : 0;
fbi = inf->fb[HEAD_PANEL];
if (fbi) {
@@ -413,6 +413,7 @@ static int sm501fb_set_par_common(struct fb_info *info,
unsigned int mem_type;
unsigned int clock_type;
unsigned int head_addr;
+ unsigned int smem_len;
dev_dbg(fbi->dev, "%s: %dx%d, bpp = %d, virtual %dx%d\n",
__func__, var->xres, var->yres, var->bits_per_pixel,
@@ -453,18 +454,20 @@ static int sm501fb_set_par_common(struct fb_info *info,
/* allocate fb memory within 501 */
info->fix.line_length = (var->xres_virtual * var->bits_per_pixel)/8;
- info->fix.smem_len = info->fix.line_length * var->yres_virtual;
+ smem_len = info->fix.line_length * var->yres_virtual;
dev_dbg(fbi->dev, "%s: line length = %u\n", __func__,
info->fix.line_length);
- if (sm501_alloc_mem(fbi, &par->screen, mem_type,
- info->fix.smem_len)) {
+ if (sm501_alloc_mem(fbi, &par->screen, mem_type, smem_len, smem_len)) {
dev_err(fbi->dev, "no memory available\n");
return -ENOMEM;
}
+ mutex_lock(&info->mm_lock);
info->fix.smem_start = fbi->fbmem_res->start + par->screen.sm_addr;
+ info->fix.smem_len = smem_len;
+ mutex_unlock(&info->mm_lock);
info->screen_base = fbi->fbmem + par->screen.sm_addr;
info->screen_size = info->fix.smem_len;
@@ -637,7 +640,8 @@ static int sm501fb_set_par_crt(struct fb_info *info)
if ((control & SM501_DC_CRT_CONTROL_SEL) == 0) {
/* the head is displaying panel data... */
- sm501_alloc_mem(fbi, &par->screen, SM501_MEMF_CRT, 0);
+ sm501_alloc_mem(fbi, &par->screen, SM501_MEMF_CRT, 0,
+ info->fix.smem_len);
goto out_update;
}
@@ -1289,7 +1293,8 @@ static int sm501_init_cursor(struct fb_info *fbi, unsigned int reg_base)
par->cursor_regs = info->regs + reg_base;
- ret = sm501_alloc_mem(info, &par->cursor, SM501_MEMF_CURSOR, 1024);
+ ret = sm501_alloc_mem(info, &par->cursor, SM501_MEMF_CURSOR, 1024,
+ fbi->fix.smem_len);
if (ret < 0)
return ret;
diff --git a/drivers/video/w100fb.c b/drivers/video/w100fb.c
index d0674f1e3f1..8a141c2c637 100644
--- a/drivers/video/w100fb.c
+++ b/drivers/video/w100fb.c
@@ -523,6 +523,7 @@ static int w100fb_set_par(struct fb_info *info)
info->fix.ywrapstep = 0;
info->fix.line_length = par->xres * BITS_PER_PIXEL / 8;
+ mutex_lock(&info->mm_lock);
if ((par->xres*par->yres*BITS_PER_PIXEL/8) > (MEM_INT_SIZE+1)) {
par->extmem_active = 1;
info->fix.smem_len = par->mach->mem->size+1;
@@ -530,6 +531,7 @@ static int w100fb_set_par(struct fb_info *info)
par->extmem_active = 0;
info->fix.smem_len = MEM_INT_SIZE+1;
}
+ mutex_unlock(&info->mm_lock);
w100fb_activate_var(par);
}
diff --git a/drivers/w1/slaves/w1_ds2760.c b/drivers/w1/slaves/w1_ds2760.c
index 1f09d4e4144..59f708efe25 100644
--- a/drivers/w1/slaves/w1_ds2760.c
+++ b/drivers/w1/slaves/w1_ds2760.c
@@ -68,6 +68,34 @@ int w1_ds2760_write(struct device *dev, char *buf, int addr, size_t count)
return w1_ds2760_io(dev, buf, addr, count, 1);
}
+static int w1_ds2760_eeprom_cmd(struct device *dev, int addr, int cmd)
+{
+ struct w1_slave *sl = container_of(dev, struct w1_slave, dev);
+
+ if (!dev)
+ return -EINVAL;
+
+ mutex_lock(&sl->master->mutex);
+
+ if (w1_reset_select_slave(sl) == 0) {
+ w1_write_8(sl->master, cmd);
+ w1_write_8(sl->master, addr);
+ }
+
+ mutex_unlock(&sl->master->mutex);
+ return 0;
+}
+
+int w1_ds2760_store_eeprom(struct device *dev, int addr)
+{
+ return w1_ds2760_eeprom_cmd(dev, addr, W1_DS2760_COPY_DATA);
+}
+
+int w1_ds2760_recall_eeprom(struct device *dev, int addr)
+{
+ return w1_ds2760_eeprom_cmd(dev, addr, W1_DS2760_RECALL_DATA);
+}
+
static ssize_t w1_ds2760_read_bin(struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
@@ -200,6 +228,8 @@ static void __exit w1_ds2760_exit(void)
EXPORT_SYMBOL(w1_ds2760_read);
EXPORT_SYMBOL(w1_ds2760_write);
+EXPORT_SYMBOL(w1_ds2760_store_eeprom);
+EXPORT_SYMBOL(w1_ds2760_recall_eeprom);
module_init(w1_ds2760_init);
module_exit(w1_ds2760_exit);
diff --git a/drivers/w1/slaves/w1_ds2760.h b/drivers/w1/slaves/w1_ds2760.h
index f1302429cb0..58e77414156 100644
--- a/drivers/w1/slaves/w1_ds2760.h
+++ b/drivers/w1/slaves/w1_ds2760.h
@@ -25,6 +25,10 @@
#define DS2760_PROTECTION_REG 0x00
#define DS2760_STATUS_REG 0x01
+ #define DS2760_STATUS_IE (1 << 2)
+ #define DS2760_STATUS_SWEN (1 << 3)
+ #define DS2760_STATUS_RNAOP (1 << 4)
+ #define DS2760_STATUS_PMOD (1 << 5)
#define DS2760_EEPROM_REG 0x07
#define DS2760_SPECIAL_FEATURE_REG 0x08
#define DS2760_VOLTAGE_MSB 0x0c
@@ -38,6 +42,7 @@
#define DS2760_EEPROM_BLOCK0 0x20
#define DS2760_ACTIVE_FULL 0x20
#define DS2760_EEPROM_BLOCK1 0x30
+#define DS2760_STATUS_WRITE_REG 0x31
#define DS2760_RATED_CAPACITY 0x32
#define DS2760_CURRENT_OFFSET_BIAS 0x33
#define DS2760_ACTIVE_EMPTY 0x3b
@@ -46,5 +51,7 @@ extern int w1_ds2760_read(struct device *dev, char *buf, int addr,
size_t count);
extern int w1_ds2760_write(struct device *dev, char *buf, int addr,
size_t count);
+extern int w1_ds2760_store_eeprom(struct device *dev, int addr);
+extern int w1_ds2760_recall_eeprom(struct device *dev, int addr);
#endif /* !__w1_ds2760_h__ */
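The w1_ds2760 additions above export two helpers that issue the DS2760 copy-data and recall-data commands for a given block address. A hedged usage sketch, assuming `dev` is the w1 slave's device; the function name below is illustrative:

#include <linux/device.h>
#include "w1_ds2760.h"

/* Commit the shadow RAM backing EEPROM block 1, then read it back. */
static int example_commit_block1(struct device *dev)
{
	int ret = w1_ds2760_store_eeprom(dev, DS2760_EEPROM_BLOCK1);

	if (ret)
		return ret;
	return w1_ds2760_recall_eeprom(dev, DS2760_EEPROM_BLOCK1);
}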
diff --git a/drivers/watchdog/wdrtas.c b/drivers/watchdog/wdrtas.c
index a4fe7a38d9b..3bde56bce63 100644
--- a/drivers/watchdog/wdrtas.c
+++ b/drivers/watchdog/wdrtas.c
@@ -218,16 +218,14 @@ static void wdrtas_timer_keepalive(void)
*/
static int wdrtas_get_temperature(void)
{
- long result;
+ int result;
int temperature = 0;
- result = rtas_call(wdrtas_token_get_sensor_state, 2, 2,
- (void *)__pa(&temperature),
- WDRTAS_THERMAL_SENSOR, 0);
+ result = rtas_get_sensor(WDRTAS_THERMAL_SENSOR, 0, &temperature);
if (result < 0)
printk(KERN_WARNING "wdrtas: reading the thermal sensor "
- "faild: %li\n", result);
+ "failed: %i\n", result);
else
temperature = ((temperature * 9) / 5) + 32; /* fahrenheit */