540 files changed, 14961 insertions, 11835 deletions
diff --git a/Documentation/feature-removal-schedule.txt b/Documentation/feature-removal-schedule.txt index afeaf6218ea..c7a4d0faab2 100644 --- a/Documentation/feature-removal-schedule.txt +++ b/Documentation/feature-removal-schedule.txt @@ -158,13 +158,6 @@ Who: Adrian Bunk <bunk@stusta.de> --------------------------- -What: Legacy /proc/pci interface (PCI_LEGACY_PROC) -When: March 2006 -Why: deprecated since 2.5.53 in favor of lspci(8) -Who: Adrian Bunk <bunk@stusta.de> - ---------------------------- - What: pci_module_init(driver) When: January 2007 Why: Is replaced by pci_register_driver(pci_driver). diff --git a/Documentation/hwmon/w83627hf b/Documentation/hwmon/w83627hf index bbeaba68044..79223192124 100644 --- a/Documentation/hwmon/w83627hf +++ b/Documentation/hwmon/w83627hf @@ -18,6 +18,10 @@ Supported chips: Prefix: 'w83637hf' Addresses scanned: ISA address retrieved from Super I/O registers Datasheet: http://www.winbond.com/PDF/sheet/w83637hf.pdf + * Winbond W83687THF + Prefix: 'w83687thf' + Addresses scanned: ISA address retrieved from Super I/O registers + Datasheet: Provided by Winbond on request Authors: Frodo Looijaard <frodol@dds.nl>, diff --git a/Documentation/hwmon/w83781d b/Documentation/hwmon/w83781d index e5459333ba6..b1e9f80098e 100644 --- a/Documentation/hwmon/w83781d +++ b/Documentation/hwmon/w83781d @@ -36,6 +36,11 @@ Module parameters Use 'init=0' to bypass initializing the chip. Try this if your computer crashes when you load the module. +* reset int + (default 0) + The driver used to reset the chip on load, but does no more. Use + 'reset=1' to restore the old behavior. Report if you need to do this. + force_subclients=bus,caddr,saddr,saddr This is used to force the i2c addresses for subclients of a certain chip. Typical usage is `force_subclients=0,0x2d,0x4a,0x4b' @@ -123,6 +128,25 @@ When an alarm goes off, you can be warned by a beeping signal through your computer speaker. It is possible to enable all beeping globally, or only the beeping for some alarms. +Individual alarm and beep bits: + +0x000001: in0 +0x000002: in1 +0x000004: in2 +0x000008: in3 +0x000010: temp1 +0x000020: temp2 (+temp3 on W83781D) +0x000040: fan1 +0x000080: fan2 +0x000100: in4 +0x000200: in5 +0x000400: in6 +0x000800: fan3 +0x001000: chassis +0x002000: temp3 (W83782D and W83627HF only) +0x010000: in7 (W83782D and W83627HF only) +0x020000: in8 (W83782D and W83627HF only) + If an alarm triggers, it will remain triggered until the hardware register is read at least once. This means that the cause for the alarm may already have disappeared! 
Note that in the current implementation, all diff --git a/Documentation/i2c/busses/i2c-piix4 b/Documentation/i2c/busses/i2c-piix4 index 856b4b8b962..a1c8f581afe 100644 --- a/Documentation/i2c/busses/i2c-piix4 +++ b/Documentation/i2c/busses/i2c-piix4 @@ -4,7 +4,7 @@ Supported adapters: * Intel 82371AB PIIX4 and PIIX4E * Intel 82443MX (440MX) Datasheet: Publicly available at the Intel website - * ServerWorks OSB4, CSB5 and CSB6 southbridges + * ServerWorks OSB4, CSB5, CSB6 and HT-1000 southbridges Datasheet: Only available via NDA from ServerWorks * Standard Microsystems (SMSC) SLC90E66 (Victory66) southbridge Datasheet: Publicly available at the SMSC website http://www.smsc.com diff --git a/Documentation/i2c/busses/scx200_acb b/Documentation/i2c/busses/scx200_acb index 08c8cd1df60..f50e69981ec 100644 --- a/Documentation/i2c/busses/scx200_acb +++ b/Documentation/i2c/busses/scx200_acb @@ -6,9 +6,10 @@ Module Parameters ----------------- * base: int - Base addresses for the ACCESS.bus controllers + Base addresses for the ACCESS.bus controllers on SCx200 and SC1100 devices Description ----------- -Enable the use of the ACCESS.bus controllers of a SCx200 processor. +Enable the use of the ACCESS.bus controller on the Geode SCx200 and +SC1100 processors and the CS5535 and CS5536 Geode companion devices. diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt index fc99075e0af..44a25f3f51d 100644 --- a/Documentation/kernel-parameters.txt +++ b/Documentation/kernel-parameters.txt @@ -49,6 +49,7 @@ restrictions referred to are that the relevant option is valid if: MCA MCA bus support is enabled. MDA MDA console support is enabled. MOUSE Appropriate mouse support is enabled. + MSI Message Signaled Interrupts (PCI). MTD MTD support is enabled. NET Appropriate network support is enabled. NUMA NUMA support is enabled. @@ -1008,7 +1009,9 @@ running once the system is up. noexec=on: enable non-executable mappings (default) noexec=off: disable nn-executable mappings - nofxsr [BUGS=IA-32] + nofxsr [BUGS=IA-32] Disables x86 floating point extended + register save and restore. The kernel will only save + legacy floating-point registers on task switch. nohlt [BUGS=ARM] @@ -1053,6 +1056,8 @@ running once the system is up. nosbagart [IA-64] + nosep [BUGS=IA-32] Disables x86 SYSENTER/SYSEXIT support. + nosmp [SMP] Tells an SMP kernel to act as a UP kernel. nosync [HW,M68K] Disables sync negotiation for all devices. @@ -1122,6 +1127,11 @@ running once the system is up. pas16= [HW,SCSI] See header of drivers/scsi/pas16.c. + pause_on_oops= + Halt all CPUs after the first oops has been printed for + the specified number of seconds. This is to be used if + your oopses keep scrolling off the screen. + pcbit= [HW,ISDN] pcd. [PARIDE] @@ -1143,6 +1153,9 @@ running once the system is up. Mechanism 2. nommconf [IA-32,X86_64] Disable use of MMCONFIG for PCI Configuration + nomsi [MSI] If the PCI_MSI kernel config parameter is + enabled, this kernel boot option can be used to + disable the use of MSI interrupts system-wide. nosort [IA-32] Don't sort PCI devices according to order given by the PCI BIOS. This sorting is done to get a device order compatible with diff --git a/Documentation/networking/pktgen.txt b/Documentation/networking/pktgen.txt index cc4b4d04129..278771c9ad9 100644 --- a/Documentation/networking/pktgen.txt +++ b/Documentation/networking/pktgen.txt @@ -109,6 +109,22 @@ Examples: cycle through the port range. pgset "udp_dst_max 9" set UDP destination port max. 
+ pgset "mpls 0001000a,0002000a,0000000a" set MPLS labels (in this example + outer label=16,middle label=32, + inner label=0 (IPv4 NULL)) Note that + there must be no spaces between the + arguments. Leading zeros are required. + Do not set the bottom of stack bit, + thats done automatically. If you do + set the bottom of stack bit, that + indicates that you want to randomly + generate that address and the flag + MPLS_RND will be turned on. You + can have any mix of random and fixed + labels in the label stack. + + pgset "mpls 0" turn off mpls (or any invalid argument works too!) + pgset stop aborts injection. Also, ^C aborts generator. @@ -167,6 +183,8 @@ pkt_size min_pkt_size max_pkt_size +mpls + udp_src_min udp_src_max @@ -211,4 +229,4 @@ Grant Grundler for testing on IA-64 and parisc, Harald Welte, Lennert Buytenhek Stephen Hemminger, Andi Kleen, Dave Miller and many others. -Good luck with the linux net-development.
\ No newline at end of file +Good luck with the linux net-development. diff --git a/Documentation/power/swsusp.txt b/Documentation/power/swsusp.txt index b28b7f04abb..d7814a113ee 100644 --- a/Documentation/power/swsusp.txt +++ b/Documentation/power/swsusp.txt @@ -17,6 +17,11 @@ Some warnings, first. * but it will probably only crash. * * (*) suspend/resume support is needed to make it safe. + * + * If you have any filesystems on USB devices mounted before suspend, + * they won't be accessible after resume and you may lose data, as though + * you have unplugged the USB devices with mounted filesystems on them + * (see the FAQ below for details). You need to append resume=/dev/your_swap_partition to kernel command line. Then you suspend by @@ -27,19 +32,18 @@ echo shutdown > /sys/power/disk; echo disk > /sys/power/state echo platform > /sys/power/disk; echo disk > /sys/power/state +. If you have SATA disks, you'll need recent kernels with SATA suspend +support. For suspend and resume to work, make sure your disk drivers +are built into kernel -- not modules. [There's way to make +suspend/resume with modular disk drivers, see FAQ, but you probably +should not do that.] + If you want to limit the suspend image size to N bytes, do echo N > /sys/power/image_size before suspend (it is limited to 500 MB by default). -Encrypted suspend image: ------------------------- -If you want to store your suspend image encrypted with a temporary -key to prevent data gathering after resume you must compile -crypto and the aes algorithm into the kernel - modules won't work -as they cannot be loaded at resume time. - Article about goals and implementation of Software Suspend for Linux ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -333,4 +337,37 @@ init=/bin/bash, then swapon and starting suspend sequence manually usually does the trick. Then it is good idea to try with latest vanilla kernel. +Q: How can distributions ship a swsusp-supporting kernel with modular +disk drivers (especially SATA)? + +A: Well, it can be done, load the drivers, then do echo into +/sys/power/disk/resume file from initrd. Be sure not to mount +anything, not even read-only mount, or you are going to lose your +data. + +Q: How do I make suspend more verbose? + +A: If you want to see any non-error kernel messages on the virtual +terminal the kernel switches to during suspend, you have to set the +kernel console loglevel to at least 5, for example by doing + + echo 5 > /proc/sys/kernel/printk + +Q: Is this true that if I have a mounted filesystem on a USB device and +I suspend to disk, I can lose data unless the filesystem has been mounted +with "sync"? + +A: That's right. It depends on your hardware, and it could be true even for +suspend-to-RAM. In fact, even with "-o sync" you can lose data if your +programs have information in buffers they haven't written out to disk. + +If you're lucky, your hardware will support low-power modes for USB +controllers while the system is asleep. Lots of hardware doesn't, +however. Shutting off the power to a USB controller is equivalent to +unplugging all the attached devices. + +Remember that it's always a bad idea to unplug a disk drive containing a +mounted filesystem. With USB that's true even when your system is asleep! +The safest thing is to unmount all USB-based filesystems before suspending +and remount them after resuming. 
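The suspend sequence the updated swsusp.txt describes is normally driven by shell echoes; the same steps can also be scripted from C. The following is a minimal sketch, assuming only the /sys/power/disk, /sys/power/image_size and /sys/power/state files named in the text above; the 100 MB image cap is an arbitrary example value, not a recommendation.

/*
 * Minimal C sketch of the echo-based suspend-to-disk sequence from
 * swsusp.txt.  Paths are the ones named in the document; the image
 * size limit written here is just an example.
 */
#include <stdio.h>

static int write_str(const char *path, const char *val)
{
	FILE *f = fopen(path, "w");

	if (!f)
		return -1;
	fprintf(f, "%s\n", val);
	return fclose(f);
}

int main(void)
{
	/* pick the shutdown method ("platform" is the other choice above) */
	if (write_str("/sys/power/disk", "shutdown"))
		perror("/sys/power/disk");

	/* optionally cap the image size in bytes (default limit: 500 MB) */
	if (write_str("/sys/power/image_size", "104857600"))
		perror("/sys/power/image_size");

	/* trigger suspend-to-disk; this write returns after resume */
	if (write_str("/sys/power/state", "disk"))
		perror("/sys/power/state");

	return 0;
}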
diff --git a/Documentation/power/userland-swsusp.txt b/Documentation/power/userland-swsusp.txt new file mode 100644 index 00000000000..94058220aaf --- /dev/null +++ b/Documentation/power/userland-swsusp.txt @@ -0,0 +1,149 @@ +Documentation for userland software suspend interface + (C) 2006 Rafael J. Wysocki <rjw@sisk.pl> + +First, the warnings at the beginning of swsusp.txt still apply. + +Second, you should read the FAQ in swsusp.txt _now_ if you have not +done it already. + +Now, to use the userland interface for software suspend you need special +utilities that will read/write the system memory snapshot from/to the +kernel. Such utilities are available, for example, from +<http://www.sisk.pl/kernel/utilities/suspend>. You may want to have +a look at them if you are going to develop your own suspend/resume +utilities. + +The interface consists of a character device providing the open(), +release(), read(), and write() operations as well as several ioctl() +commands defined in kernel/power/power.h. The major and minor +numbers of the device are, respectively, 10 and 231, and they can +be read from /sys/class/misc/snapshot/dev. + +The device can be open either for reading or for writing. If open for +reading, it is considered to be in the suspend mode. Otherwise it is +assumed to be in the resume mode. The device cannot be open for reading +and writing. It is also impossible to have the device open more than once +at a time. + +The ioctl() commands recognized by the device are: + +SNAPSHOT_FREEZE - freeze user space processes (the current process is + not frozen); this is required for SNAPSHOT_ATOMIC_SNAPSHOT + and SNAPSHOT_ATOMIC_RESTORE to succeed + +SNAPSHOT_UNFREEZE - thaw user space processes frozen by SNAPSHOT_FREEZE + +SNAPSHOT_ATOMIC_SNAPSHOT - create a snapshot of the system memory; the + last argument of ioctl() should be a pointer to an int variable, + the value of which will indicate whether the call returned after + creating the snapshot (1) or after restoring the system memory state + from it (0) (after resume the system finds itself finishing the + SNAPSHOT_ATOMIC_SNAPSHOT ioctl() again); after the snapshot + has been created the read() operation can be used to transfer + it out of the kernel + +SNAPSHOT_ATOMIC_RESTORE - restore the system memory state from the + uploaded snapshot image; before calling it you should transfer + the system memory snapshot back to the kernel using the write() + operation; this call will not succeed if the snapshot + image is not available to the kernel + +SNAPSHOT_FREE - free memory allocated for the snapshot image + +SNAPSHOT_SET_IMAGE_SIZE - set the preferred maximum size of the image + (the kernel will do its best to ensure the image size will not exceed + this number, but if it turns out to be impossible, the kernel will + create the smallest image possible) + +SNAPSHOT_AVAIL_SWAP - return the amount of available swap in bytes (the last + argument should be a pointer to an unsigned int variable that will + contain the result if the call is successful). 
+ +SNAPSHOT_GET_SWAP_PAGE - allocate a swap page from the resume partition + (the last argument should be a pointer to a loff_t variable that + will contain the swap page offset if the call is successful) + +SNAPSHOT_FREE_SWAP_PAGES - free all swap pages allocated with + SNAPSHOT_GET_SWAP_PAGE + +SNAPSHOT_SET_SWAP_FILE - set the resume partition (the last ioctl() argument + should specify the device's major and minor numbers in the old + two-byte format, as returned by the stat() function in the .st_rdev + member of the stat structure); it is recommended to always use this + call, because the code to set the resume partition could be removed from + future kernels + +The device's read() operation can be used to transfer the snapshot image from +the kernel. It has the following limitations: +- you cannot read() more than one virtual memory page at a time +- read()s accross page boundaries are impossible (ie. if ypu read() 1/2 of + a page in the previous call, you will only be able to read() + _at_ _most_ 1/2 of the page in the next call) + +The device's write() operation is used for uploading the system memory snapshot +into the kernel. It has the same limitations as the read() operation. + +The release() operation frees all memory allocated for the snapshot image +and all swap pages allocated with SNAPSHOT_GET_SWAP_PAGE (if any). +Thus it is not necessary to use either SNAPSHOT_FREE or +SNAPSHOT_FREE_SWAP_PAGES before closing the device (in fact it will also +unfreeze user space processes frozen by SNAPSHOT_UNFREEZE if they are +still frozen when the device is being closed). + +Currently it is assumed that the userland utilities reading/writing the +snapshot image from/to the kernel will use a swap parition, called the resume +partition, as storage space. However, this is not really required, as they +can use, for example, a special (blank) suspend partition or a file on a partition +that is unmounted before SNAPSHOT_ATOMIC_SNAPSHOT and mounted afterwards. + +These utilities SHOULD NOT make any assumptions regarding the ordering of +data within the snapshot image, except for the image header that MAY be +assumed to start with an swsusp_info structure, as specified in +kernel/power/power.h. This structure MAY be used by the userland utilities +to obtain some information about the snapshot image, such as the size +of the snapshot image, including the metadata and the header itself, +contained in the .size member of swsusp_info. + +The snapshot image MUST be written to the kernel unaltered (ie. all of the image +data, metadata and header MUST be written in _exactly_ the same amount, form +and order in which they have been read). Otherwise, the behavior of the +resumed system may be totally unpredictable. + +While executing SNAPSHOT_ATOMIC_RESTORE the kernel checks if the +structure of the snapshot image is consistent with the information stored +in the image header. If any inconsistencies are detected, +SNAPSHOT_ATOMIC_RESTORE will not succeed. Still, this is not a fool-proof +mechanism and the userland utilities using the interface SHOULD use additional +means, such as checksums, to ensure the integrity of the snapshot image. + +The suspending and resuming utilities MUST lock themselves in memory, +preferrably using mlockall(), before calling SNAPSHOT_FREEZE. + +The suspending utility MUST check the value stored by SNAPSHOT_ATOMIC_SNAPSHOT +in the memory location pointed to by the last argument of ioctl() and proceed +in accordance with it: +1. If the value is 1 (ie. 
the system memory snapshot has just been + created and the system is ready for saving it): + (a) The suspending utility MUST NOT close the snapshot device + _unless_ the whole suspend procedure is to be cancelled, in + which case, if the snapshot image has already been saved, the + suspending utility SHOULD destroy it, preferrably by zapping + its header. If the suspend is not to be cancelled, the + system MUST be powered off or rebooted after the snapshot + image has been saved. + (b) The suspending utility SHOULD NOT attempt to perform any + file system operations (including reads) on the file systems + that were mounted before SNAPSHOT_ATOMIC_SNAPSHOT has been + called. However, it MAY mount a file system that was not + mounted at that time and perform some operations on it (eg. + use it for saving the image). +2. If the value is 0 (ie. the system state has just been restored from + the snapshot image), the suspending utility MUST close the snapshot + device. Afterwards it will be treated as a regular userland process, + so it need not exit. + +The resuming utility SHOULD NOT attempt to mount any file systems that could +be mounted before suspend and SHOULD NOT attempt to perform any operations +involving such file systems. + +For details, please refer to the source code. diff --git a/Documentation/power/video.txt b/Documentation/power/video.txt index 912bed87c75..d18a57d1a53 100644 --- a/Documentation/power/video.txt +++ b/Documentation/power/video.txt @@ -1,7 +1,7 @@ Video issues with S3 resume ~~~~~~~~~~~~~~~~~~~~~~~~~~~ - 2003-2005, Pavel Machek + 2003-2006, Pavel Machek During S3 resume, hardware needs to be reinitialized. For most devices, this is easy, and kernel driver knows how to do @@ -15,6 +15,27 @@ run normally so video card is normally initialized. It should not be problem for S1 standby, because hardware should retain its state over that. +We either have to run video BIOS during early resume, or interpret it +using vbetool later, or maybe nothing is neccessary on particular +system because video state is preserved. Unfortunately different +methods work on different systems, and no known method suits all of +them. + +Userland application called s2ram has been developed; it contains long +whitelist of systems, and automatically selects working method for a +given system. It can be downloaded from CVS at +www.sf.net/projects/suspend . If you get a system that is not in the +whitelist, please try to find a working solution, and submit whitelist +entry so that work does not need to be repeated. + +Currently, VBE_SAVE method (6 below) works on most +systems. Unfortunately, vbetool only runs after userland is resumed, +so it makes debugging of early resume problems +hard/impossible. Methods that do not rely on userland are preferable. + +Details +~~~~~~~ + There are a few types of systems where video works after S3 resume: (1) systems where video state is preserved over S3. @@ -104,6 +125,7 @@ HP NX7000 ??? (*) HP Pavilion ZD7000 vbetool post needed, need open-source nv driver for X HP Omnibook XE3 athlon version none (1) HP Omnibook XE3GC none (1), video is S3 Savage/IX-MV +HP Omnibook 5150 none (1), (S1 also works OK) IBM TP T20, model 2647-44G none (1), video is S3 Inc. 86C270-294 Savage/IX-MV, vesafb gets "interesting" but X work. IBM TP A31 / Type 2652-M5G s3_mode (3) [works ok with BIOS 1.04 2002-08-23, but not at all with BIOS 1.11 2004-11-05 :-(] IBM TP R32 / Type 2658-MMG none (1) @@ -120,18 +142,24 @@ IBM ThinkPad T42p (2373-GTG) s3_bios (2) IBM TP X20 ??? 
(*) IBM TP X30 s3_bios (2) IBM TP X31 / Type 2672-XXH none (1), use radeontool (http://fdd.com/software/radeon/) to turn off backlight. -IBM TP X32 none (1), but backlight is on and video is trashed after long suspend +IBM TP X32 none (1), but backlight is on and video is trashed after long suspend. s3_bios,s3_mode (4) works too. Perhaps that gets better results? IBM Thinkpad X40 Type 2371-7JG s3_bios,s3_mode (4) +IBM TP 600e none(1), but a switch to console and back to X is needed Medion MD4220 ??? (*) Samsung P35 vbetool needed (6) -Sharp PC-AR10 (ATI rage) none (1) +Sharp PC-AR10 (ATI rage) none (1), backlight does not switch off Sony Vaio PCG-C1VRX/K s3_bios (2) Sony Vaio PCG-F403 ??? (*) +Sony Vaio PCG-GRT995MP none (1), works with 'nv' X driver +Sony Vaio PCG-GR7/K none (1), but needs radeonfb, use radeontool (http://fdd.com/software/radeon/) to turn off backlight. Sony Vaio PCG-N505SN ??? (*) Sony Vaio vgn-s260 X or boot-radeon can init it (5) +Sony Vaio vgn-S580BH vga=normal, but suspend from X. Console will be blank unless you return to X. +Sony Vaio vgn-FS115B s3_bios (2),s3_mode (4) Toshiba Libretto L5 none (1) -Toshiba Satellite 4030CDT s3_mode (3) -Toshiba Satellite 4080XCDT s3_mode (3) +Toshiba Portege 3020CT s3_mode (3) +Toshiba Satellite 4030CDT s3_mode (3) (S1 also works OK) +Toshiba Satellite 4080XCDT s3_mode (3) (S1 also works OK) Toshiba Satellite 4090XCDT ??? (*) Toshiba Satellite P10-554 s3_bios,s3_mode (4)(****) Toshiba M30 (2) xor X with nvidia driver using internal AGP @@ -151,39 +179,3 @@ Asus A7V8X nVidia RIVA TNT2 model 64 s3_bios,s3_mode (4) (***) To be tested with a newer kernel. (****) Not with SMP kernel, UP only. - -VBEtool details -~~~~~~~~~~~~~~~ -(with thanks to Carl-Daniel Hailfinger) - -First, boot into X and run the following script ONCE: -#!/bin/bash -statedir=/root/s3/state -mkdir -p $statedir -chvt 2 -sleep 1 -vbetool vbestate save >$statedir/vbe - - -To suspend and resume properly, call the following script as root: -#!/bin/bash -statedir=/root/s3/state -curcons=`fgconsole` -fuser /dev/tty$curcons 2>/dev/null|xargs ps -o comm= -p|grep -q X && chvt 2 -cat /dev/vcsa >$statedir/vcsa -sync -echo 3 >/proc/acpi/sleep -sync -vbetool post -vbetool vbestate restore <$statedir/vbe -cat $statedir/vcsa >/dev/vcsa -rckbd restart -chvt $[curcons%6+1] -chvt $curcons - - -Unless you change your graphics card or other hardware configuration, -the state once saved will be OK for every resume afterwards. -NOTE: The "rckbd restart" command may be different for your -distribution. Simply replace it with the command you would use to -set the fonts on screen. 
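The snapshot-device interface documented in userland-swsusp.txt above is easier to follow end to end as a rough outline of a suspending utility. The sketch below is only a control-flow illustration, not a reference implementation: the SNAPSHOT_* ioctl numbers are taken from a local copy of kernel/power/power.h as the document instructs, the /dev/snapshot node name is an assumption (the device is char 10:231, per /sys/class/misc/snapshot/dev), and actually writing the image to the resume partition is left as a comment.

/*
 * Rough outline of a suspending utility for the snapshot device
 * described in userland-swsusp.txt.  Control flow only: error paths,
 * saving the image via SNAPSHOT_GET_SWAP_PAGE, and powering off are
 * omitted.
 */
#include <unistd.h>
#include <fcntl.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include "power.h"	/* local copy of kernel/power/power.h for SNAPSHOT_* */

int main(void)
{
	char page[4096];	/* read()s must not cross a page boundary */
	int in_suspend;		/* 1 after the snapshot, 0 after resume */
	ssize_t n;
	int dev;

	/* lock ourselves in memory before freezing anything */
	mlockall(MCL_CURRENT | MCL_FUTURE);

	/*
	 * Open for reading => suspend mode.  The /dev/snapshot node name
	 * is an assumption; the device is char 10:231, see
	 * /sys/class/misc/snapshot/dev.
	 */
	dev = open("/dev/snapshot", O_RDONLY);
	if (dev < 0)
		return 1;

	if (ioctl(dev, SNAPSHOT_FREEZE, 0) == 0 &&
	    ioctl(dev, SNAPSHOT_ATOMIC_SNAPSHOT, &in_suspend) == 0) {
		if (in_suspend) {
			/* snapshot just created: transfer it out of the kernel */
			while ((n = read(dev, page, sizeof(page))) > 0) {
				/* ... save the n bytes just read ... */
			}
			/*
			 * A real utility powers off or reboots here; closing
			 * the device instead, as this sketch does, abandons
			 * the suspend.
			 */
		}
		/* in_suspend == 0 means we are running again after resume */
	}

	ioctl(dev, SNAPSHOT_UNFREEZE, 0);
	close(dev);	/* release() also frees the image and thaws if needed */
	return 0;
}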
diff --git a/arch/cris/kernel/irq.c b/arch/cris/kernel/irq.c index 30deaf1b728..b504def3e34 100644 --- a/arch/cris/kernel/irq.c +++ b/arch/cris/kernel/irq.c @@ -52,9 +52,8 @@ int show_interrupts(struct seq_file *p, void *v) if (i == 0) { seq_printf(p, " "); - for (j=0; j<NR_CPUS; j++) - if (cpu_online(j)) - seq_printf(p, "CPU%d ",j); + for_each_online_cpu(j) + seq_printf(p, "CPU%d ",j); seq_putc(p, '\n'); } @@ -67,9 +66,8 @@ int show_interrupts(struct seq_file *p, void *v) #ifndef CONFIG_SMP seq_printf(p, "%10u ", kstat_irqs(i)); #else - for (j = 0; j < NR_CPUS; j++) - if (cpu_online(j)) - seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]); + for_each_online_cpu(j) + seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]); #endif seq_printf(p, " %14s", irq_desc[i].handler->typename); seq_printf(p, " %s", action->name); diff --git a/arch/frv/kernel/irq.c b/arch/frv/kernel/irq.c index 27ab4c30aac..11fa326a8f6 100644 --- a/arch/frv/kernel/irq.c +++ b/arch/frv/kernel/irq.c @@ -75,9 +75,8 @@ int show_interrupts(struct seq_file *p, void *v) switch (i) { case 0: seq_printf(p, " "); - for (j = 0; j < NR_CPUS; j++) - if (cpu_online(j)) - seq_printf(p, "CPU%d ",j); + for_each_online_cpu(j) + seq_printf(p, "CPU%d ",j); seq_putc(p, '\n'); break; @@ -100,9 +99,8 @@ int show_interrupts(struct seq_file *p, void *v) #ifndef CONFIG_SMP seq_printf(p, "%10u ", kstat_irqs(i)); #else - for (j = 0; j < NR_CPUS; j++) - if (cpu_online(j)) - seq_printf(p, "%10u ", kstat_cpu(j).irqs[i - 1]); + for_each_online_cpu(j) + seq_printf(p, "%10u ", kstat_cpu(j).irqs[i - 1]); #endif level = group->sources[ix]->level - frv_irq_levels; diff --git a/arch/i386/Kconfig b/arch/i386/Kconfig index 5b1a7d46d1d..bfea1bedcbf 100644 --- a/arch/i386/Kconfig +++ b/arch/i386/Kconfig @@ -80,6 +80,7 @@ config X86_VOYAGER config X86_NUMAQ bool "NUMAQ (IBM/Sequent)" + select SMP select NUMA help This option is used for getting Linux to run on a (IBM/Sequent) NUMA @@ -400,6 +401,7 @@ choice config NOHIGHMEM bool "off" + depends on !X86_NUMAQ ---help--- Linux can use up to 64 Gigabytes of physical memory on x86 systems. However, the address space of 32-bit x86 processors is only 4 @@ -436,6 +438,7 @@ config NOHIGHMEM config HIGHMEM4G bool "4GB" + depends on !X86_NUMAQ help Select this if you have a 32-bit processor and between 1 and 4 gigabytes of physical RAM. @@ -503,10 +506,6 @@ config NUMA default n if X86_PC default y if (X86_NUMAQ || X86_SUMMIT) -# Need comments to help the hapless user trying to turn on NUMA support -comment "NUMA (NUMA-Q) requires SMP, 64GB highmem support" - depends on X86_NUMAQ && (!HIGHMEM64G || !SMP) - comment "NUMA (Summit) requires SMP, 64GB highmem support, ACPI" depends on X86_SUMMIT && (!HIGHMEM64G || !ACPI) @@ -660,13 +659,18 @@ config BOOT_IOREMAP default y config REGPARM - bool "Use register arguments (EXPERIMENTAL)" - depends on EXPERIMENTAL - default n + bool "Use register arguments" + default y help - Compile the kernel with -mregparm=3. This uses a different ABI - and passes the first three arguments of a function call in registers. - This will probably break binary only modules. + Compile the kernel with -mregparm=3. This instructs gcc to use + a more efficient function call ABI which passes the first three + arguments of a function call via registers, which results in denser + and faster code. + + If this option is disabled, then the default ABI of passing + arguments via the stack is used. + + If unsure, say Y. 
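Many hunks in this patch, starting with the arch/cris and arch/frv show_interrupts() changes above, make the same mechanical conversion from an open-coded NR_CPUS loop to the for_each_online_cpu() iterator. A condensed before/after of that idiom, assuming the usual kernel definitions of NR_CPUS, cpu_online() and for_each_online_cpu(), looks like this:

	/* Old pattern: walk every possible CPU number and filter by hand. */
	for (j = 0; j < NR_CPUS; j++)
		if (cpu_online(j))
			seq_printf(p, "CPU%d       ", j);

	/* New pattern: the iterator already skips offline CPUs. */
	for_each_online_cpu(j)
		seq_printf(p, "CPU%d       ", j);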
config SECCOMP bool "Enable seccomp to safely compute untrusted bytecode" diff --git a/arch/i386/Kconfig.debug b/arch/i386/Kconfig.debug index bf32ecc9ad0..00108ba9a78 100644 --- a/arch/i386/Kconfig.debug +++ b/arch/i386/Kconfig.debug @@ -31,6 +31,15 @@ config DEBUG_STACK_USAGE This option will slow down process creation somewhat. +config STACK_BACKTRACE_COLS + int "Stack backtraces per line" if DEBUG_KERNEL + range 1 3 + default 2 + help + Selects how many stack backtrace entries per line to display. + + This can save screen space when displaying traces. + comment "Page alloc debug is incompatible with Software Suspend on i386" depends on DEBUG_KERNEL && SOFTWARE_SUSPEND diff --git a/arch/i386/kernel/Makefile b/arch/i386/kernel/Makefile index 65656c033d7..5b9ed21216c 100644 --- a/arch/i386/kernel/Makefile +++ b/arch/i386/kernel/Makefile @@ -7,7 +7,7 @@ extra-y := head.o init_task.o vmlinux.lds obj-y := process.o semaphore.o signal.o entry.o traps.o irq.o \ ptrace.o time.o ioport.o ldt.o setup.o i8259.o sys_i386.o \ pci-dma.o i386_ksyms.o i387.o dmi_scan.o bootflag.o \ - quirks.o i8237.o topology.o + quirks.o i8237.o topology.o alternative.o obj-y += cpu/ obj-y += timers/ diff --git a/arch/i386/kernel/alternative.c b/arch/i386/kernel/alternative.c new file mode 100644 index 00000000000..5cbd6f99fb2 --- /dev/null +++ b/arch/i386/kernel/alternative.c @@ -0,0 +1,321 @@ +#include <linux/module.h> +#include <linux/spinlock.h> +#include <linux/list.h> +#include <asm/alternative.h> +#include <asm/sections.h> + +#define DEBUG 0 +#if DEBUG +# define DPRINTK(fmt, args...) printk(fmt, args) +#else +# define DPRINTK(fmt, args...) +#endif + +/* Use inline assembly to define this because the nops are defined + as inline assembly strings in the include files and we cannot + get them easily into strings. 
*/ +asm("\t.data\nintelnops: " + GENERIC_NOP1 GENERIC_NOP2 GENERIC_NOP3 GENERIC_NOP4 GENERIC_NOP5 GENERIC_NOP6 + GENERIC_NOP7 GENERIC_NOP8); +asm("\t.data\nk8nops: " + K8_NOP1 K8_NOP2 K8_NOP3 K8_NOP4 K8_NOP5 K8_NOP6 + K8_NOP7 K8_NOP8); +asm("\t.data\nk7nops: " + K7_NOP1 K7_NOP2 K7_NOP3 K7_NOP4 K7_NOP5 K7_NOP6 + K7_NOP7 K7_NOP8); + +extern unsigned char intelnops[], k8nops[], k7nops[]; +static unsigned char *intel_nops[ASM_NOP_MAX+1] = { + NULL, + intelnops, + intelnops + 1, + intelnops + 1 + 2, + intelnops + 1 + 2 + 3, + intelnops + 1 + 2 + 3 + 4, + intelnops + 1 + 2 + 3 + 4 + 5, + intelnops + 1 + 2 + 3 + 4 + 5 + 6, + intelnops + 1 + 2 + 3 + 4 + 5 + 6 + 7, +}; +static unsigned char *k8_nops[ASM_NOP_MAX+1] = { + NULL, + k8nops, + k8nops + 1, + k8nops + 1 + 2, + k8nops + 1 + 2 + 3, + k8nops + 1 + 2 + 3 + 4, + k8nops + 1 + 2 + 3 + 4 + 5, + k8nops + 1 + 2 + 3 + 4 + 5 + 6, + k8nops + 1 + 2 + 3 + 4 + 5 + 6 + 7, +}; +static unsigned char *k7_nops[ASM_NOP_MAX+1] = { + NULL, + k7nops, + k7nops + 1, + k7nops + 1 + 2, + k7nops + 1 + 2 + 3, + k7nops + 1 + 2 + 3 + 4, + k7nops + 1 + 2 + 3 + 4 + 5, + k7nops + 1 + 2 + 3 + 4 + 5 + 6, + k7nops + 1 + 2 + 3 + 4 + 5 + 6 + 7, +}; +static struct nop { + int cpuid; + unsigned char **noptable; +} noptypes[] = { + { X86_FEATURE_K8, k8_nops }, + { X86_FEATURE_K7, k7_nops }, + { -1, NULL } +}; + + +extern struct alt_instr __alt_instructions[], __alt_instructions_end[]; +extern struct alt_instr __smp_alt_instructions[], __smp_alt_instructions_end[]; +extern u8 *__smp_locks[], *__smp_locks_end[]; + +extern u8 __smp_alt_begin[], __smp_alt_end[]; + + +static unsigned char** find_nop_table(void) +{ + unsigned char **noptable = intel_nops; + int i; + + for (i = 0; noptypes[i].cpuid >= 0; i++) { + if (boot_cpu_has(noptypes[i].cpuid)) { + noptable = noptypes[i].noptable; + break; + } + } + return noptable; +} + +/* Replace instructions with better alternatives for this CPU type. + This runs before SMP is initialized to avoid SMP problems with + self modifying code. This implies that assymetric systems where + APs have less capabilities than the boot processor are not handled. + Tough. Make sure you disable such features by hand. 
*/ + +void apply_alternatives(struct alt_instr *start, struct alt_instr *end) +{ + unsigned char **noptable = find_nop_table(); + struct alt_instr *a; + int diff, i, k; + + DPRINTK("%s: alt table %p -> %p\n", __FUNCTION__, start, end); + for (a = start; a < end; a++) { + BUG_ON(a->replacementlen > a->instrlen); + if (!boot_cpu_has(a->cpuid)) + continue; + memcpy(a->instr, a->replacement, a->replacementlen); + diff = a->instrlen - a->replacementlen; + /* Pad the rest with nops */ + for (i = a->replacementlen; diff > 0; diff -= k, i += k) { + k = diff; + if (k > ASM_NOP_MAX) + k = ASM_NOP_MAX; + memcpy(a->instr + i, noptable[k], k); + } + } +} + +static void alternatives_smp_save(struct alt_instr *start, struct alt_instr *end) +{ + struct alt_instr *a; + + DPRINTK("%s: alt table %p-%p\n", __FUNCTION__, start, end); + for (a = start; a < end; a++) { + memcpy(a->replacement + a->replacementlen, + a->instr, + a->instrlen); + } +} + +static void alternatives_smp_apply(struct alt_instr *start, struct alt_instr *end) +{ + struct alt_instr *a; + + for (a = start; a < end; a++) { + memcpy(a->instr, + a->replacement + a->replacementlen, + a->instrlen); + } +} + +static void alternatives_smp_lock(u8 **start, u8 **end, u8 *text, u8 *text_end) +{ + u8 **ptr; + + for (ptr = start; ptr < end; ptr++) { + if (*ptr < text) + continue; + if (*ptr > text_end) + continue; + **ptr = 0xf0; /* lock prefix */ + }; +} + +static void alternatives_smp_unlock(u8 **start, u8 **end, u8 *text, u8 *text_end) +{ + unsigned char **noptable = find_nop_table(); + u8 **ptr; + + for (ptr = start; ptr < end; ptr++) { + if (*ptr < text) + continue; + if (*ptr > text_end) + continue; + **ptr = noptable[1][0]; + }; +} + +struct smp_alt_module { + /* what is this ??? */ + struct module *mod; + char *name; + + /* ptrs to lock prefixes */ + u8 **locks; + u8 **locks_end; + + /* .text segment, needed to avoid patching init code ;) */ + u8 *text; + u8 *text_end; + + struct list_head next; +}; +static LIST_HEAD(smp_alt_modules); +static DEFINE_SPINLOCK(smp_alt); + +static int smp_alt_once = 0; +static int __init bootonly(char *str) +{ + smp_alt_once = 1; + return 1; +} +__setup("smp-alt-boot", bootonly); + +void alternatives_smp_module_add(struct module *mod, char *name, + void *locks, void *locks_end, + void *text, void *text_end) +{ + struct smp_alt_module *smp; + unsigned long flags; + + if (smp_alt_once) { + if (boot_cpu_has(X86_FEATURE_UP)) + alternatives_smp_unlock(locks, locks_end, + text, text_end); + return; + } + + smp = kzalloc(sizeof(*smp), GFP_KERNEL); + if (NULL == smp) + return; /* we'll run the (safe but slow) SMP code then ... 
*/ + + smp->mod = mod; + smp->name = name; + smp->locks = locks; + smp->locks_end = locks_end; + smp->text = text; + smp->text_end = text_end; + DPRINTK("%s: locks %p -> %p, text %p -> %p, name %s\n", + __FUNCTION__, smp->locks, smp->locks_end, + smp->text, smp->text_end, smp->name); + + spin_lock_irqsave(&smp_alt, flags); + list_add_tail(&smp->next, &smp_alt_modules); + if (boot_cpu_has(X86_FEATURE_UP)) + alternatives_smp_unlock(smp->locks, smp->locks_end, + smp->text, smp->text_end); + spin_unlock_irqrestore(&smp_alt, flags); +} + +void alternatives_smp_module_del(struct module *mod) +{ + struct smp_alt_module *item; + unsigned long flags; + + if (smp_alt_once) + return; + + spin_lock_irqsave(&smp_alt, flags); + list_for_each_entry(item, &smp_alt_modules, next) { + if (mod != item->mod) + continue; + list_del(&item->next); + spin_unlock_irqrestore(&smp_alt, flags); + DPRINTK("%s: %s\n", __FUNCTION__, item->name); + kfree(item); + return; + } + spin_unlock_irqrestore(&smp_alt, flags); +} + +void alternatives_smp_switch(int smp) +{ + struct smp_alt_module *mod; + unsigned long flags; + + if (smp_alt_once) + return; + BUG_ON(!smp && (num_online_cpus() > 1)); + + spin_lock_irqsave(&smp_alt, flags); + if (smp) { + printk(KERN_INFO "SMP alternatives: switching to SMP code\n"); + clear_bit(X86_FEATURE_UP, boot_cpu_data.x86_capability); + clear_bit(X86_FEATURE_UP, cpu_data[0].x86_capability); + alternatives_smp_apply(__smp_alt_instructions, + __smp_alt_instructions_end); + list_for_each_entry(mod, &smp_alt_modules, next) + alternatives_smp_lock(mod->locks, mod->locks_end, + mod->text, mod->text_end); + } else { + printk(KERN_INFO "SMP alternatives: switching to UP code\n"); + set_bit(X86_FEATURE_UP, boot_cpu_data.x86_capability); + set_bit(X86_FEATURE_UP, cpu_data[0].x86_capability); + apply_alternatives(__smp_alt_instructions, + __smp_alt_instructions_end); + list_for_each_entry(mod, &smp_alt_modules, next) + alternatives_smp_unlock(mod->locks, mod->locks_end, + mod->text, mod->text_end); + } + spin_unlock_irqrestore(&smp_alt, flags); +} + +void __init alternative_instructions(void) +{ + apply_alternatives(__alt_instructions, __alt_instructions_end); + + /* switch to patch-once-at-boottime-only mode and free the + * tables in case we know the number of CPUs will never ever + * change */ +#ifdef CONFIG_HOTPLUG_CPU + if (num_possible_cpus() < 2) + smp_alt_once = 1; +#else + smp_alt_once = 1; +#endif + + if (smp_alt_once) { + if (1 == num_possible_cpus()) { + printk(KERN_INFO "SMP alternatives: switching to UP code\n"); + set_bit(X86_FEATURE_UP, boot_cpu_data.x86_capability); + set_bit(X86_FEATURE_UP, cpu_data[0].x86_capability); + apply_alternatives(__smp_alt_instructions, + __smp_alt_instructions_end); + alternatives_smp_unlock(__smp_locks, __smp_locks_end, + _text, _etext); + } + free_init_pages("SMP alternatives", + (unsigned long)__smp_alt_begin, + (unsigned long)__smp_alt_end); + } else { + alternatives_smp_save(__smp_alt_instructions, + __smp_alt_instructions_end); + alternatives_smp_module_add(NULL, "core kernel", + __smp_locks, __smp_locks_end, + _text, _etext); + alternatives_smp_switch(0); + } +} diff --git a/arch/i386/kernel/apic.c b/arch/i386/kernel/apic.c index 776c90989e0..eb5279d23b7 100644 --- a/arch/i386/kernel/apic.c +++ b/arch/i386/kernel/apic.c @@ -38,6 +38,7 @@ #include <asm/i8253.h> #include <mach_apic.h> +#include <mach_apicdef.h> #include <mach_ipi.h> #include "io_ports.h" diff --git a/arch/i386/kernel/cpu/centaur.c b/arch/i386/kernel/cpu/centaur.c index 
f52669ecb93..bd75629dd26 100644 --- a/arch/i386/kernel/cpu/centaur.c +++ b/arch/i386/kernel/cpu/centaur.c @@ -4,6 +4,7 @@ #include <asm/processor.h> #include <asm/msr.h> #include <asm/e820.h> +#include <asm/mtrr.h> #include "cpu.h" #ifdef CONFIG_X86_OOSTORE diff --git a/arch/i386/kernel/cpu/common.c b/arch/i386/kernel/cpu/common.c index e6bd095ae10..7e3d6b6a4e9 100644 --- a/arch/i386/kernel/cpu/common.c +++ b/arch/i386/kernel/cpu/common.c @@ -25,9 +25,10 @@ EXPORT_PER_CPU_SYMBOL(cpu_gdt_descr); DEFINE_PER_CPU(unsigned char, cpu_16bit_stack[CPU_16BIT_STACK_SIZE]); EXPORT_PER_CPU_SYMBOL(cpu_16bit_stack); -static int cachesize_override __devinitdata = -1; -static int disable_x86_fxsr __devinitdata = 0; -static int disable_x86_serial_nr __devinitdata = 1; +static int cachesize_override __cpuinitdata = -1; +static int disable_x86_fxsr __cpuinitdata; +static int disable_x86_serial_nr __cpuinitdata = 1; +static int disable_x86_sep __cpuinitdata; struct cpu_dev * cpu_devs[X86_VENDOR_NUM] = {}; @@ -59,7 +60,7 @@ static int __init cachesize_setup(char *str) } __setup("cachesize=", cachesize_setup); -int __devinit get_model_name(struct cpuinfo_x86 *c) +int __cpuinit get_model_name(struct cpuinfo_x86 *c) { unsigned int *v; char *p, *q; @@ -89,7 +90,7 @@ int __devinit get_model_name(struct cpuinfo_x86 *c) } -void __devinit display_cacheinfo(struct cpuinfo_x86 *c) +void __cpuinit display_cacheinfo(struct cpuinfo_x86 *c) { unsigned int n, dummy, ecx, edx, l2size; @@ -130,7 +131,7 @@ void __devinit display_cacheinfo(struct cpuinfo_x86 *c) /* in particular, if CPUID levels 0x80000002..4 are supported, this isn't used */ /* Look up CPU names by table lookup. */ -static char __devinit *table_lookup_model(struct cpuinfo_x86 *c) +static char __cpuinit *table_lookup_model(struct cpuinfo_x86 *c) { struct cpu_model_info *info; @@ -151,7 +152,7 @@ static char __devinit *table_lookup_model(struct cpuinfo_x86 *c) } -static void __devinit get_cpu_vendor(struct cpuinfo_x86 *c, int early) +static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c, int early) { char *v = c->x86_vendor_id; int i; @@ -187,6 +188,14 @@ static int __init x86_fxsr_setup(char * s) __setup("nofxsr", x86_fxsr_setup); +static int __init x86_sep_setup(char * s) +{ + disable_x86_sep = 1; + return 1; +} +__setup("nosep", x86_sep_setup); + + /* Standard macro to see if a specific flag is changeable */ static inline int flag_is_changeable_p(u32 flag) { @@ -210,7 +219,7 @@ static inline int flag_is_changeable_p(u32 flag) /* Probe for the CPUID instruction */ -static int __devinit have_cpuid_p(void) +static int __cpuinit have_cpuid_p(void) { return flag_is_changeable_p(X86_EFLAGS_ID); } @@ -254,7 +263,7 @@ static void __init early_cpu_detect(void) } } -void __devinit generic_identify(struct cpuinfo_x86 * c) +void __cpuinit generic_identify(struct cpuinfo_x86 * c) { u32 tfms, xlvl; int junk; @@ -307,7 +316,7 @@ void __devinit generic_identify(struct cpuinfo_x86 * c) #endif } -static void __devinit squash_the_stupid_serial_number(struct cpuinfo_x86 *c) +static void __cpuinit squash_the_stupid_serial_number(struct cpuinfo_x86 *c) { if (cpu_has(c, X86_FEATURE_PN) && disable_x86_serial_nr ) { /* Disable processor serial number */ @@ -335,7 +344,7 @@ __setup("serialnumber", x86_serial_nr_setup); /* * This does the hard work of actually picking apart the CPU stuff... 
*/ -void __devinit identify_cpu(struct cpuinfo_x86 *c) +void __cpuinit identify_cpu(struct cpuinfo_x86 *c) { int i; @@ -405,6 +414,10 @@ void __devinit identify_cpu(struct cpuinfo_x86 *c) clear_bit(X86_FEATURE_XMM, c->x86_capability); } + /* SEP disabled? */ + if (disable_x86_sep) + clear_bit(X86_FEATURE_SEP, c->x86_capability); + if (disable_pse) clear_bit(X86_FEATURE_PSE, c->x86_capability); @@ -417,7 +430,7 @@ void __devinit identify_cpu(struct cpuinfo_x86 *c) else /* Last resort... */ sprintf(c->x86_model_id, "%02x/%02x", - c->x86_vendor, c->x86_model); + c->x86, c->x86_model); } /* Now the feature flags better reflect actual CPU features! */ @@ -453,7 +466,7 @@ void __devinit identify_cpu(struct cpuinfo_x86 *c) } #ifdef CONFIG_X86_HT -void __devinit detect_ht(struct cpuinfo_x86 *c) +void __cpuinit detect_ht(struct cpuinfo_x86 *c) { u32 eax, ebx, ecx, edx; int index_msb, core_bits; @@ -500,7 +513,7 @@ void __devinit detect_ht(struct cpuinfo_x86 *c) } #endif -void __devinit print_cpu_info(struct cpuinfo_x86 *c) +void __cpuinit print_cpu_info(struct cpuinfo_x86 *c) { char *vendor = NULL; @@ -523,7 +536,7 @@ void __devinit print_cpu_info(struct cpuinfo_x86 *c) printk("\n"); } -cpumask_t cpu_initialized __devinitdata = CPU_MASK_NONE; +cpumask_t cpu_initialized __cpuinitdata = CPU_MASK_NONE; /* This is hacky. :) * We're emulating future behavior. @@ -570,7 +583,7 @@ void __init early_cpu_init(void) * and IDT. We reload them nevertheless, this function acts as a * 'CPU state barrier', nothing should get across. */ -void __devinit cpu_init(void) +void __cpuinit cpu_init(void) { int cpu = smp_processor_id(); struct tss_struct * t = &per_cpu(init_tss, cpu); @@ -670,7 +683,7 @@ void __devinit cpu_init(void) } #ifdef CONFIG_HOTPLUG_CPU -void __devinit cpu_uninit(void) +void __cpuinit cpu_uninit(void) { int cpu = raw_smp_processor_id(); cpu_clear(cpu, cpu_initialized); diff --git a/arch/i386/kernel/cpu/cpufreq/powernow-k8.c b/arch/i386/kernel/cpu/cpufreq/powernow-k8.c index e11a09207ec..3d5110b65cc 100644 --- a/arch/i386/kernel/cpu/cpufreq/powernow-k8.c +++ b/arch/i386/kernel/cpu/cpufreq/powernow-k8.c @@ -1145,9 +1145,7 @@ static int __cpuinit powernowk8_init(void) { unsigned int i, supported_cpus = 0; - for (i=0; i<NR_CPUS; i++) { - if (!cpu_online(i)) - continue; + for_each_cpu(i) { if (check_supported_cpu(i)) supported_cpus++; } diff --git a/arch/i386/kernel/cpu/intel.c b/arch/i386/kernel/cpu/intel.c index 8c0120186b9..5386b29bb5a 100644 --- a/arch/i386/kernel/cpu/intel.c +++ b/arch/i386/kernel/cpu/intel.c @@ -29,7 +29,7 @@ extern int trap_init_f00f_bug(void); struct movsl_mask movsl_mask __read_mostly; #endif -void __devinit early_intel_workaround(struct cpuinfo_x86 *c) +void __cpuinit early_intel_workaround(struct cpuinfo_x86 *c) { if (c->x86_vendor != X86_VENDOR_INTEL) return; @@ -44,7 +44,7 @@ void __devinit early_intel_workaround(struct cpuinfo_x86 *c) * This is called before we do cpu ident work */ -int __devinit ppro_with_ram_bug(void) +int __cpuinit ppro_with_ram_bug(void) { /* Uses data from early_cpu_detect now */ if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL && @@ -62,7 +62,7 @@ int __devinit ppro_with_ram_bug(void) * P4 Xeon errata 037 workaround. * Hardware prefetcher may cause stale data to be loaded into the cache. 
*/ -static void __devinit Intel_errata_workarounds(struct cpuinfo_x86 *c) +static void __cpuinit Intel_errata_workarounds(struct cpuinfo_x86 *c) { unsigned long lo, hi; @@ -81,7 +81,7 @@ static void __devinit Intel_errata_workarounds(struct cpuinfo_x86 *c) /* * find out the number of processor cores on the die */ -static int __devinit num_cpu_cores(struct cpuinfo_x86 *c) +static int __cpuinit num_cpu_cores(struct cpuinfo_x86 *c) { unsigned int eax, ebx, ecx, edx; @@ -96,7 +96,7 @@ static int __devinit num_cpu_cores(struct cpuinfo_x86 *c) return 1; } -static void __devinit init_intel(struct cpuinfo_x86 *c) +static void __cpuinit init_intel(struct cpuinfo_x86 *c) { unsigned int l2 = 0; char *p = NULL; @@ -205,7 +205,7 @@ static unsigned int intel_size_cache(struct cpuinfo_x86 * c, unsigned int size) return size; } -static struct cpu_dev intel_cpu_dev __devinitdata = { +static struct cpu_dev intel_cpu_dev __cpuinitdata = { .c_vendor = "Intel", .c_ident = { "GenuineIntel" }, .c_models = { diff --git a/arch/i386/kernel/cpu/intel_cacheinfo.c b/arch/i386/kernel/cpu/intel_cacheinfo.c index ffe58cee0c4..ce61921369e 100644 --- a/arch/i386/kernel/cpu/intel_cacheinfo.c +++ b/arch/i386/kernel/cpu/intel_cacheinfo.c @@ -174,7 +174,7 @@ unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c) unsigned int new_l1d = 0, new_l1i = 0; /* Cache sizes from cpuid(4) */ unsigned int new_l2 = 0, new_l3 = 0, i; /* Cache sizes from cpuid(4) */ - if (c->cpuid_level > 4) { + if (c->cpuid_level > 3) { static int is_initialized; if (is_initialized == 0) { @@ -330,7 +330,7 @@ static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index) } } } -static void __devinit cache_remove_shared_cpu_map(unsigned int cpu, int index) +static void __cpuinit cache_remove_shared_cpu_map(unsigned int cpu, int index) { struct _cpuid4_info *this_leaf, *sibling_leaf; int sibling; diff --git a/arch/i386/kernel/cpu/proc.c b/arch/i386/kernel/cpu/proc.c index 89a85af33d2..5cfbd801169 100644 --- a/arch/i386/kernel/cpu/proc.c +++ b/arch/i386/kernel/cpu/proc.c @@ -40,7 +40,7 @@ static int show_cpuinfo(struct seq_file *m, void *v) /* Other (Linux-defined) */ "cxmmx", "k6_mtrr", "cyrix_arr", "centaur_mcr", NULL, NULL, NULL, NULL, - "constant_tsc", NULL, NULL, NULL, NULL, NULL, NULL, NULL, + "constant_tsc", "up", NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, diff --git a/arch/i386/kernel/crash.c b/arch/i386/kernel/crash.c index d49dbe8dc96..e3c5fca0aa8 100644 --- a/arch/i386/kernel/crash.c +++ b/arch/i386/kernel/crash.c @@ -105,7 +105,7 @@ static int crash_nmi_callback(struct pt_regs *regs, int cpu) return 1; local_irq_disable(); - if (!user_mode(regs)) { + if (!user_mode_vm(regs)) { crash_fixup_ss_esp(&fixed_regs, regs); regs = &fixed_regs; } diff --git a/arch/i386/kernel/efi.c b/arch/i386/kernel/efi.c index aeabb419686..7ec6cfa01fb 100644 --- a/arch/i386/kernel/efi.c +++ b/arch/i386/kernel/efi.c @@ -543,7 +543,7 @@ efi_initialize_iomem_resources(struct resource *code_resource, if ((md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT)) > 0x100000000ULL) continue; - res = alloc_bootmem_low(sizeof(struct resource)); + res = kzalloc(sizeof(struct resource), GFP_ATOMIC); switch (md->type) { case EFI_RESERVED_TYPE: res->name = "Reserved Memory"; diff --git a/arch/i386/kernel/entry.S b/arch/i386/kernel/entry.S index 4d704724b2f..cfc683f153b 100644 --- a/arch/i386/kernel/entry.S +++ b/arch/i386/kernel/entry.S @@ -226,6 +226,10 @@ ENTRY(system_call) 
pushl %eax # save orig_eax SAVE_ALL GET_THREAD_INFO(%ebp) + testl $TF_MASK,EFLAGS(%esp) + jz no_singlestep + orl $_TIF_SINGLESTEP,TI_flags(%ebp) +no_singlestep: # system call tracing in operation / emulation /* Note, _TIF_SECCOMP is bit number 8, and so it needs testw and not testb */ testw $(_TIF_SYSCALL_EMU|_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT),TI_flags(%ebp) diff --git a/arch/i386/kernel/head.S b/arch/i386/kernel/head.S index e0b7c632efb..3debc2e2654 100644 --- a/arch/i386/kernel/head.S +++ b/arch/i386/kernel/head.S @@ -450,7 +450,6 @@ int_msg: .globl boot_gdt_descr .globl idt_descr -.globl cpu_gdt_descr ALIGN # early boot GDT descriptor (must use 1:1 address mapping) @@ -470,8 +469,6 @@ cpu_gdt_descr: .word GDT_ENTRIES*8-1 .long cpu_gdt_table - .fill NR_CPUS-1,8,0 # space for the other GDT descriptors - /* * The boot_gdt_table must mirror the equivalent in setup.S and is * used only for booting. @@ -485,7 +482,7 @@ ENTRY(boot_gdt_table) /* * The Global Descriptor Table contains 28 quadwords, per-CPU. */ - .align PAGE_SIZE_asm + .align L1_CACHE_BYTES ENTRY(cpu_gdt_table) .quad 0x0000000000000000 /* NULL descriptor */ .quad 0x0000000000000000 /* 0x0b reserved */ diff --git a/arch/i386/kernel/io_apic.c b/arch/i386/kernel/io_apic.c index 39d9a5fa907..311b4e7266f 100644 --- a/arch/i386/kernel/io_apic.c +++ b/arch/i386/kernel/io_apic.c @@ -351,8 +351,8 @@ static inline void rotate_irqs_among_cpus(unsigned long useful_load_threshold) { int i, j; Dprintk("Rotating IRQs among CPUs.\n"); - for (i = 0; i < NR_CPUS; i++) { - for (j = 0; cpu_online(i) && (j < NR_IRQS); j++) { + for_each_online_cpu(i) { + for (j = 0; j < NR_IRQS; j++) { if (!irq_desc[j].action) continue; /* Is it a significant load ? */ @@ -381,7 +381,7 @@ static void do_irq_balance(void) unsigned long imbalance = 0; cpumask_t allowed_mask, target_cpu_mask, tmp; - for (i = 0; i < NR_CPUS; i++) { + for_each_cpu(i) { int package_index; CPU_IRQ(i) = 0; if (!cpu_online(i)) @@ -422,9 +422,7 @@ static void do_irq_balance(void) } } /* Find the least loaded processor package */ - for (i = 0; i < NR_CPUS; i++) { - if (!cpu_online(i)) - continue; + for_each_online_cpu(i) { if (i != CPU_TO_PACKAGEINDEX(i)) continue; if (min_cpu_irq > CPU_IRQ(i)) { @@ -441,9 +439,7 @@ tryanothercpu: */ tmp_cpu_irq = 0; tmp_loaded = -1; - for (i = 0; i < NR_CPUS; i++) { - if (!cpu_online(i)) - continue; + for_each_online_cpu(i) { if (i != CPU_TO_PACKAGEINDEX(i)) continue; if (max_cpu_irq <= CPU_IRQ(i)) @@ -619,9 +615,7 @@ static int __init balanced_irq_init(void) if (smp_num_siblings > 1 && !cpus_empty(tmp)) physical_balance = 1; - for (i = 0; i < NR_CPUS; i++) { - if (!cpu_online(i)) - continue; + for_each_online_cpu(i) { irq_cpu_data[i].irq_delta = kmalloc(sizeof(unsigned long) * NR_IRQS, GFP_KERNEL); irq_cpu_data[i].last_irq = kmalloc(sizeof(unsigned long) * NR_IRQS, GFP_KERNEL); if (irq_cpu_data[i].irq_delta == NULL || irq_cpu_data[i].last_irq == NULL) { @@ -638,9 +632,11 @@ static int __init balanced_irq_init(void) else printk(KERN_ERR "balanced_irq_init: failed to spawn balanced_irq"); failed: - for (i = 0; i < NR_CPUS; i++) { + for_each_cpu(i) { kfree(irq_cpu_data[i].irq_delta); + irq_cpu_data[i].irq_delta = NULL; kfree(irq_cpu_data[i].last_irq); + irq_cpu_data[i].last_irq = NULL; } return 0; } @@ -1761,7 +1757,8 @@ static void __init setup_ioapic_ids_from_mpc(void) * Don't check I/O APIC IDs for xAPIC systems. They have * no meaning without the serial APIC bus. 
*/ - if (!(boot_cpu_data.x86_vendor == X86_VENDOR_INTEL && boot_cpu_data.x86 < 15)) + if (!(boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) + || APIC_XAPIC(apic_version[boot_cpu_physical_apicid])) return; /* * This is broken; anything with a real cpu count has to diff --git a/arch/i386/kernel/kprobes.c b/arch/i386/kernel/kprobes.c index 694a1399763..7a59050242a 100644 --- a/arch/i386/kernel/kprobes.c +++ b/arch/i386/kernel/kprobes.c @@ -84,9 +84,9 @@ void __kprobes arch_disarm_kprobe(struct kprobe *p) void __kprobes arch_remove_kprobe(struct kprobe *p) { - down(&kprobe_mutex); + mutex_lock(&kprobe_mutex); free_insn_slot(p->ainsn.insn); - up(&kprobe_mutex); + mutex_unlock(&kprobe_mutex); } static inline void save_previous_kprobe(struct kprobe_ctlblk *kcb) diff --git a/arch/i386/kernel/module.c b/arch/i386/kernel/module.c index 5149c8a621f..470cf97e7cd 100644 --- a/arch/i386/kernel/module.c +++ b/arch/i386/kernel/module.c @@ -104,26 +104,38 @@ int apply_relocate_add(Elf32_Shdr *sechdrs, return -ENOEXEC; } -extern void apply_alternatives(void *start, void *end); - int module_finalize(const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs, struct module *me) { - const Elf_Shdr *s; + const Elf_Shdr *s, *text = NULL, *alt = NULL, *locks = NULL; char *secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset; - /* look for .altinstructions to patch */ for (s = sechdrs; s < sechdrs + hdr->e_shnum; s++) { - void *seg; - if (strcmp(".altinstructions", secstrings + s->sh_name)) - continue; - seg = (void *)s->sh_addr; - apply_alternatives(seg, seg + s->sh_size); - } + if (!strcmp(".text", secstrings + s->sh_name)) + text = s; + if (!strcmp(".altinstructions", secstrings + s->sh_name)) + alt = s; + if (!strcmp(".smp_locks", secstrings + s->sh_name)) + locks= s; + } + + if (alt) { + /* patch .altinstructions */ + void *aseg = (void *)alt->sh_addr; + apply_alternatives(aseg, aseg + alt->sh_size); + } + if (locks && text) { + void *lseg = (void *)locks->sh_addr; + void *tseg = (void *)text->sh_addr; + alternatives_smp_module_add(me, me->name, + lseg, lseg + locks->sh_size, + tseg, tseg + text->sh_size); + } return 0; } void module_arch_cleanup(struct module *mod) { + alternatives_smp_module_del(mod); } diff --git a/arch/i386/kernel/mpparse.c b/arch/i386/kernel/mpparse.c index e6e2f43db85..8d8aa9d1796 100644 --- a/arch/i386/kernel/mpparse.c +++ b/arch/i386/kernel/mpparse.c @@ -828,6 +828,8 @@ void __init find_smp_config (void) smp_scan_config(address, 0x400); } +int es7000_plat; + /* -------------------------------------------------------------------------- ACPI-based MP Configuration -------------------------------------------------------------------------- */ @@ -935,7 +937,8 @@ void __init mp_register_ioapic ( mp_ioapics[idx].mpc_apicaddr = address; set_fixmap_nocache(FIX_IO_APIC_BASE_0 + idx, address); - if ((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) && (boot_cpu_data.x86 < 15)) + if ((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) + && !APIC_XAPIC(apic_version[boot_cpu_physical_apicid])) tmpid = io_apic_get_unique_id(idx, id); else tmpid = id; @@ -1011,8 +1014,6 @@ void __init mp_override_legacy_irq ( return; } -int es7000_plat; - void __init mp_config_acpi_legacy_irqs (void) { struct mpc_config_intsrc intsrc; diff --git a/arch/i386/kernel/nmi.c b/arch/i386/kernel/nmi.c index be87c5e2ee9..9074818b947 100644 --- a/arch/i386/kernel/nmi.c +++ b/arch/i386/kernel/nmi.c @@ -143,7 +143,7 @@ static int __init check_nmi_watchdog(void) local_irq_enable(); mdelay((10*1000)/nmi_hz); // wait 10 ticks - for (cpu = 0; 
cpu < NR_CPUS; cpu++) { + for_each_cpu(cpu) { #ifdef CONFIG_SMP /* Check cpu_callin_map here because that is set after the timer is started. */ @@ -510,7 +510,7 @@ void touch_nmi_watchdog (void) * Just reset the alert counters, (other CPUs might be * spinning on locks we hold): */ - for (i = 0; i < NR_CPUS; i++) + for_each_cpu(i) alert_counter[i] = 0; /* @@ -543,7 +543,7 @@ void nmi_watchdog_tick (struct pt_regs * regs) /* * die_nmi will return ONLY if NOTIFY_STOP happens.. */ - die_nmi(regs, "NMI Watchdog detected LOCKUP"); + die_nmi(regs, "BUG: NMI Watchdog detected LOCKUP"); } else { last_irq_sums[cpu] = sum; alert_counter[cpu] = 0; diff --git a/arch/i386/kernel/process.c b/arch/i386/kernel/process.c index 0480454ebff..299e6167408 100644 --- a/arch/i386/kernel/process.c +++ b/arch/i386/kernel/process.c @@ -295,7 +295,7 @@ void show_regs(struct pt_regs * regs) printk("EIP: %04x:[<%08lx>] CPU: %d\n",0xffff & regs->xcs,regs->eip, smp_processor_id()); print_symbol("EIP is at %s\n", regs->eip); - if (user_mode(regs)) + if (user_mode_vm(regs)) printk(" ESP: %04x:%08lx",0xffff & regs->xss,regs->esp); printk(" EFLAGS: %08lx %s (%s %.*s)\n", regs->eflags, print_tainted(), system_utsname.release, diff --git a/arch/i386/kernel/ptrace.c b/arch/i386/kernel/ptrace.c index 5c1fb6aada5..506462ef36a 100644 --- a/arch/i386/kernel/ptrace.c +++ b/arch/i386/kernel/ptrace.c @@ -34,10 +34,10 @@ /* * Determines which flags the user has access to [1 = access, 0 = no access]. - * Prohibits changing ID(21), VIP(20), VIF(19), VM(17), IOPL(12-13), IF(9). + * Prohibits changing ID(21), VIP(20), VIF(19), VM(17), NT(14), IOPL(12-13), IF(9). * Also masks reserved bits (31-22, 15, 5, 3, 1). */ -#define FLAG_MASK 0x00054dd5 +#define FLAG_MASK 0x00050dd5 /* set's the trap flag. */ #define TRAP_FLAG 0x100 diff --git a/arch/i386/kernel/semaphore.c b/arch/i386/kernel/semaphore.c index 7455ab64394..967dc74df9e 100644 --- a/arch/i386/kernel/semaphore.c +++ b/arch/i386/kernel/semaphore.c @@ -110,11 +110,11 @@ asm( ".align 4\n" ".globl __write_lock_failed\n" "__write_lock_failed:\n\t" - LOCK "addl $" RW_LOCK_BIAS_STR ",(%eax)\n" + LOCK_PREFIX "addl $" RW_LOCK_BIAS_STR ",(%eax)\n" "1: rep; nop\n\t" "cmpl $" RW_LOCK_BIAS_STR ",(%eax)\n\t" "jne 1b\n\t" - LOCK "subl $" RW_LOCK_BIAS_STR ",(%eax)\n\t" + LOCK_PREFIX "subl $" RW_LOCK_BIAS_STR ",(%eax)\n\t" "jnz __write_lock_failed\n\t" "ret" ); @@ -124,11 +124,11 @@ asm( ".align 4\n" ".globl __read_lock_failed\n" "__read_lock_failed:\n\t" - LOCK "incl (%eax)\n" + LOCK_PREFIX "incl (%eax)\n" "1: rep; nop\n\t" "cmpl $1,(%eax)\n\t" "js 1b\n\t" - LOCK "decl (%eax)\n\t" + LOCK_PREFIX "decl (%eax)\n\t" "js __read_lock_failed\n\t" "ret" ); diff --git a/arch/i386/kernel/setup.c b/arch/i386/kernel/setup.c index ab62a9f4701..d313a11acaf 100644 --- a/arch/i386/kernel/setup.c +++ b/arch/i386/kernel/setup.c @@ -1288,7 +1288,7 @@ legacy_init_iomem_resources(struct resource *code_resource, struct resource *dat struct resource *res; if (e820.map[i].addr + e820.map[i].size > 0x100000000ULL) continue; - res = alloc_bootmem_low(sizeof(struct resource)); + res = kzalloc(sizeof(struct resource), GFP_ATOMIC); switch (e820.map[i].type) { case E820_RAM: res->name = "System RAM"; break; case E820_ACPI: res->name = "ACPI Tables"; break; @@ -1316,13 +1316,15 @@ legacy_init_iomem_resources(struct resource *code_resource, struct resource *dat /* * Request address space for all standard resources + * + * This is called just before pcibios_assign_resources(), which is also + * an fs_initcall, but is linked in later 
(in arch/i386/pci/i386.c). */ -static void __init register_memory(void) +static int __init request_standard_resources(void) { - unsigned long gapstart, gapsize, round; - unsigned long long last; - int i; + int i; + printk("Setting up standard PCI resources\n"); if (efi_enabled) efi_initialize_iomem_resources(&code_resource, &data_resource); else @@ -1334,6 +1336,16 @@ static void __init register_memory(void) /* request I/O space for devices used on all i[345]86 PCs */ for (i = 0; i < STANDARD_IO_RESOURCES; i++) request_resource(&ioport_resource, &standard_io_resources[i]); + return 0; +} + +fs_initcall(request_standard_resources); + +static void __init register_memory(void) +{ + unsigned long gapstart, gapsize, round; + unsigned long long last; + int i; /* * Search for the bigest gap in the low 32 bits of the e820 @@ -1377,101 +1389,6 @@ static void __init register_memory(void) pci_mem_start, gapstart, gapsize); } -/* Use inline assembly to define this because the nops are defined - as inline assembly strings in the include files and we cannot - get them easily into strings. */ -asm("\t.data\nintelnops: " - GENERIC_NOP1 GENERIC_NOP2 GENERIC_NOP3 GENERIC_NOP4 GENERIC_NOP5 GENERIC_NOP6 - GENERIC_NOP7 GENERIC_NOP8); -asm("\t.data\nk8nops: " - K8_NOP1 K8_NOP2 K8_NOP3 K8_NOP4 K8_NOP5 K8_NOP6 - K8_NOP7 K8_NOP8); -asm("\t.data\nk7nops: " - K7_NOP1 K7_NOP2 K7_NOP3 K7_NOP4 K7_NOP5 K7_NOP6 - K7_NOP7 K7_NOP8); - -extern unsigned char intelnops[], k8nops[], k7nops[]; -static unsigned char *intel_nops[ASM_NOP_MAX+1] = { - NULL, - intelnops, - intelnops + 1, - intelnops + 1 + 2, - intelnops + 1 + 2 + 3, - intelnops + 1 + 2 + 3 + 4, - intelnops + 1 + 2 + 3 + 4 + 5, - intelnops + 1 + 2 + 3 + 4 + 5 + 6, - intelnops + 1 + 2 + 3 + 4 + 5 + 6 + 7, -}; -static unsigned char *k8_nops[ASM_NOP_MAX+1] = { - NULL, - k8nops, - k8nops + 1, - k8nops + 1 + 2, - k8nops + 1 + 2 + 3, - k8nops + 1 + 2 + 3 + 4, - k8nops + 1 + 2 + 3 + 4 + 5, - k8nops + 1 + 2 + 3 + 4 + 5 + 6, - k8nops + 1 + 2 + 3 + 4 + 5 + 6 + 7, -}; -static unsigned char *k7_nops[ASM_NOP_MAX+1] = { - NULL, - k7nops, - k7nops + 1, - k7nops + 1 + 2, - k7nops + 1 + 2 + 3, - k7nops + 1 + 2 + 3 + 4, - k7nops + 1 + 2 + 3 + 4 + 5, - k7nops + 1 + 2 + 3 + 4 + 5 + 6, - k7nops + 1 + 2 + 3 + 4 + 5 + 6 + 7, -}; -static struct nop { - int cpuid; - unsigned char **noptable; -} noptypes[] = { - { X86_FEATURE_K8, k8_nops }, - { X86_FEATURE_K7, k7_nops }, - { -1, NULL } -}; - -/* Replace instructions with better alternatives for this CPU type. - - This runs before SMP is initialized to avoid SMP problems with - self modifying code. This implies that assymetric systems where - APs have less capabilities than the boot processor are not handled. - Tough. Make sure you disable such features by hand. 
*/ -void apply_alternatives(void *start, void *end) -{ - struct alt_instr *a; - int diff, i, k; - unsigned char **noptable = intel_nops; - for (i = 0; noptypes[i].cpuid >= 0; i++) { - if (boot_cpu_has(noptypes[i].cpuid)) { - noptable = noptypes[i].noptable; - break; - } - } - for (a = start; (void *)a < end; a++) { - if (!boot_cpu_has(a->cpuid)) - continue; - BUG_ON(a->replacementlen > a->instrlen); - memcpy(a->instr, a->replacement, a->replacementlen); - diff = a->instrlen - a->replacementlen; - /* Pad the rest with nops */ - for (i = a->replacementlen; diff > 0; diff -= k, i += k) { - k = diff; - if (k > ASM_NOP_MAX) - k = ASM_NOP_MAX; - memcpy(a->instr + i, noptable[k], k); - } - } -} - -void __init alternative_instructions(void) -{ - extern struct alt_instr __alt_instructions[], __alt_instructions_end[]; - apply_alternatives(__alt_instructions, __alt_instructions_end); -} - static char * __init machine_specific_memory_setup(void); #ifdef CONFIG_MCA @@ -1554,6 +1471,16 @@ void __init setup_arch(char **cmdline_p) parse_cmdline_early(cmdline_p); +#ifdef CONFIG_EARLY_PRINTK + { + char *s = strstr(*cmdline_p, "earlyprintk="); + if (s) { + setup_early_printk(strchr(s, '=') + 1); + printk("early console enabled\n"); + } + } +#endif + max_low_pfn = setup_memory(); /* @@ -1578,19 +1505,6 @@ void __init setup_arch(char **cmdline_p) * NOTE: at this point the bootmem allocator is fully available. */ -#ifdef CONFIG_EARLY_PRINTK - { - char *s = strstr(*cmdline_p, "earlyprintk="); - if (s) { - extern void setup_early_printk(char *); - - setup_early_printk(strchr(s, '=') + 1); - printk("early console enabled\n"); - } - } -#endif - - dmi_scan_machine(); #ifdef CONFIG_X86_GENERICARCH diff --git a/arch/i386/kernel/signal.c b/arch/i386/kernel/signal.c index 963616d364e..5c352c3a9e7 100644 --- a/arch/i386/kernel/signal.c +++ b/arch/i386/kernel/signal.c @@ -123,7 +123,8 @@ restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc, int *peax err |= __get_user(tmp, &sc->seg); \ loadsegment(seg,tmp); } -#define FIX_EFLAGS (X86_EFLAGS_AC | X86_EFLAGS_OF | X86_EFLAGS_DF | \ +#define FIX_EFLAGS (X86_EFLAGS_AC | X86_EFLAGS_RF | \ + X86_EFLAGS_OF | X86_EFLAGS_DF | \ X86_EFLAGS_TF | X86_EFLAGS_SF | X86_EFLAGS_ZF | \ X86_EFLAGS_AF | X86_EFLAGS_PF | X86_EFLAGS_CF) @@ -582,9 +583,6 @@ static void fastcall do_signal(struct pt_regs *regs) if (!user_mode(regs)) return; - if (try_to_freeze()) - goto no_signal; - if (test_thread_flag(TIF_RESTORE_SIGMASK)) oldset = ¤t->saved_sigmask; else @@ -613,7 +611,6 @@ static void fastcall do_signal(struct pt_regs *regs) return; } -no_signal: /* Did we come from a system call? 
*/ if (regs->orig_eax >= 0) { /* Restart the system call - no handlers present */ diff --git a/arch/i386/kernel/smpboot.c b/arch/i386/kernel/smpboot.c index 7007e178379..4c470e99a74 100644 --- a/arch/i386/kernel/smpboot.c +++ b/arch/i386/kernel/smpboot.c @@ -899,6 +899,7 @@ static int __devinit do_boot_cpu(int apicid, int cpu) unsigned short nmi_high = 0, nmi_low = 0; ++cpucount; + alternatives_smp_switch(1); /* * We can't use kernel_thread since we must avoid to @@ -1368,6 +1369,8 @@ void __cpu_die(unsigned int cpu) /* They ack this in play_dead by setting CPU_DEAD */ if (per_cpu(cpu_state, cpu) == CPU_DEAD) { printk ("CPU %d is now offline\n", cpu); + if (1 == num_online_cpus()) + alternatives_smp_switch(0); return; } msleep(100); diff --git a/arch/i386/kernel/topology.c b/arch/i386/kernel/topology.c index 67a0e1baa28..296355292c7 100644 --- a/arch/i386/kernel/topology.c +++ b/arch/i386/kernel/topology.c @@ -41,6 +41,15 @@ int arch_register_cpu(int num){ parent = &node_devices[node].node; #endif /* CONFIG_NUMA */ + /* + * CPU0 cannot be offlined due to several + * restrictions and assumptions in kernel. This basically + * doesnt add a control file, one cannot attempt to offline + * BSP. + */ + if (!num) + cpu_devices[num].cpu.no_control = 1; + return register_cpu(&cpu_devices[num].cpu, num, parent); } diff --git a/arch/i386/kernel/traps.c b/arch/i386/kernel/traps.c index b814dbdcc91..de5386b01d3 100644 --- a/arch/i386/kernel/traps.c +++ b/arch/i386/kernel/traps.c @@ -99,6 +99,8 @@ int register_die_notifier(struct notifier_block *nb) { int err = 0; unsigned long flags; + + vmalloc_sync_all(); spin_lock_irqsave(&die_notifier_lock, flags); err = notifier_chain_register(&i386die_chain, nb); spin_unlock_irqrestore(&die_notifier_lock, flags); @@ -112,12 +114,30 @@ static inline int valid_stack_ptr(struct thread_info *tinfo, void *p) p < (void *)tinfo + THREAD_SIZE - 3; } -static void print_addr_and_symbol(unsigned long addr, char *log_lvl) +/* + * Print CONFIG_STACK_BACKTRACE_COLS address/symbol entries per line. 
+ */ +static inline int print_addr_and_symbol(unsigned long addr, char *log_lvl, + int printed) { - printk(log_lvl); + if (!printed) + printk(log_lvl); + +#if CONFIG_STACK_BACKTRACE_COLS == 1 printk(" [<%08lx>] ", addr); +#else + printk(" <%08lx> ", addr); +#endif print_symbol("%s", addr); - printk("\n"); + + printed = (printed + 1) % CONFIG_STACK_BACKTRACE_COLS; + + if (printed) + printk(" "); + else + printk("\n"); + + return printed; } static inline unsigned long print_context_stack(struct thread_info *tinfo, @@ -125,20 +145,24 @@ static inline unsigned long print_context_stack(struct thread_info *tinfo, char *log_lvl) { unsigned long addr; + int printed = 0; /* nr of entries already printed on current line */ #ifdef CONFIG_FRAME_POINTER while (valid_stack_ptr(tinfo, (void *)ebp)) { addr = *(unsigned long *)(ebp + 4); - print_addr_and_symbol(addr, log_lvl); + printed = print_addr_and_symbol(addr, log_lvl, printed); ebp = *(unsigned long *)ebp; } #else while (valid_stack_ptr(tinfo, stack)) { addr = *stack++; if (__kernel_text_address(addr)) - print_addr_and_symbol(addr, log_lvl); + printed = print_addr_and_symbol(addr, log_lvl, printed); } #endif + if (printed) + printk("\n"); + return ebp; } @@ -166,8 +190,7 @@ static void show_trace_log_lvl(struct task_struct *task, stack = (unsigned long*)context->previous_esp; if (!stack) break; - printk(log_lvl); - printk(" =======================\n"); + printk("%s =======================\n", log_lvl); } } @@ -194,21 +217,17 @@ static void show_stack_log_lvl(struct task_struct *task, unsigned long *esp, for(i = 0; i < kstack_depth_to_print; i++) { if (kstack_end(stack)) break; - if (i && ((i % 8) == 0)) { - printk("\n"); - printk(log_lvl); - printk(" "); - } + if (i && ((i % 8) == 0)) + printk("\n%s ", log_lvl); printk("%08lx ", *stack++); } - printk("\n"); - printk(log_lvl); - printk("Call Trace:\n"); + printk("\n%sCall Trace:\n", log_lvl); show_trace_log_lvl(task, esp, log_lvl); } void show_stack(struct task_struct *task, unsigned long *esp) { + printk(" "); show_stack_log_lvl(task, esp, ""); } @@ -233,7 +252,7 @@ void show_registers(struct pt_regs *regs) esp = (unsigned long) (®s->esp); savesegment(ss, ss); - if (user_mode(regs)) { + if (user_mode_vm(regs)) { in_kernel = 0; esp = regs->esp; ss = regs->xss & 0xffff; @@ -333,6 +352,8 @@ void die(const char * str, struct pt_regs * regs, long err) static int die_counter; unsigned long flags; + oops_enter(); + if (die.lock_owner != raw_smp_processor_id()) { console_verbose(); spin_lock_irqsave(&die.lock, flags); @@ -385,6 +406,7 @@ void die(const char * str, struct pt_regs * regs, long err) ssleep(5); panic("Fatal exception"); } + oops_exit(); do_exit(SIGSEGV); } @@ -623,7 +645,7 @@ void die_nmi (struct pt_regs *regs, const char *msg) /* If we are in kernel we are probably nested up pretty bad * and might aswell get out now while we still can. */ - if (!user_mode(regs)) { + if (!user_mode_vm(regs)) { current->thread.trap_no = 2; crash_kexec(regs); } @@ -694,6 +716,7 @@ fastcall void do_nmi(struct pt_regs * regs, long error_code) void set_nmi_callback(nmi_callback_t callback) { + vmalloc_sync_all(); rcu_assign_pointer(nmi_callback, callback); } EXPORT_SYMBOL_GPL(set_nmi_callback); diff --git a/arch/i386/kernel/vmlinux.lds.S b/arch/i386/kernel/vmlinux.lds.S index 4710195b6b7..3f21c6f6466 100644 --- a/arch/i386/kernel/vmlinux.lds.S +++ b/arch/i386/kernel/vmlinux.lds.S @@ -68,6 +68,26 @@ SECTIONS *(.data.init_task) } + /* might get freed after init */ + . 
= ALIGN(4096); + __smp_alt_begin = .; + __smp_alt_instructions = .; + .smp_altinstructions : AT(ADDR(.smp_altinstructions) - LOAD_OFFSET) { + *(.smp_altinstructions) + } + __smp_alt_instructions_end = .; + . = ALIGN(4); + __smp_locks = .; + .smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) { + *(.smp_locks) + } + __smp_locks_end = .; + .smp_altinstr_replacement : AT(ADDR(.smp_altinstr_replacement) - LOAD_OFFSET) { + *(.smp_altinstr_replacement) + } + . = ALIGN(4096); + __smp_alt_end = .; + /* will be freed after init */ . = ALIGN(4096); /* Init code and data */ __init_begin = .; diff --git a/arch/i386/kernel/vsyscall-sysenter.S b/arch/i386/kernel/vsyscall-sysenter.S index 76b72815940..3b62baa6a37 100644 --- a/arch/i386/kernel/vsyscall-sysenter.S +++ b/arch/i386/kernel/vsyscall-sysenter.S @@ -21,6 +21,9 @@ * instruction clobbers %esp, the user's %esp won't even survive entry * into the kernel. We store %esp in %ebp. Code in entry.S must fetch * arg6 from the stack. + * + * You can not use this vsyscall for the clone() syscall because the + * three dwords on the parent stack do not get copied to the child. */ .text .globl __kernel_vsyscall diff --git a/arch/i386/mach-es7000/es7000.h b/arch/i386/mach-es7000/es7000.h index f1e3204f5de..80566ca4a80 100644 --- a/arch/i386/mach-es7000/es7000.h +++ b/arch/i386/mach-es7000/es7000.h @@ -83,6 +83,7 @@ struct es7000_oem_table { struct psai psai; }; +#ifdef CONFIG_ACPI struct acpi_table_sdt { unsigned long pa; unsigned long count; @@ -99,6 +100,9 @@ struct oem_table { u32 OEMTableSize; }; +extern int find_unisys_acpi_oem_table(unsigned long *oem_addr); +#endif + struct mip_reg { unsigned long long off_0; unsigned long long off_8; @@ -114,7 +118,6 @@ struct mip_reg { #define MIP_FUNC(VALUE) (VALUE & 0xff) extern int parse_unisys_oem (char *oemptr); -extern int find_unisys_acpi_oem_table(unsigned long *oem_addr); extern void setup_unisys(void); extern int es7000_start_cpu(int cpu, unsigned long eip); extern void es7000_sw_apic(void); diff --git a/arch/i386/mach-es7000/es7000plat.c b/arch/i386/mach-es7000/es7000plat.c index a9ab0644f40..3d0fc853516 100644 --- a/arch/i386/mach-es7000/es7000plat.c +++ b/arch/i386/mach-es7000/es7000plat.c @@ -51,8 +51,6 @@ struct mip_reg *host_reg; int mip_port; unsigned long mip_addr, host_addr; -#if defined(CONFIG_X86_IO_APIC) && defined(CONFIG_ACPI) - /* * GSI override for ES7000 platforms. */ @@ -76,8 +74,6 @@ es7000_rename_gsi(int ioapic, int gsi) return gsi; } -#endif /* (CONFIG_X86_IO_APIC) && (CONFIG_ACPI) */ - void __init setup_unisys(void) { @@ -160,6 +156,7 @@ parse_unisys_oem (char *oemptr) return es7000_plat; } +#ifdef CONFIG_ACPI int __init find_unisys_acpi_oem_table(unsigned long *oem_addr) { @@ -212,6 +209,7 @@ find_unisys_acpi_oem_table(unsigned long *oem_addr) } return -1; } +#endif static void es7000_spin(int n) diff --git a/arch/i386/mm/fault.c b/arch/i386/mm/fault.c index cf572d9a3b6..7f0fcf219a2 100644 --- a/arch/i386/mm/fault.c +++ b/arch/i386/mm/fault.c @@ -214,6 +214,68 @@ static noinline void force_sig_info_fault(int si_signo, int si_code, fastcall void do_invalid_op(struct pt_regs *, unsigned long); +static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address) +{ + unsigned index = pgd_index(address); + pgd_t *pgd_k; + pud_t *pud, *pud_k; + pmd_t *pmd, *pmd_k; + + pgd += index; + pgd_k = init_mm.pgd + index; + + if (!pgd_present(*pgd_k)) + return NULL; + + /* + * set_pgd(pgd, *pgd_k); here would be useless on PAE + * and redundant with the set_pmd() on non-PAE. As would + * set_pud. 
+ */ + + pud = pud_offset(pgd, address); + pud_k = pud_offset(pgd_k, address); + if (!pud_present(*pud_k)) + return NULL; + + pmd = pmd_offset(pud, address); + pmd_k = pmd_offset(pud_k, address); + if (!pmd_present(*pmd_k)) + return NULL; + if (!pmd_present(*pmd)) + set_pmd(pmd, *pmd_k); + else + BUG_ON(pmd_page(*pmd) != pmd_page(*pmd_k)); + return pmd_k; +} + +/* + * Handle a fault on the vmalloc or module mapping area + * + * This assumes no large pages in there. + */ +static inline int vmalloc_fault(unsigned long address) +{ + unsigned long pgd_paddr; + pmd_t *pmd_k; + pte_t *pte_k; + /* + * Synchronize this task's top level page-table + * with the 'reference' page table. + * + * Do _not_ use "current" here. We might be inside + * an interrupt in the middle of a task switch.. + */ + pgd_paddr = read_cr3(); + pmd_k = vmalloc_sync_one(__va(pgd_paddr), address); + if (!pmd_k) + return -1; + pte_k = pte_offset_kernel(pmd_k, address); + if (!pte_present(*pte_k)) + return -1; + return 0; +} + /* * This routine handles page faults. It determines the address, * and the problem, and then passes it off to one of the appropriate @@ -223,6 +285,8 @@ fastcall void do_invalid_op(struct pt_regs *, unsigned long); * bit 0 == 0 means no page found, 1 means protection fault * bit 1 == 0 means read, 1 means write * bit 2 == 0 means kernel, 1 means user-mode + * bit 3 == 1 means use of reserved bit detected + * bit 4 == 1 means fault was an instruction fetch */ fastcall void __kprobes do_page_fault(struct pt_regs *regs, unsigned long error_code) @@ -237,13 +301,6 @@ fastcall void __kprobes do_page_fault(struct pt_regs *regs, /* get the address */ address = read_cr2(); - if (notify_die(DIE_PAGE_FAULT, "page fault", regs, error_code, 14, - SIGSEGV) == NOTIFY_STOP) - return; - /* It's safe to allow irq's after cr2 has been saved */ - if (regs->eflags & (X86_EFLAGS_IF|VM_MASK)) - local_irq_enable(); - tsk = current; si_code = SEGV_MAPERR; @@ -259,17 +316,29 @@ fastcall void __kprobes do_page_fault(struct pt_regs *regs, * * This verifies that the fault happens in kernel space * (error_code & 4) == 0, and that the fault was not a - * protection error (error_code & 1) == 0. + * protection error (error_code & 9) == 0. */ - if (unlikely(address >= TASK_SIZE)) { - if (!(error_code & 5)) - goto vmalloc_fault; - /* + if (unlikely(address >= TASK_SIZE)) { + if (!(error_code & 0x0000000d) && vmalloc_fault(address) >= 0) + return; + if (notify_die(DIE_PAGE_FAULT, "page fault", regs, error_code, 14, + SIGSEGV) == NOTIFY_STOP) + return; + /* * Don't take the mm semaphore here. If we fixup a prefetch * fault we could otherwise deadlock. */ goto bad_area_nosemaphore; - } + } + + if (notify_die(DIE_PAGE_FAULT, "page fault", regs, error_code, 14, + SIGSEGV) == NOTIFY_STOP) + return; + + /* It's safe to allow irq's after cr2 has been saved and the vmalloc + fault has been handled. */ + if (regs->eflags & (X86_EFLAGS_IF|VM_MASK)) + local_irq_enable(); mm = tsk->mm; @@ -440,24 +509,31 @@ no_context: bust_spinlocks(1); -#ifdef CONFIG_X86_PAE - if (error_code & 16) { - pte_t *pte = lookup_address(address); + if (oops_may_print()) { + #ifdef CONFIG_X86_PAE + if (error_code & 16) { + pte_t *pte = lookup_address(address); - if (pte && pte_present(*pte) && !pte_exec_kernel(*pte)) - printk(KERN_CRIT "kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n", current->uid); + if (pte && pte_present(*pte) && !pte_exec_kernel(*pte)) + printk(KERN_CRIT "kernel tried to execute " + "NX-protected page - exploit attempt? 
" + "(uid: %d)\n", current->uid); + } + #endif + if (address < PAGE_SIZE) + printk(KERN_ALERT "BUG: unable to handle kernel NULL " + "pointer dereference"); + else + printk(KERN_ALERT "BUG: unable to handle kernel paging" + " request"); + printk(" at virtual address %08lx\n",address); + printk(KERN_ALERT " printing eip:\n"); + printk("%08lx\n", regs->eip); } -#endif - if (address < PAGE_SIZE) - printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference"); - else - printk(KERN_ALERT "Unable to handle kernel paging request"); - printk(" at virtual address %08lx\n",address); - printk(KERN_ALERT " printing eip:\n"); - printk("%08lx\n", regs->eip); page = read_cr3(); page = ((unsigned long *) __va(page))[address >> 22]; - printk(KERN_ALERT "*pde = %08lx\n", page); + if (oops_may_print()) + printk(KERN_ALERT "*pde = %08lx\n", page); /* * We must not directly access the pte in the highpte * case, the page table might be allocated in highmem. @@ -465,7 +541,7 @@ no_context: * it's allocated already. */ #ifndef CONFIG_HIGHPTE - if (page & 1) { + if ((page & 1) && oops_may_print()) { page &= PAGE_MASK; address &= 0x003ff000; page = ((unsigned long *) __va(page))[address >> PAGE_SHIFT]; @@ -510,51 +586,41 @@ do_sigbus: tsk->thread.error_code = error_code; tsk->thread.trap_no = 14; force_sig_info_fault(SIGBUS, BUS_ADRERR, address, tsk); - return; - -vmalloc_fault: - { - /* - * Synchronize this task's top level page-table - * with the 'reference' page table. - * - * Do _not_ use "tsk" here. We might be inside - * an interrupt in the middle of a task switch.. - */ - int index = pgd_index(address); - unsigned long pgd_paddr; - pgd_t *pgd, *pgd_k; - pud_t *pud, *pud_k; - pmd_t *pmd, *pmd_k; - pte_t *pte_k; - - pgd_paddr = read_cr3(); - pgd = index + (pgd_t *)__va(pgd_paddr); - pgd_k = init_mm.pgd + index; - - if (!pgd_present(*pgd_k)) - goto no_context; - - /* - * set_pgd(pgd, *pgd_k); here would be useless on PAE - * and redundant with the set_pmd() on non-PAE. As would - * set_pud. - */ +} - pud = pud_offset(pgd, address); - pud_k = pud_offset(pgd_k, address); - if (!pud_present(*pud_k)) - goto no_context; - - pmd = pmd_offset(pud, address); - pmd_k = pmd_offset(pud_k, address); - if (!pmd_present(*pmd_k)) - goto no_context; - set_pmd(pmd, *pmd_k); +#ifndef CONFIG_X86_PAE +void vmalloc_sync_all(void) +{ + /* + * Note that races in the updates of insync and start aren't + * problematic: insync can only get set bits added, and updates to + * start are only improving performance (without affecting correctness + * if undone). 
+ */ + static DECLARE_BITMAP(insync, PTRS_PER_PGD); + static unsigned long start = TASK_SIZE; + unsigned long address; - pte_k = pte_offset_kernel(pmd_k, address); - if (!pte_present(*pte_k)) - goto no_context; - return; + BUILD_BUG_ON(TASK_SIZE & ~PGDIR_MASK); + for (address = start; address >= TASK_SIZE; address += PGDIR_SIZE) { + if (!test_bit(pgd_index(address), insync)) { + unsigned long flags; + struct page *page; + + spin_lock_irqsave(&pgd_lock, flags); + for (page = pgd_list; page; page = + (struct page *)page->index) + if (!vmalloc_sync_one(page_address(page), + address)) { + BUG_ON(page != pgd_list); + break; + } + spin_unlock_irqrestore(&pgd_lock, flags); + if (!page) + set_bit(pgd_index(address), insync); + } + if (address == start && test_bit(pgd_index(address), insync)) + start = address + PGDIR_SIZE; } } +#endif diff --git a/arch/i386/mm/init.c b/arch/i386/mm/init.c index 7ba55a6e2db..9f66ac582a8 100644 --- a/arch/i386/mm/init.c +++ b/arch/i386/mm/init.c @@ -720,21 +720,6 @@ static int noinline do_test_wp_bit(void) return flag; } -void free_initmem(void) -{ - unsigned long addr; - - addr = (unsigned long)(&__init_begin); - for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) { - ClearPageReserved(virt_to_page(addr)); - init_page_count(virt_to_page(addr)); - memset((void *)addr, 0xcc, PAGE_SIZE); - free_page(addr); - totalram_pages++; - } - printk (KERN_INFO "Freeing unused kernel memory: %dk freed\n", (__init_end - __init_begin) >> 10); -} - #ifdef CONFIG_DEBUG_RODATA extern char __start_rodata, __end_rodata; @@ -758,17 +743,31 @@ void mark_rodata_ro(void) } #endif +void free_init_pages(char *what, unsigned long begin, unsigned long end) +{ + unsigned long addr; + + for (addr = begin; addr < end; addr += PAGE_SIZE) { + ClearPageReserved(virt_to_page(addr)); + init_page_count(virt_to_page(addr)); + memset((void *)addr, 0xcc, PAGE_SIZE); + free_page(addr); + totalram_pages++; + } + printk(KERN_INFO "Freeing %s: %ldk freed\n", what, (end - begin) >> 10); +} + +void free_initmem(void) +{ + free_init_pages("unused kernel memory", + (unsigned long)(&__init_begin), + (unsigned long)(&__init_end)); +} #ifdef CONFIG_BLK_DEV_INITRD void free_initrd_mem(unsigned long start, unsigned long end) { - if (start < end) - printk (KERN_INFO "Freeing initrd memory: %ldk freed\n", (end - start) >> 10); - for (; start < end; start += PAGE_SIZE) { - ClearPageReserved(virt_to_page(start)); - init_page_count(virt_to_page(start)); - free_page(start); - totalram_pages++; - } + free_init_pages("initrd memory", start, end); } #endif + diff --git a/arch/i386/oprofile/nmi_int.c b/arch/i386/oprofile/nmi_int.c index 0493e8b8ec4..1accce50c2c 100644 --- a/arch/i386/oprofile/nmi_int.c +++ b/arch/i386/oprofile/nmi_int.c @@ -122,7 +122,7 @@ static void nmi_save_registers(void * dummy) static void free_msrs(void) { int i; - for (i = 0; i < NR_CPUS; ++i) { + for_each_cpu(i) { kfree(cpu_msrs[i].counters); cpu_msrs[i].counters = NULL; kfree(cpu_msrs[i].controls); @@ -138,10 +138,7 @@ static int allocate_msrs(void) size_t counters_size = sizeof(struct op_msr) * model->num_counters; int i; - for (i = 0; i < NR_CPUS; ++i) { - if (!cpu_online(i)) - continue; - + for_each_online_cpu(i) { cpu_msrs[i].counters = kmalloc(counters_size, GFP_KERNEL); if (!cpu_msrs[i].counters) { success = 0; diff --git a/arch/i386/pci/Makefile b/arch/i386/pci/Makefile index 5461d4d5ea1..62ad75c57e6 100644 --- a/arch/i386/pci/Makefile +++ b/arch/i386/pci/Makefile @@ -1,4 +1,4 @@ -obj-y := i386.o +obj-y := i386.o init.o 
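The oprofile and NMI hunks above, and many of the architecture hunks that follow, apply the same mechanical conversion: open-coded loops over NR_CPUS with an explicit cpu_online()/cpu_possible() check become cpumask iterators. A minimal sketch of the idiom, assuming the 2.6.16-era <linux/cpumask.h> helpers (illustrative only, not part of any hunk):

#include <linux/cpumask.h>

static int count_online_old(void)
{
    int cpu, n = 0;

    /* old style: walk every possible index and filter by hand */
    for (cpu = 0; cpu < NR_CPUS; cpu++) {
        if (!cpu_online(cpu))
            continue;
        n++;
    }
    return n;
}

static int count_online_new(void)
{
    int cpu, n = 0;

    /* new style: the iterator visits only the bits set in cpu_online_map;
     * for_each_cpu() similarly walks cpu_possible_map */
    for_each_online_cpu(cpu)
        n++;
    return n;
}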
obj-$(CONFIG_PCI_BIOS) += pcbios.o obj-$(CONFIG_PCI_MMCONFIG) += mmconfig.o direct.o diff --git a/arch/i386/pci/common.c b/arch/i386/pci/common.c index f6bc48da4d2..dbece776c5b 100644 --- a/arch/i386/pci/common.c +++ b/arch/i386/pci/common.c @@ -8,6 +8,7 @@ #include <linux/pci.h> #include <linux/ioport.h> #include <linux/init.h> +#include <linux/dmi.h> #include <asm/acpi.h> #include <asm/segment.h> @@ -120,11 +121,42 @@ void __devinit pcibios_fixup_bus(struct pci_bus *b) pci_read_bridge_bases(b); } +/* + * Enable renumbering of PCI bus# ranges to reach all PCI busses (Cardbus) + */ +#ifdef __i386__ +static int __devinit assign_all_busses(struct dmi_system_id *d) +{ + pci_probe |= PCI_ASSIGN_ALL_BUSSES; + printk(KERN_INFO "%s detected: enabling PCI bus# renumbering" + " (pci=assign-busses)\n", d->ident); + return 0; +} +#endif + +/* + * Laptops which need pci=assign-busses to see Cardbus cards + */ +static struct dmi_system_id __devinitdata pciprobe_dmi_table[] = { +#ifdef __i386__ + { + .callback = assign_all_busses, + .ident = "Samsung X20 Laptop", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Samsung Electronics"), + DMI_MATCH(DMI_PRODUCT_NAME, "SX20S"), + }, + }, +#endif /* __i386__ */ + {} +}; struct pci_bus * __devinit pcibios_scan_root(int busnum) { struct pci_bus *bus = NULL; + dmi_check_system(pciprobe_dmi_table); + while ((bus = pci_find_next_bus(bus)) != NULL) { if (bus->number == busnum) { /* Already scanned */ diff --git a/arch/i386/pci/direct.c b/arch/i386/pci/direct.c index e3ac502bf2f..99012b93bd1 100644 --- a/arch/i386/pci/direct.c +++ b/arch/i386/pci/direct.c @@ -245,7 +245,7 @@ static int __init pci_check_type2(void) return works; } -static int __init pci_direct_init(void) +void __init pci_direct_init(void) { struct resource *region, *region2; @@ -258,16 +258,16 @@ static int __init pci_direct_init(void) if (pci_check_type1()) { printk(KERN_INFO "PCI: Using configuration type 1\n"); raw_pci_ops = &pci_direct_conf1; - return 0; + return; } release_resource(region); type2: if ((pci_probe & PCI_PROBE_CONF2) == 0) - goto out; + return; region = request_region(0xCF8, 4, "PCI conf2"); if (!region) - goto out; + return; region2 = request_region(0xC000, 0x1000, "PCI conf2"); if (!region2) goto fail2; @@ -275,15 +275,10 @@ static int __init pci_direct_init(void) if (pci_check_type2()) { printk(KERN_INFO "PCI: Using configuration type 2\n"); raw_pci_ops = &pci_direct_conf2; - return 0; + return; } release_resource(region2); fail2: release_resource(region); - - out: - return 0; } - -arch_initcall(pci_direct_init); diff --git a/arch/i386/pci/init.c b/arch/i386/pci/init.c new file mode 100644 index 00000000000..f9156d3ac72 --- /dev/null +++ b/arch/i386/pci/init.c @@ -0,0 +1,25 @@ +#include <linux/config.h> +#include <linux/pci.h> +#include <linux/init.h> +#include "pci.h" + +/* arch_initcall has too random ordering, so call the initializers + in the right sequence from here. 
*/ +static __init int pci_access_init(void) +{ +#ifdef CONFIG_PCI_MMCONFIG + pci_mmcfg_init(); +#endif + if (raw_pci_ops) + return 0; +#ifdef CONFIG_PCI_BIOS + pci_pcbios_init(); +#endif + if (raw_pci_ops) + return 0; +#ifdef CONFIG_PCI_DIRECT + pci_direct_init(); +#endif + return 0; +} +arch_initcall(pci_access_init); diff --git a/arch/i386/pci/mmconfig.c b/arch/i386/pci/mmconfig.c index 0ee8a983708..613789071f3 100644 --- a/arch/i386/pci/mmconfig.c +++ b/arch/i386/pci/mmconfig.c @@ -172,25 +172,20 @@ static __init void unreachable_devices(void) } } -static int __init pci_mmcfg_init(void) +void __init pci_mmcfg_init(void) { if ((pci_probe & PCI_PROBE_MMCONF) == 0) - goto out; + return; acpi_table_parse(ACPI_MCFG, acpi_parse_mcfg); if ((pci_mmcfg_config_num == 0) || (pci_mmcfg_config == NULL) || (pci_mmcfg_config[0].base_address == 0)) - goto out; + return; printk(KERN_INFO "PCI: Using MMCONFIG\n"); raw_pci_ops = &pci_mmcfg; pci_probe = (pci_probe & ~PCI_PROBE_MASK) | PCI_PROBE_MMCONF; unreachable_devices(); - - out: - return 0; } - -arch_initcall(pci_mmcfg_init); diff --git a/arch/i386/pci/pcbios.c b/arch/i386/pci/pcbios.c index b9d65f0bc2d..1eec0868f4b 100644 --- a/arch/i386/pci/pcbios.c +++ b/arch/i386/pci/pcbios.c @@ -476,14 +476,12 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq) } EXPORT_SYMBOL(pcibios_set_irq_routing); -static int __init pci_pcbios_init(void) +void __init pci_pcbios_init(void) { if ((pci_probe & PCI_PROBE_BIOS) && ((raw_pci_ops = pci_find_bios()))) { pci_probe |= PCI_BIOS_SORT; pci_bios_present = 1; } - return 0; } -arch_initcall(pci_pcbios_init); diff --git a/arch/i386/pci/pci.h b/arch/i386/pci/pci.h index f550781ec31..12035e29108 100644 --- a/arch/i386/pci/pci.h +++ b/arch/i386/pci/pci.h @@ -80,4 +80,7 @@ extern int pci_conf1_write(unsigned int seg, unsigned int bus, extern int pci_conf1_read(unsigned int seg, unsigned int bus, unsigned int devfn, int reg, int len, u32 *value); +extern void pci_direct_init(void); +extern void pci_pcbios_init(void); +extern void pci_mmcfg_init(void); diff --git a/arch/ia64/hp/sim/simserial.c b/arch/ia64/hp/sim/simserial.c index 626cdc83668..0e5c6ae5022 100644 --- a/arch/ia64/hp/sim/simserial.c +++ b/arch/ia64/hp/sim/simserial.c @@ -46,11 +46,6 @@ #define KEYBOARD_INTR 3 /* must match with simulator! */ #define NR_PORTS 1 /* only one port for now */ -#define SERIAL_INLINE 1 - -#ifdef SERIAL_INLINE -#define _INLINE_ inline -#endif #define IRQ_T(info) ((info->flags & ASYNC_SHARE_IRQ) ? 
SA_SHIRQ : SA_INTERRUPT) @@ -237,7 +232,7 @@ static void rs_put_char(struct tty_struct *tty, unsigned char ch) local_irq_restore(flags); } -static _INLINE_ void transmit_chars(struct async_struct *info, int *intr_done) +static void transmit_chars(struct async_struct *info, int *intr_done) { int count; unsigned long flags; diff --git a/arch/m32r/kernel/irq.c b/arch/m32r/kernel/irq.c index 1ce63926a3c..a4634b06f67 100644 --- a/arch/m32r/kernel/irq.c +++ b/arch/m32r/kernel/irq.c @@ -37,9 +37,8 @@ int show_interrupts(struct seq_file *p, void *v) if (i == 0) { seq_printf(p, " "); - for (j=0; j<NR_CPUS; j++) - if (cpu_online(j)) - seq_printf(p, "CPU%d ",j); + for_each_online_cpu(j) + seq_printf(p, "CPU%d ",j); seq_putc(p, '\n'); } @@ -52,9 +51,8 @@ int show_interrupts(struct seq_file *p, void *v) #ifndef CONFIG_SMP seq_printf(p, "%10u ", kstat_irqs(i)); #else - for (j = 0; j < NR_CPUS; j++) - if (cpu_online(j)) - seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]); + for_each_online_cpu(j) + seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]); #endif seq_printf(p, " %14s", irq_desc[i].handler->typename); seq_printf(p, " %s", action->name); diff --git a/arch/m68k/bvme6000/rtc.c b/arch/m68k/bvme6000/rtc.c index 703cbc6dc9c..15c16b62dff 100644 --- a/arch/m68k/bvme6000/rtc.c +++ b/arch/m68k/bvme6000/rtc.c @@ -18,6 +18,7 @@ #include <linux/module.h> #include <linux/mc146818rtc.h> /* For struct rtc_time and ioctls, etc */ #include <linux/smp_lock.h> +#include <linux/bcd.h> #include <asm/bvme6000hw.h> #include <asm/io.h> @@ -32,9 +33,6 @@ * ioctls. */ -#define BCD2BIN(val) (((val)&15) + ((val)>>4)*10) -#define BIN2BCD(val) ((((val)/10)<<4) + (val)%10) - static unsigned char days_in_mo[] = {0, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31}; diff --git a/arch/mips/kernel/irq.c b/arch/mips/kernel/irq.c index 7d93992e462..3dd76b3d296 100644 --- a/arch/mips/kernel/irq.c +++ b/arch/mips/kernel/irq.c @@ -68,9 +68,8 @@ int show_interrupts(struct seq_file *p, void *v) if (i == 0) { seq_printf(p, " "); - for (j=0; j<NR_CPUS; j++) - if (cpu_online(j)) - seq_printf(p, "CPU%d ",j); + for_each_online_cpu(j) + seq_printf(p, "CPU%d ",j); seq_putc(p, '\n'); } @@ -83,9 +82,8 @@ int show_interrupts(struct seq_file *p, void *v) #ifndef CONFIG_SMP seq_printf(p, "%10u ", kstat_irqs(i)); #else - for (j = 0; j < NR_CPUS; j++) - if (cpu_online(j)) - seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]); + for_each_online_cpu(j) + seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]); #endif seq_printf(p, " %14s", irq_desc[i].handler->typename); seq_printf(p, " %s", action->name); diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c index 06ed9075242..78d171bfa33 100644 --- a/arch/mips/kernel/smp.c +++ b/arch/mips/kernel/smp.c @@ -167,8 +167,8 @@ int smp_call_function (void (*func) (void *info), void *info, int retry, mb(); /* Send a message to all other CPUs and wait for them to respond */ - for (i = 0; i < NR_CPUS; i++) - if (cpu_online(i) && i != cpu) + for_each_online_cpu(i) + if (i != cpu) core_send_ipi(i, SMP_CALL_FUNCTION); /* Wait for response */ diff --git a/arch/mips/sgi-ip27/ip27-irq.c b/arch/mips/sgi-ip27/ip27-irq.c index 73e5e52781d..2854ac4c9be 100644 --- a/arch/mips/sgi-ip27/ip27-irq.c +++ b/arch/mips/sgi-ip27/ip27-irq.c @@ -88,12 +88,9 @@ static inline int find_level(cpuid_t *cpunum, int irq) { int cpu, i; - for (cpu = 0; cpu <= NR_CPUS; cpu++) { + for_each_online_cpu(cpu) { struct slice_data *si = cpu_data[cpu].data; - if (!cpu_online(cpu)) - continue; - for (i = BASE_PCI_IRQ; i < LEVELS_PER_SLICE; i++) if (si->level_to_irq[i] == 
irq) { *cpunum = cpu; diff --git a/arch/parisc/kernel/smp.c b/arch/parisc/kernel/smp.c index 25564b7ca6b..d6ac1c60a47 100644 --- a/arch/parisc/kernel/smp.c +++ b/arch/parisc/kernel/smp.c @@ -298,8 +298,8 @@ send_IPI_allbutself(enum ipi_message_type op) { int i; - for (i = 0; i < NR_CPUS; i++) { - if (cpu_online(i) && i != smp_processor_id()) + for_each_online_cpu(i) { + if (i != smp_processor_id()) send_IPI_single(i, op); } } @@ -643,14 +643,13 @@ int sys_cpus(int argc, char **argv) if ( argc == 1 ){ #ifdef DUMP_MORE_STATE - for(i=0; i<NR_CPUS; i++) { + for_each_online_cpu(i) { int cpus_per_line = 4; - if(cpu_online(i)) { - if (j++ % cpus_per_line) - printk(" %3d",i); - else - printk("\n %3d",i); - } + + if (j++ % cpus_per_line) + printk(" %3d",i); + else + printk("\n %3d",i); } printk("\n"); #else @@ -659,9 +658,7 @@ int sys_cpus(int argc, char **argv) } else if((argc==2) && !(strcmp(argv[1],"-l"))) { printk("\nCPUSTATE TASK CPUNUM CPUID HARDCPU(HPA)\n"); #ifdef DUMP_MORE_STATE - for(i=0;i<NR_CPUS;i++) { - if (!cpu_online(i)) - continue; + for_each_online_cpu(i) { if (cpu_data[i].cpuid != NO_PROC_ID) { switch(cpu_data[i].state) { case STATE_RENDEZVOUS: @@ -695,9 +692,7 @@ int sys_cpus(int argc, char **argv) } else if ((argc==2) && !(strcmp(argv[1],"-s"))) { #ifdef DUMP_MORE_STATE printk("\nCPUSTATE CPUID\n"); - for (i=0;i<NR_CPUS;i++) { - if (!cpu_online(i)) - continue; + for_each_online_cpu(i) { if (cpu_data[i].cpuid != NO_PROC_ID) { switch(cpu_data[i].state) { case STATE_RENDEZVOUS: diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c index 24dc8117b82..771a59cbd21 100644 --- a/arch/powerpc/kernel/irq.c +++ b/arch/powerpc/kernel/irq.c @@ -135,9 +135,8 @@ skip: #ifdef CONFIG_TAU_INT if (tau_initialized){ seq_puts(p, "TAU: "); - for (j = 0; j < NR_CPUS; j++) - if (cpu_online(j)) - seq_printf(p, "%10u ", tau_interrupts(j)); + for_each_online_cpu(j) + seq_printf(p, "%10u ", tau_interrupts(j)); seq_puts(p, " PowerPC Thermal Assist (cpu temp)\n"); } #endif diff --git a/arch/powerpc/kernel/kprobes.c b/arch/powerpc/kernel/kprobes.c index 258039fb301..cb1fe5878e8 100644 --- a/arch/powerpc/kernel/kprobes.c +++ b/arch/powerpc/kernel/kprobes.c @@ -81,9 +81,9 @@ void __kprobes arch_disarm_kprobe(struct kprobe *p) void __kprobes arch_remove_kprobe(struct kprobe *p) { - down(&kprobe_mutex); + mutex_lock(&kprobe_mutex); free_insn_slot(p->ainsn.insn); - up(&kprobe_mutex); + mutex_unlock(&kprobe_mutex); } static inline void prepare_singlestep(struct kprobe *p, struct pt_regs *regs) diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c index be12041c0fc..c1d62bf11f2 100644 --- a/arch/powerpc/kernel/setup-common.c +++ b/arch/powerpc/kernel/setup-common.c @@ -162,9 +162,8 @@ static int show_cpuinfo(struct seq_file *m, void *v) #if defined(CONFIG_SMP) && defined(CONFIG_PPC32) unsigned long bogosum = 0; int i; - for (i = 0; i < NR_CPUS; ++i) - if (cpu_online(i)) - bogosum += loops_per_jiffy; + for_each_online_cpu(i) + bogosum += loops_per_jiffy; seq_printf(m, "total bogomips\t: %lu.%02lu\n", bogosum/(500000/HZ), bogosum/(5000/HZ) % 100); #endif /* CONFIG_SMP && CONFIG_PPC32 */ diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c index db72a92943b..dc2770df25b 100644 --- a/arch/powerpc/kernel/setup_32.c +++ b/arch/powerpc/kernel/setup_32.c @@ -272,9 +272,8 @@ int __init ppc_init(void) if ( ppc_md.progress ) ppc_md.progress(" ", 0xffff); /* register CPU devices */ - for (i = 0; i < NR_CPUS; i++) - if (cpu_possible(i)) - 
register_cpu(&cpu_devices[i], i, NULL); + for_each_cpu(i) + register_cpu(&cpu_devices[i], i, NULL); /* call platform init */ if (ppc_md.init != NULL) { diff --git a/arch/powerpc/platforms/powermac/smp.c b/arch/powerpc/platforms/powermac/smp.c index 6d64a9bf347..1065d87fc27 100644 --- a/arch/powerpc/platforms/powermac/smp.c +++ b/arch/powerpc/platforms/powermac/smp.c @@ -191,9 +191,7 @@ static void smp_psurge_message_pass(int target, int msg) if (num_online_cpus() < 2) return; - for (i = 0; i < NR_CPUS; i++) { - if (!cpu_online(i)) - continue; + for_each_online_cpu(i) { if (target == MSG_ALL || (target == MSG_ALL_BUT_SELF && i != smp_processor_id()) || target == i) { diff --git a/arch/ppc/kernel/setup.c b/arch/ppc/kernel/setup.c index c08ab432e95..53e9deacee8 100644 --- a/arch/ppc/kernel/setup.c +++ b/arch/ppc/kernel/setup.c @@ -168,9 +168,8 @@ int show_cpuinfo(struct seq_file *m, void *v) /* Show summary information */ #ifdef CONFIG_SMP unsigned long bogosum = 0; - for (i = 0; i < NR_CPUS; ++i) - if (cpu_online(i)) - bogosum += cpu_data[i].loops_per_jiffy; + for_each_online_cpu(i) + bogosum += cpu_data[i].loops_per_jiffy; seq_printf(m, "total bogomips\t: %lu.%02lu\n", bogosum/(500000/HZ), bogosum/(5000/HZ) % 100); #endif /* CONFIG_SMP */ @@ -712,9 +711,8 @@ int __init ppc_init(void) if ( ppc_md.progress ) ppc_md.progress(" ", 0xffff); /* register CPU devices */ - for (i = 0; i < NR_CPUS; i++) - if (cpu_possible(i)) - register_cpu(&cpu_devices[i], i, NULL); + for_each_cpu(i) + register_cpu(&cpu_devices[i], i, NULL); /* call platform init */ if (ppc_md.init != NULL) { diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c index 7dbe00c76c6..d52d6d211d9 100644 --- a/arch/s390/kernel/smp.c +++ b/arch/s390/kernel/smp.c @@ -799,9 +799,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus) */ print_cpu_info(&S390_lowcore.cpu_data); - for(i = 0; i < NR_CPUS; i++) { - if (!cpu_possible(i)) - continue; + for_each_cpu(i) { lowcore_ptr[i] = (struct _lowcore *) __get_free_pages(GFP_KERNEL|GFP_DMA, sizeof(void*) == 8 ? 
1 : 0); diff --git a/arch/sh/kernel/irq.c b/arch/sh/kernel/irq.c index 6883c00728c..b56e79632f2 100644 --- a/arch/sh/kernel/irq.c +++ b/arch/sh/kernel/irq.c @@ -35,9 +35,8 @@ int show_interrupts(struct seq_file *p, void *v) if (i == 0) { seq_puts(p, " "); - for (j=0; j<NR_CPUS; j++) - if (cpu_online(j)) - seq_printf(p, "CPU%d ",j); + for_each_online_cpu(j) + seq_printf(p, "CPU%d ",j); seq_putc(p, '\n'); } diff --git a/arch/sh/kernel/setup.c b/arch/sh/kernel/setup.c index a067a34e0b6..c0e79843f58 100644 --- a/arch/sh/kernel/setup.c +++ b/arch/sh/kernel/setup.c @@ -404,9 +404,8 @@ static int __init topology_init(void) { int cpu_id; - for (cpu_id = 0; cpu_id < NR_CPUS; cpu_id++) - if (cpu_possible(cpu_id)) - register_cpu(&cpu[cpu_id], cpu_id, NULL); + for_each_cpu(cpu_id) + register_cpu(&cpu[cpu_id], cpu_id, NULL); return 0; } diff --git a/arch/sh64/kernel/irq.c b/arch/sh64/kernel/irq.c index 9fc2b71dbd8..d69879c0e06 100644 --- a/arch/sh64/kernel/irq.c +++ b/arch/sh64/kernel/irq.c @@ -53,9 +53,8 @@ int show_interrupts(struct seq_file *p, void *v) if (i == 0) { seq_puts(p, " "); - for (j=0; j<NR_CPUS; j++) - if (cpu_online(j)) - seq_printf(p, "CPU%d ",j); + for_each_online_cpu(j) + seq_printf(p, "CPU%d ",j); seq_putc(p, '\n'); } diff --git a/arch/sparc/kernel/irq.c b/arch/sparc/kernel/irq.c index 410b9a72aba..4c60a6ef54a 100644 --- a/arch/sparc/kernel/irq.c +++ b/arch/sparc/kernel/irq.c @@ -184,9 +184,8 @@ int show_interrupts(struct seq_file *p, void *v) #ifndef CONFIG_SMP seq_printf(p, "%10u ", kstat_irqs(i)); #else - for (j = 0; j < NR_CPUS; j++) { - if (cpu_online(j)) - seq_printf(p, "%10u ", + for_each_online_cpu(j) { + seq_printf(p, "%10u ", kstat_cpu(cpu_logical_map(j)).irqs[i]); } #endif diff --git a/arch/sparc/kernel/smp.c b/arch/sparc/kernel/smp.c index c6e721d8f47..ea5682ce703 100644 --- a/arch/sparc/kernel/smp.c +++ b/arch/sparc/kernel/smp.c @@ -243,9 +243,8 @@ int setup_profiling_timer(unsigned int multiplier) return -EINVAL; spin_lock_irqsave(&prof_setup_lock, flags); - for(i = 0; i < NR_CPUS; i++) { - if (cpu_possible(i)) - load_profile_irq(i, lvl14_resolution / multiplier); + for_each_cpu(i) { + load_profile_irq(i, lvl14_resolution / multiplier); prof_multiplier(i) = multiplier; } spin_unlock_irqrestore(&prof_setup_lock, flags); @@ -273,13 +272,12 @@ void smp_bogo(struct seq_file *m) { int i; - for (i = 0; i < NR_CPUS; i++) { - if (cpu_online(i)) - seq_printf(m, - "Cpu%dBogo\t: %lu.%02lu\n", - i, - cpu_data(i).udelay_val/(500000/HZ), - (cpu_data(i).udelay_val/(5000/HZ))%100); + for_each_online_cpu(i) { + seq_printf(m, + "Cpu%dBogo\t: %lu.%02lu\n", + i, + cpu_data(i).udelay_val/(500000/HZ), + (cpu_data(i).udelay_val/(5000/HZ))%100); } } @@ -288,8 +286,6 @@ void smp_info(struct seq_file *m) int i; seq_printf(m, "State:\n"); - for (i = 0; i < NR_CPUS; i++) { - if (cpu_online(i)) - seq_printf(m, "CPU%d\t\t: online\n", i); - } + for_each_online_cpu(i) + seq_printf(m, "CPU%d\t\t: online\n", i); } diff --git a/arch/sparc/kernel/sun4d_irq.c b/arch/sparc/kernel/sun4d_irq.c index 52621348a56..cea7fc6fc6e 100644 --- a/arch/sparc/kernel/sun4d_irq.c +++ b/arch/sparc/kernel/sun4d_irq.c @@ -103,11 +103,9 @@ found_it: seq_printf(p, "%3d: ", i); #ifndef CONFIG_SMP seq_printf(p, "%10u ", kstat_irqs(i)); #else - for (x = 0; x < NR_CPUS; x++) { - if (cpu_online(x)) - seq_printf(p, "%10u ", - kstat_cpu(cpu_logical_map(x)).irqs[i]); - } + for_each_online_cpu(x) + seq_printf(p, "%10u ", + kstat_cpu(cpu_logical_map(x)).irqs[i]); #endif seq_printf(p, "%c %s", (action->flags & SA_INTERRUPT) ? 
'+' : ' ', diff --git a/arch/sparc/kernel/sun4d_smp.c b/arch/sparc/kernel/sun4d_smp.c index 4219dd2ce3a..41bb9596be4 100644 --- a/arch/sparc/kernel/sun4d_smp.c +++ b/arch/sparc/kernel/sun4d_smp.c @@ -249,11 +249,9 @@ void __init smp4d_boot_cpus(void) } else { unsigned long bogosum = 0; - for(i = 0; i < NR_CPUS; i++) { - if (cpu_isset(i, cpu_present_map)) { - bogosum += cpu_data(i).udelay_val; - smp_highest_cpu = i; - } + for_each_present_cpu(i) { + bogosum += cpu_data(i).udelay_val; + smp_highest_cpu = i; } SMP_PRINTK(("Total of %d Processors activated (%lu.%02lu BogoMIPS).\n", cpucount + 1, bogosum/(500000/HZ), (bogosum/(5000/HZ))%100)); printk("Total of %d Processors activated (%lu.%02lu BogoMIPS).\n", diff --git a/arch/sparc/kernel/sun4m_smp.c b/arch/sparc/kernel/sun4m_smp.c index fbbd8a474c4..1dde312eebd 100644 --- a/arch/sparc/kernel/sun4m_smp.c +++ b/arch/sparc/kernel/sun4m_smp.c @@ -218,10 +218,8 @@ void __init smp4m_boot_cpus(void) cpu_present_map = cpumask_of_cpu(smp_processor_id()); } else { unsigned long bogosum = 0; - for(i = 0; i < NR_CPUS; i++) { - if (cpu_isset(i, cpu_present_map)) - bogosum += cpu_data(i).udelay_val; - } + for_each_present_cpu(i) + bogosum += cpu_data(i).udelay_val; printk("Total of %d Processors activated (%lu.%02lu BogoMIPS).\n", cpucount + 1, bogosum/(500000/HZ), diff --git a/arch/sparc64/kernel/irq.c b/arch/sparc64/kernel/irq.c index 8c93ba655b3..e505a4125e3 100644 --- a/arch/sparc64/kernel/irq.c +++ b/arch/sparc64/kernel/irq.c @@ -117,9 +117,7 @@ int show_interrupts(struct seq_file *p, void *v) #ifndef CONFIG_SMP seq_printf(p, "%10u ", kstat_irqs(i)); #else - for (j = 0; j < NR_CPUS; j++) { - if (!cpu_online(j)) - continue; + for_each_online_cpu(j) { seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]); } diff --git a/arch/sparc64/kernel/smp.c b/arch/sparc64/kernel/smp.c index 373a701c90a..1b6e2ade100 100644 --- a/arch/sparc64/kernel/smp.c +++ b/arch/sparc64/kernel/smp.c @@ -57,25 +57,21 @@ void smp_info(struct seq_file *m) int i; seq_printf(m, "State:\n"); - for (i = 0; i < NR_CPUS; i++) { - if (cpu_online(i)) - seq_printf(m, - "CPU%d:\t\tonline\n", i); - } + for_each_online_cpu(i) + seq_printf(m, "CPU%d:\t\tonline\n", i); } void smp_bogo(struct seq_file *m) { int i; - for (i = 0; i < NR_CPUS; i++) - if (cpu_online(i)) - seq_printf(m, - "Cpu%dBogo\t: %lu.%02lu\n" - "Cpu%dClkTck\t: %016lx\n", - i, cpu_data(i).udelay_val / (500000/HZ), - (cpu_data(i).udelay_val / (5000/HZ)) % 100, - i, cpu_data(i).clock_tick); + for_each_online_cpu(i) + seq_printf(m, + "Cpu%dBogo\t: %lu.%02lu\n" + "Cpu%dClkTck\t: %016lx\n", + i, cpu_data(i).udelay_val / (500000/HZ), + (cpu_data(i).udelay_val / (5000/HZ)) % 100, + i, cpu_data(i).clock_tick); } void __init smp_store_cpu_info(int id) @@ -1282,7 +1278,7 @@ int setup_profiling_timer(unsigned int multiplier) return -EINVAL; spin_lock_irqsave(&prof_setup_lock, flags); - for (i = 0; i < NR_CPUS; i++) + for_each_cpu(i) prof_multiplier(i) = multiplier; current_tick_offset = (timer_tick_offset / multiplier); spin_unlock_irqrestore(&prof_setup_lock, flags); @@ -1384,10 +1380,8 @@ void __init smp_cpus_done(unsigned int max_cpus) unsigned long bogosum = 0; int i; - for (i = 0; i < NR_CPUS; i++) { - if (cpu_online(i)) - bogosum += cpu_data(i).udelay_val; - } + for_each_online_cpu(i) + bogosum += cpu_data(i).udelay_val; printk("Total of %ld processors activated " "(%lu.%02lu BogoMIPS).\n", (long) num_online_cpus(), diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c index ded63ee9c4f..1539a8362b6 100644 --- a/arch/sparc64/mm/init.c 
+++ b/arch/sparc64/mm/init.c @@ -1828,8 +1828,8 @@ void __flush_tlb_all(void) void online_page(struct page *page) { ClearPageReserved(page); - set_page_count(page, 0); - free_cold_page(page); + init_page_count(page); + __free_page(page); totalram_pages++; num_physpages++; } diff --git a/arch/um/kernel/um_arch.c b/arch/um/kernel/um_arch.c index 27cdf916442..80c9c18aae9 100644 --- a/arch/um/kernel/um_arch.c +++ b/arch/um/kernel/um_arch.c @@ -491,6 +491,16 @@ void __init check_bugs(void) check_devanon(); } -void apply_alternatives(void *start, void *end) +void apply_alternatives(struct alt_instr *start, struct alt_instr *end) +{ +} + +void alternatives_smp_module_add(struct module *mod, char *name, + void *locks, void *locks_end, + void *text, void *text_end) +{ +} + +void alternatives_smp_module_del(struct module *mod) { } diff --git a/arch/x86_64/kernel/early_printk.c b/arch/x86_64/kernel/early_printk.c index 6dffb498ccd..a8a6aa70d69 100644 --- a/arch/x86_64/kernel/early_printk.c +++ b/arch/x86_64/kernel/early_printk.c @@ -17,11 +17,8 @@ #define VGABASE ((void __iomem *)0xffffffff800b8000UL) #endif -#define MAX_YPOS max_ypos -#define MAX_XPOS max_xpos - static int max_ypos = 25, max_xpos = 80; -static int current_ypos = 1, current_xpos = 0; +static int current_ypos = 25, current_xpos = 0; static void early_vga_write(struct console *con, const char *str, unsigned n) { @@ -29,26 +26,26 @@ static void early_vga_write(struct console *con, const char *str, unsigned n) int i, k, j; while ((c = *str++) != '\0' && n-- > 0) { - if (current_ypos >= MAX_YPOS) { + if (current_ypos >= max_ypos) { /* scroll 1 line up */ - for (k = 1, j = 0; k < MAX_YPOS; k++, j++) { - for (i = 0; i < MAX_XPOS; i++) { - writew(readw(VGABASE + 2*(MAX_XPOS*k + i)), - VGABASE + 2*(MAX_XPOS*j + i)); + for (k = 1, j = 0; k < max_ypos; k++, j++) { + for (i = 0; i < max_xpos; i++) { + writew(readw(VGABASE+2*(max_xpos*k+i)), + VGABASE + 2*(max_xpos*j + i)); } } - for (i = 0; i < MAX_XPOS; i++) - writew(0x720, VGABASE + 2*(MAX_XPOS*j + i)); - current_ypos = MAX_YPOS-1; + for (i = 0; i < max_xpos; i++) + writew(0x720, VGABASE + 2*(max_xpos*j + i)); + current_ypos = max_ypos-1; } if (c == '\n') { current_xpos = 0; current_ypos++; } else if (c != '\r') { writew(((0x7 << 8) | (unsigned short) c), - VGABASE + 2*(MAX_XPOS*current_ypos + + VGABASE + 2*(max_xpos*current_ypos + current_xpos++)); - if (current_xpos >= MAX_XPOS) { + if (current_xpos >= max_xpos) { current_xpos = 0; current_ypos++; } @@ -244,6 +241,7 @@ int __init setup_early_printk(char *opt) && SCREEN_INFO.orig_video_isVGA == 1) { max_xpos = SCREEN_INFO.orig_video_cols; max_ypos = SCREEN_INFO.orig_video_lines; + current_ypos = SCREEN_INFO.orig_y; early_console = &early_vga_console; } else if (!strncmp(buf, "simnow", 6)) { simnow_init(buf + 6); diff --git a/arch/x86_64/kernel/irq.c b/arch/x86_64/kernel/irq.c index 30d2a1e545f..d8bd0b345b1 100644 --- a/arch/x86_64/kernel/irq.c +++ b/arch/x86_64/kernel/irq.c @@ -38,9 +38,8 @@ int show_interrupts(struct seq_file *p, void *v) if (i == 0) { seq_printf(p, " "); - for (j=0; j<NR_CPUS; j++) - if (cpu_online(j)) - seq_printf(p, "CPU%d ",j); + for_each_online_cpu(j) + seq_printf(p, "CPU%d ",j); seq_putc(p, '\n'); } @@ -53,10 +52,8 @@ int show_interrupts(struct seq_file *p, void *v) #ifndef CONFIG_SMP seq_printf(p, "%10u ", kstat_irqs(i)); #else - for (j=0; j<NR_CPUS; j++) - if (cpu_online(j)) - seq_printf(p, "%10u ", - kstat_cpu(j).irqs[i]); + for_each_online_cpu(j) + seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]); #endif seq_printf(p, 
" %14s", irq_desc[i].handler->typename); @@ -68,15 +65,13 @@ skip: spin_unlock_irqrestore(&irq_desc[i].lock, flags); } else if (i == NR_IRQS) { seq_printf(p, "NMI: "); - for (j = 0; j < NR_CPUS; j++) - if (cpu_online(j)) - seq_printf(p, "%10u ", cpu_pda(j)->__nmi_count); + for_each_online_cpu(j) + seq_printf(p, "%10u ", cpu_pda(j)->__nmi_count); seq_putc(p, '\n'); #ifdef CONFIG_X86_LOCAL_APIC seq_printf(p, "LOC: "); - for (j = 0; j < NR_CPUS; j++) - if (cpu_online(j)) - seq_printf(p, "%10u ", cpu_pda(j)->apic_timer_irqs); + for_each_online_cpu(j) + seq_printf(p, "%10u ", cpu_pda(j)->apic_timer_irqs); seq_putc(p, '\n'); #endif seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count)); diff --git a/arch/x86_64/kernel/kprobes.c b/arch/x86_64/kernel/kprobes.c index 8b866a8572c..14f0ced613b 100644 --- a/arch/x86_64/kernel/kprobes.c +++ b/arch/x86_64/kernel/kprobes.c @@ -222,9 +222,9 @@ void __kprobes arch_disarm_kprobe(struct kprobe *p) void __kprobes arch_remove_kprobe(struct kprobe *p) { - down(&kprobe_mutex); + mutex_lock(&kprobe_mutex); free_insn_slot(p->ainsn.insn); - up(&kprobe_mutex); + mutex_unlock(&kprobe_mutex); } static inline void save_previous_kprobe(struct kprobe_ctlblk *kcb) diff --git a/arch/x86_64/kernel/nmi.c b/arch/x86_64/kernel/nmi.c index 5bf17e41cd2..66c009e10ba 100644 --- a/arch/x86_64/kernel/nmi.c +++ b/arch/x86_64/kernel/nmi.c @@ -162,9 +162,7 @@ int __init check_nmi_watchdog (void) local_irq_enable(); mdelay((10*1000)/nmi_hz); // wait 10 ticks - for (cpu = 0; cpu < NR_CPUS; cpu++) { - if (!cpu_online(cpu)) - continue; + for_each_online_cpu(cpu) { if (cpu_pda(cpu)->__nmi_count - counts[cpu] <= 5) { endflag = 1; printk("CPU#%d: NMI appears to be stuck (%d->%d)!\n", diff --git a/arch/x86_64/kernel/signal.c b/arch/x86_64/kernel/signal.c index 5876df116c9..e5f5ce7909a 100644 --- a/arch/x86_64/kernel/signal.c +++ b/arch/x86_64/kernel/signal.c @@ -443,9 +443,6 @@ int do_signal(struct pt_regs *regs, sigset_t *oldset) if (!user_mode(regs)) return 1; - if (try_to_freeze()) - goto no_signal; - if (!oldset) oldset = ¤t->blocked; @@ -463,7 +460,6 @@ int do_signal(struct pt_regs *regs, sigset_t *oldset) return handle_signal(signr, &info, &ka, oldset, regs); } - no_signal: /* Did we come from a system call? 
*/ if ((long)regs->orig_rax >= 0) { /* Restart the system call - no handlers present */ diff --git a/arch/x86_64/pci/Makefile b/arch/x86_64/pci/Makefile index a8f75a2a0f6..a3f6ad57017 100644 --- a/arch/x86_64/pci/Makefile +++ b/arch/x86_64/pci/Makefile @@ -7,7 +7,7 @@ CFLAGS += -Iarch/i386/pci obj-y := i386.o obj-$(CONFIG_PCI_DIRECT)+= direct.o -obj-y += fixup.o +obj-y += fixup.o init.o obj-$(CONFIG_ACPI) += acpi.o obj-y += legacy.o irq.o common.o # mmconfig has a 64bit special @@ -22,3 +22,4 @@ irq-y += ../../i386/pci/irq.o common-y += ../../i386/pci/common.o fixup-y += ../../i386/pci/fixup.o i386-y += ../../i386/pci/i386.o +init-y += ../../i386/pci/init.o diff --git a/arch/xtensa/kernel/irq.c b/arch/xtensa/kernel/irq.c index 4cbf6d91571..51f9bed455f 100644 --- a/arch/xtensa/kernel/irq.c +++ b/arch/xtensa/kernel/irq.c @@ -83,9 +83,8 @@ int show_interrupts(struct seq_file *p, void *v) if (i == 0) { seq_printf(p, " "); - for (j=0; j<NR_CPUS; j++) - if (cpu_online(j)) - seq_printf(p, "CPU%d ",j); + for_each_online_cpu(j) + seq_printf(p, "CPU%d ",j); seq_putc(p, '\n'); } @@ -98,9 +97,8 @@ int show_interrupts(struct seq_file *p, void *v) #ifndef CONFIG_SMP seq_printf(p, "%10u ", kstat_irqs(i)); #else - for (j = 0; j < NR_CPUS; j++) - if (cpu_online(j)) - seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]); + for_each_online_cpu(j) + seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]); #endif seq_printf(p, " %14s", irq_desc[i].handler->typename); seq_printf(p, " %s", action->name); @@ -113,9 +111,8 @@ skip: spin_unlock_irqrestore(&irq_desc[i].lock, flags); } else if (i == NR_IRQS) { seq_printf(p, "NMI: "); - for (j = 0; j < NR_CPUS; j++) - if (cpu_online(j)) - seq_printf(p, "%10u ", nmi_count(j)); + for_each_online_cpu(j) + seq_printf(p, "%10u ", nmi_count(j)); seq_putc(p, '\n'); seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count)); } diff --git a/arch/xtensa/platform-iss/console.c b/arch/xtensa/platform-iss/console.c index 94fdfe474ac..2a580efb58e 100644 --- a/arch/xtensa/platform-iss/console.c +++ b/arch/xtensa/platform-iss/console.c @@ -31,10 +31,6 @@ #include <linux/tty.h> #include <linux/tty_flip.h> -#ifdef SERIAL_INLINE -#define _INLINE_ inline -#endif - #define SERIAL_MAX_NUM_LINES 1 #define SERIAL_TIMER_VALUE (20 * HZ) diff --git a/block/ioctl.c b/block/ioctl.c index e1109491c23..35fdb7dc651 100644 --- a/block/ioctl.c +++ b/block/ioctl.c @@ -42,9 +42,9 @@ static int blkpg_ioctl(struct block_device *bdev, struct blkpg_ioctl_arg __user return -EINVAL; } /* partition number in use? */ - down(&bdev->bd_sem); + mutex_lock(&bdev->bd_mutex); if (disk->part[part - 1]) { - up(&bdev->bd_sem); + mutex_unlock(&bdev->bd_mutex); return -EBUSY; } /* overlap? 
*/ @@ -55,13 +55,13 @@ static int blkpg_ioctl(struct block_device *bdev, struct blkpg_ioctl_arg __user continue; if (!(start+length <= s->start_sect || start >= s->start_sect + s->nr_sects)) { - up(&bdev->bd_sem); + mutex_unlock(&bdev->bd_mutex); return -EBUSY; } } /* all seems OK */ add_partition(disk, part, start, length); - up(&bdev->bd_sem); + mutex_unlock(&bdev->bd_mutex); return 0; case BLKPG_DEL_PARTITION: if (!disk->part[part-1]) @@ -71,9 +71,9 @@ static int blkpg_ioctl(struct block_device *bdev, struct blkpg_ioctl_arg __user bdevp = bdget_disk(disk, part); if (!bdevp) return -ENOMEM; - down(&bdevp->bd_sem); + mutex_lock(&bdevp->bd_mutex); if (bdevp->bd_openers) { - up(&bdevp->bd_sem); + mutex_unlock(&bdevp->bd_mutex); bdput(bdevp); return -EBUSY; } @@ -81,10 +81,10 @@ static int blkpg_ioctl(struct block_device *bdev, struct blkpg_ioctl_arg __user fsync_bdev(bdevp); invalidate_bdev(bdevp, 0); - down(&bdev->bd_sem); + mutex_lock(&bdev->bd_mutex); delete_partition(disk, part); - up(&bdev->bd_sem); - up(&bdevp->bd_sem); + mutex_unlock(&bdev->bd_mutex); + mutex_unlock(&bdevp->bd_mutex); bdput(bdevp); return 0; @@ -102,10 +102,10 @@ static int blkdev_reread_part(struct block_device *bdev) return -EINVAL; if (!capable(CAP_SYS_ADMIN)) return -EACCES; - if (down_trylock(&bdev->bd_sem)) + if (!mutex_trylock(&bdev->bd_mutex)) return -EBUSY; res = rescan_partitions(disk, bdev); - up(&bdev->bd_sem); + mutex_unlock(&bdev->bd_mutex); return res; } diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig index 33e2ca847a2..82710ae3922 100644 --- a/drivers/acpi/Kconfig +++ b/drivers/acpi/Kconfig @@ -205,6 +205,18 @@ config ACPI_IBM If you have an IBM ThinkPad laptop, say Y or M here. +config ACPI_IBM_DOCK + bool "Legacy Docking Station Support" + depends on ACPI_IBM + default n + ---help--- + Allows the ibm_acpi driver to handle docking station events. + This support is obsoleted by CONFIG_HOTPLUG_PCI_ACPI. It will + allow locking and removing the laptop from the docking station, + but will not properly connect PCI devices. + + If you are not sure, say N here. 
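The help text above describes a bool sub-option, so the dock code is either built into ibm_acpi or compiled out entirely; a bool symbol can never become a module of its own. A simplified sketch of the resulting source-level pattern (the real CONFIG_ACPI_IBM_DOCK symbol, but a made-up helper; the actual guards appear in the ibm_acpi.c hunks that follow):

#include <linux/config.h>

/*
 * Sketch only: a bool Kconfig symbol is either defined or absent at
 * compile time, so the optional code is compiled in or out entirely.
 */
#ifdef CONFIG_ACPI_IBM_DOCK
static int dock_code_built_in(void) { return 1; }
#else
static int dock_code_built_in(void) { return 0; }
#endif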
+ config ACPI_TOSHIBA tristate "Toshiba Laptop Extras" depends on X86 diff --git a/drivers/acpi/ibm_acpi.c b/drivers/acpi/ibm_acpi.c index 5cc090326dd..262b1f41335 100644 --- a/drivers/acpi/ibm_acpi.c +++ b/drivers/acpi/ibm_acpi.c @@ -160,13 +160,13 @@ IBM_HANDLE(cmos, root, "\\UCMS", /* R50, R50e, R50p, R51, T4x, X31, X40 */ "\\CMOS", /* A3x, G4x, R32, T23, T30, X22-24, X30 */ "\\CMS", /* R40, R40e */ ); /* all others */ - +#ifdef CONFIG_ACPI_IBM_DOCK IBM_HANDLE(dock, root, "\\_SB.GDCK", /* X30, X31, X40 */ "\\_SB.PCI0.DOCK", /* 600e/x,770e,770x,A2xm/p,T20-22,X20-21 */ "\\_SB.PCI0.PCI1.DOCK", /* all others */ "\\_SB.PCI.ISA.SLCE", /* 570 */ ); /* A21e,G4x,R30,R31,R32,R40,R40e,R50e */ - +#endif IBM_HANDLE(bay, root, "\\_SB.PCI.IDE.SECN.MAST", /* 570 */ "\\_SB.PCI0.IDE0.IDES.IDSM", /* 600e/x, 770e, 770x */ "\\_SB.PCI0.IDE0.SCND.MSTR", /* all others */ @@ -844,7 +844,7 @@ static int _sta(acpi_handle handle) return status; } - +#ifdef CONFIG_ACPI_IBM_DOCK #define dock_docked() (_sta(dock_handle) & 1) static int dock_read(char *p) @@ -907,6 +907,7 @@ static void dock_notify(struct ibm_struct *ibm, u32 event) acpi_bus_generate_event(ibm->device, event, 0); /* unknown */ } } +#endif static int bay_status_supported; static int bay_status2_supported; @@ -1574,6 +1575,7 @@ static struct ibm_struct ibms[] = { .read = light_read, .write = light_write, }, +#ifdef CONFIG_ACPI_IBM_DOCK { .name = "dock", .read = dock_read, @@ -1589,6 +1591,7 @@ static struct ibm_struct ibms[] = { .handle = &pci_handle, .type = ACPI_SYSTEM_NOTIFY, }, +#endif { .name = "bay", .init = bay_init, @@ -1880,7 +1883,9 @@ IBM_PARAM(hotkey); IBM_PARAM(bluetooth); IBM_PARAM(video); IBM_PARAM(light); +#ifdef CONFIG_ACPI_IBM_DOCK IBM_PARAM(dock); +#endif IBM_PARAM(bay); IBM_PARAM(cmos); IBM_PARAM(led); @@ -1927,7 +1932,9 @@ static int __init acpi_ibm_init(void) IBM_HANDLE_INIT(hkey); IBM_HANDLE_INIT(lght); IBM_HANDLE_INIT(cmos); +#ifdef CONFIG_ACPI_IBM_DOCK IBM_HANDLE_INIT(dock); +#endif IBM_HANDLE_INIT(pci); IBM_HANDLE_INIT(bay); if (bay_handle) diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c index 9271e5209ac..a0ab828b2cc 100644 --- a/drivers/acpi/scan.c +++ b/drivers/acpi/scan.c @@ -23,7 +23,6 @@ static LIST_HEAD(acpi_device_list); DEFINE_SPINLOCK(acpi_device_lock); LIST_HEAD(acpi_wakeup_device_list); -static int acpi_bus_trim(struct acpi_device *start, int rmdevice); static void acpi_device_release(struct kobject *kobj) { @@ -1284,7 +1283,7 @@ int acpi_bus_start(struct acpi_device *device) EXPORT_SYMBOL(acpi_bus_start); -static int acpi_bus_trim(struct acpi_device *start, int rmdevice) +int acpi_bus_trim(struct acpi_device *start, int rmdevice) { acpi_status status; struct acpi_device *parent, *child; @@ -1337,6 +1336,8 @@ static int acpi_bus_trim(struct acpi_device *start, int rmdevice) } return err; } +EXPORT_SYMBOL_GPL(acpi_bus_trim); + static int acpi_bus_scan_fixed(struct acpi_device *root) { diff --git a/drivers/base/power/suspend.c b/drivers/base/power/suspend.c index 8660779fb28..bdb60663f2e 100644 --- a/drivers/base/power/suspend.c +++ b/drivers/base/power/suspend.c @@ -8,6 +8,7 @@ * */ +#include <linux/vt_kern.h> #include <linux/device.h> #include "../base.h" #include "power.h" @@ -62,7 +63,6 @@ int suspend_device(struct device * dev, pm_message_t state) return error; } - /** * device_suspend - Save state and stop all devices in system. * @state: Power state to put each device in. 
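A few hunks back, scan.c drops the static from acpi_bus_trim() and adds EXPORT_SYMBOL_GPL(), which is what lets modular code such as a dock driver remove an ACPI device subtree. A hypothetical caller, using only the signature visible in that hunk (acpi_bus_trim() is real, the wrapper and include choice are illustrative):

#include <acpi/acpi_bus.h>

/*
 * Hypothetical helper: tear down 'dev' and everything below it.
 * The second argument is the rmdevice flag from the exported
 * prototype: non-zero means the start device itself is removed too.
 */
static int example_eject(struct acpi_device *dev)
{
    return acpi_bus_trim(dev, 1);
}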
@@ -82,6 +82,9 @@ int device_suspend(pm_message_t state) { int error = 0; + if (!is_console_suspend_safe()) + return -EINVAL; + down(&dpm_sem); down(&dpm_list_sem); while (!list_empty(&dpm_active) && error == 0) { diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c index cf39cf9aac2..e29b8926f80 100644 --- a/drivers/block/cciss.c +++ b/drivers/block/cciss.c @@ -3268,8 +3268,8 @@ clean2: unregister_blkdev(hba[i]->major, hba[i]->devname); clean1: release_io_mem(hba[i]); - free_hba(i); hba[i]->busy_initializing = 0; + free_hba(i); return(-1); } diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c index d23b54332d7..fb2d0be7cde 100644 --- a/drivers/block/floppy.c +++ b/drivers/block/floppy.c @@ -179,6 +179,7 @@ static int print_unex = 1; #include <linux/devfs_fs_kernel.h> #include <linux/platform_device.h> #include <linux/buffer_head.h> /* for invalidate_buffers() */ +#include <linux/mutex.h> /* * PS/2 floppies have much slower step rates than regular floppies. @@ -413,7 +414,7 @@ static struct floppy_write_errors write_errors[N_DRIVE]; static struct timer_list motor_off_timer[N_DRIVE]; static struct gendisk *disks[N_DRIVE]; static struct block_device *opened_bdev[N_DRIVE]; -static DECLARE_MUTEX(open_lock); +static DEFINE_MUTEX(open_lock); static struct floppy_raw_cmd *raw_cmd, default_raw_cmd; /* @@ -3333,7 +3334,7 @@ static inline int set_geometry(unsigned int cmd, struct floppy_struct *g, if (type) { if (!capable(CAP_SYS_ADMIN)) return -EPERM; - down(&open_lock); + mutex_lock(&open_lock); LOCK_FDC(drive, 1); floppy_type[type] = *g; floppy_type[type].name = "user format"; @@ -3347,7 +3348,7 @@ static inline int set_geometry(unsigned int cmd, struct floppy_struct *g, continue; __invalidate_device(bdev); } - up(&open_lock); + mutex_unlock(&open_lock); } else { int oldStretch; LOCK_FDC(drive, 1); @@ -3674,7 +3675,7 @@ static int floppy_release(struct inode *inode, struct file *filp) { int drive = (long)inode->i_bdev->bd_disk->private_data; - down(&open_lock); + mutex_lock(&open_lock); if (UDRS->fd_ref < 0) UDRS->fd_ref = 0; else if (!UDRS->fd_ref--) { @@ -3684,7 +3685,7 @@ static int floppy_release(struct inode *inode, struct file *filp) if (!UDRS->fd_ref) opened_bdev[drive] = NULL; floppy_release_irq_and_dma(); - up(&open_lock); + mutex_unlock(&open_lock); return 0; } @@ -3702,7 +3703,7 @@ static int floppy_open(struct inode *inode, struct file *filp) char *tmp; filp->private_data = (void *)0; - down(&open_lock); + mutex_lock(&open_lock); old_dev = UDRS->fd_device; if (opened_bdev[drive] && opened_bdev[drive] != inode->i_bdev) goto out2; @@ -3785,7 +3786,7 @@ static int floppy_open(struct inode *inode, struct file *filp) if ((filp->f_mode & 2) && !(UTESTF(FD_DISK_WRITABLE))) goto out; } - up(&open_lock); + mutex_unlock(&open_lock); return 0; out: if (UDRS->fd_ref < 0) @@ -3796,7 +3797,7 @@ out: opened_bdev[drive] = NULL; floppy_release_irq_and_dma(); out2: - up(&open_lock); + mutex_unlock(&open_lock); return res; } diff --git a/drivers/block/loop.c b/drivers/block/loop.c index 0010704739e..74bf0255e98 100644 --- a/drivers/block/loop.c +++ b/drivers/block/loop.c @@ -1144,7 +1144,7 @@ static int lo_ioctl(struct inode * inode, struct file * file, struct loop_device *lo = inode->i_bdev->bd_disk->private_data; int err; - down(&lo->lo_ctl_mutex); + mutex_lock(&lo->lo_ctl_mutex); switch (cmd) { case LOOP_SET_FD: err = loop_set_fd(lo, file, inode->i_bdev, arg); @@ -1170,7 +1170,7 @@ static int lo_ioctl(struct inode * inode, struct file * file, default: err = lo->ioctl ? 
lo->ioctl(lo, cmd, arg) : -EINVAL; } - up(&lo->lo_ctl_mutex); + mutex_unlock(&lo->lo_ctl_mutex); return err; } @@ -1178,9 +1178,9 @@ static int lo_open(struct inode *inode, struct file *file) { struct loop_device *lo = inode->i_bdev->bd_disk->private_data; - down(&lo->lo_ctl_mutex); + mutex_lock(&lo->lo_ctl_mutex); lo->lo_refcnt++; - up(&lo->lo_ctl_mutex); + mutex_unlock(&lo->lo_ctl_mutex); return 0; } @@ -1189,9 +1189,9 @@ static int lo_release(struct inode *inode, struct file *file) { struct loop_device *lo = inode->i_bdev->bd_disk->private_data; - down(&lo->lo_ctl_mutex); + mutex_lock(&lo->lo_ctl_mutex); --lo->lo_refcnt; - up(&lo->lo_ctl_mutex); + mutex_unlock(&lo->lo_ctl_mutex); return 0; } @@ -1233,12 +1233,12 @@ int loop_unregister_transfer(int number) xfer_funcs[n] = NULL; for (lo = &loop_dev[0]; lo < &loop_dev[max_loop]; lo++) { - down(&lo->lo_ctl_mutex); + mutex_lock(&lo->lo_ctl_mutex); if (lo->lo_encryption == xfer) loop_release_xfer(lo); - up(&lo->lo_ctl_mutex); + mutex_unlock(&lo->lo_ctl_mutex); } return 0; @@ -1285,7 +1285,7 @@ static int __init loop_init(void) lo->lo_queue = blk_alloc_queue(GFP_KERNEL); if (!lo->lo_queue) goto out_mem4; - init_MUTEX(&lo->lo_ctl_mutex); + mutex_init(&lo->lo_ctl_mutex); init_completion(&lo->lo_done); init_completion(&lo->lo_bh_done); lo->lo_number = i; diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c index 6997d8e6bfb..a9bde30dada 100644 --- a/drivers/block/nbd.c +++ b/drivers/block/nbd.c @@ -459,9 +459,9 @@ static void do_nbd_request(request_queue_t * q) req->errors = 0; spin_unlock_irq(q->queue_lock); - down(&lo->tx_lock); + mutex_lock(&lo->tx_lock); if (unlikely(!lo->sock)) { - up(&lo->tx_lock); + mutex_unlock(&lo->tx_lock); printk(KERN_ERR "%s: Attempted send on closed socket\n", lo->disk->disk_name); req->errors++; @@ -484,7 +484,7 @@ static void do_nbd_request(request_queue_t * q) } lo->active_req = NULL; - up(&lo->tx_lock); + mutex_unlock(&lo->tx_lock); wake_up_all(&lo->active_wq); spin_lock_irq(q->queue_lock); @@ -534,9 +534,9 @@ static int nbd_ioctl(struct inode *inode, struct file *file, case NBD_CLEAR_SOCK: error = 0; - down(&lo->tx_lock); + mutex_lock(&lo->tx_lock); lo->sock = NULL; - up(&lo->tx_lock); + mutex_unlock(&lo->tx_lock); file = lo->file; lo->file = NULL; nbd_clear_que(lo); @@ -590,7 +590,7 @@ static int nbd_ioctl(struct inode *inode, struct file *file, * FIXME: This code is duplicated from sys_shutdown, but * there should be a more generic interface rather than * calling socket ops directly here */ - down(&lo->tx_lock); + mutex_lock(&lo->tx_lock); if (lo->sock) { printk(KERN_WARNING "%s: shutting down socket\n", lo->disk->disk_name); @@ -598,7 +598,7 @@ static int nbd_ioctl(struct inode *inode, struct file *file, SEND_SHUTDOWN|RCV_SHUTDOWN); lo->sock = NULL; } - up(&lo->tx_lock); + mutex_unlock(&lo->tx_lock); file = lo->file; lo->file = NULL; nbd_clear_que(lo); @@ -683,7 +683,7 @@ static int __init nbd_init(void) nbd_dev[i].flags = 0; spin_lock_init(&nbd_dev[i].queue_lock); INIT_LIST_HEAD(&nbd_dev[i].queue_head); - init_MUTEX(&nbd_dev[i].tx_lock); + mutex_init(&nbd_dev[i].tx_lock); init_waitqueue_head(&nbd_dev[i].active_wq); nbd_dev[i].blksize = 1024; nbd_dev[i].bytesize = 0x7ffffc00ULL << 10; /* 2TB */ diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c index 476a5b553f3..1d261f985f3 100644 --- a/drivers/block/pktcdvd.c +++ b/drivers/block/pktcdvd.c @@ -56,6 +56,7 @@ #include <linux/seq_file.h> #include <linux/miscdevice.h> #include <linux/suspend.h> +#include <linux/mutex.h> #include <scsi/scsi_cmnd.h> 
#include <scsi/scsi_ioctl.h> #include <scsi/scsi.h> @@ -81,7 +82,7 @@ static struct pktcdvd_device *pkt_devs[MAX_WRITERS]; static struct proc_dir_entry *pkt_proc; static int pkt_major; -static struct semaphore ctl_mutex; /* Serialize open/close/setup/teardown */ +static struct mutex ctl_mutex; /* Serialize open/close/setup/teardown */ static mempool_t *psd_pool; @@ -2018,7 +2019,7 @@ static int pkt_open(struct inode *inode, struct file *file) VPRINTK("pktcdvd: entering open\n"); - down(&ctl_mutex); + mutex_lock(&ctl_mutex); pd = pkt_find_dev_from_minor(iminor(inode)); if (!pd) { ret = -ENODEV; @@ -2044,14 +2045,14 @@ static int pkt_open(struct inode *inode, struct file *file) set_blocksize(inode->i_bdev, CD_FRAMESIZE); } - up(&ctl_mutex); + mutex_unlock(&ctl_mutex); return 0; out_dec: pd->refcnt--; out: VPRINTK("pktcdvd: failed open (%d)\n", ret); - up(&ctl_mutex); + mutex_unlock(&ctl_mutex); return ret; } @@ -2060,14 +2061,14 @@ static int pkt_close(struct inode *inode, struct file *file) struct pktcdvd_device *pd = inode->i_bdev->bd_disk->private_data; int ret = 0; - down(&ctl_mutex); + mutex_lock(&ctl_mutex); pd->refcnt--; BUG_ON(pd->refcnt < 0); if (pd->refcnt == 0) { int flush = test_bit(PACKET_WRITABLE, &pd->flags); pkt_release_dev(pd, flush); } - up(&ctl_mutex); + mutex_unlock(&ctl_mutex); return ret; } @@ -2596,21 +2597,21 @@ static int pkt_ctl_ioctl(struct inode *inode, struct file *file, unsigned int cm case PKT_CTRL_CMD_SETUP: if (!capable(CAP_SYS_ADMIN)) return -EPERM; - down(&ctl_mutex); + mutex_lock(&ctl_mutex); ret = pkt_setup_dev(&ctrl_cmd); - up(&ctl_mutex); + mutex_unlock(&ctl_mutex); break; case PKT_CTRL_CMD_TEARDOWN: if (!capable(CAP_SYS_ADMIN)) return -EPERM; - down(&ctl_mutex); + mutex_lock(&ctl_mutex); ret = pkt_remove_dev(&ctrl_cmd); - up(&ctl_mutex); + mutex_unlock(&ctl_mutex); break; case PKT_CTRL_CMD_STATUS: - down(&ctl_mutex); + mutex_lock(&ctl_mutex); pkt_get_status(&ctrl_cmd); - up(&ctl_mutex); + mutex_unlock(&ctl_mutex); break; default: return -ENOTTY; @@ -2656,7 +2657,7 @@ static int __init pkt_init(void) goto out; } - init_MUTEX(&ctl_mutex); + mutex_init(&ctl_mutex); pkt_proc = proc_mkdir("pktcdvd", proc_root_driver); diff --git a/drivers/block/rd.c b/drivers/block/rd.c index ffd6abd6d5a..1c54f46d3f7 100644 --- a/drivers/block/rd.c +++ b/drivers/block/rd.c @@ -310,12 +310,12 @@ static int rd_ioctl(struct inode *inode, struct file *file, * cache */ error = -EBUSY; - down(&bdev->bd_sem); + mutex_lock(&bdev->bd_mutex); if (bdev->bd_openers <= 2) { truncate_inode_pages(bdev->bd_inode->i_mapping, 0); error = 0; } - up(&bdev->bd_sem); + mutex_unlock(&bdev->bd_mutex); return error; } diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c index 879bbc26ce9..a59876a0bfa 100644 --- a/drivers/cdrom/cdrom.c +++ b/drivers/cdrom/cdrom.c @@ -407,7 +407,6 @@ int register_cdrom(struct cdrom_device_info *cdi) ENSURE(get_mcn, CDC_MCN); ENSURE(reset, CDC_RESET); ENSURE(audio_ioctl, CDC_PLAY_AUDIO); - ENSURE(dev_ioctl, CDC_IOCTLS); ENSURE(generic_packet, CDC_GENERIC_PACKET); cdi->mc_flags = 0; cdo->n_minors = 0; @@ -2196,395 +2195,586 @@ retry: return cdrom_read_cdda_old(cdi, ubuf, lba, nframes); } -/* Just about every imaginable ioctl is supported in the Uniform layer - * these days. ATAPI / SCSI specific code now mainly resides in - * mmc_ioct(). 
- */ -int cdrom_ioctl(struct file * file, struct cdrom_device_info *cdi, - struct inode *ip, unsigned int cmd, unsigned long arg) +static int cdrom_ioctl_multisession(struct cdrom_device_info *cdi, + void __user *argp) { - struct cdrom_device_ops *cdo = cdi->ops; + struct cdrom_multisession ms_info; + u8 requested_format; int ret; - /* Try the generic SCSI command ioctl's first.. */ - ret = scsi_cmd_ioctl(file, ip->i_bdev->bd_disk, cmd, (void __user *)arg); - if (ret != -ENOTTY) + cdinfo(CD_DO_IOCTL, "entering CDROMMULTISESSION\n"); + + if (!(cdi->ops->capability & CDC_MULTI_SESSION)) + return -ENOSYS; + + if (copy_from_user(&ms_info, argp, sizeof(ms_info))) + return -EFAULT; + + requested_format = ms_info.addr_format; + if (requested_format != CDROM_MSF && requested_format != CDROM_LBA) + return -EINVAL; + ms_info.addr_format = CDROM_LBA; + + ret = cdi->ops->get_last_session(cdi, &ms_info); + if (ret) return ret; - /* the first few commands do not deal with audio drive_info, but - only with routines in cdrom device operations. */ - switch (cmd) { - case CDROMMULTISESSION: { - struct cdrom_multisession ms_info; - u_char requested_format; - cdinfo(CD_DO_IOCTL, "entering CDROMMULTISESSION\n"); - if (!(cdo->capability & CDC_MULTI_SESSION)) - return -ENOSYS; - IOCTL_IN(arg, struct cdrom_multisession, ms_info); - requested_format = ms_info.addr_format; - if (!((requested_format == CDROM_MSF) || - (requested_format == CDROM_LBA))) - return -EINVAL; - ms_info.addr_format = CDROM_LBA; - if ((ret=cdo->get_last_session(cdi, &ms_info))) + sanitize_format(&ms_info.addr, &ms_info.addr_format, requested_format); + + if (copy_to_user(argp, &ms_info, sizeof(ms_info))) + return -EFAULT; + + cdinfo(CD_DO_IOCTL, "CDROMMULTISESSION successful\n"); + return 0; +} + +static int cdrom_ioctl_eject(struct cdrom_device_info *cdi) +{ + cdinfo(CD_DO_IOCTL, "entering CDROMEJECT\n"); + + if (!CDROM_CAN(CDC_OPEN_TRAY)) + return -ENOSYS; + if (cdi->use_count != 1 || keeplocked) + return -EBUSY; + if (CDROM_CAN(CDC_LOCK)) { + int ret = cdi->ops->lock_door(cdi, 0); + if (ret) return ret; - sanitize_format(&ms_info.addr, &ms_info.addr_format, - requested_format); - IOCTL_OUT(arg, struct cdrom_multisession, ms_info); - cdinfo(CD_DO_IOCTL, "CDROMMULTISESSION successful\n"); - return 0; - } + } - case CDROMEJECT: { - cdinfo(CD_DO_IOCTL, "entering CDROMEJECT\n"); - if (!CDROM_CAN(CDC_OPEN_TRAY)) - return -ENOSYS; - if (cdi->use_count != 1 || keeplocked) - return -EBUSY; - if (CDROM_CAN(CDC_LOCK)) - if ((ret=cdo->lock_door(cdi, 0))) - return ret; + return cdi->ops->tray_move(cdi, 1); +} - return cdo->tray_move(cdi, 1); - } +static int cdrom_ioctl_closetray(struct cdrom_device_info *cdi) +{ + cdinfo(CD_DO_IOCTL, "entering CDROMCLOSETRAY\n"); - case CDROMCLOSETRAY: { - cdinfo(CD_DO_IOCTL, "entering CDROMCLOSETRAY\n"); - if (!CDROM_CAN(CDC_CLOSE_TRAY)) - return -ENOSYS; - return cdo->tray_move(cdi, 0); - } + if (!CDROM_CAN(CDC_CLOSE_TRAY)) + return -ENOSYS; + return cdi->ops->tray_move(cdi, 0); +} - case CDROMEJECT_SW: { - cdinfo(CD_DO_IOCTL, "entering CDROMEJECT_SW\n"); - if (!CDROM_CAN(CDC_OPEN_TRAY)) - return -ENOSYS; - if (keeplocked) - return -EBUSY; - cdi->options &= ~(CDO_AUTO_CLOSE | CDO_AUTO_EJECT); - if (arg) - cdi->options |= CDO_AUTO_CLOSE | CDO_AUTO_EJECT; - return 0; - } +static int cdrom_ioctl_eject_sw(struct cdrom_device_info *cdi, + unsigned long arg) +{ + cdinfo(CD_DO_IOCTL, "entering CDROMEJECT_SW\n"); - case CDROM_MEDIA_CHANGED: { - struct cdrom_changer_info *info; - int changed; + if (!CDROM_CAN(CDC_OPEN_TRAY)) 
+ return -ENOSYS; + if (keeplocked) + return -EBUSY; - cdinfo(CD_DO_IOCTL, "entering CDROM_MEDIA_CHANGED\n"); - if (!CDROM_CAN(CDC_MEDIA_CHANGED)) - return -ENOSYS; + cdi->options &= ~(CDO_AUTO_CLOSE | CDO_AUTO_EJECT); + if (arg) + cdi->options |= CDO_AUTO_CLOSE | CDO_AUTO_EJECT; + return 0; +} - /* cannot select disc or select current disc */ - if (!CDROM_CAN(CDC_SELECT_DISC) || arg == CDSL_CURRENT) - return media_changed(cdi, 1); +static int cdrom_ioctl_media_changed(struct cdrom_device_info *cdi, + unsigned long arg) +{ + struct cdrom_changer_info *info; + int ret; - if ((unsigned int)arg >= cdi->capacity) - return -EINVAL; + cdinfo(CD_DO_IOCTL, "entering CDROM_MEDIA_CHANGED\n"); - info = kmalloc(sizeof(*info), GFP_KERNEL); - if (!info) - return -ENOMEM; + if (!CDROM_CAN(CDC_MEDIA_CHANGED)) + return -ENOSYS; - if ((ret = cdrom_read_mech_status(cdi, info))) { - kfree(info); - return ret; - } + /* cannot select disc or select current disc */ + if (!CDROM_CAN(CDC_SELECT_DISC) || arg == CDSL_CURRENT) + return media_changed(cdi, 1); - changed = info->slots[arg].change; - kfree(info); - return changed; - } + if ((unsigned int)arg >= cdi->capacity) + return -EINVAL; - case CDROM_SET_OPTIONS: { - cdinfo(CD_DO_IOCTL, "entering CDROM_SET_OPTIONS\n"); - /* options need to be in sync with capability. too late for - that, so we have to check each one separately... */ - switch (arg) { - case CDO_USE_FFLAGS: - case CDO_CHECK_TYPE: - break; - case CDO_LOCK: - if (!CDROM_CAN(CDC_LOCK)) - return -ENOSYS; - break; - case 0: - return cdi->options; - /* default is basically CDO_[AUTO_CLOSE|AUTO_EJECT] */ - default: - if (!CDROM_CAN(arg)) - return -ENOSYS; - } - cdi->options |= (int) arg; - return cdi->options; - } + info = kmalloc(sizeof(*info), GFP_KERNEL); + if (!info) + return -ENOMEM; - case CDROM_CLEAR_OPTIONS: { - cdinfo(CD_DO_IOCTL, "entering CDROM_CLEAR_OPTIONS\n"); - cdi->options &= ~(int) arg; - return cdi->options; - } + ret = cdrom_read_mech_status(cdi, info); + if (!ret) + ret = info->slots[arg].change; + kfree(info); + return ret; +} - case CDROM_SELECT_SPEED: { - cdinfo(CD_DO_IOCTL, "entering CDROM_SELECT_SPEED\n"); - if (!CDROM_CAN(CDC_SELECT_SPEED)) - return -ENOSYS; - return cdo->select_speed(cdi, arg); - } +static int cdrom_ioctl_set_options(struct cdrom_device_info *cdi, + unsigned long arg) +{ + cdinfo(CD_DO_IOCTL, "entering CDROM_SET_OPTIONS\n"); - case CDROM_SELECT_DISC: { - cdinfo(CD_DO_IOCTL, "entering CDROM_SELECT_DISC\n"); - if (!CDROM_CAN(CDC_SELECT_DISC)) + /* + * Options need to be in sync with capability. + * Too late for that, so we have to check each one separately. + */ + switch (arg) { + case CDO_USE_FFLAGS: + case CDO_CHECK_TYPE: + break; + case CDO_LOCK: + if (!CDROM_CAN(CDC_LOCK)) + return -ENOSYS; + break; + case 0: + return cdi->options; + /* default is basically CDO_[AUTO_CLOSE|AUTO_EJECT] */ + default: + if (!CDROM_CAN(arg)) return -ENOSYS; + } + cdi->options |= (int) arg; + return cdi->options; +} - if ((arg != CDSL_CURRENT) && (arg != CDSL_NONE)) - if ((int)arg >= cdi->capacity) - return -EINVAL; - - /* cdo->select_disc is a hook to allow a driver-specific - * way of seleting disc. However, since there is no - * equiv hook for cdrom_slot_status this may not - * actually be useful... 
- */ - if (cdo->select_disc != NULL) - return cdo->select_disc(cdi, arg); - - /* no driver specific select_disc(), call our own */ - cdinfo(CD_CHANGER, "Using generic cdrom_select_disc()\n"); - return cdrom_select_disc(cdi, arg); - } +static int cdrom_ioctl_clear_options(struct cdrom_device_info *cdi, + unsigned long arg) +{ + cdinfo(CD_DO_IOCTL, "entering CDROM_CLEAR_OPTIONS\n"); - case CDROMRESET: { - if (!capable(CAP_SYS_ADMIN)) - return -EACCES; - cdinfo(CD_DO_IOCTL, "entering CDROM_RESET\n"); - if (!CDROM_CAN(CDC_RESET)) - return -ENOSYS; - invalidate_bdev(ip->i_bdev, 0); - return cdo->reset(cdi); - } + cdi->options &= ~(int) arg; + return cdi->options; +} - case CDROM_LOCKDOOR: { - cdinfo(CD_DO_IOCTL, "%socking door.\n", arg ? "L" : "Unl"); - if (!CDROM_CAN(CDC_LOCK)) - return -EDRIVE_CANT_DO_THIS; - keeplocked = arg ? 1 : 0; - /* don't unlock the door on multiple opens,but allow root - * to do so */ - if ((cdi->use_count != 1) && !arg && !capable(CAP_SYS_ADMIN)) - return -EBUSY; - return cdo->lock_door(cdi, arg); - } +static int cdrom_ioctl_select_speed(struct cdrom_device_info *cdi, + unsigned long arg) +{ + cdinfo(CD_DO_IOCTL, "entering CDROM_SELECT_SPEED\n"); - case CDROM_DEBUG: { - if (!capable(CAP_SYS_ADMIN)) - return -EACCES; - cdinfo(CD_DO_IOCTL, "%sabling debug.\n", arg ? "En" : "Dis"); - debug = arg ? 1 : 0; - return debug; - } + if (!CDROM_CAN(CDC_SELECT_SPEED)) + return -ENOSYS; + return cdi->ops->select_speed(cdi, arg); +} - case CDROM_GET_CAPABILITY: { - cdinfo(CD_DO_IOCTL, "entering CDROM_GET_CAPABILITY\n"); - return (cdo->capability & ~cdi->mask); - } +static int cdrom_ioctl_select_disc(struct cdrom_device_info *cdi, + unsigned long arg) +{ + cdinfo(CD_DO_IOCTL, "entering CDROM_SELECT_DISC\n"); + + if (!CDROM_CAN(CDC_SELECT_DISC)) + return -ENOSYS; + + if (arg != CDSL_CURRENT && arg != CDSL_NONE) { + if ((int)arg >= cdi->capacity) + return -EINVAL; + } + + /* + * ->select_disc is a hook to allow a driver-specific way of + * seleting disc. However, since there is no equivalent hook for + * cdrom_slot_status this may not actually be useful... + */ + if (cdi->ops->select_disc) + return cdi->ops->select_disc(cdi, arg); + + cdinfo(CD_CHANGER, "Using generic cdrom_select_disc()\n"); + return cdrom_select_disc(cdi, arg); +} + +static int cdrom_ioctl_reset(struct cdrom_device_info *cdi, + struct block_device *bdev) +{ + cdinfo(CD_DO_IOCTL, "entering CDROM_RESET\n"); + + if (!capable(CAP_SYS_ADMIN)) + return -EACCES; + if (!CDROM_CAN(CDC_RESET)) + return -ENOSYS; + invalidate_bdev(bdev, 0); + return cdi->ops->reset(cdi); +} + +static int cdrom_ioctl_lock_door(struct cdrom_device_info *cdi, + unsigned long arg) +{ + cdinfo(CD_DO_IOCTL, "%socking door.\n", arg ? "L" : "Unl"); + + if (!CDROM_CAN(CDC_LOCK)) + return -EDRIVE_CANT_DO_THIS; + + keeplocked = arg ? 1 : 0; + + /* + * Don't unlock the door on multiple opens by default, but allow + * root to do so. + */ + if (cdi->use_count != 1 && !arg && !capable(CAP_SYS_ADMIN)) + return -EBUSY; + return cdi->ops->lock_door(cdi, arg); +} + +static int cdrom_ioctl_debug(struct cdrom_device_info *cdi, + unsigned long arg) +{ + cdinfo(CD_DO_IOCTL, "%sabling debug.\n", arg ? "En" : "Dis"); + + if (!capable(CAP_SYS_ADMIN)) + return -EACCES; + debug = arg ? 
1 : 0; + return debug; +} -/* The following function is implemented, although very few audio +static int cdrom_ioctl_get_capability(struct cdrom_device_info *cdi) +{ + cdinfo(CD_DO_IOCTL, "entering CDROM_GET_CAPABILITY\n"); + return (cdi->ops->capability & ~cdi->mask); +} + +/* + * The following function is implemented, although very few audio * discs give Universal Product Code information, which should just be * the Medium Catalog Number on the box. Note, that the way the code * is written on the CD is /not/ uniform across all discs! */ - case CDROM_GET_MCN: { - struct cdrom_mcn mcn; - cdinfo(CD_DO_IOCTL, "entering CDROM_GET_MCN\n"); - if (!(cdo->capability & CDC_MCN)) - return -ENOSYS; - if ((ret=cdo->get_mcn(cdi, &mcn))) - return ret; - IOCTL_OUT(arg, struct cdrom_mcn, mcn); - cdinfo(CD_DO_IOCTL, "CDROM_GET_MCN successful\n"); - return 0; - } +static int cdrom_ioctl_get_mcn(struct cdrom_device_info *cdi, + void __user *argp) +{ + struct cdrom_mcn mcn; + int ret; - case CDROM_DRIVE_STATUS: { - cdinfo(CD_DO_IOCTL, "entering CDROM_DRIVE_STATUS\n"); - if (!(cdo->capability & CDC_DRIVE_STATUS)) - return -ENOSYS; - if (!CDROM_CAN(CDC_SELECT_DISC)) - return cdo->drive_status(cdi, CDSL_CURRENT); - if ((arg == CDSL_CURRENT) || (arg == CDSL_NONE)) - return cdo->drive_status(cdi, CDSL_CURRENT); - if (((int)arg >= cdi->capacity)) - return -EINVAL; - return cdrom_slot_status(cdi, arg); - } + cdinfo(CD_DO_IOCTL, "entering CDROM_GET_MCN\n"); - /* Ok, this is where problems start. The current interface for the - CDROM_DISC_STATUS ioctl is flawed. It makes the false assumption - that CDs are all CDS_DATA_1 or all CDS_AUDIO, etc. Unfortunatly, - while this is often the case, it is also very common for CDs to - have some tracks with data, and some tracks with audio. Just - because I feel like it, I declare the following to be the best - way to cope. If the CD has ANY data tracks on it, it will be - returned as a data CD. If it has any XA tracks, I will return - it as that. Now I could simplify this interface by combining these - returns with the above, but this more clearly demonstrates - the problem with the current interface. Too bad this wasn't - designed to use bitmasks... -Erik - - Well, now we have the option CDS_MIXED: a mixed-type CD. - User level programmers might feel the ioctl is not very useful. 
- ---david - */ - case CDROM_DISC_STATUS: { - tracktype tracks; - cdinfo(CD_DO_IOCTL, "entering CDROM_DISC_STATUS\n"); - cdrom_count_tracks(cdi, &tracks); - if (tracks.error) - return(tracks.error); - - /* Policy mode on */ - if (tracks.audio > 0) { - if (tracks.data==0 && tracks.cdi==0 && tracks.xa==0) - return CDS_AUDIO; - else - return CDS_MIXED; - } - if (tracks.cdi > 0) return CDS_XA_2_2; - if (tracks.xa > 0) return CDS_XA_2_1; - if (tracks.data > 0) return CDS_DATA_1; - /* Policy mode off */ + if (!(cdi->ops->capability & CDC_MCN)) + return -ENOSYS; + ret = cdi->ops->get_mcn(cdi, &mcn); + if (ret) + return ret; - cdinfo(CD_WARNING,"This disc doesn't have any tracks I recognize!\n"); - return CDS_NO_INFO; - } + if (copy_to_user(argp, &mcn, sizeof(mcn))) + return -EFAULT; + cdinfo(CD_DO_IOCTL, "CDROM_GET_MCN successful\n"); + return 0; +} - case CDROM_CHANGER_NSLOTS: { - cdinfo(CD_DO_IOCTL, "entering CDROM_CHANGER_NSLOTS\n"); - return cdi->capacity; - } +static int cdrom_ioctl_drive_status(struct cdrom_device_info *cdi, + unsigned long arg) +{ + cdinfo(CD_DO_IOCTL, "entering CDROM_DRIVE_STATUS\n"); + + if (!(cdi->ops->capability & CDC_DRIVE_STATUS)) + return -ENOSYS; + if (!CDROM_CAN(CDC_SELECT_DISC) || + (arg == CDSL_CURRENT || arg == CDSL_NONE)) + return cdi->ops->drive_status(cdi, CDSL_CURRENT); + if (((int)arg >= cdi->capacity)) + return -EINVAL; + return cdrom_slot_status(cdi, arg); +} + +/* + * Ok, this is where problems start. The current interface for the + * CDROM_DISC_STATUS ioctl is flawed. It makes the false assumption that + * CDs are all CDS_DATA_1 or all CDS_AUDIO, etc. Unfortunatly, while this + * is often the case, it is also very common for CDs to have some tracks + * with data, and some tracks with audio. Just because I feel like it, + * I declare the following to be the best way to cope. If the CD has ANY + * data tracks on it, it will be returned as a data CD. If it has any XA + * tracks, I will return it as that. Now I could simplify this interface + * by combining these returns with the above, but this more clearly + * demonstrates the problem with the current interface. Too bad this + * wasn't designed to use bitmasks... -Erik + * + * Well, now we have the option CDS_MIXED: a mixed-type CD. + * User level programmers might feel the ioctl is not very useful. + * ---david + */ +static int cdrom_ioctl_disc_status(struct cdrom_device_info *cdi) +{ + tracktype tracks; + + cdinfo(CD_DO_IOCTL, "entering CDROM_DISC_STATUS\n"); + + cdrom_count_tracks(cdi, &tracks); + if (tracks.error) + return tracks.error; + + /* Policy mode on */ + if (tracks.audio > 0) { + if (!tracks.data && !tracks.cdi && !tracks.xa) + return CDS_AUDIO; + else + return CDS_MIXED; } - /* use the ioctls that are implemented through the generic_packet() - interface. this may look at bit funny, but if -ENOTTY is - returned that particular ioctl is not implemented and we - let it go through the device specific ones. 
*/ + if (tracks.cdi > 0) + return CDS_XA_2_2; + if (tracks.xa > 0) + return CDS_XA_2_1; + if (tracks.data > 0) + return CDS_DATA_1; + /* Policy mode off */ + + cdinfo(CD_WARNING,"This disc doesn't have any tracks I recognize!\n"); + return CDS_NO_INFO; +} + +static int cdrom_ioctl_changer_nslots(struct cdrom_device_info *cdi) +{ + cdinfo(CD_DO_IOCTL, "entering CDROM_CHANGER_NSLOTS\n"); + return cdi->capacity; +} + +static int cdrom_ioctl_get_subchnl(struct cdrom_device_info *cdi, + void __user *argp) +{ + struct cdrom_subchnl q; + u8 requested, back; + int ret; + + /* cdinfo(CD_DO_IOCTL,"entering CDROMSUBCHNL\n");*/ + + if (!CDROM_CAN(CDC_PLAY_AUDIO)) + return -ENOSYS; + if (copy_from_user(&q, argp, sizeof(q))) + return -EFAULT; + + requested = q.cdsc_format; + if (requested != CDROM_MSF && requested != CDROM_LBA) + return -EINVAL; + q.cdsc_format = CDROM_MSF; + + ret = cdi->ops->audio_ioctl(cdi, CDROMSUBCHNL, &q); + if (ret) + return ret; + + back = q.cdsc_format; /* local copy */ + sanitize_format(&q.cdsc_absaddr, &back, requested); + sanitize_format(&q.cdsc_reladdr, &q.cdsc_format, requested); + + if (copy_to_user(argp, &q, sizeof(q))) + return -EFAULT; + /* cdinfo(CD_DO_IOCTL, "CDROMSUBCHNL successful\n"); */ + return 0; +} + +static int cdrom_ioctl_read_tochdr(struct cdrom_device_info *cdi, + void __user *argp) +{ + struct cdrom_tochdr header; + int ret; + + /* cdinfo(CD_DO_IOCTL, "entering CDROMREADTOCHDR\n"); */ + + if (!CDROM_CAN(CDC_PLAY_AUDIO)) + return -ENOSYS; + if (copy_from_user(&header, argp, sizeof(header))) + return -EFAULT; + + ret = cdi->ops->audio_ioctl(cdi, CDROMREADTOCHDR, &header); + if (ret) + return ret; + + if (copy_to_user(argp, &header, sizeof(header))) + return -EFAULT; + /* cdinfo(CD_DO_IOCTL, "CDROMREADTOCHDR successful\n"); */ + return 0; +} + +static int cdrom_ioctl_read_tocentry(struct cdrom_device_info *cdi, + void __user *argp) +{ + struct cdrom_tocentry entry; + u8 requested_format; + int ret; + + /* cdinfo(CD_DO_IOCTL, "entering CDROMREADTOCENTRY\n"); */ + + if (!CDROM_CAN(CDC_PLAY_AUDIO)) + return -ENOSYS; + if (copy_from_user(&entry, argp, sizeof(entry))) + return -EFAULT; + + requested_format = entry.cdte_format; + if (requested_format != CDROM_MSF && requested_format != CDROM_LBA) + return -EINVAL; + /* make interface to low-level uniform */ + entry.cdte_format = CDROM_MSF; + ret = cdi->ops->audio_ioctl(cdi, CDROMREADTOCENTRY, &entry); + if (ret) + return ret; + sanitize_format(&entry.cdte_addr, &entry.cdte_format, requested_format); + + if (copy_to_user(argp, &entry, sizeof(entry))) + return -EFAULT; + /* cdinfo(CD_DO_IOCTL, "CDROMREADTOCENTRY successful\n"); */ + return 0; +} + +static int cdrom_ioctl_play_msf(struct cdrom_device_info *cdi, + void __user *argp) +{ + struct cdrom_msf msf; + + cdinfo(CD_DO_IOCTL, "entering CDROMPLAYMSF\n"); + + if (!CDROM_CAN(CDC_PLAY_AUDIO)) + return -ENOSYS; + if (copy_from_user(&msf, argp, sizeof(msf))) + return -EFAULT; + return cdi->ops->audio_ioctl(cdi, CDROMPLAYMSF, &msf); +} + +static int cdrom_ioctl_play_trkind(struct cdrom_device_info *cdi, + void __user *argp) +{ + struct cdrom_ti ti; + int ret; + + cdinfo(CD_DO_IOCTL, "entering CDROMPLAYTRKIND\n"); + + if (!CDROM_CAN(CDC_PLAY_AUDIO)) + return -ENOSYS; + if (copy_from_user(&ti, argp, sizeof(ti))) + return -EFAULT; + + ret = check_for_audio_disc(cdi, cdi->ops); + if (ret) + return ret; + return cdi->ops->audio_ioctl(cdi, CDROMPLAYTRKIND, &ti); +} +static int cdrom_ioctl_volctrl(struct cdrom_device_info *cdi, + void __user *argp) +{ + struct cdrom_volctrl 
volume; + + cdinfo(CD_DO_IOCTL, "entering CDROMVOLCTRL\n"); + + if (!CDROM_CAN(CDC_PLAY_AUDIO)) + return -ENOSYS; + if (copy_from_user(&volume, argp, sizeof(volume))) + return -EFAULT; + return cdi->ops->audio_ioctl(cdi, CDROMVOLCTRL, &volume); +} + +static int cdrom_ioctl_volread(struct cdrom_device_info *cdi, + void __user *argp) +{ + struct cdrom_volctrl volume; + int ret; + + cdinfo(CD_DO_IOCTL, "entering CDROMVOLREAD\n"); + + if (!CDROM_CAN(CDC_PLAY_AUDIO)) + return -ENOSYS; + + ret = cdi->ops->audio_ioctl(cdi, CDROMVOLREAD, &volume); + if (ret) + return ret; + + if (copy_to_user(argp, &volume, sizeof(volume))) + return -EFAULT; + return 0; +} + +static int cdrom_ioctl_audioctl(struct cdrom_device_info *cdi, + unsigned int cmd) +{ + int ret; + + cdinfo(CD_DO_IOCTL, "doing audio ioctl (start/stop/pause/resume)\n"); + + if (!CDROM_CAN(CDC_PLAY_AUDIO)) + return -ENOSYS; + ret = check_for_audio_disc(cdi, cdi->ops); + if (ret) + return ret; + return cdi->ops->audio_ioctl(cdi, cmd, NULL); +} + +/* + * Just about every imaginable ioctl is supported in the Uniform layer + * these days. + * ATAPI / SCSI specific code now mainly resides in mmc_ioctl(). + */ +int cdrom_ioctl(struct file * file, struct cdrom_device_info *cdi, + struct inode *ip, unsigned int cmd, unsigned long arg) +{ + void __user *argp = (void __user *)arg; + int ret; + + /* + * Try the generic SCSI command ioctl's first. + */ + ret = scsi_cmd_ioctl(file, ip->i_bdev->bd_disk, cmd, argp); + if (ret != -ENOTTY) + return ret; + + switch (cmd) { + case CDROMMULTISESSION: + return cdrom_ioctl_multisession(cdi, argp); + case CDROMEJECT: + return cdrom_ioctl_eject(cdi); + case CDROMCLOSETRAY: + return cdrom_ioctl_closetray(cdi); + case CDROMEJECT_SW: + return cdrom_ioctl_eject_sw(cdi, arg); + case CDROM_MEDIA_CHANGED: + return cdrom_ioctl_media_changed(cdi, arg); + case CDROM_SET_OPTIONS: + return cdrom_ioctl_set_options(cdi, arg); + case CDROM_CLEAR_OPTIONS: + return cdrom_ioctl_clear_options(cdi, arg); + case CDROM_SELECT_SPEED: + return cdrom_ioctl_select_speed(cdi, arg); + case CDROM_SELECT_DISC: + return cdrom_ioctl_select_disc(cdi, arg); + case CDROMRESET: + return cdrom_ioctl_reset(cdi, ip->i_bdev); + case CDROM_LOCKDOOR: + return cdrom_ioctl_lock_door(cdi, arg); + case CDROM_DEBUG: + return cdrom_ioctl_debug(cdi, arg); + case CDROM_GET_CAPABILITY: + return cdrom_ioctl_get_capability(cdi); + case CDROM_GET_MCN: + return cdrom_ioctl_get_mcn(cdi, argp); + case CDROM_DRIVE_STATUS: + return cdrom_ioctl_drive_status(cdi, arg); + case CDROM_DISC_STATUS: + return cdrom_ioctl_disc_status(cdi); + case CDROM_CHANGER_NSLOTS: + return cdrom_ioctl_changer_nslots(cdi); + } + + /* + * Use the ioctls that are implemented through the generic_packet() + * interface. this may look at bit funny, but if -ENOTTY is + * returned that particular ioctl is not implemented and we + * let it go through the device specific ones. + */ if (CDROM_CAN(CDC_GENERIC_PACKET)) { ret = mmc_ioctl(cdi, cmd, arg); - if (ret != -ENOTTY) { + if (ret != -ENOTTY) return ret; - } } - /* note: most of the cdinfo() calls are commented out here, - because they fill up the sys log when CD players poll - the drive. */ + /* + * Note: most of the cdinfo() calls are commented out here, + * because they fill up the sys log when CD players poll + * the drive. 
+ */ switch (cmd) { - case CDROMSUBCHNL: { - struct cdrom_subchnl q; - u_char requested, back; - if (!CDROM_CAN(CDC_PLAY_AUDIO)) - return -ENOSYS; - /* cdinfo(CD_DO_IOCTL,"entering CDROMSUBCHNL\n");*/ - IOCTL_IN(arg, struct cdrom_subchnl, q); - requested = q.cdsc_format; - if (!((requested == CDROM_MSF) || - (requested == CDROM_LBA))) - return -EINVAL; - q.cdsc_format = CDROM_MSF; - if ((ret=cdo->audio_ioctl(cdi, cmd, &q))) - return ret; - back = q.cdsc_format; /* local copy */ - sanitize_format(&q.cdsc_absaddr, &back, requested); - sanitize_format(&q.cdsc_reladdr, &q.cdsc_format, requested); - IOCTL_OUT(arg, struct cdrom_subchnl, q); - /* cdinfo(CD_DO_IOCTL, "CDROMSUBCHNL successful\n"); */ - return 0; - } - case CDROMREADTOCHDR: { - struct cdrom_tochdr header; - if (!CDROM_CAN(CDC_PLAY_AUDIO)) - return -ENOSYS; - /* cdinfo(CD_DO_IOCTL, "entering CDROMREADTOCHDR\n"); */ - IOCTL_IN(arg, struct cdrom_tochdr, header); - if ((ret=cdo->audio_ioctl(cdi, cmd, &header))) - return ret; - IOCTL_OUT(arg, struct cdrom_tochdr, header); - /* cdinfo(CD_DO_IOCTL, "CDROMREADTOCHDR successful\n"); */ - return 0; - } - case CDROMREADTOCENTRY: { - struct cdrom_tocentry entry; - u_char requested_format; - if (!CDROM_CAN(CDC_PLAY_AUDIO)) - return -ENOSYS; - /* cdinfo(CD_DO_IOCTL, "entering CDROMREADTOCENTRY\n"); */ - IOCTL_IN(arg, struct cdrom_tocentry, entry); - requested_format = entry.cdte_format; - if (!((requested_format == CDROM_MSF) || - (requested_format == CDROM_LBA))) - return -EINVAL; - /* make interface to low-level uniform */ - entry.cdte_format = CDROM_MSF; - if ((ret=cdo->audio_ioctl(cdi, cmd, &entry))) - return ret; - sanitize_format(&entry.cdte_addr, - &entry.cdte_format, requested_format); - IOCTL_OUT(arg, struct cdrom_tocentry, entry); - /* cdinfo(CD_DO_IOCTL, "CDROMREADTOCENTRY successful\n"); */ - return 0; - } - case CDROMPLAYMSF: { - struct cdrom_msf msf; - if (!CDROM_CAN(CDC_PLAY_AUDIO)) - return -ENOSYS; - cdinfo(CD_DO_IOCTL, "entering CDROMPLAYMSF\n"); - IOCTL_IN(arg, struct cdrom_msf, msf); - return cdo->audio_ioctl(cdi, cmd, &msf); - } - case CDROMPLAYTRKIND: { - struct cdrom_ti ti; - if (!CDROM_CAN(CDC_PLAY_AUDIO)) - return -ENOSYS; - cdinfo(CD_DO_IOCTL, "entering CDROMPLAYTRKIND\n"); - IOCTL_IN(arg, struct cdrom_ti, ti); - CHECKAUDIO; - return cdo->audio_ioctl(cdi, cmd, &ti); - } - case CDROMVOLCTRL: { - struct cdrom_volctrl volume; - if (!CDROM_CAN(CDC_PLAY_AUDIO)) - return -ENOSYS; - cdinfo(CD_DO_IOCTL, "entering CDROMVOLCTRL\n"); - IOCTL_IN(arg, struct cdrom_volctrl, volume); - return cdo->audio_ioctl(cdi, cmd, &volume); - } - case CDROMVOLREAD: { - struct cdrom_volctrl volume; - if (!CDROM_CAN(CDC_PLAY_AUDIO)) - return -ENOSYS; - cdinfo(CD_DO_IOCTL, "entering CDROMVOLREAD\n"); - if ((ret=cdo->audio_ioctl(cdi, cmd, &volume))) - return ret; - IOCTL_OUT(arg, struct cdrom_volctrl, volume); - return 0; - } + case CDROMSUBCHNL: + return cdrom_ioctl_get_subchnl(cdi, argp); + case CDROMREADTOCHDR: + return cdrom_ioctl_read_tochdr(cdi, argp); + case CDROMREADTOCENTRY: + return cdrom_ioctl_read_tocentry(cdi, argp); + case CDROMPLAYMSF: + return cdrom_ioctl_play_msf(cdi, argp); + case CDROMPLAYTRKIND: + return cdrom_ioctl_play_trkind(cdi, argp); + case CDROMVOLCTRL: + return cdrom_ioctl_volctrl(cdi, argp); + case CDROMVOLREAD: + return cdrom_ioctl_volread(cdi, argp); case CDROMSTART: case CDROMSTOP: case CDROMPAUSE: - case CDROMRESUME: { - if (!CDROM_CAN(CDC_PLAY_AUDIO)) - return -ENOSYS; - cdinfo(CD_DO_IOCTL, "doing audio ioctl (start/stop/pause/resume)\n"); - CHECKAUDIO; - return 
cdo->audio_ioctl(cdi, cmd, NULL); - } - } /* switch */ + case CDROMRESUME: + return cdrom_ioctl_audioctl(cdi, cmd); + } - /* do the device specific ioctls */ - if (CDROM_CAN(CDC_IOCTLS)) - return cdo->dev_ioctl(cdi, cmd, arg); - return -ENOSYS; } diff --git a/drivers/cdrom/cdu31a.c b/drivers/cdrom/cdu31a.c index 378e88d2075..72ffd64e8b1 100644 --- a/drivers/cdrom/cdu31a.c +++ b/drivers/cdrom/cdu31a.c @@ -2668,7 +2668,7 @@ static int scd_audio_ioctl(struct cdrom_device_info *cdi, return retval; } -static int scd_dev_ioctl(struct cdrom_device_info *cdi, +static int scd_read_audio(struct cdrom_device_info *cdi, unsigned int cmd, unsigned long arg) { void __user *argp = (void __user *)arg; @@ -2894,11 +2894,10 @@ static struct cdrom_device_ops scd_dops = { .get_mcn = scd_get_mcn, .reset = scd_reset, .audio_ioctl = scd_audio_ioctl, - .dev_ioctl = scd_dev_ioctl, .capability = CDC_OPEN_TRAY | CDC_CLOSE_TRAY | CDC_LOCK | CDC_SELECT_SPEED | CDC_MULTI_SESSION | CDC_MCN | CDC_MEDIA_CHANGED | CDC_PLAY_AUDIO | - CDC_RESET | CDC_IOCTLS | CDC_DRIVE_STATUS, + CDC_RESET | CDC_DRIVE_STATUS, .n_minors = 1, }; @@ -2936,6 +2935,9 @@ static int scd_block_ioctl(struct inode *inode, struct file *file, case CDROMCLOSETRAY: retval = scd_tray_move(&scd_info, 0); break; + case CDROMREADAUDIO: + retval = scd_read_audio(&scd_info, CDROMREADAUDIO, arg); + break; default: retval = cdrom_ioctl(file, &scd_info, inode, cmd, arg); } diff --git a/drivers/cdrom/cm206.c b/drivers/cdrom/cm206.c index ce127f7ec0f..fad27a87ce3 100644 --- a/drivers/cdrom/cm206.c +++ b/drivers/cdrom/cm206.c @@ -1157,32 +1157,6 @@ static int cm206_audio_ioctl(struct cdrom_device_info *cdi, unsigned int cmd, } } -/* Ioctl. These ioctls are specific to the cm206 driver. I have made - some driver statistics accessible through ioctl calls. 
- */ - -static int cm206_ioctl(struct cdrom_device_info *cdi, unsigned int cmd, - unsigned long arg) -{ - switch (cmd) { -#ifdef STATISTICS - case CM206CTL_GET_STAT: - if (arg >= NR_STATS) - return -EINVAL; - else - return cd->stats[arg]; - case CM206CTL_GET_LAST_STAT: - if (arg >= NR_STATS) - return -EINVAL; - else - return cd->last_stat[arg]; -#endif - default: - debug(("Unknown ioctl call 0x%x\n", cmd)); - return -EINVAL; - } -} - static int cm206_media_changed(struct cdrom_device_info *cdi, int disc_nr) { if (cd != NULL) { @@ -1321,11 +1295,10 @@ static struct cdrom_device_ops cm206_dops = { .get_mcn = cm206_get_upc, .reset = cm206_reset, .audio_ioctl = cm206_audio_ioctl, - .dev_ioctl = cm206_ioctl, .capability = CDC_CLOSE_TRAY | CDC_OPEN_TRAY | CDC_LOCK | CDC_MULTI_SESSION | CDC_MEDIA_CHANGED | CDC_MCN | CDC_PLAY_AUDIO | CDC_SELECT_SPEED | - CDC_IOCTLS | CDC_DRIVE_STATUS, + CDC_DRIVE_STATUS, .n_minors = 1, }; @@ -1350,6 +1323,21 @@ static int cm206_block_release(struct inode *inode, struct file *file) static int cm206_block_ioctl(struct inode *inode, struct file *file, unsigned cmd, unsigned long arg) { + switch (cmd) { +#ifdef STATISTICS + case CM206CTL_GET_STAT: + if (arg >= NR_STATS) + return -EINVAL; + return cd->stats[arg]; + case CM206CTL_GET_LAST_STAT: + if (arg >= NR_STATS) + return -EINVAL; + return cd->last_stat[arg]; +#endif + default: + break; + } + return cdrom_ioctl(file, &cm206_info, inode, cmd, arg); } diff --git a/drivers/cdrom/sbpcd.c b/drivers/cdrom/sbpcd.c index 466e9c2974b..4760f515f59 100644 --- a/drivers/cdrom/sbpcd.c +++ b/drivers/cdrom/sbpcd.c @@ -4160,332 +4160,6 @@ static int sbpcd_get_last_session(struct cdrom_device_info *cdi, struct cdrom_mu return 0; } -/*==========================================================================*/ -/*==========================================================================*/ -/* - * ioctl support - */ -static int sbpcd_dev_ioctl(struct cdrom_device_info *cdi, u_int cmd, - u_long arg) -{ - struct sbpcd_drive *p = cdi->handle; - int i; - - msg(DBG_IO2,"ioctl(%s, 0x%08lX, 0x%08lX)\n", cdi->name, cmd, arg); - if (p->drv_id==-1) { - msg(DBG_INF, "ioctl: bad device: %s\n", cdi->name); - return (-ENXIO); /* no such drive */ - } - down(&ioctl_read_sem); - if (p != current_drive) - switch_drive(p); - - msg(DBG_IO2,"ioctl: device %s, request %04X\n",cdi->name,cmd); - switch (cmd) /* Sun-compatible */ - { - case DDIOCSDBG: /* DDI Debug */ - if (!capable(CAP_SYS_ADMIN)) RETURN_UP(-EPERM); - i=sbpcd_dbg_ioctl(arg,1); - RETURN_UP(i); - case CDROMRESET: /* hard reset the drive */ - msg(DBG_IOC,"ioctl: CDROMRESET entered.\n"); - i=DriveReset(); - current_drive->audio_state=0; - RETURN_UP(i); - - case CDROMREADMODE1: - msg(DBG_IOC,"ioctl: CDROMREADMODE1 requested.\n"); -#ifdef SAFE_MIXED - if (current_drive->has_data>1) RETURN_UP(-EBUSY); -#endif /* SAFE_MIXED */ - cc_ModeSelect(CD_FRAMESIZE); - cc_ModeSense(); - current_drive->mode=READ_M1; - RETURN_UP(0); - - case CDROMREADMODE2: /* not usable at the moment */ - msg(DBG_IOC,"ioctl: CDROMREADMODE2 requested.\n"); -#ifdef SAFE_MIXED - if (current_drive->has_data>1) RETURN_UP(-EBUSY); -#endif /* SAFE_MIXED */ - cc_ModeSelect(CD_FRAMESIZE_RAW1); - cc_ModeSense(); - current_drive->mode=READ_M2; - RETURN_UP(0); - - case CDROMAUDIOBUFSIZ: /* configure the audio buffer size */ - msg(DBG_IOC,"ioctl: CDROMAUDIOBUFSIZ entered.\n"); - if (current_drive->sbp_audsiz>0) - vfree(current_drive->aud_buf); - current_drive->aud_buf=NULL; - current_drive->sbp_audsiz=arg; - - if 
(current_drive->sbp_audsiz>16) - { - current_drive->sbp_audsiz = 0; - RETURN_UP(current_drive->sbp_audsiz); - } - - if (current_drive->sbp_audsiz>0) - { - current_drive->aud_buf=(u_char *) vmalloc(current_drive->sbp_audsiz*CD_FRAMESIZE_RAW); - if (current_drive->aud_buf==NULL) - { - msg(DBG_INF,"audio buffer (%d frames) not available.\n",current_drive->sbp_audsiz); - current_drive->sbp_audsiz=0; - } - else msg(DBG_INF,"audio buffer size: %d frames.\n",current_drive->sbp_audsiz); - } - RETURN_UP(current_drive->sbp_audsiz); - - case CDROMREADAUDIO: - { /* start of CDROMREADAUDIO */ - int i=0, j=0, frame, block=0; - u_int try=0; - u_long timeout; - u_char *p; - u_int data_tries = 0; - u_int data_waits = 0; - u_int data_retrying = 0; - int status_tries; - int error_flag; - - msg(DBG_IOC,"ioctl: CDROMREADAUDIO entered.\n"); - if (fam0_drive) RETURN_UP(-EINVAL); - if (famL_drive) RETURN_UP(-EINVAL); - if (famV_drive) RETURN_UP(-EINVAL); - if (famT_drive) RETURN_UP(-EINVAL); -#ifdef SAFE_MIXED - if (current_drive->has_data>1) RETURN_UP(-EBUSY); -#endif /* SAFE_MIXED */ - if (current_drive->aud_buf==NULL) RETURN_UP(-EINVAL); - if (copy_from_user(&read_audio, (void __user *)arg, - sizeof(struct cdrom_read_audio))) - RETURN_UP(-EFAULT); - if (read_audio.nframes < 0 || read_audio.nframes>current_drive->sbp_audsiz) RETURN_UP(-EINVAL); - if (!access_ok(VERIFY_WRITE, read_audio.buf, - read_audio.nframes*CD_FRAMESIZE_RAW)) - RETURN_UP(-EFAULT); - - if (read_audio.addr_format==CDROM_MSF) /* MSF-bin specification of where to start */ - block=msf2lba(&read_audio.addr.msf.minute); - else if (read_audio.addr_format==CDROM_LBA) /* lba specification of where to start */ - block=read_audio.addr.lba; - else RETURN_UP(-EINVAL); -#if 000 - i=cc_SetSpeed(speed_150,0,0); - if (i) msg(DBG_AUD,"read_audio: SetSpeed error %d\n", i); -#endif - msg(DBG_AUD,"read_audio: lba: %d, msf: %06X\n", - block, blk2msf(block)); - msg(DBG_AUD,"read_audio: before cc_ReadStatus.\n"); -#if OLD_BUSY - while (busy_data) sbp_sleep(HZ/10); /* wait a bit */ - busy_audio=1; -#endif /* OLD_BUSY */ - error_flag=0; - for (data_tries=5; data_tries>0; data_tries--) - { - msg(DBG_AUD,"data_tries=%d ...\n", data_tries); - current_drive->mode=READ_AU; - cc_ModeSelect(CD_FRAMESIZE_RAW); - cc_ModeSense(); - for (status_tries=3; status_tries > 0; status_tries--) - { - flags_cmd_out |= f_respo3; - cc_ReadStatus(); - if (sbp_status() != 0) break; - if (st_check) cc_ReadError(); - sbp_sleep(1); /* wait a bit, try again */ - } - if (status_tries == 0) - { - msg(DBG_AUD,"read_audio: sbp_status: failed after 3 tries in line %d.\n", __LINE__); - continue; - } - msg(DBG_AUD,"read_audio: sbp_status: ok.\n"); - - flags_cmd_out = f_putcmd | f_respo2 | f_ResponseStatus | f_obey_p_check; - if (fam0L_drive) - { - flags_cmd_out |= f_lopsta | f_getsta | f_bit1; - cmd_type=READ_M2; - drvcmd[0]=CMD0_READ_XA; /* "read XA frames", old drives */ - drvcmd[1]=(block>>16)&0x000000ff; - drvcmd[2]=(block>>8)&0x000000ff; - drvcmd[3]=block&0x000000ff; - drvcmd[4]=0; - drvcmd[5]=read_audio.nframes; /* # of frames */ - drvcmd[6]=0; - } - else if (fam1_drive) - { - drvcmd[0]=CMD1_READ; /* "read frames", new drives */ - lba2msf(block,&drvcmd[1]); /* msf-bin format required */ - drvcmd[4]=0; - drvcmd[5]=0; - drvcmd[6]=read_audio.nframes; /* # of frames */ - } - else if (fam2_drive) - { - drvcmd[0]=CMD2_READ_XA2; - lba2msf(block,&drvcmd[1]); /* msf-bin format required */ - drvcmd[4]=0; - drvcmd[5]=read_audio.nframes; /* # of frames */ - drvcmd[6]=0x11; /* raw mode */ - } - else if 
(famT_drive) /* CD-55A: not tested yet */ - { - } - msg(DBG_AUD,"read_audio: before giving \"read\" command.\n"); - flags_cmd_out=f_putcmd; - response_count=0; - i=cmd_out(); - if (i<0) msg(DBG_INF,"error giving READ AUDIO command: %0d\n", i); - sbp_sleep(0); - msg(DBG_AUD,"read_audio: after giving \"read\" command.\n"); - for (frame=1;frame<2 && !error_flag; frame++) - { - try=maxtim_data; - for (timeout=jiffies+9*HZ; ; ) - { - for ( ; try!=0;try--) - { - j=inb(CDi_status); - if (!(j&s_not_data_ready)) break; - if (!(j&s_not_result_ready)) break; - if (fam0L_drive) if (j&s_attention) break; - } - if (try != 0 || time_after_eq(jiffies, timeout)) break; - if (data_retrying == 0) data_waits++; - data_retrying = 1; - sbp_sleep(1); - try = 1; - } - if (try==0) - { - msg(DBG_INF,"read_audio: sbp_data: CDi_status timeout.\n"); - error_flag++; - break; - } - msg(DBG_AUD,"read_audio: sbp_data: CDi_status ok.\n"); - if (j&s_not_data_ready) - { - msg(DBG_INF, "read_audio: sbp_data: DATA_READY timeout.\n"); - error_flag++; - break; - } - msg(DBG_AUD,"read_audio: before reading data.\n"); - error_flag=0; - p = current_drive->aud_buf; - if (sbpro_type==1) OUT(CDo_sel_i_d,1); - if (do_16bit) - { - u_short *p2 = (u_short *) p; - - for (; (u_char *) p2 < current_drive->aud_buf + read_audio.nframes*CD_FRAMESIZE_RAW;) - { - if ((inb_p(CDi_status)&s_not_data_ready)) continue; - - /* get one sample */ - *p2++ = inw_p(CDi_data); - *p2++ = inw_p(CDi_data); - } - } else { - for (; p < current_drive->aud_buf + read_audio.nframes*CD_FRAMESIZE_RAW;) - { - if ((inb_p(CDi_status)&s_not_data_ready)) continue; - - /* get one sample */ - *p++ = inb_p(CDi_data); - *p++ = inb_p(CDi_data); - *p++ = inb_p(CDi_data); - *p++ = inb_p(CDi_data); - } - } - if (sbpro_type==1) OUT(CDo_sel_i_d,0); - data_retrying = 0; - } - msg(DBG_AUD,"read_audio: after reading data.\n"); - if (error_flag) /* must have been spurious D_RDY or (ATTN&&!D_RDY) */ - { - msg(DBG_AUD,"read_audio: read aborted by drive\n"); -#if 0000 - i=cc_DriveReset(); /* ugly fix to prevent a hang */ -#else - i=cc_ReadError(); -#endif - continue; - } - if (fam0L_drive) - { - i=maxtim_data; - for (timeout=jiffies+9*HZ; time_before(jiffies, timeout); timeout--) - { - for ( ;i!=0;i--) - { - j=inb(CDi_status); - if (!(j&s_not_data_ready)) break; - if (!(j&s_not_result_ready)) break; - if (j&s_attention) break; - } - if (i != 0 || time_after_eq(jiffies, timeout)) break; - sbp_sleep(0); - i = 1; - } - if (i==0) msg(DBG_AUD,"read_audio: STATUS TIMEOUT AFTER READ"); - if (!(j&s_attention)) - { - msg(DBG_AUD,"read_audio: sbp_data: timeout waiting DRV_ATTN - retrying\n"); - i=cc_DriveReset(); /* ugly fix to prevent a hang */ - continue; - } - } - do - { - if (fam0L_drive) cc_ReadStatus(); - i=ResponseStatus(); /* builds status_bits, returns orig. 
status (old) or faked p_success (new) */ - if (i<0) { msg(DBG_AUD, - "read_audio: cc_ReadStatus error after read: %02X\n", - current_drive->status_bits); - continue; /* FIXME */ - } - } - while ((fam0L_drive)&&(!st_check)&&(!(i&p_success))); - if (st_check) - { - i=cc_ReadError(); - msg(DBG_AUD,"read_audio: cc_ReadError was necessary after read: %02X\n",i); - continue; - } - if (copy_to_user(read_audio.buf, - current_drive->aud_buf, - read_audio.nframes * CD_FRAMESIZE_RAW)) - RETURN_UP(-EFAULT); - msg(DBG_AUD,"read_audio: copy_to_user done.\n"); - break; - } - cc_ModeSelect(CD_FRAMESIZE); - cc_ModeSense(); - current_drive->mode=READ_M1; -#if OLD_BUSY - busy_audio=0; -#endif /* OLD_BUSY */ - if (data_tries == 0) - { - msg(DBG_AUD,"read_audio: failed after 5 tries in line %d.\n", __LINE__); - RETURN_UP(-EIO); - } - msg(DBG_AUD,"read_audio: successful return.\n"); - RETURN_UP(0); - } /* end of CDROMREADAUDIO */ - - default: - msg(DBG_IOC,"ioctl: unknown function request %04X\n", cmd); - RETURN_UP(-EINVAL); - } /* end switch(cmd) */ -} - static int sbpcd_audio_ioctl(struct cdrom_device_info *cdi, u_int cmd, void * arg) { @@ -4530,7 +4204,7 @@ static int sbpcd_audio_ioctl(struct cdrom_device_info *cdi, u_int cmd, default: RETURN_UP(-EINVAL); } - + case CDROMRESUME: /* resume paused audio play */ msg(DBG_IOC,"ioctl: CDROMRESUME entered.\n"); /* resume playing audio tracks when a previous PLAY AUDIO call has */ @@ -4544,12 +4218,12 @@ static int sbpcd_audio_ioctl(struct cdrom_device_info *cdi, u_int cmd, if (i<0) RETURN_UP(-EIO); current_drive->audio_state=audio_playing; RETURN_UP(0); - + case CDROMPLAYMSF: msg(DBG_IOC,"ioctl: CDROMPLAYMSF entered.\n"); #ifdef SAFE_MIXED if (current_drive->has_data>1) RETURN_UP(-EBUSY); -#endif /* SAFE_MIXED */ +#endif /* SAFE_MIXED */ if (current_drive->audio_state==audio_playing) { i=cc_Pause_Resume(1); @@ -4584,7 +4258,7 @@ static int sbpcd_audio_ioctl(struct cdrom_device_info *cdi, u_int cmd, msg(DBG_IOC,"ioctl: CDROMPLAYTRKIND entered.\n"); #ifdef SAFE_MIXED if (current_drive->has_data>1) RETURN_UP(-EBUSY); -#endif /* SAFE_MIXED */ +#endif /* SAFE_MIXED */ if (current_drive->audio_state==audio_playing) { msg(DBG_IOX,"CDROMPLAYTRKIND: already audio_playing.\n"); @@ -4654,13 +4328,13 @@ static int sbpcd_audio_ioctl(struct cdrom_device_info *cdi, u_int cmd, cc_DriveReset(); #endif RETURN_UP(i); - + case CDROMSTART: /* Spin up the drive */ msg(DBG_IOC,"ioctl: CDROMSTART entered.\n"); cc_SpinUp(); current_drive->audio_state=0; RETURN_UP(0); - + case CDROMVOLCTRL: /* Volume control */ msg(DBG_IOC,"ioctl: CDROMVOLCTRL entered.\n"); memcpy(&volctrl,(char *) arg,sizeof(volctrl)); @@ -4670,7 +4344,7 @@ static int sbpcd_audio_ioctl(struct cdrom_device_info *cdi, u_int cmd, current_drive->vol_ctrl1=volctrl.channel1; i=cc_SetVolume(); RETURN_UP(0); - + case CDROMVOLREAD: /* read Volume settings from drive */ msg(DBG_IOC,"ioctl: CDROMVOLREAD entered.\n"); st=cc_GetVolume(); @@ -4694,7 +4368,7 @@ static int sbpcd_audio_ioctl(struct cdrom_device_info *cdi, u_int cmd, if (i<0) { j=cc_ReadError(); /* clear out error status from drive */ current_drive->audio_state=CDROM_AUDIO_NO_STATUS; - /* get and set the disk state here, + /* get and set the disk state here, probably not the right place, but who cares! It makes it work properly! --AJK */ if (current_drive->CD_changed==0xFF) { @@ -4715,8 +4389,8 @@ static int sbpcd_audio_ioctl(struct cdrom_device_info *cdi, u_int cmd, } } memcpy(&SC, (void *) arg, sizeof(struct cdrom_subchnl)); - /* - This virtual crap is very bogus! 
+ /* + This virtual crap is very bogus! It doesn't detect when the cd is done playing audio! Lets do this right with proper hardware register reading! */ @@ -4775,7 +4449,7 @@ static int sbpcd_audio_ioctl(struct cdrom_device_info *cdi, u_int cmd, SC.cdsc_trk,SC.cdsc_ind, SC.cdsc_absaddr,SC.cdsc_reladdr); RETURN_UP(0); - + default: msg(DBG_IOC,"ioctl: unknown function request %04X\n", cmd); RETURN_UP(-EINVAL); @@ -4788,7 +4462,7 @@ static int sbpcd_audio_ioctl(struct cdrom_device_info *cdi, u_int cmd, static void sbp_transfer(struct request *req) { long offs; - + while ( (req->nr_sectors > 0) && (req->sector/4 >= current_drive->sbp_first_frame) && (req->sector/4 <= current_drive->sbp_last_frame) ) @@ -4807,11 +4481,11 @@ static void sbp_transfer(struct request *req) * * This is a kludge so we don't need to modify end_request. * We put the req we take out after INIT_REQUEST in the requests list, - * so that end_request will discard it. + * so that end_request will discard it. * * The bug could be present in other block devices, perhaps we * should modify INIT_REQUEST and end_request instead, and - * change every block device.. + * change every block device.. * * Could be a race here?? Could e.g. a timer interrupt schedule() us? * If so, we should copy end_request here, and do it right.. (or @@ -4883,19 +4557,19 @@ static void do_sbpcd_request(request_queue_t * q) while (busy_audio) sbp_sleep(HZ); /* wait a bit */ busy_data=1; #endif /* OLD_BUSY */ - + if (p->audio_state==audio_playing) goto err_done; if (p != current_drive) switch_drive(p); block = req->sector; /* always numbered as 512-byte-pieces */ nsect = req->nr_sectors; /* always counted as 512-byte-pieces */ - + msg(DBG_BSZ,"read sector %d (%d sectors)\n", block, nsect); #if 0 msg(DBG_MUL,"read LBA %d\n", block/4); #endif - + sbp_transfer(req); /* if we satisfied the request from the buffer, we're done. */ if (req->nr_sectors == 0) @@ -4914,10 +4588,10 @@ static void do_sbpcd_request(request_queue_t * q) i=prepare(0,0); /* at moment not really a hassle check, but ... 
*/ if (i!=0) msg(DBG_INF,"\"prepare\" tells error %d -- ignored\n", i); -#endif /* FUTURE */ - +#endif /* FUTURE */ + if (!st_spinning) cc_SpinUp(); - + for (data_tries=n_retries; data_tries > 0; data_tries--) { for (status_tries=3; status_tries > 0; status_tries--) @@ -4940,7 +4614,7 @@ static void do_sbpcd_request(request_queue_t * q) { #ifdef SAFE_MIXED current_drive->has_data=2; /* is really a data disk */ -#endif /* SAFE_MIXED */ +#endif /* SAFE_MIXED */ #ifdef DEBUG_GTL printk(" do_sbpcd_request[%do](%p:%ld+%ld) end 3, Time:%li\n", xnr, req, req->sector, req->nr_sectors, jiffies); @@ -4951,7 +4625,7 @@ static void do_sbpcd_request(request_queue_t * q) goto request_loop; } } - + err_done: #if OLD_BUSY busy_data=0; @@ -4976,7 +4650,7 @@ static void sbp_read_cmd(struct request *req) int i; int block; - + current_drive->sbp_first_frame=current_drive->sbp_last_frame=-1; /* purge buffer */ current_drive->sbp_current = 0; block=req->sector/4; @@ -4993,7 +4667,7 @@ static void sbp_read_cmd(struct request *req) current_drive->sbp_read_frames=1; } } - + flags_cmd_out = f_putcmd | f_respo2 | f_ResponseStatus | f_obey_p_check; clr_cmdbuf(); if (famV_drive) @@ -5092,7 +4766,7 @@ static int sbp_data(struct request *req) int success; int wait; int duration; - + error_flag=0; success=0; #if LONG_TIMING @@ -5105,12 +4779,12 @@ static int sbp_data(struct request *req) for (frame=0;frame<current_drive->sbp_read_frames&&!error_flag; frame++) { SBPCD_CLI; - + del_timer(&data_timer); data_timer.expires=jiffies+max_latency; timed_out_data=0; add_timer(&data_timer); - while (!timed_out_data) + while (!timed_out_data) { if (current_drive->f_multisession) try=maxtim_data*4; else try=maxtim_data; @@ -5207,9 +4881,9 @@ static int sbp_data(struct request *req) else { sbp_sleep(1); - OUT(CDo_sel_i_d,0); + OUT(CDo_sel_i_d,0); i=inb(CDi_status); - } + } if (!(i&s_not_data_ready)) { OUT(CDo_sel_i_d,1); @@ -5311,7 +4985,7 @@ static int sbp_data(struct request *req) } SBPCD_STI; } - + #if 0 if (!success) #endif @@ -5370,7 +5044,326 @@ static int sbpcd_block_ioctl(struct inode *inode, struct file *file, unsigned cmd, unsigned long arg) { struct sbpcd_drive *p = inode->i_bdev->bd_disk->private_data; - return cdrom_ioctl(file, p->sbpcd_infop, inode, cmd, arg); + struct cdrom_device_info *cdi = p->sbpcd_infop; + int ret, i; + + ret = cdrom_ioctl(file, p->sbpcd_infop, inode, cmd, arg); + if (ret != -ENOSYS) + return ret; + + msg(DBG_IO2,"ioctl(%s, 0x%08lX, 0x%08lX)\n", cdi->name, cmd, arg); + if (p->drv_id==-1) { + msg(DBG_INF, "ioctl: bad device: %s\n", cdi->name); + return (-ENXIO); /* no such drive */ + } + down(&ioctl_read_sem); + if (p != current_drive) + switch_drive(p); + + msg(DBG_IO2,"ioctl: device %s, request %04X\n",cdi->name,cmd); + switch (cmd) /* Sun-compatible */ + { + case DDIOCSDBG: /* DDI Debug */ + if (!capable(CAP_SYS_ADMIN)) RETURN_UP(-EPERM); + i=sbpcd_dbg_ioctl(arg,1); + RETURN_UP(i); + case CDROMRESET: /* hard reset the drive */ + msg(DBG_IOC,"ioctl: CDROMRESET entered.\n"); + i=DriveReset(); + current_drive->audio_state=0; + RETURN_UP(i); + + case CDROMREADMODE1: + msg(DBG_IOC,"ioctl: CDROMREADMODE1 requested.\n"); +#ifdef SAFE_MIXED + if (current_drive->has_data>1) RETURN_UP(-EBUSY); +#endif /* SAFE_MIXED */ + cc_ModeSelect(CD_FRAMESIZE); + cc_ModeSense(); + current_drive->mode=READ_M1; + RETURN_UP(0); + + case CDROMREADMODE2: /* not usable at the moment */ + msg(DBG_IOC,"ioctl: CDROMREADMODE2 requested.\n"); +#ifdef SAFE_MIXED + if (current_drive->has_data>1) RETURN_UP(-EBUSY); +#endif /* 
SAFE_MIXED */ + cc_ModeSelect(CD_FRAMESIZE_RAW1); + cc_ModeSense(); + current_drive->mode=READ_M2; + RETURN_UP(0); + + case CDROMAUDIOBUFSIZ: /* configure the audio buffer size */ + msg(DBG_IOC,"ioctl: CDROMAUDIOBUFSIZ entered.\n"); + if (current_drive->sbp_audsiz>0) + vfree(current_drive->aud_buf); + current_drive->aud_buf=NULL; + current_drive->sbp_audsiz=arg; + + if (current_drive->sbp_audsiz>16) + { + current_drive->sbp_audsiz = 0; + RETURN_UP(current_drive->sbp_audsiz); + } + + if (current_drive->sbp_audsiz>0) + { + current_drive->aud_buf=(u_char *) vmalloc(current_drive->sbp_audsiz*CD_FRAMESIZE_RAW); + if (current_drive->aud_buf==NULL) + { + msg(DBG_INF,"audio buffer (%d frames) not available.\n",current_drive->sbp_audsiz); + current_drive->sbp_audsiz=0; + } + else msg(DBG_INF,"audio buffer size: %d frames.\n",current_drive->sbp_audsiz); + } + RETURN_UP(current_drive->sbp_audsiz); + + case CDROMREADAUDIO: + { /* start of CDROMREADAUDIO */ + int i=0, j=0, frame, block=0; + u_int try=0; + u_long timeout; + u_char *p; + u_int data_tries = 0; + u_int data_waits = 0; + u_int data_retrying = 0; + int status_tries; + int error_flag; + + msg(DBG_IOC,"ioctl: CDROMREADAUDIO entered.\n"); + if (fam0_drive) RETURN_UP(-EINVAL); + if (famL_drive) RETURN_UP(-EINVAL); + if (famV_drive) RETURN_UP(-EINVAL); + if (famT_drive) RETURN_UP(-EINVAL); +#ifdef SAFE_MIXED + if (current_drive->has_data>1) RETURN_UP(-EBUSY); +#endif /* SAFE_MIXED */ + if (current_drive->aud_buf==NULL) RETURN_UP(-EINVAL); + if (copy_from_user(&read_audio, (void __user *)arg, + sizeof(struct cdrom_read_audio))) + RETURN_UP(-EFAULT); + if (read_audio.nframes < 0 || read_audio.nframes>current_drive->sbp_audsiz) RETURN_UP(-EINVAL); + if (!access_ok(VERIFY_WRITE, read_audio.buf, + read_audio.nframes*CD_FRAMESIZE_RAW)) + RETURN_UP(-EFAULT); + + if (read_audio.addr_format==CDROM_MSF) /* MSF-bin specification of where to start */ + block=msf2lba(&read_audio.addr.msf.minute); + else if (read_audio.addr_format==CDROM_LBA) /* lba specification of where to start */ + block=read_audio.addr.lba; + else RETURN_UP(-EINVAL); +#if 000 + i=cc_SetSpeed(speed_150,0,0); + if (i) msg(DBG_AUD,"read_audio: SetSpeed error %d\n", i); +#endif + msg(DBG_AUD,"read_audio: lba: %d, msf: %06X\n", + block, blk2msf(block)); + msg(DBG_AUD,"read_audio: before cc_ReadStatus.\n"); +#if OLD_BUSY + while (busy_data) sbp_sleep(HZ/10); /* wait a bit */ + busy_audio=1; +#endif /* OLD_BUSY */ + error_flag=0; + for (data_tries=5; data_tries>0; data_tries--) + { + msg(DBG_AUD,"data_tries=%d ...\n", data_tries); + current_drive->mode=READ_AU; + cc_ModeSelect(CD_FRAMESIZE_RAW); + cc_ModeSense(); + for (status_tries=3; status_tries > 0; status_tries--) + { + flags_cmd_out |= f_respo3; + cc_ReadStatus(); + if (sbp_status() != 0) break; + if (st_check) cc_ReadError(); + sbp_sleep(1); /* wait a bit, try again */ + } + if (status_tries == 0) + { + msg(DBG_AUD,"read_audio: sbp_status: failed after 3 tries in line %d.\n", __LINE__); + continue; + } + msg(DBG_AUD,"read_audio: sbp_status: ok.\n"); + + flags_cmd_out = f_putcmd | f_respo2 | f_ResponseStatus | f_obey_p_check; + if (fam0L_drive) + { + flags_cmd_out |= f_lopsta | f_getsta | f_bit1; + cmd_type=READ_M2; + drvcmd[0]=CMD0_READ_XA; /* "read XA frames", old drives */ + drvcmd[1]=(block>>16)&0x000000ff; + drvcmd[2]=(block>>8)&0x000000ff; + drvcmd[3]=block&0x000000ff; + drvcmd[4]=0; + drvcmd[5]=read_audio.nframes; /* # of frames */ + drvcmd[6]=0; + } + else if (fam1_drive) + { + drvcmd[0]=CMD1_READ; /* "read frames", new drives */ + 
lba2msf(block,&drvcmd[1]); /* msf-bin format required */ + drvcmd[4]=0; + drvcmd[5]=0; + drvcmd[6]=read_audio.nframes; /* # of frames */ + } + else if (fam2_drive) + { + drvcmd[0]=CMD2_READ_XA2; + lba2msf(block,&drvcmd[1]); /* msf-bin format required */ + drvcmd[4]=0; + drvcmd[5]=read_audio.nframes; /* # of frames */ + drvcmd[6]=0x11; /* raw mode */ + } + else if (famT_drive) /* CD-55A: not tested yet */ + { + } + msg(DBG_AUD,"read_audio: before giving \"read\" command.\n"); + flags_cmd_out=f_putcmd; + response_count=0; + i=cmd_out(); + if (i<0) msg(DBG_INF,"error giving READ AUDIO command: %0d\n", i); + sbp_sleep(0); + msg(DBG_AUD,"read_audio: after giving \"read\" command.\n"); + for (frame=1;frame<2 && !error_flag; frame++) + { + try=maxtim_data; + for (timeout=jiffies+9*HZ; ; ) + { + for ( ; try!=0;try--) + { + j=inb(CDi_status); + if (!(j&s_not_data_ready)) break; + if (!(j&s_not_result_ready)) break; + if (fam0L_drive) if (j&s_attention) break; + } + if (try != 0 || time_after_eq(jiffies, timeout)) break; + if (data_retrying == 0) data_waits++; + data_retrying = 1; + sbp_sleep(1); + try = 1; + } + if (try==0) + { + msg(DBG_INF,"read_audio: sbp_data: CDi_status timeout.\n"); + error_flag++; + break; + } + msg(DBG_AUD,"read_audio: sbp_data: CDi_status ok.\n"); + if (j&s_not_data_ready) + { + msg(DBG_INF, "read_audio: sbp_data: DATA_READY timeout.\n"); + error_flag++; + break; + } + msg(DBG_AUD,"read_audio: before reading data.\n"); + error_flag=0; + p = current_drive->aud_buf; + if (sbpro_type==1) OUT(CDo_sel_i_d,1); + if (do_16bit) + { + u_short *p2 = (u_short *) p; + + for (; (u_char *) p2 < current_drive->aud_buf + read_audio.nframes*CD_FRAMESIZE_RAW;) + { + if ((inb_p(CDi_status)&s_not_data_ready)) continue; + + /* get one sample */ + *p2++ = inw_p(CDi_data); + *p2++ = inw_p(CDi_data); + } + } else { + for (; p < current_drive->aud_buf + read_audio.nframes*CD_FRAMESIZE_RAW;) + { + if ((inb_p(CDi_status)&s_not_data_ready)) continue; + + /* get one sample */ + *p++ = inb_p(CDi_data); + *p++ = inb_p(CDi_data); + *p++ = inb_p(CDi_data); + *p++ = inb_p(CDi_data); + } + } + if (sbpro_type==1) OUT(CDo_sel_i_d,0); + data_retrying = 0; + } + msg(DBG_AUD,"read_audio: after reading data.\n"); + if (error_flag) /* must have been spurious D_RDY or (ATTN&&!D_RDY) */ + { + msg(DBG_AUD,"read_audio: read aborted by drive\n"); +#if 0000 + i=cc_DriveReset(); /* ugly fix to prevent a hang */ +#else + i=cc_ReadError(); +#endif + continue; + } + if (fam0L_drive) + { + i=maxtim_data; + for (timeout=jiffies+9*HZ; time_before(jiffies, timeout); timeout--) + { + for ( ;i!=0;i--) + { + j=inb(CDi_status); + if (!(j&s_not_data_ready)) break; + if (!(j&s_not_result_ready)) break; + if (j&s_attention) break; + } + if (i != 0 || time_after_eq(jiffies, timeout)) break; + sbp_sleep(0); + i = 1; + } + if (i==0) msg(DBG_AUD,"read_audio: STATUS TIMEOUT AFTER READ"); + if (!(j&s_attention)) + { + msg(DBG_AUD,"read_audio: sbp_data: timeout waiting DRV_ATTN - retrying\n"); + i=cc_DriveReset(); /* ugly fix to prevent a hang */ + continue; + } + } + do + { + if (fam0L_drive) cc_ReadStatus(); + i=ResponseStatus(); /* builds status_bits, returns orig. 
status (old) or faked p_success (new) */ + if (i<0) { msg(DBG_AUD, + "read_audio: cc_ReadStatus error after read: %02X\n", + current_drive->status_bits); + continue; /* FIXME */ + } + } + while ((fam0L_drive)&&(!st_check)&&(!(i&p_success))); + if (st_check) + { + i=cc_ReadError(); + msg(DBG_AUD,"read_audio: cc_ReadError was necessary after read: %02X\n",i); + continue; + } + if (copy_to_user(read_audio.buf, + current_drive->aud_buf, + read_audio.nframes * CD_FRAMESIZE_RAW)) + RETURN_UP(-EFAULT); + msg(DBG_AUD,"read_audio: copy_to_user done.\n"); + break; + } + cc_ModeSelect(CD_FRAMESIZE); + cc_ModeSense(); + current_drive->mode=READ_M1; +#if OLD_BUSY + busy_audio=0; +#endif /* OLD_BUSY */ + if (data_tries == 0) + { + msg(DBG_AUD,"read_audio: failed after 5 tries in line %d.\n", __LINE__); + RETURN_UP(-EIO); + } + msg(DBG_AUD,"read_audio: successful return.\n"); + RETURN_UP(0); + } /* end of CDROMREADAUDIO */ + + default: + msg(DBG_IOC,"ioctl: unknown function request %04X\n", cmd); + RETURN_UP(-EINVAL); + } /* end switch(cmd) */ } static int sbpcd_block_media_changed(struct gendisk *disk) @@ -5478,10 +5471,9 @@ static struct cdrom_device_ops sbpcd_dops = { .get_mcn = sbpcd_get_mcn, .reset = sbpcd_reset, .audio_ioctl = sbpcd_audio_ioctl, - .dev_ioctl = sbpcd_dev_ioctl, .capability = CDC_CLOSE_TRAY | CDC_OPEN_TRAY | CDC_LOCK | CDC_MULTI_SESSION | CDC_MEDIA_CHANGED | - CDC_MCN | CDC_PLAY_AUDIO | CDC_IOCTLS, + CDC_MCN | CDC_PLAY_AUDIO, .n_minors = 1, }; diff --git a/drivers/cdrom/viocd.c b/drivers/cdrom/viocd.c index e2761725955..c0f817ba7ad 100644 --- a/drivers/cdrom/viocd.c +++ b/drivers/cdrom/viocd.c @@ -627,7 +627,7 @@ static struct cdrom_device_ops viocd_dops = { .media_changed = viocd_media_changed, .lock_door = viocd_lock_door, .generic_packet = viocd_packet, - .capability = CDC_CLOSE_TRAY | CDC_OPEN_TRAY | CDC_LOCK | CDC_SELECT_SPEED | CDC_SELECT_DISC | CDC_MULTI_SESSION | CDC_MCN | CDC_MEDIA_CHANGED | CDC_PLAY_AUDIO | CDC_RESET | CDC_IOCTLS | CDC_DRIVE_STATUS | CDC_GENERIC_PACKET | CDC_CD_R | CDC_CD_RW | CDC_DVD | CDC_DVD_R | CDC_DVD_RAM | CDC_RAM + .capability = CDC_CLOSE_TRAY | CDC_OPEN_TRAY | CDC_LOCK | CDC_SELECT_SPEED | CDC_SELECT_DISC | CDC_MULTI_SESSION | CDC_MCN | CDC_MEDIA_CHANGED | CDC_PLAY_AUDIO | CDC_RESET | CDC_DRIVE_STATUS | CDC_GENERIC_PACKET | CDC_CD_R | CDC_CD_RW | CDC_DVD | CDC_DVD_R | CDC_DVD_RAM | CDC_RAM }; static int __init find_capability(const char *type) diff --git a/drivers/char/amiserial.c b/drivers/char/amiserial.c index 7ac365b5d9e..6602b3156df 100644 --- a/drivers/char/amiserial.c +++ b/drivers/char/amiserial.c @@ -46,8 +46,6 @@ /* Sanity checks */ -#define SERIAL_INLINE - #if defined(MODULE) && defined(SERIAL_DEBUG_MCOUNT) #define DBG_CNT(s) printk("(%s): [%x] refc=%d, serc=%d, ttyc=%d -> %s\n", \ tty->name, (info->flags), serial_driver->refcount,info->count,tty->count,s) @@ -95,10 +93,6 @@ static char *serial_version = "4.30"; #include <asm/amigahw.h> #include <asm/amigaints.h> -#ifdef SERIAL_INLINE -#define _INLINE_ inline -#endif - #define custom amiga_custom static char *serial_name = "Amiga-builtin serial driver"; @@ -253,14 +247,14 @@ static void rs_start(struct tty_struct *tty) * This routine is used by the interrupt handler to schedule * processing in the software interrupt portion of the driver. 
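The sbpcd and viocd hunks above drop the CDC_IOCTLS capability and the .dev_ioctl hook: driver-private commands now live in the block ioctl handler, which only runs after the generic cdrom layer has refused the request. A minimal sketch of that dispatch pattern, assuming a hypothetical my_private_ioctl() helper and a cdrom_info field in the driver's private data:

static int my_block_ioctl(struct inode *inode, struct file *file,
			  unsigned cmd, unsigned long arg)
{
	struct my_drive *p = inode->i_bdev->bd_disk->private_data;
	int ret;

	/* Give the generic cdrom layer first shot at the command. */
	ret = cdrom_ioctl(file, p->cdrom_info, inode, cmd, arg);
	if (ret != -ENOSYS)
		return ret;

	/* -ENOSYS means "not a generic command": fall back to the
	 * driver-specific switch (hypothetical helper). */
	return my_private_ioctl(p, cmd, arg);
}

With the fallback in place there is no separate dev_ioctl path left, which is why CDC_IOCTLS disappears from the capability masks as well.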
*/ -static _INLINE_ void rs_sched_event(struct async_struct *info, - int event) +static void rs_sched_event(struct async_struct *info, + int event) { info->event |= 1 << event; tasklet_schedule(&info->tlet); } -static _INLINE_ void receive_chars(struct async_struct *info) +static void receive_chars(struct async_struct *info) { int status; int serdatr; @@ -349,7 +343,7 @@ out: return; } -static _INLINE_ void transmit_chars(struct async_struct *info) +static void transmit_chars(struct async_struct *info) { custom.intreq = IF_TBE; mb(); @@ -389,7 +383,7 @@ static _INLINE_ void transmit_chars(struct async_struct *info) } } -static _INLINE_ void check_modem_status(struct async_struct *info) +static void check_modem_status(struct async_struct *info) { unsigned char status = ciab.pra & (SER_DCD | SER_CTS | SER_DSR); unsigned char dstatus; @@ -1959,7 +1953,7 @@ done: * number, and identifies which options were configured into this * driver. */ -static _INLINE_ void show_serial_version(void) +static void show_serial_version(void) { printk(KERN_INFO "%s version %s\n", serial_name, serial_version); } diff --git a/drivers/char/generic_serial.c b/drivers/char/generic_serial.c index e38a5f0e07b..5e59c0b4273 100644 --- a/drivers/char/generic_serial.c +++ b/drivers/char/generic_serial.c @@ -48,8 +48,8 @@ static int gs_debug; #define NEW_WRITE_LOCKING 1 #if NEW_WRITE_LOCKING #define DECL /* Nothing */ -#define LOCKIT down (& port->port_write_sem); -#define RELEASEIT up (&port->port_write_sem); +#define LOCKIT mutex_lock(& port->port_write_mutex); +#define RELEASEIT mutex_unlock(&port->port_write_mutex); #else #define DECL unsigned long flags; #define LOCKIT save_flags (flags);cli () @@ -124,14 +124,14 @@ int gs_write(struct tty_struct * tty, /* get exclusive "write" access to this port (problem 3) */ /* This is not a spinlock because we can have a disk access (page fault) in copy_from_user */ - down (& port->port_write_sem); + mutex_lock(& port->port_write_mutex); while (1) { c = count; /* This is safe because we "OWN" the "head". Noone else can - change the "head": we own the port_write_sem. */ + change the "head": we own the port_write_mutex. */ /* Don't overrun the end of the buffer */ t = SERIAL_XMIT_SIZE - port->xmit_head; if (t < c) c = t; @@ -153,7 +153,7 @@ int gs_write(struct tty_struct * tty, count -= c; total += c; } - up (& port->port_write_sem); + mutex_unlock(& port->port_write_mutex); gs_dprintk (GS_DEBUG_WRITE, "write: interrupts are %s\n", (port->flags & GS_TX_INTEN)?"enabled": "disabled"); @@ -214,7 +214,7 @@ int gs_write(struct tty_struct * tty, c = count; /* This is safe because we "OWN" the "head". Noone else can - change the "head": we own the port_write_sem. */ + change the "head": we own the port_write_mutex. */ /* Don't overrun the end of the buffer */ t = SERIAL_XMIT_SIZE - port->xmit_head; if (t < c) c = t; @@ -888,7 +888,7 @@ int gs_init_port(struct gs_port *port) spin_lock_irqsave (&port->driver_lock, flags); if (port->tty) clear_bit(TTY_IO_ERROR, &port->tty->flags); - init_MUTEX(&port->port_write_sem); + mutex_init(&port->port_write_mutex); port->xmit_cnt = port->xmit_head = port->xmit_tail = 0; spin_unlock_irqrestore(&port->driver_lock, flags); gs_set_termios(port->tty, NULL); diff --git a/drivers/char/istallion.c b/drivers/char/istallion.c index 28c5a3193b8..ede128356af 100644 --- a/drivers/char/istallion.c +++ b/drivers/char/istallion.c @@ -181,7 +181,6 @@ static struct tty_driver *stli_serial; * is already swapping a shared buffer won't make things any worse. 
*/ static char *stli_tmpwritebuf; -static DECLARE_MUTEX(stli_tmpwritesem); #define STLI_TXBUFSIZE 4096 diff --git a/drivers/char/n_tty.c b/drivers/char/n_tty.c index ccad7ae9454..ede365d0538 100644 --- a/drivers/char/n_tty.c +++ b/drivers/char/n_tty.c @@ -132,7 +132,7 @@ static void put_tty_queue(unsigned char c, struct tty_struct *tty) * We test the TTY_THROTTLED bit first so that it always * indicates the current state. The decision about whether * it is worth allowing more input has been taken by the caller. - * Can sleep, may be called under the atomic_read semaphore but + * Can sleep, may be called under the atomic_read_lock mutex but * this is not guaranteed. */ @@ -1132,7 +1132,7 @@ static inline int input_available_p(struct tty_struct *tty, int amt) * buffer, and once to drain the space from the (physical) beginning of * the buffer to head pointer. * - * Called under the tty->atomic_read sem and with TTY_DONT_FLIP set + * Called under the tty->atomic_read_lock sem and with TTY_DONT_FLIP set * */ @@ -1262,11 +1262,11 @@ do_it_again: * Internal serialization of reads. */ if (file->f_flags & O_NONBLOCK) { - if (down_trylock(&tty->atomic_read)) + if (!mutex_trylock(&tty->atomic_read_lock)) return -EAGAIN; } else { - if (down_interruptible(&tty->atomic_read)) + if (mutex_lock_interruptible(&tty->atomic_read_lock)) return -ERESTARTSYS; } @@ -1393,7 +1393,7 @@ do_it_again: timeout = time; } clear_bit(TTY_DONT_FLIP, &tty->flags); - up(&tty->atomic_read); + mutex_unlock(&tty->atomic_read_lock); remove_wait_queue(&tty->read_wait, &wait); if (!waitqueue_active(&tty->read_wait)) diff --git a/drivers/char/nwflash.c b/drivers/char/nwflash.c index ca41d62b1d9..8865387d344 100644 --- a/drivers/char/nwflash.c +++ b/drivers/char/nwflash.c @@ -27,6 +27,7 @@ #include <linux/rwsem.h> #include <linux/init.h> #include <linux/smp_lock.h> +#include <linux/mutex.h> #include <asm/hardware/dec21285.h> #include <asm/io.h> @@ -56,7 +57,7 @@ static int gbWriteEnable; static int gbWriteBase64Enable; static volatile unsigned char *FLASH_BASE; static int gbFlashSize = KFLASH_SIZE; -static DECLARE_MUTEX(nwflash_sem); +static DEFINE_MUTEX(nwflash_mutex); extern spinlock_t gpio_lock; @@ -140,7 +141,7 @@ static ssize_t flash_read(struct file *file, char __user *buf, size_t size, /* * We now lock against reads and writes. --rmk */ - if (down_interruptible(&nwflash_sem)) + if (mutex_lock_interruptible(&nwflash_mutex)) return -ERESTARTSYS; ret = copy_to_user(buf, (void *)(FLASH_BASE + p), count); @@ -149,7 +150,7 @@ static ssize_t flash_read(struct file *file, char __user *buf, size_t size, *ppos += count; } else ret = -EFAULT; - up(&nwflash_sem); + mutex_unlock(&nwflash_mutex); } return ret; } @@ -188,7 +189,7 @@ static ssize_t flash_write(struct file *file, const char __user *buf, /* * We now lock against reads and writes. 
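One detail worth noting in the n_tty.c hunk above: down_trylock() and mutex_trylock() have opposite return conventions (down_trylock() returns 0 when it acquired the semaphore, mutex_trylock() returns 1 when it acquired the mutex), so the test has to be negated during the conversion. A small sketch of the non-blocking versus blocking acquire, with read_lock standing in for tty->atomic_read_lock:

#include <linux/errno.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(read_lock);		/* stand-in for the per-tty lock */

static int grab_read_lock(int nonblock)
{
	if (nonblock) {
		/* mutex_trylock() returns 1 on success, hence the '!'. */
		if (!mutex_trylock(&read_lock))
			return -EAGAIN;
		return 0;
	}
	/* Blocking path: interruptible so a signal can abort the read. */
	if (mutex_lock_interruptible(&read_lock))
		return -ERESTARTSYS;
	return 0;
}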
--rmk */ - if (down_interruptible(&nwflash_sem)) + if (mutex_lock_interruptible(&nwflash_mutex)) return -ERESTARTSYS; written = 0; @@ -277,7 +278,7 @@ static ssize_t flash_write(struct file *file, const char __user *buf, */ leds_event(led_release); - up(&nwflash_sem); + mutex_unlock(&nwflash_mutex); return written; } diff --git a/drivers/char/raw.c b/drivers/char/raw.c index 30e4cbe16bb..15a7b408652 100644 --- a/drivers/char/raw.c +++ b/drivers/char/raw.c @@ -19,6 +19,7 @@ #include <linux/uio.h> #include <linux/cdev.h> #include <linux/device.h> +#include <linux/mutex.h> #include <asm/uaccess.h> @@ -29,7 +30,7 @@ struct raw_device_data { static struct class *raw_class; static struct raw_device_data raw_devices[MAX_RAW_MINORS]; -static DECLARE_MUTEX(raw_mutex); +static DEFINE_MUTEX(raw_mutex); static struct file_operations raw_ctl_fops; /* forward declaration */ /* @@ -53,7 +54,7 @@ static int raw_open(struct inode *inode, struct file *filp) return 0; } - down(&raw_mutex); + mutex_lock(&raw_mutex); /* * All we need to do on open is check that the device is bound. @@ -78,7 +79,7 @@ static int raw_open(struct inode *inode, struct file *filp) filp->f_dentry->d_inode->i_mapping = bdev->bd_inode->i_mapping; filp->private_data = bdev; - up(&raw_mutex); + mutex_unlock(&raw_mutex); return 0; out2: @@ -86,7 +87,7 @@ out2: out1: blkdev_put(bdev); out: - up(&raw_mutex); + mutex_unlock(&raw_mutex); return err; } @@ -99,14 +100,14 @@ static int raw_release(struct inode *inode, struct file *filp) const int minor= iminor(inode); struct block_device *bdev; - down(&raw_mutex); + mutex_lock(&raw_mutex); bdev = raw_devices[minor].binding; if (--raw_devices[minor].inuse == 0) { /* Here inode->i_mapping == bdev->bd_inode->i_mapping */ inode->i_mapping = &inode->i_data; inode->i_mapping->backing_dev_info = &default_backing_dev_info; } - up(&raw_mutex); + mutex_unlock(&raw_mutex); bd_release(bdev); blkdev_put(bdev); @@ -187,9 +188,9 @@ static int raw_ctl_ioctl(struct inode *inode, struct file *filp, goto out; } - down(&raw_mutex); + mutex_lock(&raw_mutex); if (rawdev->inuse) { - up(&raw_mutex); + mutex_unlock(&raw_mutex); err = -EBUSY; goto out; } @@ -211,11 +212,11 @@ static int raw_ctl_ioctl(struct inode *inode, struct file *filp, bind_device(&rq); } } - up(&raw_mutex); + mutex_unlock(&raw_mutex); } else { struct block_device *bdev; - down(&raw_mutex); + mutex_lock(&raw_mutex); bdev = rawdev->binding; if (bdev) { rq.block_major = MAJOR(bdev->bd_dev); @@ -223,7 +224,7 @@ static int raw_ctl_ioctl(struct inode *inode, struct file *filp, } else { rq.block_major = rq.block_minor = 0; } - up(&raw_mutex); + mutex_unlock(&raw_mutex); if (copy_to_user((void __user *)arg, &rq, sizeof(rq))) { err = -EFAULT; goto out; diff --git a/drivers/char/ser_a2232.c b/drivers/char/ser_a2232.c index fee68cc895f..510bd3e0e88 100644 --- a/drivers/char/ser_a2232.c +++ b/drivers/char/ser_a2232.c @@ -97,7 +97,7 @@ #include <asm/amigahw.h> #include <linux/zorro.h> #include <asm/irq.h> -#include <asm/semaphore.h> +#include <linux/mutex.h> #include <linux/delay.h> @@ -654,7 +654,7 @@ static void a2232_init_portstructs(void) port->gs.closing_wait = 30 * HZ; port->gs.rd = &a2232_real_driver; #ifdef NEW_WRITE_LOCKING - init_MUTEX(&(port->gs.port_write_sem)); + init_MUTEX(&(port->gs.port_write_mutex)); #endif init_waitqueue_head(&port->gs.open_wait); init_waitqueue_head(&port->gs.close_wait); diff --git a/drivers/char/snsc.c b/drivers/char/snsc.c index 0e7d216e7eb..b543821d8cb 100644 --- a/drivers/char/snsc.c +++ b/drivers/char/snsc.c @@ -5,7 +5,7 
@@ * License. See the file "COPYING" in the main directory of this archive * for more details. * - * Copyright (C) 2004 Silicon Graphics, Inc. All rights reserved. + * Copyright (C) 2004, 2006 Silicon Graphics, Inc. All rights reserved. */ /* @@ -77,7 +77,7 @@ scdrv_open(struct inode *inode, struct file *file) scd = container_of(inode->i_cdev, struct sysctl_data_s, scd_cdev); /* allocate memory for subchannel data */ - sd = kmalloc(sizeof (struct subch_data_s), GFP_KERNEL); + sd = kzalloc(sizeof (struct subch_data_s), GFP_KERNEL); if (sd == NULL) { printk("%s: couldn't allocate subchannel data\n", __FUNCTION__); @@ -85,7 +85,6 @@ scdrv_open(struct inode *inode, struct file *file) } /* initialize subch_data_s fields */ - memset(sd, 0, sizeof (struct subch_data_s)); sd->sd_nasid = scd->scd_nasid; sd->sd_subch = ia64_sn_irtr_open(scd->scd_nasid); @@ -394,7 +393,7 @@ scdrv_init(void) sprintf(devnamep, "#%d", geo_slab(geoid)); /* allocate sysctl device data */ - scd = kmalloc(sizeof (struct sysctl_data_s), + scd = kzalloc(sizeof (struct sysctl_data_s), GFP_KERNEL); if (!scd) { printk("%s: failed to allocate device info" @@ -402,7 +401,6 @@ scdrv_init(void) SYSCTL_BASENAME, devname); continue; } - memset(scd, 0, sizeof (struct sysctl_data_s)); /* initialize sysctl device data fields */ scd->scd_nasid = cnodeid_to_nasid(cnode); diff --git a/drivers/char/snsc_event.c b/drivers/char/snsc_event.c index a4fa507eed9..e234d50e142 100644 --- a/drivers/char/snsc_event.c +++ b/drivers/char/snsc_event.c @@ -287,7 +287,7 @@ scdrv_event_init(struct sysctl_data_s *scd) { int rv; - event_sd = kmalloc(sizeof (struct subch_data_s), GFP_KERNEL); + event_sd = kzalloc(sizeof (struct subch_data_s), GFP_KERNEL); if (event_sd == NULL) { printk(KERN_WARNING "%s: couldn't allocate subchannel info" " for event monitoring\n", __FUNCTION__); @@ -295,7 +295,6 @@ scdrv_event_init(struct sysctl_data_s *scd) } /* initialize subch_data_s fields */ - memset(event_sd, 0, sizeof (struct subch_data_s)); event_sd->sd_nasid = scd->scd_nasid; spin_lock_init(&event_sd->sd_rlock); @@ -321,5 +320,3 @@ scdrv_event_init(struct sysctl_data_s *scd) return; } } - - diff --git a/drivers/char/stallion.c b/drivers/char/stallion.c index bdaab699210..3f5d6077f39 100644 --- a/drivers/char/stallion.c +++ b/drivers/char/stallion.c @@ -148,7 +148,6 @@ static struct tty_driver *stl_serial; * is already swapping a shared buffer won't make things any worse. */ static char *stl_tmpwritebuf; -static DECLARE_MUTEX(stl_tmpwritesem); /* * Define a local default termios struct. All ports will be created diff --git a/drivers/char/sx.c b/drivers/char/sx.c index a6b4f02bdce..3b474723027 100644 --- a/drivers/char/sx.c +++ b/drivers/char/sx.c @@ -2318,7 +2318,7 @@ static int sx_init_portstructs (int nboards, int nports) port->board = board; port->gs.rd = &sx_real_driver; #ifdef NEW_WRITE_LOCKING - port->gs.port_write_sem = MUTEX; + port->gs.port_write_mutex = MUTEX; #endif port->gs.driver_lock = SPIN_LOCK_UNLOCKED; /* diff --git a/drivers/char/tty_io.c b/drivers/char/tty_io.c index 53d3d066554..76592ee1fb3 100644 --- a/drivers/char/tty_io.c +++ b/drivers/char/tty_io.c @@ -130,7 +130,7 @@ LIST_HEAD(tty_drivers); /* linked list of tty drivers */ /* Semaphore to protect creating and releasing a tty. 
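The snsc.c and snsc_event.c hunks above replace the kmalloc()-then-memset() pair with kzalloc(), which returns already-zeroed memory (or NULL). A minimal before/after sketch using a made-up structure:

#include <linux/slab.h>

struct subch_info {		/* hypothetical, for illustration only */
	int nasid;
	int subch;
};

static struct subch_info *alloc_subch_info(void)
{
	/* old: sd = kmalloc(sizeof(*sd), GFP_KERNEL);
	 *      memset(sd, 0, sizeof(*sd));            */
	struct subch_info *sd = kzalloc(sizeof(*sd), GFP_KERNEL);

	return sd;		/* caller still has to check for NULL */
}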
This is shared with vt.c for deeply disgusting hack reasons */ -DECLARE_MUTEX(tty_sem); +DEFINE_MUTEX(tty_mutex); #ifdef CONFIG_UNIX98_PTYS extern struct tty_driver *ptm_driver; /* Unix98 pty masters; for /dev/ptmx */ @@ -1188,11 +1188,11 @@ void disassociate_ctty(int on_exit) lock_kernel(); - down(&tty_sem); + mutex_lock(&tty_mutex); tty = current->signal->tty; if (tty) { tty_pgrp = tty->pgrp; - up(&tty_sem); + mutex_unlock(&tty_mutex); if (on_exit && tty->driver->type != TTY_DRIVER_TYPE_PTY) tty_vhangup(tty); } else { @@ -1200,7 +1200,7 @@ void disassociate_ctty(int on_exit) kill_pg(current->signal->tty_old_pgrp, SIGHUP, on_exit); kill_pg(current->signal->tty_old_pgrp, SIGCONT, on_exit); } - up(&tty_sem); + mutex_unlock(&tty_mutex); unlock_kernel(); return; } @@ -1211,7 +1211,7 @@ void disassociate_ctty(int on_exit) } /* Must lock changes to tty_old_pgrp */ - down(&tty_sem); + mutex_lock(&tty_mutex); current->signal->tty_old_pgrp = 0; tty->session = 0; tty->pgrp = -1; @@ -1222,7 +1222,7 @@ void disassociate_ctty(int on_exit) p->signal->tty = NULL; } while_each_task_pid(current->signal->session, PIDTYPE_SID, p); read_unlock(&tasklist_lock); - up(&tty_sem); + mutex_unlock(&tty_mutex); unlock_kernel(); } @@ -1306,7 +1306,7 @@ static inline ssize_t do_tty_write( ssize_t ret = 0, written = 0; unsigned int chunk; - if (down_interruptible(&tty->atomic_write)) { + if (mutex_lock_interruptible(&tty->atomic_write_lock)) { return -ERESTARTSYS; } @@ -1329,7 +1329,7 @@ static inline ssize_t do_tty_write( if (count < chunk) chunk = count; - /* write_buf/write_cnt is protected by the atomic_write semaphore */ + /* write_buf/write_cnt is protected by the atomic_write_lock mutex */ if (tty->write_cnt < chunk) { unsigned char *buf; @@ -1338,7 +1338,7 @@ static inline ssize_t do_tty_write( buf = kmalloc(chunk, GFP_KERNEL); if (!buf) { - up(&tty->atomic_write); + mutex_unlock(&tty->atomic_write_lock); return -ENOMEM; } kfree(tty->write_buf); @@ -1374,7 +1374,7 @@ static inline ssize_t do_tty_write( inode->i_mtime = current_fs_time(inode->i_sb); ret = written; } - up(&tty->atomic_write); + mutex_unlock(&tty->atomic_write_lock); return ret; } @@ -1442,8 +1442,8 @@ static inline void tty_line_name(struct tty_driver *driver, int index, char *p) /* * WSH 06/09/97: Rewritten to remove races and properly clean up after a - * failed open. The new code protects the open with a semaphore, so it's - * really quite straightforward. The semaphore locking can probably be + * failed open. The new code protects the open with a mutex, so it's + * really quite straightforward. The mutex locking can probably be * relaxed for the (most common) case of reopening a tty. */ static int init_dev(struct tty_driver *driver, int idx, @@ -1640,7 +1640,7 @@ fast_track: success: *ret_tty = tty; - /* All paths come through here to release the semaphore */ + /* All paths come through here to release the mutex */ end_init: return retval; @@ -1837,7 +1837,7 @@ static void release_dev(struct file * filp) /* Guard against races with tty->count changes elsewhere and opens on /dev/tty */ - down(&tty_sem); + mutex_lock(&tty_mutex); tty_closing = tty->count <= 1; o_tty_closing = o_tty && (o_tty->count <= (pty_master ? 
1 : 0)); @@ -1868,7 +1868,7 @@ static void release_dev(struct file * filp) printk(KERN_WARNING "release_dev: %s: read/write wait queue " "active!\n", tty_name(tty, buf)); - up(&tty_sem); + mutex_unlock(&tty_mutex); schedule(); } @@ -1934,7 +1934,7 @@ static void release_dev(struct file * filp) read_unlock(&tasklist_lock); } - up(&tty_sem); + mutex_unlock(&tty_mutex); /* check whether both sides are closing ... */ if (!tty_closing || (o_tty && !o_tty_closing)) @@ -2040,11 +2040,11 @@ retry_open: index = -1; retval = 0; - down(&tty_sem); + mutex_lock(&tty_mutex); if (device == MKDEV(TTYAUX_MAJOR,0)) { if (!current->signal->tty) { - up(&tty_sem); + mutex_unlock(&tty_mutex); return -ENXIO; } driver = current->signal->tty->driver; @@ -2070,18 +2070,18 @@ retry_open: noctty = 1; goto got_driver; } - up(&tty_sem); + mutex_unlock(&tty_mutex); return -ENODEV; } driver = get_tty_driver(device, &index); if (!driver) { - up(&tty_sem); + mutex_unlock(&tty_mutex); return -ENODEV; } got_driver: retval = init_dev(driver, index, &tty); - up(&tty_sem); + mutex_unlock(&tty_mutex); if (retval) return retval; @@ -2167,9 +2167,9 @@ static int ptmx_open(struct inode * inode, struct file * filp) } up(&allocated_ptys_lock); - down(&tty_sem); + mutex_lock(&tty_mutex); retval = init_dev(ptm_driver, index, &tty); - up(&tty_sem); + mutex_unlock(&tty_mutex); if (retval) goto out; @@ -2915,8 +2915,8 @@ static void initialize_tty_struct(struct tty_struct *tty) init_waitqueue_head(&tty->write_wait); init_waitqueue_head(&tty->read_wait); INIT_WORK(&tty->hangup_work, do_tty_hangup, tty); - sema_init(&tty->atomic_read, 1); - sema_init(&tty->atomic_write, 1); + mutex_init(&tty->atomic_read_lock); + mutex_init(&tty->atomic_write_lock); spin_lock_init(&tty->read_lock); INIT_LIST_HEAD(&tty->tty_files); INIT_WORK(&tty->SAK_work, NULL, NULL); diff --git a/drivers/char/vme_scc.c b/drivers/char/vme_scc.c index d9325281e48..fd00822ac14 100644 --- a/drivers/char/vme_scc.c +++ b/drivers/char/vme_scc.c @@ -184,7 +184,7 @@ static void scc_init_portstructs(void) port->gs.closing_wait = 30 * HZ; port->gs.rd = &scc_real_driver; #ifdef NEW_WRITE_LOCKING - port->gs.port_write_sem = MUTEX; + port->gs.port_write_mutex = MUTEX; #endif init_waitqueue_head(&port->gs.open_wait); init_waitqueue_head(&port->gs.close_wait); diff --git a/drivers/char/vt.c b/drivers/char/vt.c index 0900d1dbee5..ca4844c527d 100644 --- a/drivers/char/vt.c +++ b/drivers/char/vt.c @@ -2489,7 +2489,7 @@ static int con_open(struct tty_struct *tty, struct file *filp) } /* - * We take tty_sem in here to prevent another thread from coming in via init_dev + * We take tty_mutex in here to prevent another thread from coming in via init_dev * and taking a ref against the tty while we're in the process of forgetting * about it and cleaning things up. 
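The tty_io.c conversion above (and the vt.c users that follow) is the standard recipe for turning a semaphore that was only ever used as a mutex into a real struct mutex: DECLARE_MUTEX becomes DEFINE_MUTEX, down()/up() become mutex_lock()/mutex_unlock(), and down_interruptible() becomes mutex_lock_interruptible(). A condensed sketch for a hypothetical global list lock:

#include <linux/list.h>
#include <linux/mutex.h>

/* before: static DECLARE_MUTEX(dev_list_sem);	(binary semaphore) */
static DEFINE_MUTEX(dev_list_mutex);		/* hypothetical lock */
static LIST_HEAD(dev_list);

static void add_device(struct list_head *node)
{
	mutex_lock(&dev_list_mutex);		/* was: down(&dev_list_sem) */
	list_add_tail(node, &dev_list);
	mutex_unlock(&dev_list_mutex);		/* was: up(&dev_list_sem)   */
}

Besides being cheaper, the mutex variant enables stricter debugging checks that plain semaphores never had, which is presumably why only the semaphores used with an initial count of one are converted here.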
* @@ -2497,7 +2497,7 @@ static int con_open(struct tty_struct *tty, struct file *filp) */ static void con_close(struct tty_struct *tty, struct file *filp) { - down(&tty_sem); + mutex_lock(&tty_mutex); acquire_console_sem(); if (tty && tty->count == 1) { struct vc_data *vc = tty->driver_data; @@ -2507,15 +2507,15 @@ static void con_close(struct tty_struct *tty, struct file *filp) tty->driver_data = NULL; release_console_sem(); vcs_remove_devfs(tty); - up(&tty_sem); + mutex_unlock(&tty_mutex); /* - * tty_sem is released, but we still hold BKL, so there is + * tty_mutex is released, but we still hold BKL, so there is * still exclusion against init_dev() */ return; } release_console_sem(); - up(&tty_sem); + mutex_unlock(&tty_mutex); } static void vc_init(struct vc_data *vc, unsigned int rows, @@ -2869,9 +2869,9 @@ void unblank_screen(void) } /* - * We defer the timer blanking to work queue so it can take the console semaphore + * We defer the timer blanking to work queue so it can take the console mutex * (console operations can still happen at irq time, but only from printk which - * has the console semaphore. Not perfect yet, but better than no locking + * has the console mutex. Not perfect yet, but better than no locking */ static void blank_screen_t(unsigned long dummy) { @@ -3234,6 +3234,14 @@ void vcs_scr_writew(struct vc_data *vc, u16 val, u16 *org) } } +int is_console_suspend_safe(void) +{ + /* It is unsafe to suspend devices while X has control of the + * hardware. Make sure we are running on a kernel-controlled console. + */ + return vc_cons[fg_console].d->vc_mode == KD_TEXT; +} + /* * Visible symbols for modules */ diff --git a/drivers/char/watchdog/pcwd_usb.c b/drivers/char/watchdog/pcwd_usb.c index 1533f56baa4..2700c5c45b8 100644 --- a/drivers/char/watchdog/pcwd_usb.c +++ b/drivers/char/watchdog/pcwd_usb.c @@ -42,6 +42,7 @@ #include <linux/completion.h> #include <asm/uaccess.h> #include <linux/usb.h> +#include <linux/mutex.h> #ifdef CONFIG_USB_DEBUG @@ -143,7 +144,7 @@ struct usb_pcwd_private { static struct usb_pcwd_private *usb_pcwd_device; /* prevent races between open() and disconnect() */ -static DECLARE_MUTEX (disconnect_sem); +static DEFINE_MUTEX(disconnect_mutex); /* local function prototypes */ static int usb_pcwd_probe (struct usb_interface *interface, const struct usb_device_id *id); @@ -723,7 +724,7 @@ static void usb_pcwd_disconnect(struct usb_interface *interface) struct usb_pcwd_private *usb_pcwd; /* prevent races with open() */ - down (&disconnect_sem); + mutex_lock(&disconnect_mutex); usb_pcwd = usb_get_intfdata (interface); usb_set_intfdata (interface, NULL); @@ -749,7 +750,7 @@ static void usb_pcwd_disconnect(struct usb_interface *interface) cards_found--; - up (&disconnect_sem); + mutex_unlock(&disconnect_mutex); printk(KERN_INFO PFX "USB PC Watchdog disconnected\n"); } diff --git a/drivers/connector/connector.c b/drivers/connector/connector.c index d7125f4d911..35897079a78 100644 --- a/drivers/connector/connector.c +++ b/drivers/connector/connector.c @@ -26,6 +26,7 @@ #include <linux/netlink.h> #include <linux/moduleparam.h> #include <linux/connector.h> +#include <linux/mutex.h> #include <net/sock.h> @@ -41,7 +42,7 @@ module_param(cn_val, uint, 0); MODULE_PARM_DESC(cn_idx, "Connector's main device idx."); MODULE_PARM_DESC(cn_val, "Connector's main device val."); -static DECLARE_MUTEX(notify_lock); +static DEFINE_MUTEX(notify_lock); static LIST_HEAD(notify_list); static struct cn_dev cdev; @@ -260,7 +261,7 @@ static void cn_notify(struct cb_id *id, u32 
notify_event) { struct cn_ctl_entry *ent; - down(¬ify_lock); + mutex_lock(¬ify_lock); list_for_each_entry(ent, ¬ify_list, notify_entry) { int i; struct cn_notify_req *req; @@ -293,7 +294,7 @@ static void cn_notify(struct cb_id *id, u32 notify_event) cn_netlink_send(&m, ctl->group, GFP_KERNEL); } } - up(¬ify_lock); + mutex_unlock(¬ify_lock); } /* @@ -407,14 +408,14 @@ static void cn_callback(void *data) if (ctl->group == 0) { struct cn_ctl_entry *n; - down(¬ify_lock); + mutex_lock(¬ify_lock); list_for_each_entry_safe(ent, n, ¬ify_list, notify_entry) { if (cn_ctl_msg_equals(ent->msg, ctl)) { list_del(&ent->notify_entry); kfree(ent); } } - up(¬ify_lock); + mutex_unlock(¬ify_lock); return; } @@ -429,9 +430,9 @@ static void cn_callback(void *data) memcpy(ent->msg, ctl, size - sizeof(*ent)); - down(¬ify_lock); + mutex_lock(¬ify_lock); list_add(&ent->notify_entry, ¬ify_list); - up(¬ify_lock); + mutex_unlock(¬ify_lock); } static int __init cn_init(void) diff --git a/drivers/firmware/dcdbas.c b/drivers/firmware/dcdbas.c index 3a4e5c5b4e1..d6543fc4a92 100644 --- a/drivers/firmware/dcdbas.c +++ b/drivers/firmware/dcdbas.c @@ -33,6 +33,7 @@ #include <linux/spinlock.h> #include <linux/string.h> #include <linux/types.h> +#include <linux/mutex.h> #include <asm/io.h> #include <asm/semaphore.h> @@ -48,7 +49,7 @@ static u8 *smi_data_buf; static dma_addr_t smi_data_buf_handle; static unsigned long smi_data_buf_size; static u32 smi_data_buf_phys_addr; -static DECLARE_MUTEX(smi_data_lock); +static DEFINE_MUTEX(smi_data_lock); static unsigned int host_control_action; static unsigned int host_control_smi_type; @@ -139,9 +140,9 @@ static ssize_t smi_data_buf_size_store(struct device *dev, buf_size = simple_strtoul(buf, NULL, 10); /* make sure SMI data buffer is at least buf_size */ - down(&smi_data_lock); + mutex_lock(&smi_data_lock); ret = smi_data_buf_realloc(buf_size); - up(&smi_data_lock); + mutex_unlock(&smi_data_lock); if (ret) return ret; @@ -154,7 +155,7 @@ static ssize_t smi_data_read(struct kobject *kobj, char *buf, loff_t pos, size_t max_read; ssize_t ret; - down(&smi_data_lock); + mutex_lock(&smi_data_lock); if (pos >= smi_data_buf_size) { ret = 0; @@ -165,7 +166,7 @@ static ssize_t smi_data_read(struct kobject *kobj, char *buf, loff_t pos, ret = min(max_read, count); memcpy(buf, smi_data_buf + pos, ret); out: - up(&smi_data_lock); + mutex_unlock(&smi_data_lock); return ret; } @@ -174,7 +175,7 @@ static ssize_t smi_data_write(struct kobject *kobj, char *buf, loff_t pos, { ssize_t ret; - down(&smi_data_lock); + mutex_lock(&smi_data_lock); ret = smi_data_buf_realloc(pos + count); if (ret) @@ -183,7 +184,7 @@ static ssize_t smi_data_write(struct kobject *kobj, char *buf, loff_t pos, memcpy(smi_data_buf + pos, buf, count); ret = count; out: - up(&smi_data_lock); + mutex_unlock(&smi_data_lock); return ret; } @@ -201,9 +202,9 @@ static ssize_t host_control_action_store(struct device *dev, ssize_t ret; /* make sure buffer is available for host control command */ - down(&smi_data_lock); + mutex_lock(&smi_data_lock); ret = smi_data_buf_realloc(sizeof(struct apm_cmd)); - up(&smi_data_lock); + mutex_unlock(&smi_data_lock); if (ret) return ret; @@ -302,7 +303,7 @@ static ssize_t smi_request_store(struct device *dev, unsigned long val = simple_strtoul(buf, NULL, 10); ssize_t ret; - down(&smi_data_lock); + mutex_lock(&smi_data_lock); if (smi_data_buf_size < sizeof(struct smi_cmd)) { ret = -ENODEV; @@ -334,7 +335,7 @@ static ssize_t smi_request_store(struct device *dev, } out: - up(&smi_data_lock); + 
mutex_unlock(&smi_data_lock); return ret; } diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig index 7230d4e0819..99cdc612d2c 100644 --- a/drivers/hwmon/Kconfig +++ b/drivers/hwmon/Kconfig @@ -406,13 +406,14 @@ config SENSORS_W83L785TS will be called w83l785ts. config SENSORS_W83627HF - tristate "Winbond W83627HF, W83627THF, W83637HF, W83697HF" - depends on HWMON && I2C && EXPERIMENTAL + tristate "Winbond W83627HF, W83627THF, W83637HF, W83687THF, W83697HF" + depends on HWMON && I2C select I2C_ISA select HWMON_VID help If you say yes here you get support for the Winbond W836X7 series - of sensor chips: the W83627HF, W83627THF, W83637HF, and the W83697HF + of sensor chips: the W83627HF, W83627THF, W83637HF, W83687THF and + W83697HF. This driver can also be built as a module. If so, the module will be called w83627hf. diff --git a/drivers/hwmon/adm1021.c b/drivers/hwmon/adm1021.c index 665612729cb..2b6e74dd4a8 100644 --- a/drivers/hwmon/adm1021.c +++ b/drivers/hwmon/adm1021.c @@ -26,6 +26,7 @@ #include <linux/i2c.h> #include <linux/hwmon.h> #include <linux/err.h> +#include <linux/mutex.h> /* Addresses to scan */ @@ -92,7 +93,7 @@ struct adm1021_data { struct class_device *class_dev; enum chips type; - struct semaphore update_lock; + struct mutex update_lock; char valid; /* !=0 if following fields are valid */ unsigned long last_updated; /* In jiffies */ @@ -162,10 +163,10 @@ static ssize_t set_##value(struct device *dev, struct device_attribute *attr, co struct adm1021_data *data = i2c_get_clientdata(client); \ int temp = simple_strtoul(buf, NULL, 10); \ \ - down(&data->update_lock); \ + mutex_lock(&data->update_lock); \ data->value = TEMP_TO_REG(temp); \ adm1021_write_value(client, reg, data->value); \ - up(&data->update_lock); \ + mutex_unlock(&data->update_lock); \ return count; \ } set(temp_max, ADM1021_REG_TOS_W); @@ -275,7 +276,7 @@ static int adm1021_detect(struct i2c_adapter *adapter, int address, int kind) strlcpy(new_client->name, type_name, I2C_NAME_SIZE); data->type = kind; data->valid = 0; - init_MUTEX(&data->update_lock); + mutex_init(&data->update_lock); /* Tell the I2C layer a new client has arrived */ if ((err = i2c_attach_client(new_client))) @@ -351,7 +352,7 @@ static struct adm1021_data *adm1021_update_device(struct device *dev) struct i2c_client *client = to_i2c_client(dev); struct adm1021_data *data = i2c_get_clientdata(client); - down(&data->update_lock); + mutex_lock(&data->update_lock); if (time_after(jiffies, data->last_updated + HZ + HZ / 2) || !data->valid) { @@ -375,7 +376,7 @@ static struct adm1021_data *adm1021_update_device(struct device *dev) data->valid = 1; } - up(&data->update_lock); + mutex_unlock(&data->update_lock); return data; } diff --git a/drivers/hwmon/adm1025.c b/drivers/hwmon/adm1025.c index 9331c56d2ba..a4c859c9fbf 100644 --- a/drivers/hwmon/adm1025.c +++ b/drivers/hwmon/adm1025.c @@ -53,6 +53,7 @@ #include <linux/hwmon.h> #include <linux/hwmon-vid.h> #include <linux/err.h> +#include <linux/mutex.h> /* * Addresses to scan @@ -133,7 +134,7 @@ static struct i2c_driver adm1025_driver = { struct adm1025_data { struct i2c_client client; struct class_device *class_dev; - struct semaphore update_lock; + struct mutex update_lock; char valid; /* zero until following fields are valid */ unsigned long last_updated; /* in jiffies */ @@ -207,11 +208,11 @@ static ssize_t set_in##offset##_min(struct device *dev, struct device_attribute struct adm1025_data *data = i2c_get_clientdata(client); \ long val = simple_strtol(buf, NULL, 10); \ \ - 
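In the hwmon conversions above (adm1021 here, and the adm1025/adm1026/adm1031/adm9240 hunks that follow) the per-device private data carries a struct mutex update_lock, set up with mutex_init() at detect time and held around every sysfs store that updates both the cached value and the chip register. A condensed sketch of that pattern; the register constant and the conversion are placeholders, not any real chip's layout:

#include <linux/device.h>
#include <linux/i2c.h>
#include <linux/mutex.h>

#define MY_REG_TEMP_MAX	0x0b		/* hypothetical register address */

struct my_hwmon_data {
	struct i2c_client client;
	struct mutex update_lock;	/* guards cached fields and register writes */
	u8 temp_max;
};

/* in the detect/probe path: mutex_init(&data->update_lock); */

static ssize_t set_temp_max(struct device *dev, struct device_attribute *attr,
			    const char *buf, size_t count)
{
	struct i2c_client *client = to_i2c_client(dev);
	struct my_hwmon_data *data = i2c_get_clientdata(client);
	long val = simple_strtol(buf, NULL, 10);

	mutex_lock(&data->update_lock);
	data->temp_max = val & 0xff;	/* a real driver converts via TEMP_TO_REG() */
	i2c_smbus_write_byte_data(client, MY_REG_TEMP_MAX, data->temp_max);
	mutex_unlock(&data->update_lock);
	return count;
}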
down(&data->update_lock); \ + mutex_lock(&data->update_lock); \ data->in_min[offset] = IN_TO_REG(val, in_scale[offset]); \ i2c_smbus_write_byte_data(client, ADM1025_REG_IN_MIN(offset), \ data->in_min[offset]); \ - up(&data->update_lock); \ + mutex_unlock(&data->update_lock); \ return count; \ } \ static ssize_t set_in##offset##_max(struct device *dev, struct device_attribute *attr, const char *buf, \ @@ -221,11 +222,11 @@ static ssize_t set_in##offset##_max(struct device *dev, struct device_attribute struct adm1025_data *data = i2c_get_clientdata(client); \ long val = simple_strtol(buf, NULL, 10); \ \ - down(&data->update_lock); \ + mutex_lock(&data->update_lock); \ data->in_max[offset] = IN_TO_REG(val, in_scale[offset]); \ i2c_smbus_write_byte_data(client, ADM1025_REG_IN_MAX(offset), \ data->in_max[offset]); \ - up(&data->update_lock); \ + mutex_unlock(&data->update_lock); \ return count; \ } \ static DEVICE_ATTR(in##offset##_min, S_IWUSR | S_IRUGO, \ @@ -247,11 +248,11 @@ static ssize_t set_temp##offset##_min(struct device *dev, struct device_attribut struct adm1025_data *data = i2c_get_clientdata(client); \ long val = simple_strtol(buf, NULL, 10); \ \ - down(&data->update_lock); \ + mutex_lock(&data->update_lock); \ data->temp_min[offset-1] = TEMP_TO_REG(val); \ i2c_smbus_write_byte_data(client, ADM1025_REG_TEMP_LOW(offset-1), \ data->temp_min[offset-1]); \ - up(&data->update_lock); \ + mutex_unlock(&data->update_lock); \ return count; \ } \ static ssize_t set_temp##offset##_max(struct device *dev, struct device_attribute *attr, const char *buf, \ @@ -261,11 +262,11 @@ static ssize_t set_temp##offset##_max(struct device *dev, struct device_attribut struct adm1025_data *data = i2c_get_clientdata(client); \ long val = simple_strtol(buf, NULL, 10); \ \ - down(&data->update_lock); \ + mutex_lock(&data->update_lock); \ data->temp_max[offset-1] = TEMP_TO_REG(val); \ i2c_smbus_write_byte_data(client, ADM1025_REG_TEMP_HIGH(offset-1), \ data->temp_max[offset-1]); \ - up(&data->update_lock); \ + mutex_unlock(&data->update_lock); \ return count; \ } \ static DEVICE_ATTR(temp##offset##_min, S_IWUSR | S_IRUGO, \ @@ -404,7 +405,7 @@ static int adm1025_detect(struct i2c_adapter *adapter, int address, int kind) /* We can fill in the remaining client fields */ strlcpy(new_client->name, name, I2C_NAME_SIZE); data->valid = 0; - init_MUTEX(&data->update_lock); + mutex_init(&data->update_lock); /* Tell the I2C layer a new client has arrived */ if ((err = i2c_attach_client(new_client))) @@ -523,7 +524,7 @@ static struct adm1025_data *adm1025_update_device(struct device *dev) struct i2c_client *client = to_i2c_client(dev); struct adm1025_data *data = i2c_get_clientdata(client); - down(&data->update_lock); + mutex_lock(&data->update_lock); if (time_after(jiffies, data->last_updated + HZ * 2) || !data->valid) { int i; @@ -558,7 +559,7 @@ static struct adm1025_data *adm1025_update_device(struct device *dev) data->valid = 1; } - up(&data->update_lock); + mutex_unlock(&data->update_lock); return data; } diff --git a/drivers/hwmon/adm1026.c b/drivers/hwmon/adm1026.c index fefe6e74fd0..6d4f8b8d358 100644 --- a/drivers/hwmon/adm1026.c +++ b/drivers/hwmon/adm1026.c @@ -32,6 +32,7 @@ #include <linux/hwmon-sysfs.h> #include <linux/hwmon-vid.h> #include <linux/err.h> +#include <linux/mutex.h> /* Addresses to scan */ static unsigned short normal_i2c[] = { 0x2c, 0x2d, 0x2e, I2C_CLIENT_END }; @@ -260,10 +261,10 @@ struct pwm_data { struct adm1026_data { struct i2c_client client; struct class_device *class_dev; - struct 
semaphore lock; + struct mutex lock; enum chips type; - struct semaphore update_lock; + struct mutex update_lock; int valid; /* !=0 if following fields are valid */ unsigned long last_reading; /* In jiffies */ unsigned long last_config; /* In jiffies */ @@ -298,9 +299,8 @@ static int adm1026_attach_adapter(struct i2c_adapter *adapter); static int adm1026_detect(struct i2c_adapter *adapter, int address, int kind); static int adm1026_detach_client(struct i2c_client *client); -static int adm1026_read_value(struct i2c_client *client, u8 register); -static int adm1026_write_value(struct i2c_client *client, u8 register, - int value); +static int adm1026_read_value(struct i2c_client *client, u8 reg); +static int adm1026_write_value(struct i2c_client *client, u8 reg, int value); static void adm1026_print_gpio(struct i2c_client *client); static void adm1026_fixup_gpio(struct i2c_client *client); static struct adm1026_data *adm1026_update_device(struct device *dev); @@ -575,7 +575,7 @@ static struct adm1026_data *adm1026_update_device(struct device *dev) int i; long value, alarms, gpio; - down(&data->update_lock); + mutex_lock(&data->update_lock); if (!data->valid || time_after(jiffies, data->last_reading + ADM1026_DATA_INTERVAL)) { /* Things that change quickly */ @@ -710,7 +710,7 @@ static struct adm1026_data *adm1026_update_device(struct device *dev) dev_dbg(&client->dev, "Setting VID from GPIO11-15.\n"); data->vid = (data->gpio >> 11) & 0x1f; data->valid = 1; - up(&data->update_lock); + mutex_unlock(&data->update_lock); return data; } @@ -739,10 +739,10 @@ static ssize_t set_in_min(struct device *dev, struct device_attribute *attr, struct adm1026_data *data = i2c_get_clientdata(client); int val = simple_strtol(buf, NULL, 10); - down(&data->update_lock); + mutex_lock(&data->update_lock); data->in_min[nr] = INS_TO_REG(nr, val); adm1026_write_value(client, ADM1026_REG_IN_MIN[nr], data->in_min[nr]); - up(&data->update_lock); + mutex_unlock(&data->update_lock); return count; } static ssize_t show_in_max(struct device *dev, struct device_attribute *attr, @@ -762,10 +762,10 @@ static ssize_t set_in_max(struct device *dev, struct device_attribute *attr, struct adm1026_data *data = i2c_get_clientdata(client); int val = simple_strtol(buf, NULL, 10); - down(&data->update_lock); + mutex_lock(&data->update_lock); data->in_max[nr] = INS_TO_REG(nr, val); adm1026_write_value(client, ADM1026_REG_IN_MAX[nr], data->in_max[nr]); - up(&data->update_lock); + mutex_unlock(&data->update_lock); return count; } @@ -813,10 +813,10 @@ static ssize_t set_in16_min(struct device *dev, struct device_attribute *attr, c struct adm1026_data *data = i2c_get_clientdata(client); int val = simple_strtol(buf, NULL, 10); - down(&data->update_lock); + mutex_lock(&data->update_lock); data->in_min[16] = INS_TO_REG(16, val + NEG12_OFFSET); adm1026_write_value(client, ADM1026_REG_IN_MIN[16], data->in_min[16]); - up(&data->update_lock); + mutex_unlock(&data->update_lock); return count; } static ssize_t show_in16_max(struct device *dev, struct device_attribute *attr, char *buf) @@ -831,10 +831,10 @@ static ssize_t set_in16_max(struct device *dev, struct device_attribute *attr, c struct adm1026_data *data = i2c_get_clientdata(client); int val = simple_strtol(buf, NULL, 10); - down(&data->update_lock); + mutex_lock(&data->update_lock); data->in_max[16] = INS_TO_REG(16, val+NEG12_OFFSET); adm1026_write_value(client, ADM1026_REG_IN_MAX[16], data->in_max[16]); - up(&data->update_lock); + mutex_unlock(&data->update_lock); return count; } @@ -874,11 
+874,11 @@ static ssize_t set_fan_min(struct device *dev, struct device_attribute *attr, struct adm1026_data *data = i2c_get_clientdata(client); int val = simple_strtol(buf, NULL, 10); - down(&data->update_lock); + mutex_lock(&data->update_lock); data->fan_min[nr] = FAN_TO_REG(val, data->fan_div[nr]); adm1026_write_value(client, ADM1026_REG_FAN_MIN(nr), data->fan_min[nr]); - up(&data->update_lock); + mutex_unlock(&data->update_lock); return count; } @@ -939,7 +939,7 @@ static ssize_t set_fan_div(struct device *dev, struct device_attribute *attr, if (new_div == 0) { return -EINVAL; } - down(&data->update_lock); + mutex_lock(&data->update_lock); orig_div = data->fan_div[nr]; data->fan_div[nr] = DIV_FROM_REG(new_div); @@ -958,7 +958,7 @@ static ssize_t set_fan_div(struct device *dev, struct device_attribute *attr, if (data->fan_div[nr] != orig_div) { fixup_fan_min(dev,nr,orig_div); } - up(&data->update_lock); + mutex_unlock(&data->update_lock); return count; } @@ -1001,11 +1001,11 @@ static ssize_t set_temp_min(struct device *dev, struct device_attribute *attr, struct adm1026_data *data = i2c_get_clientdata(client); int val = simple_strtol(buf, NULL, 10); - down(&data->update_lock); + mutex_lock(&data->update_lock); data->temp_min[nr] = TEMP_TO_REG(val); adm1026_write_value(client, ADM1026_REG_TEMP_MIN[nr], data->temp_min[nr]); - up(&data->update_lock); + mutex_unlock(&data->update_lock); return count; } static ssize_t show_temp_max(struct device *dev, struct device_attribute *attr, @@ -1025,11 +1025,11 @@ static ssize_t set_temp_max(struct device *dev, struct device_attribute *attr, struct adm1026_data *data = i2c_get_clientdata(client); int val = simple_strtol(buf, NULL, 10); - down(&data->update_lock); + mutex_lock(&data->update_lock); data->temp_max[nr] = TEMP_TO_REG(val); adm1026_write_value(client, ADM1026_REG_TEMP_MAX[nr], data->temp_max[nr]); - up(&data->update_lock); + mutex_unlock(&data->update_lock); return count; } @@ -1064,11 +1064,11 @@ static ssize_t set_temp_offset(struct device *dev, struct adm1026_data *data = i2c_get_clientdata(client); int val = simple_strtol(buf, NULL, 10); - down(&data->update_lock); + mutex_lock(&data->update_lock); data->temp_offset[nr] = TEMP_TO_REG(val); adm1026_write_value(client, ADM1026_REG_TEMP_OFFSET[nr], data->temp_offset[nr]); - up(&data->update_lock); + mutex_unlock(&data->update_lock); return count; } @@ -1115,11 +1115,11 @@ static ssize_t set_temp_auto_point1_temp(struct device *dev, struct adm1026_data *data = i2c_get_clientdata(client); int val = simple_strtol(buf, NULL, 10); - down(&data->update_lock); + mutex_lock(&data->update_lock); data->temp_tmin[nr] = TEMP_TO_REG(val); adm1026_write_value(client, ADM1026_REG_TEMP_TMIN[nr], data->temp_tmin[nr]); - up(&data->update_lock); + mutex_unlock(&data->update_lock); return count; } @@ -1150,11 +1150,11 @@ static ssize_t set_temp_crit_enable(struct device *dev, int val = simple_strtol(buf, NULL, 10); if ((val == 1) || (val==0)) { - down(&data->update_lock); + mutex_lock(&data->update_lock); data->config1 = (data->config1 & ~CFG1_THERM_HOT) | (val << 4); adm1026_write_value(client, ADM1026_REG_CONFIG1, data->config1); - up(&data->update_lock); + mutex_unlock(&data->update_lock); } return count; } @@ -1184,11 +1184,11 @@ static ssize_t set_temp_crit(struct device *dev, struct device_attribute *attr, struct adm1026_data *data = i2c_get_clientdata(client); int val = simple_strtol(buf, NULL, 10); - down(&data->update_lock); + mutex_lock(&data->update_lock); data->temp_crit[nr] = TEMP_TO_REG(val); 
adm1026_write_value(client, ADM1026_REG_TEMP_THERM[nr], data->temp_crit[nr]); - up(&data->update_lock); + mutex_unlock(&data->update_lock); return count; } @@ -1212,10 +1212,10 @@ static ssize_t set_analog_out_reg(struct device *dev, struct device_attribute *a struct adm1026_data *data = i2c_get_clientdata(client); int val = simple_strtol(buf, NULL, 10); - down(&data->update_lock); + mutex_lock(&data->update_lock); data->analog_out = DAC_TO_REG(val); adm1026_write_value(client, ADM1026_REG_DAC, data->analog_out); - up(&data->update_lock); + mutex_unlock(&data->update_lock); return count; } @@ -1267,7 +1267,7 @@ static ssize_t set_alarm_mask(struct device *dev, struct device_attribute *attr, int val = simple_strtol(buf, NULL, 10); unsigned long mask; - down(&data->update_lock); + mutex_lock(&data->update_lock); data->alarm_mask = val & 0x7fffffff; mask = data->alarm_mask | (data->gpio_mask & 0x10000 ? 0x80000000 : 0); @@ -1282,7 +1282,7 @@ static ssize_t set_alarm_mask(struct device *dev, struct device_attribute *attr, mask >>= 8; adm1026_write_value(client, ADM1026_REG_MASK4, mask & 0xff); - up(&data->update_lock); + mutex_unlock(&data->update_lock); return count; } @@ -1303,7 +1303,7 @@ static ssize_t set_gpio(struct device *dev, struct device_attribute *attr, const int val = simple_strtol(buf, NULL, 10); long gpio; - down(&data->update_lock); + mutex_lock(&data->update_lock); data->gpio = val & 0x1ffff; gpio = data->gpio; adm1026_write_value(client, ADM1026_REG_GPIO_STATUS_0_7,gpio & 0xff); @@ -1311,7 +1311,7 @@ static ssize_t set_gpio(struct device *dev, struct device_attribute *attr, const adm1026_write_value(client, ADM1026_REG_GPIO_STATUS_8_15,gpio & 0xff); gpio = ((gpio >> 1) & 0x80) | (data->alarms >> 24 & 0x7f); adm1026_write_value(client, ADM1026_REG_STATUS4,gpio & 0xff); - up(&data->update_lock); + mutex_unlock(&data->update_lock); return count; } @@ -1331,7 +1331,7 @@ static ssize_t set_gpio_mask(struct device *dev, struct device_attribute *attr, int val = simple_strtol(buf, NULL, 10); long mask; - down(&data->update_lock); + mutex_lock(&data->update_lock); data->gpio_mask = val & 0x1ffff; mask = data->gpio_mask; adm1026_write_value(client, ADM1026_REG_GPIO_MASK_0_7,mask & 0xff); @@ -1339,7 +1339,7 @@ static ssize_t set_gpio_mask(struct device *dev, struct device_attribute *attr, adm1026_write_value(client, ADM1026_REG_GPIO_MASK_8_15,mask & 0xff); mask = ((mask >> 1) & 0x80) | (data->alarm_mask >> 24 & 0x7f); adm1026_write_value(client, ADM1026_REG_MASK1,mask & 0xff); - up(&data->update_lock); + mutex_unlock(&data->update_lock); return count; } @@ -1359,10 +1359,10 @@ static ssize_t set_pwm_reg(struct device *dev, struct device_attribute *attr, co if (data->pwm1.enable == 1) { int val = simple_strtol(buf, NULL, 10); - down(&data->update_lock); + mutex_lock(&data->update_lock); data->pwm1.pwm = PWM_TO_REG(val); adm1026_write_value(client, ADM1026_REG_PWM, data->pwm1.pwm); - up(&data->update_lock); + mutex_unlock(&data->update_lock); } return count; } @@ -1378,14 +1378,14 @@ static ssize_t set_auto_pwm_min(struct device *dev, struct device_attribute *att struct adm1026_data *data = i2c_get_clientdata(client); int val = simple_strtol(buf, NULL, 10); - down(&data->update_lock); + mutex_lock(&data->update_lock); data->pwm1.auto_pwm_min = SENSORS_LIMIT(val,0,255); if (data->pwm1.enable == 2) { /* apply immediately */ data->pwm1.pwm = PWM_TO_REG((data->pwm1.pwm & 0x0f) | PWM_MIN_TO_REG(data->pwm1.auto_pwm_min)); adm1026_write_value(client, ADM1026_REG_PWM, data->pwm1.pwm); } - 
up(&data->update_lock); + mutex_unlock(&data->update_lock); return count; } static ssize_t show_auto_pwm_max(struct device *dev, struct device_attribute *attr, char *buf) @@ -1406,7 +1406,7 @@ static ssize_t set_pwm_enable(struct device *dev, struct device_attribute *attr, int old_enable; if ((val >= 0) && (val < 3)) { - down(&data->update_lock); + mutex_lock(&data->update_lock); old_enable = data->pwm1.enable; data->pwm1.enable = val; data->config1 = (data->config1 & ~CFG1_PWM_AFC) @@ -1424,7 +1424,7 @@ static ssize_t set_pwm_enable(struct device *dev, struct device_attribute *attr, adm1026_write_value(client, ADM1026_REG_PWM, data->pwm1.pwm); } - up(&data->update_lock); + mutex_unlock(&data->update_lock); } return count; } @@ -1541,7 +1541,7 @@ static int adm1026_detect(struct i2c_adapter *adapter, int address, /* Fill in the remaining client fields */ data->type = kind; data->valid = 0; - init_MUTEX(&data->update_lock); + mutex_init(&data->update_lock); /* Tell the I2C layer a new client has arrived */ if ((err = i2c_attach_client(new_client))) diff --git a/drivers/hwmon/adm1031.c b/drivers/hwmon/adm1031.c index d0639796608..3bf2da621ae 100644 --- a/drivers/hwmon/adm1031.c +++ b/drivers/hwmon/adm1031.c @@ -28,6 +28,7 @@ #include <linux/i2c.h> #include <linux/hwmon.h> #include <linux/err.h> +#include <linux/mutex.h> /* Following macros takes channel parameter starting from 0 to 2 */ #define ADM1031_REG_FAN_SPEED(nr) (0x08 + (nr)) @@ -70,7 +71,7 @@ typedef u8 auto_chan_table_t[8][2]; struct adm1031_data { struct i2c_client client; struct class_device *class_dev; - struct semaphore update_lock; + struct mutex update_lock; int chip_type; char valid; /* !=0 if following fields are valid */ unsigned long last_updated; /* In jiffies */ @@ -262,10 +263,10 @@ set_fan_auto_channel(struct device *dev, const char *buf, size_t count, int nr) old_fan_mode = data->conf1; - down(&data->update_lock); + mutex_lock(&data->update_lock); if ((ret = get_fan_auto_nearest(data, nr, val, data->conf1, ®))) { - up(&data->update_lock); + mutex_unlock(&data->update_lock); return ret; } if (((data->conf1 = FAN_CHAN_TO_REG(reg, data->conf1)) & ADM1031_CONF1_AUTO_MODE) ^ @@ -288,7 +289,7 @@ set_fan_auto_channel(struct device *dev, const char *buf, size_t count, int nr) } data->conf1 = FAN_CHAN_TO_REG(reg, data->conf1); adm1031_write_value(client, ADM1031_REG_CONF1, data->conf1); - up(&data->update_lock); + mutex_unlock(&data->update_lock); return count; } @@ -329,11 +330,11 @@ set_auto_temp_min(struct device *dev, const char *buf, size_t count, int nr) struct adm1031_data *data = i2c_get_clientdata(client); int val = simple_strtol(buf, NULL, 10); - down(&data->update_lock); + mutex_lock(&data->update_lock); data->auto_temp[nr] = AUTO_TEMP_MIN_TO_REG(val, data->auto_temp[nr]); adm1031_write_value(client, ADM1031_REG_AUTO_TEMP(nr), data->auto_temp[nr]); - up(&data->update_lock); + mutex_unlock(&data->update_lock); return count; } static ssize_t show_auto_temp_max(struct device *dev, char *buf, int nr) @@ -349,11 +350,11 @@ set_auto_temp_max(struct device *dev, const char *buf, size_t count, int nr) struct adm1031_data *data = i2c_get_clientdata(client); int val = simple_strtol(buf, NULL, 10); - down(&data->update_lock); + mutex_lock(&data->update_lock); data->temp_max[nr] = AUTO_TEMP_MAX_TO_REG(val, data->auto_temp[nr], data->pwm[nr]); adm1031_write_value(client, ADM1031_REG_AUTO_TEMP(nr), data->temp_max[nr]); - up(&data->update_lock); + mutex_unlock(&data->update_lock); return count; } @@ -405,11 +406,11 @@ 
set_pwm(struct device *dev, const char *buf, size_t count, int nr) int val = simple_strtol(buf, NULL, 10); int reg; - down(&data->update_lock); + mutex_lock(&data->update_lock); if ((data->conf1 & ADM1031_CONF1_AUTO_MODE) && (((val>>4) & 0xf) != 5)) { /* In automatic mode, the only PWM accepted is 33% */ - up(&data->update_lock); + mutex_unlock(&data->update_lock); return -EINVAL; } data->pwm[nr] = PWM_TO_REG(val); @@ -417,7 +418,7 @@ set_pwm(struct device *dev, const char *buf, size_t count, int nr) adm1031_write_value(client, ADM1031_REG_PWM, nr ? ((data->pwm[nr] << 4) & 0xf0) | (reg & 0xf) : (data->pwm[nr] & 0xf) | (reg & 0xf0)); - up(&data->update_lock); + mutex_unlock(&data->update_lock); return count; } @@ -511,7 +512,7 @@ set_fan_min(struct device *dev, const char *buf, size_t count, int nr) struct adm1031_data *data = i2c_get_clientdata(client); int val = simple_strtol(buf, NULL, 10); - down(&data->update_lock); + mutex_lock(&data->update_lock); if (val) { data->fan_min[nr] = FAN_TO_REG(val, FAN_DIV_FROM_REG(data->fan_div[nr])); @@ -519,7 +520,7 @@ set_fan_min(struct device *dev, const char *buf, size_t count, int nr) data->fan_min[nr] = 0xff; } adm1031_write_value(client, ADM1031_REG_FAN_MIN(nr), data->fan_min[nr]); - up(&data->update_lock); + mutex_unlock(&data->update_lock); return count; } static ssize_t @@ -540,7 +541,7 @@ set_fan_div(struct device *dev, const char *buf, size_t count, int nr) if (tmp == 0xff) return -EINVAL; - down(&data->update_lock); + mutex_lock(&data->update_lock); old_div = FAN_DIV_FROM_REG(data->fan_div[nr]); data->fan_div[nr] = (tmp & 0xC0) | (0x3f & data->fan_div[nr]); new_min = data->fan_min[nr] * old_div / @@ -553,7 +554,7 @@ set_fan_div(struct device *dev, const char *buf, size_t count, int nr) data->fan_div[nr]); adm1031_write_value(client, ADM1031_REG_FAN_MIN(nr), data->fan_min[nr]); - up(&data->update_lock); + mutex_unlock(&data->update_lock); return count; } @@ -627,11 +628,11 @@ set_temp_min(struct device *dev, const char *buf, size_t count, int nr) val = simple_strtol(buf, NULL, 10); val = SENSORS_LIMIT(val, -55000, nr == 0 ? 127750 : 127875); - down(&data->update_lock); + mutex_lock(&data->update_lock); data->temp_min[nr] = TEMP_TO_REG(val); adm1031_write_value(client, ADM1031_REG_TEMP_MIN(nr), data->temp_min[nr]); - up(&data->update_lock); + mutex_unlock(&data->update_lock); return count; } static ssize_t @@ -643,11 +644,11 @@ set_temp_max(struct device *dev, const char *buf, size_t count, int nr) val = simple_strtol(buf, NULL, 10); val = SENSORS_LIMIT(val, -55000, nr == 0 ? 127750 : 127875); - down(&data->update_lock); + mutex_lock(&data->update_lock); data->temp_max[nr] = TEMP_TO_REG(val); adm1031_write_value(client, ADM1031_REG_TEMP_MAX(nr), data->temp_max[nr]); - up(&data->update_lock); + mutex_unlock(&data->update_lock); return count; } static ssize_t @@ -659,11 +660,11 @@ set_temp_crit(struct device *dev, const char *buf, size_t count, int nr) val = simple_strtol(buf, NULL, 10); val = SENSORS_LIMIT(val, -55000, nr == 0 ? 
127750 : 127875); - down(&data->update_lock); + mutex_lock(&data->update_lock); data->temp_crit[nr] = TEMP_TO_REG(val); adm1031_write_value(client, ADM1031_REG_TEMP_CRIT(nr), data->temp_crit[nr]); - up(&data->update_lock); + mutex_unlock(&data->update_lock); return count; } @@ -778,7 +779,7 @@ static int adm1031_detect(struct i2c_adapter *adapter, int address, int kind) strlcpy(new_client->name, name, I2C_NAME_SIZE); data->valid = 0; - init_MUTEX(&data->update_lock); + mutex_init(&data->update_lock); /* Tell the I2C layer a new client has arrived */ if ((err = i2c_attach_client(new_client))) @@ -891,7 +892,7 @@ static struct adm1031_data *adm1031_update_device(struct device *dev) struct adm1031_data *data = i2c_get_clientdata(client); int chan; - down(&data->update_lock); + mutex_lock(&data->update_lock); if (time_after(jiffies, data->last_updated + HZ + HZ / 2) || !data->valid) { @@ -965,7 +966,7 @@ static struct adm1031_data *adm1031_update_device(struct device *dev) data->valid = 1; } - up(&data->update_lock); + mutex_unlock(&data->update_lock); return data; } diff --git a/drivers/hwmon/adm9240.c b/drivers/hwmon/adm9240.c index 5ddc22fea4a..43f6991b588 100644 --- a/drivers/hwmon/adm9240.c +++ b/drivers/hwmon/adm9240.c @@ -49,6 +49,7 @@ #include <linux/hwmon.h> #include <linux/hwmon-vid.h> #include <linux/err.h> +#include <linux/mutex.h> /* Addresses to scan */ static unsigned short normal_i2c[] = { 0x2c, 0x2d, 0x2e, 0x2f, @@ -150,7 +151,7 @@ struct adm9240_data { enum chips type; struct i2c_client client; struct class_device *class_dev; - struct semaphore update_lock; + struct mutex update_lock; char valid; unsigned long last_updated_measure; unsigned long last_updated_config; @@ -195,11 +196,11 @@ static ssize_t set_max(struct device *dev, struct device_attribute *devattr, struct adm9240_data *data = i2c_get_clientdata(client); long val = simple_strtol(buf, NULL, 10); - down(&data->update_lock); + mutex_lock(&data->update_lock); data->temp_max[attr->index] = TEMP_TO_REG(val); i2c_smbus_write_byte_data(client, ADM9240_REG_TEMP_MAX(attr->index), data->temp_max[attr->index]); - up(&data->update_lock); + mutex_unlock(&data->update_lock); return count; } @@ -246,11 +247,11 @@ static ssize_t set_in_min(struct device *dev, struct adm9240_data *data = i2c_get_clientdata(client); unsigned long val = simple_strtoul(buf, NULL, 10); - down(&data->update_lock); + mutex_lock(&data->update_lock); data->in_min[attr->index] = IN_TO_REG(val, attr->index); i2c_smbus_write_byte_data(client, ADM9240_REG_IN_MIN(attr->index), data->in_min[attr->index]); - up(&data->update_lock); + mutex_unlock(&data->update_lock); return count; } @@ -263,11 +264,11 @@ static ssize_t set_in_max(struct device *dev, struct adm9240_data *data = i2c_get_clientdata(client); unsigned long val = simple_strtoul(buf, NULL, 10); - down(&data->update_lock); + mutex_lock(&data->update_lock); data->in_max[attr->index] = IN_TO_REG(val, attr->index); i2c_smbus_write_byte_data(client, ADM9240_REG_IN_MAX(attr->index), data->in_max[attr->index]); - up(&data->update_lock); + mutex_unlock(&data->update_lock); return count; } @@ -350,7 +351,7 @@ static ssize_t set_fan_min(struct device *dev, int nr = attr->index; u8 new_div; - down(&data->update_lock); + mutex_lock(&data->update_lock); if (!val) { data->fan_min[nr] = 255; @@ -390,7 +391,7 @@ static ssize_t set_fan_min(struct device *dev, i2c_smbus_write_byte_data(client, ADM9240_REG_FAN_MIN(nr), data->fan_min[nr]); - up(&data->update_lock); + mutex_unlock(&data->update_lock); return count; } @@ 
-439,10 +440,10 @@ static ssize_t set_aout(struct device *dev, struct adm9240_data *data = i2c_get_clientdata(client); unsigned long val = simple_strtol(buf, NULL, 10); - down(&data->update_lock); + mutex_lock(&data->update_lock); data->aout = AOUT_TO_REG(val); i2c_smbus_write_byte_data(client, ADM9240_REG_ANALOG_OUT, data->aout); - up(&data->update_lock); + mutex_unlock(&data->update_lock); return count; } static DEVICE_ATTR(aout_output, S_IRUGO | S_IWUSR, show_aout, set_aout); @@ -539,7 +540,7 @@ static int adm9240_detect(struct i2c_adapter *adapter, int address, int kind) /* fill in the remaining client fields and attach */ strlcpy(new_client->name, name, I2C_NAME_SIZE); data->type = kind; - init_MUTEX(&data->update_lock); + mutex_init(&data->update_lock); if ((err = i2c_attach_client(new_client))) goto exit_free; @@ -691,7 +692,7 @@ static struct adm9240_data *adm9240_update_device(struct device *dev) struct adm9240_data *data = i2c_get_clientdata(client); int i; - down(&data->update_lock); + mutex_lock(&data->update_lock); /* minimum measurement cycle: 1.75 seconds */ if (time_after(jiffies, data->last_updated_measure + (HZ * 7 / 4)) @@ -771,7 +772,7 @@ static struct adm9240_data *adm9240_update_device(struct device *dev) data->last_updated_config = jiffies; data->valid = 1; } - up(&data->update_lock); + mutex_unlock(&data->update_lock); return data; } diff --git a/drivers/hwmon/asb100.c b/drivers/hwmon/asb100.c index ae9de63cf2e..65b2709f750 100644 --- a/drivers/hwmon/asb100.c +++ b/drivers/hwmon/asb100.c @@ -44,6 +44,7 @@ #include <linux/err.h> #include <linux/init.h> #include <linux/jiffies.h> +#include <linux/mutex.h> #include "lm75.h" /* @@ -182,10 +183,10 @@ static u8 DIV_TO_REG(long val) struct asb100_data { struct i2c_client client; struct class_device *class_dev; - struct semaphore lock; + struct mutex lock; enum chips type; - struct semaphore update_lock; + struct mutex update_lock; unsigned long last_updated; /* In jiffies */ /* array of 2 pointers to subclients */ @@ -245,11 +246,11 @@ static ssize_t set_in_##reg(struct device *dev, const char *buf, \ struct asb100_data *data = i2c_get_clientdata(client); \ unsigned long val = simple_strtoul(buf, NULL, 10); \ \ - down(&data->update_lock); \ + mutex_lock(&data->update_lock); \ data->in_##reg[nr] = IN_TO_REG(val); \ asb100_write_value(client, ASB100_REG_IN_##REG(nr), \ data->in_##reg[nr]); \ - up(&data->update_lock); \ + mutex_unlock(&data->update_lock); \ return count; \ } @@ -331,10 +332,10 @@ static ssize_t set_fan_min(struct device *dev, const char *buf, struct asb100_data *data = i2c_get_clientdata(client); u32 val = simple_strtoul(buf, NULL, 10); - down(&data->update_lock); + mutex_lock(&data->update_lock); data->fan_min[nr] = FAN_TO_REG(val, DIV_FROM_REG(data->fan_div[nr])); asb100_write_value(client, ASB100_REG_FAN_MIN(nr), data->fan_min[nr]); - up(&data->update_lock); + mutex_unlock(&data->update_lock); return count; } @@ -351,7 +352,7 @@ static ssize_t set_fan_div(struct device *dev, const char *buf, unsigned long val = simple_strtoul(buf, NULL, 10); int reg; - down(&data->update_lock); + mutex_lock(&data->update_lock); min = FAN_FROM_REG(data->fan_min[nr], DIV_FROM_REG(data->fan_div[nr])); @@ -381,7 +382,7 @@ static ssize_t set_fan_div(struct device *dev, const char *buf, FAN_TO_REG(min, DIV_FROM_REG(data->fan_div[nr])); asb100_write_value(client, ASB100_REG_FAN_MIN(nr), data->fan_min[nr]); - up(&data->update_lock); + mutex_unlock(&data->update_lock); return count; } @@ -461,7 +462,7 @@ static ssize_t 
set_##reg(struct device *dev, const char *buf, \ struct asb100_data *data = i2c_get_clientdata(client); \ unsigned long val = simple_strtoul(buf, NULL, 10); \ \ - down(&data->update_lock); \ + mutex_lock(&data->update_lock); \ switch (nr) { \ case 1: case 2: \ data->reg[nr] = LM75_TEMP_TO_REG(val); \ @@ -472,7 +473,7 @@ static ssize_t set_##reg(struct device *dev, const char *buf, \ } \ asb100_write_value(client, ASB100_REG_TEMP_##REG(nr+1), \ data->reg[nr]); \ - up(&data->update_lock); \ + mutex_unlock(&data->update_lock); \ return count; \ } @@ -574,11 +575,11 @@ static ssize_t set_pwm1(struct device *dev, struct device_attribute *attr, const struct asb100_data *data = i2c_get_clientdata(client); unsigned long val = simple_strtoul(buf, NULL, 10); - down(&data->update_lock); + mutex_lock(&data->update_lock); data->pwm &= 0x80; /* keep the enable bit */ data->pwm |= (0x0f & ASB100_PWM_TO_REG(val)); asb100_write_value(client, ASB100_REG_PWM1, data->pwm); - up(&data->update_lock); + mutex_unlock(&data->update_lock); return count; } @@ -595,11 +596,11 @@ static ssize_t set_pwm_enable1(struct device *dev, struct device_attribute *attr struct asb100_data *data = i2c_get_clientdata(client); unsigned long val = simple_strtoul(buf, NULL, 10); - down(&data->update_lock); + mutex_lock(&data->update_lock); data->pwm &= 0x0f; /* keep the duty cycle bits */ data->pwm |= (val ? 0x80 : 0x00); asb100_write_value(client, ASB100_REG_PWM1, data->pwm); - up(&data->update_lock); + mutex_unlock(&data->update_lock); return count; } @@ -729,7 +730,7 @@ static int asb100_detect(struct i2c_adapter *adapter, int address, int kind) } new_client = &data->client; - init_MUTEX(&data->lock); + mutex_init(&data->lock); i2c_set_clientdata(new_client, data); new_client->addr = address; new_client->adapter = adapter; @@ -789,7 +790,7 @@ static int asb100_detect(struct i2c_adapter *adapter, int address, int kind) data->type = kind; data->valid = 0; - init_MUTEX(&data->update_lock); + mutex_init(&data->update_lock); /* Tell the I2C layer a new client has arrived */ if ((err = i2c_attach_client(new_client))) @@ -885,7 +886,7 @@ static int asb100_read_value(struct i2c_client *client, u16 reg) struct i2c_client *cl; int res, bank; - down(&data->lock); + mutex_lock(&data->lock); bank = (reg >> 8) & 0x0f; if (bank > 2) @@ -919,7 +920,7 @@ static int asb100_read_value(struct i2c_client *client, u16 reg) if (bank > 2) i2c_smbus_write_byte_data(client, ASB100_REG_BANK, 0); - up(&data->lock); + mutex_unlock(&data->lock); return res; } @@ -930,7 +931,7 @@ static void asb100_write_value(struct i2c_client *client, u16 reg, u16 value) struct i2c_client *cl; int bank; - down(&data->lock); + mutex_lock(&data->lock); bank = (reg >> 8) & 0x0f; if (bank > 2) @@ -960,7 +961,7 @@ static void asb100_write_value(struct i2c_client *client, u16 reg, u16 value) if (bank > 2) i2c_smbus_write_byte_data(client, ASB100_REG_BANK, 0); - up(&data->lock); + mutex_unlock(&data->lock); } static void asb100_init_client(struct i2c_client *client) @@ -984,7 +985,7 @@ static struct asb100_data *asb100_update_device(struct device *dev) struct asb100_data *data = i2c_get_clientdata(client); int i; - down(&data->update_lock); + mutex_lock(&data->update_lock); if (time_after(jiffies, data->last_updated + HZ + HZ / 2) || !data->valid) { @@ -1042,7 +1043,7 @@ static struct asb100_data *asb100_update_device(struct device *dev) dev_dbg(&client->dev, "... 
device update complete\n"); } - up(&data->update_lock); + mutex_unlock(&data->update_lock); return data; } diff --git a/drivers/hwmon/atxp1.c b/drivers/hwmon/atxp1.c index b0c490073c8..728a1e8b919 100644 --- a/drivers/hwmon/atxp1.c +++ b/drivers/hwmon/atxp1.c @@ -26,6 +26,7 @@ #include <linux/hwmon.h> #include <linux/hwmon-vid.h> #include <linux/err.h> +#include <linux/mutex.h> MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("System voltages control via Attansic ATXP1"); @@ -60,7 +61,7 @@ static struct i2c_driver atxp1_driver = { struct atxp1_data { struct i2c_client client; struct class_device *class_dev; - struct semaphore update_lock; + struct mutex update_lock; unsigned long last_updated; u8 valid; struct { @@ -80,7 +81,7 @@ static struct atxp1_data * atxp1_update_device(struct device *dev) client = to_i2c_client(dev); data = i2c_get_clientdata(client); - down(&data->update_lock); + mutex_lock(&data->update_lock); if (time_after(jiffies, data->last_updated + HZ) || !data->valid) { @@ -93,7 +94,7 @@ static struct atxp1_data * atxp1_update_device(struct device *dev) data->valid = 1; } - up(&data->update_lock); + mutex_unlock(&data->update_lock); return(data); } @@ -309,7 +310,7 @@ static int atxp1_detect(struct i2c_adapter *adapter, int address, int kind) data->valid = 0; - init_MUTEX(&data->update_lock); + mutex_init(&data->update_lock); err = i2c_attach_client(new_client); diff --git a/drivers/hwmon/ds1621.c b/drivers/hwmon/ds1621.c index 203f9c7abb2..478eb4bb857 100644 --- a/drivers/hwmon/ds1621.c +++ b/drivers/hwmon/ds1621.c @@ -28,6 +28,7 @@ #include <linux/i2c.h> #include <linux/hwmon.h> #include <linux/err.h> +#include <linux/mutex.h> #include "lm75.h" /* Addresses to scan */ @@ -72,7 +73,7 @@ MODULE_PARM_DESC(polarity, "Output's polarity: 0 = active high, 1 = active low") struct ds1621_data { struct i2c_client client; struct class_device *class_dev; - struct semaphore update_lock; + struct mutex update_lock; char valid; /* !=0 if following fields are valid */ unsigned long last_updated; /* In jiffies */ @@ -156,10 +157,10 @@ static ssize_t set_temp_##suffix(struct device *dev, struct device_attribute *at struct ds1621_data *data = ds1621_update_client(dev); \ u16 val = LM75_TEMP_TO_REG(simple_strtoul(buf, NULL, 10)); \ \ - down(&data->update_lock); \ + mutex_lock(&data->update_lock); \ data->value = val; \ ds1621_write_value(client, reg, data->value); \ - up(&data->update_lock); \ + mutex_unlock(&data->update_lock); \ return count; \ } @@ -242,7 +243,7 @@ static int ds1621_detect(struct i2c_adapter *adapter, int address, /* Fill in remaining client fields and put it into the global list */ strlcpy(new_client->name, "ds1621", I2C_NAME_SIZE); data->valid = 0; - init_MUTEX(&data->update_lock); + mutex_init(&data->update_lock); /* Tell the I2C layer a new client has arrived */ if ((err = i2c_attach_client(new_client))) @@ -297,7 +298,7 @@ static struct ds1621_data *ds1621_update_client(struct device *dev) struct ds1621_data *data = i2c_get_clientdata(client); u8 new_conf; - down(&data->update_lock); + mutex_lock(&data->update_lock); if (time_after(jiffies, data->last_updated + HZ + HZ / 2) || !data->valid) { @@ -327,7 +328,7 @@ static struct ds1621_data *ds1621_update_client(struct device *dev) data->valid = 1; } - up(&data->update_lock); + mutex_unlock(&data->update_lock); return data; } diff --git a/drivers/hwmon/f71805f.c b/drivers/hwmon/f71805f.c index e029e0a94ec..885465df6e6 100644 --- a/drivers/hwmon/f71805f.c +++ b/drivers/hwmon/f71805f.c @@ -30,6 +30,7 @@ #include <linux/hwmon.h> 
#include <linux/hwmon-sysfs.h> #include <linux/err.h> +#include <linux/mutex.h> #include <asm/io.h> static struct platform_device *pdev; @@ -131,10 +132,10 @@ static struct resource f71805f_resource __initdata = { struct f71805f_data { unsigned short addr; const char *name; - struct semaphore lock; + struct mutex lock; struct class_device *class_dev; - struct semaphore update_lock; + struct mutex update_lock; char valid; /* !=0 if following fields are valid */ unsigned long last_updated; /* In jiffies */ unsigned long last_limits; /* In jiffies */ @@ -224,20 +225,20 @@ static u8 f71805f_read8(struct f71805f_data *data, u8 reg) { u8 val; - down(&data->lock); + mutex_lock(&data->lock); outb(reg, data->addr + ADDR_REG_OFFSET); val = inb(data->addr + DATA_REG_OFFSET); - up(&data->lock); + mutex_unlock(&data->lock); return val; } static void f71805f_write8(struct f71805f_data *data, u8 reg, u8 val) { - down(&data->lock); + mutex_lock(&data->lock); outb(reg, data->addr + ADDR_REG_OFFSET); outb(val, data->addr + DATA_REG_OFFSET); - up(&data->lock); + mutex_unlock(&data->lock); } /* It is important to read the MSB first, because doing so latches the @@ -246,24 +247,24 @@ static u16 f71805f_read16(struct f71805f_data *data, u8 reg) { u16 val; - down(&data->lock); + mutex_lock(&data->lock); outb(reg, data->addr + ADDR_REG_OFFSET); val = inb(data->addr + DATA_REG_OFFSET) << 8; outb(++reg, data->addr + ADDR_REG_OFFSET); val |= inb(data->addr + DATA_REG_OFFSET); - up(&data->lock); + mutex_unlock(&data->lock); return val; } static void f71805f_write16(struct f71805f_data *data, u8 reg, u16 val) { - down(&data->lock); + mutex_lock(&data->lock); outb(reg, data->addr + ADDR_REG_OFFSET); outb(val >> 8, data->addr + DATA_REG_OFFSET); outb(++reg, data->addr + ADDR_REG_OFFSET); outb(val & 0xff, data->addr + DATA_REG_OFFSET); - up(&data->lock); + mutex_unlock(&data->lock); } static struct f71805f_data *f71805f_update_device(struct device *dev) @@ -271,7 +272,7 @@ static struct f71805f_data *f71805f_update_device(struct device *dev) struct f71805f_data *data = dev_get_drvdata(dev); int nr; - down(&data->update_lock); + mutex_lock(&data->update_lock); /* Limit registers cache is refreshed after 60 seconds */ if (time_after(jiffies, data->last_updated + 60 * HZ) @@ -323,7 +324,7 @@ static struct f71805f_data *f71805f_update_device(struct device *dev) data->valid = 1; } - up(&data->update_lock); + mutex_unlock(&data->update_lock); return data; } @@ -362,10 +363,10 @@ static ssize_t set_in0_max(struct device *dev, struct device_attribute struct f71805f_data *data = dev_get_drvdata(dev); long val = simple_strtol(buf, NULL, 10); - down(&data->update_lock); + mutex_lock(&data->update_lock); data->in_high[0] = in0_to_reg(val); f71805f_write8(data, F71805F_REG_IN_HIGH(0), data->in_high[0]); - up(&data->update_lock); + mutex_unlock(&data->update_lock); return count; } @@ -376,18 +377,14 @@ static ssize_t set_in0_min(struct device *dev, struct device_attribute struct f71805f_data *data = dev_get_drvdata(dev); long val = simple_strtol(buf, NULL, 10); - down(&data->update_lock); + mutex_lock(&data->update_lock); data->in_low[0] = in0_to_reg(val); f71805f_write8(data, F71805F_REG_IN_LOW(0), data->in_low[0]); - up(&data->update_lock); + mutex_unlock(&data->update_lock); return count; } -static DEVICE_ATTR(in0_input, S_IRUGO, show_in0, NULL); -static DEVICE_ATTR(in0_max, S_IRUGO| S_IWUSR, show_in0_max, set_in0_max); -static DEVICE_ATTR(in0_min, S_IRUGO| S_IWUSR, show_in0_min, set_in0_min); - static ssize_t show_in(struct device 
*dev, struct device_attribute *devattr, char *buf) { @@ -426,10 +423,10 @@ static ssize_t set_in_max(struct device *dev, struct device_attribute int nr = attr->index; long val = simple_strtol(buf, NULL, 10); - down(&data->update_lock); + mutex_lock(&data->update_lock); data->in_high[nr] = in_to_reg(val); f71805f_write8(data, F71805F_REG_IN_HIGH(nr), data->in_high[nr]); - up(&data->update_lock); + mutex_unlock(&data->update_lock); return count; } @@ -442,31 +439,14 @@ static ssize_t set_in_min(struct device *dev, struct device_attribute int nr = attr->index; long val = simple_strtol(buf, NULL, 10); - down(&data->update_lock); + mutex_lock(&data->update_lock); data->in_low[nr] = in_to_reg(val); f71805f_write8(data, F71805F_REG_IN_LOW(nr), data->in_low[nr]); - up(&data->update_lock); + mutex_unlock(&data->update_lock); return count; } -#define sysfs_in(offset) \ -static SENSOR_DEVICE_ATTR(in##offset##_input, S_IRUGO, \ - show_in, NULL, offset); \ -static SENSOR_DEVICE_ATTR(in##offset##_max, S_IRUGO | S_IWUSR, \ - show_in_max, set_in_max, offset); \ -static SENSOR_DEVICE_ATTR(in##offset##_min, S_IRUGO | S_IWUSR, \ - show_in_min, set_in_min, offset) - -sysfs_in(1); -sysfs_in(2); -sysfs_in(3); -sysfs_in(4); -sysfs_in(5); -sysfs_in(6); -sysfs_in(7); -sysfs_in(8); - static ssize_t show_fan(struct device *dev, struct device_attribute *devattr, char *buf) { @@ -495,24 +475,14 @@ static ssize_t set_fan_min(struct device *dev, struct device_attribute int nr = attr->index; long val = simple_strtol(buf, NULL, 10); - down(&data->update_lock); + mutex_lock(&data->update_lock); data->fan_low[nr] = fan_to_reg(val); f71805f_write16(data, F71805F_REG_FAN_LOW(nr), data->fan_low[nr]); - up(&data->update_lock); + mutex_unlock(&data->update_lock); return count; } -#define sysfs_fan(offset) \ -static SENSOR_DEVICE_ATTR(fan##offset##_input, S_IRUGO, \ - show_fan, NULL, offset - 1); \ -static SENSOR_DEVICE_ATTR(fan##offset##_min, S_IRUGO | S_IWUSR, \ - show_fan_min, set_fan_min, offset - 1) - -sysfs_fan(1); -sysfs_fan(2); -sysfs_fan(3); - static ssize_t show_temp(struct device *dev, struct device_attribute *devattr, char *buf) { @@ -562,10 +532,10 @@ static ssize_t set_temp_max(struct device *dev, struct device_attribute int nr = attr->index; long val = simple_strtol(buf, NULL, 10); - down(&data->update_lock); + mutex_lock(&data->update_lock); data->temp_high[nr] = temp_to_reg(val); f71805f_write8(data, F71805F_REG_TEMP_HIGH(nr), data->temp_high[nr]); - up(&data->update_lock); + mutex_unlock(&data->update_lock); return count; } @@ -578,28 +548,14 @@ static ssize_t set_temp_hyst(struct device *dev, struct device_attribute int nr = attr->index; long val = simple_strtol(buf, NULL, 10); - down(&data->update_lock); + mutex_lock(&data->update_lock); data->temp_hyst[nr] = temp_to_reg(val); f71805f_write8(data, F71805F_REG_TEMP_HYST(nr), data->temp_hyst[nr]); - up(&data->update_lock); + mutex_unlock(&data->update_lock); return count; } -#define sysfs_temp(offset) \ -static SENSOR_DEVICE_ATTR(temp##offset##_input, S_IRUGO, \ - show_temp, NULL, offset - 1); \ -static SENSOR_DEVICE_ATTR(temp##offset##_max, S_IRUGO | S_IWUSR, \ - show_temp_max, set_temp_max, offset - 1); \ -static SENSOR_DEVICE_ATTR(temp##offset##_max_hyst, S_IRUGO | S_IWUSR, \ - show_temp_hyst, set_temp_hyst, offset - 1); \ -static SENSOR_DEVICE_ATTR(temp##offset##_type, S_IRUGO, \ - show_temp_type, NULL, offset - 1) - -sysfs_temp(1); -sysfs_temp(2); -sysfs_temp(3); - static ssize_t show_alarms_in(struct device *dev, struct device_attribute *devattr, char 
*buf) { @@ -625,10 +581,6 @@ static ssize_t show_alarms_temp(struct device *dev, struct device_attribute return sprintf(buf, "%d\n", (data->alarms[1] >> 3) & 0x07); } -static DEVICE_ATTR(alarms_in, S_IRUGO, show_alarms_in, NULL); -static DEVICE_ATTR(alarms_fan, S_IRUGO, show_alarms_fan, NULL); -static DEVICE_ATTR(alarms_temp, S_IRUGO, show_alarms_temp, NULL); - static ssize_t show_name(struct device *dev, struct device_attribute *devattr, char *buf) { @@ -637,7 +589,89 @@ static ssize_t show_name(struct device *dev, struct device_attribute return sprintf(buf, "%s\n", data->name); } -static DEVICE_ATTR(name, S_IRUGO, show_name, NULL); +static struct device_attribute f71805f_dev_attr[] = { + __ATTR(in0_input, S_IRUGO, show_in0, NULL), + __ATTR(in0_max, S_IRUGO| S_IWUSR, show_in0_max, set_in0_max), + __ATTR(in0_min, S_IRUGO| S_IWUSR, show_in0_min, set_in0_min), + __ATTR(alarms_in, S_IRUGO, show_alarms_in, NULL), + __ATTR(alarms_fan, S_IRUGO, show_alarms_fan, NULL), + __ATTR(alarms_temp, S_IRUGO, show_alarms_temp, NULL), + __ATTR(name, S_IRUGO, show_name, NULL), +}; + +static struct sensor_device_attribute f71805f_sensor_attr[] = { + SENSOR_ATTR(in1_input, S_IRUGO, show_in, NULL, 1), + SENSOR_ATTR(in1_max, S_IRUGO | S_IWUSR, + show_in_max, set_in_max, 1), + SENSOR_ATTR(in1_min, S_IRUGO | S_IWUSR, + show_in_min, set_in_min, 1), + SENSOR_ATTR(in2_input, S_IRUGO, show_in, NULL, 2), + SENSOR_ATTR(in2_max, S_IRUGO | S_IWUSR, + show_in_max, set_in_max, 2), + SENSOR_ATTR(in2_min, S_IRUGO | S_IWUSR, + show_in_min, set_in_min, 2), + SENSOR_ATTR(in3_input, S_IRUGO, show_in, NULL, 3), + SENSOR_ATTR(in3_max, S_IRUGO | S_IWUSR, + show_in_max, set_in_max, 3), + SENSOR_ATTR(in3_min, S_IRUGO | S_IWUSR, + show_in_min, set_in_min, 3), + SENSOR_ATTR(in4_input, S_IRUGO, show_in, NULL, 4), + SENSOR_ATTR(in4_max, S_IRUGO | S_IWUSR, + show_in_max, set_in_max, 4), + SENSOR_ATTR(in4_min, S_IRUGO | S_IWUSR, + show_in_min, set_in_min, 4), + SENSOR_ATTR(in5_input, S_IRUGO, show_in, NULL, 5), + SENSOR_ATTR(in5_max, S_IRUGO | S_IWUSR, + show_in_max, set_in_max, 5), + SENSOR_ATTR(in5_min, S_IRUGO | S_IWUSR, + show_in_min, set_in_min, 5), + SENSOR_ATTR(in6_input, S_IRUGO, show_in, NULL, 6), + SENSOR_ATTR(in6_max, S_IRUGO | S_IWUSR, + show_in_max, set_in_max, 6), + SENSOR_ATTR(in6_min, S_IRUGO | S_IWUSR, + show_in_min, set_in_min, 6), + SENSOR_ATTR(in7_input, S_IRUGO, show_in, NULL, 7), + SENSOR_ATTR(in7_max, S_IRUGO | S_IWUSR, + show_in_max, set_in_max, 7), + SENSOR_ATTR(in7_min, S_IRUGO | S_IWUSR, + show_in_min, set_in_min, 7), + SENSOR_ATTR(in8_input, S_IRUGO, show_in, NULL, 8), + SENSOR_ATTR(in8_max, S_IRUGO | S_IWUSR, + show_in_max, set_in_max, 8), + SENSOR_ATTR(in8_min, S_IRUGO | S_IWUSR, + show_in_min, set_in_min, 8), + + SENSOR_ATTR(temp1_input, S_IRUGO, show_temp, NULL, 0), + SENSOR_ATTR(temp1_max, S_IRUGO | S_IWUSR, + show_temp_max, set_temp_max, 0), + SENSOR_ATTR(temp1_max_hyst, S_IRUGO | S_IWUSR, + show_temp_hyst, set_temp_hyst, 0), + SENSOR_ATTR(temp1_type, S_IRUGO, show_temp_type, NULL, 0), + SENSOR_ATTR(temp2_input, S_IRUGO, show_temp, NULL, 1), + SENSOR_ATTR(temp2_max, S_IRUGO | S_IWUSR, + show_temp_max, set_temp_max, 1), + SENSOR_ATTR(temp2_max_hyst, S_IRUGO | S_IWUSR, + show_temp_hyst, set_temp_hyst, 1), + SENSOR_ATTR(temp2_type, S_IRUGO, show_temp_type, NULL, 1), + SENSOR_ATTR(temp3_input, S_IRUGO, show_temp, NULL, 2), + SENSOR_ATTR(temp3_max, S_IRUGO | S_IWUSR, + show_temp_max, set_temp_max, 2), + SENSOR_ATTR(temp3_max_hyst, S_IRUGO | S_IWUSR, + show_temp_hyst, set_temp_hyst, 2), + SENSOR_ATTR(temp3_type, 
S_IRUGO, show_temp_type, NULL, 2), +}; + +static struct sensor_device_attribute f71805f_fan_attr[] = { + SENSOR_ATTR(fan1_input, S_IRUGO, show_fan, NULL, 0), + SENSOR_ATTR(fan1_min, S_IRUGO | S_IWUSR, + show_fan_min, set_fan_min, 0), + SENSOR_ATTR(fan2_input, S_IRUGO, show_fan, NULL, 1), + SENSOR_ATTR(fan2_min, S_IRUGO | S_IWUSR, + show_fan_min, set_fan_min, 1), + SENSOR_ATTR(fan3_input, S_IRUGO, show_fan, NULL, 2), + SENSOR_ATTR(fan3_min, S_IRUGO | S_IWUSR, + show_fan_min, set_fan_min, 2), +}; /* * Device registration and initialization @@ -668,7 +702,7 @@ static int __devinit f71805f_probe(struct platform_device *pdev) { struct f71805f_data *data; struct resource *res; - int err; + int i, err; if (!(data = kzalloc(sizeof(struct f71805f_data), GFP_KERNEL))) { err = -ENOMEM; @@ -678,9 +712,9 @@ static int __devinit f71805f_probe(struct platform_device *pdev) res = platform_get_resource(pdev, IORESOURCE_IO, 0); data->addr = res->start; - init_MUTEX(&data->lock); + mutex_init(&data->lock); data->name = "f71805f"; - init_MUTEX(&data->update_lock); + mutex_init(&data->update_lock); platform_set_drvdata(pdev, data); @@ -695,76 +729,31 @@ static int __devinit f71805f_probe(struct platform_device *pdev) f71805f_init_device(data); /* Register sysfs interface files */ - device_create_file(&pdev->dev, &dev_attr_in0_input); - device_create_file(&pdev->dev, &dev_attr_in0_max); - device_create_file(&pdev->dev, &dev_attr_in0_min); - device_create_file(&pdev->dev, &sensor_dev_attr_in1_input.dev_attr); - device_create_file(&pdev->dev, &sensor_dev_attr_in2_input.dev_attr); - device_create_file(&pdev->dev, &sensor_dev_attr_in3_input.dev_attr); - device_create_file(&pdev->dev, &sensor_dev_attr_in4_input.dev_attr); - device_create_file(&pdev->dev, &sensor_dev_attr_in5_input.dev_attr); - device_create_file(&pdev->dev, &sensor_dev_attr_in6_input.dev_attr); - device_create_file(&pdev->dev, &sensor_dev_attr_in7_input.dev_attr); - device_create_file(&pdev->dev, &sensor_dev_attr_in8_input.dev_attr); - device_create_file(&pdev->dev, &sensor_dev_attr_in1_max.dev_attr); - device_create_file(&pdev->dev, &sensor_dev_attr_in2_max.dev_attr); - device_create_file(&pdev->dev, &sensor_dev_attr_in3_max.dev_attr); - device_create_file(&pdev->dev, &sensor_dev_attr_in4_max.dev_attr); - device_create_file(&pdev->dev, &sensor_dev_attr_in5_max.dev_attr); - device_create_file(&pdev->dev, &sensor_dev_attr_in6_max.dev_attr); - device_create_file(&pdev->dev, &sensor_dev_attr_in7_max.dev_attr); - device_create_file(&pdev->dev, &sensor_dev_attr_in8_max.dev_attr); - device_create_file(&pdev->dev, &sensor_dev_attr_in1_min.dev_attr); - device_create_file(&pdev->dev, &sensor_dev_attr_in2_min.dev_attr); - device_create_file(&pdev->dev, &sensor_dev_attr_in3_min.dev_attr); - device_create_file(&pdev->dev, &sensor_dev_attr_in4_min.dev_attr); - device_create_file(&pdev->dev, &sensor_dev_attr_in5_min.dev_attr); - device_create_file(&pdev->dev, &sensor_dev_attr_in6_min.dev_attr); - device_create_file(&pdev->dev, &sensor_dev_attr_in7_min.dev_attr); - device_create_file(&pdev->dev, &sensor_dev_attr_in8_min.dev_attr); - if (data->fan_enabled & (1 << 0)) { - device_create_file(&pdev->dev, - &sensor_dev_attr_fan1_input.dev_attr); - device_create_file(&pdev->dev, - &sensor_dev_attr_fan1_min.dev_attr); + for (i = 0; i < ARRAY_SIZE(f71805f_dev_attr); i++) { + err = device_create_file(&pdev->dev, &f71805f_dev_attr[i]); + if (err) + goto exit_class; } - if (data->fan_enabled & (1 << 1)) { - device_create_file(&pdev->dev, - 
&sensor_dev_attr_fan2_input.dev_attr); - device_create_file(&pdev->dev, - &sensor_dev_attr_fan2_min.dev_attr); + for (i = 0; i < ARRAY_SIZE(f71805f_sensor_attr); i++) { + err = device_create_file(&pdev->dev, + &f71805f_sensor_attr[i].dev_attr); + if (err) + goto exit_class; } - if (data->fan_enabled & (1 << 2)) { - device_create_file(&pdev->dev, - &sensor_dev_attr_fan3_input.dev_attr); - device_create_file(&pdev->dev, - &sensor_dev_attr_fan3_min.dev_attr); + for (i = 0; i < ARRAY_SIZE(f71805f_fan_attr); i++) { + if (!(data->fan_enabled & (1 << (i / 2)))) + continue; + err = device_create_file(&pdev->dev, + &f71805f_fan_attr[i].dev_attr); + if (err) + goto exit_class; } - device_create_file(&pdev->dev, - &sensor_dev_attr_temp1_input.dev_attr); - device_create_file(&pdev->dev, - &sensor_dev_attr_temp2_input.dev_attr); - device_create_file(&pdev->dev, - &sensor_dev_attr_temp3_input.dev_attr); - device_create_file(&pdev->dev, &sensor_dev_attr_temp1_max.dev_attr); - device_create_file(&pdev->dev, &sensor_dev_attr_temp2_max.dev_attr); - device_create_file(&pdev->dev, &sensor_dev_attr_temp3_max.dev_attr); - device_create_file(&pdev->dev, - &sensor_dev_attr_temp1_max_hyst.dev_attr); - device_create_file(&pdev->dev, - &sensor_dev_attr_temp2_max_hyst.dev_attr); - device_create_file(&pdev->dev, - &sensor_dev_attr_temp3_max_hyst.dev_attr); - device_create_file(&pdev->dev, &sensor_dev_attr_temp1_type.dev_attr); - device_create_file(&pdev->dev, &sensor_dev_attr_temp2_type.dev_attr); - device_create_file(&pdev->dev, &sensor_dev_attr_temp3_type.dev_attr); - device_create_file(&pdev->dev, &dev_attr_alarms_in); - device_create_file(&pdev->dev, &dev_attr_alarms_fan); - device_create_file(&pdev->dev, &dev_attr_alarms_temp); - device_create_file(&pdev->dev, &dev_attr_name); return 0; +exit_class: + dev_err(&pdev->dev, "Sysfs interface creation failed\n"); + hwmon_device_unregister(data->class_dev); exit_free: kfree(data); exit: diff --git a/drivers/hwmon/fscher.c b/drivers/hwmon/fscher.c index 25409181d1e..6bc76b40763 100644 --- a/drivers/hwmon/fscher.c +++ b/drivers/hwmon/fscher.c @@ -33,6 +33,7 @@ #include <linux/i2c.h> #include <linux/hwmon.h> #include <linux/err.h> +#include <linux/mutex.h> /* * Addresses to scan @@ -133,7 +134,7 @@ static struct i2c_driver fscher_driver = { struct fscher_data { struct i2c_client client; struct class_device *class_dev; - struct semaphore update_lock; + struct mutex update_lock; char valid; /* zero until following fields are valid */ unsigned long last_updated; /* in jiffies */ @@ -332,7 +333,7 @@ static int fscher_detect(struct i2c_adapter *adapter, int address, int kind) * global list */ strlcpy(new_client->name, "fscher", I2C_NAME_SIZE); data->valid = 0; - init_MUTEX(&data->update_lock); + mutex_init(&data->update_lock); /* Tell the I2C layer a new client has arrived */ if ((err = i2c_attach_client(new_client))) @@ -417,7 +418,7 @@ static struct fscher_data *fscher_update_device(struct device *dev) struct i2c_client *client = to_i2c_client(dev); struct fscher_data *data = i2c_get_clientdata(client); - down(&data->update_lock); + mutex_lock(&data->update_lock); if (time_after(jiffies, data->last_updated + 2 * HZ) || !data->valid) { @@ -457,7 +458,7 @@ static struct fscher_data *fscher_update_device(struct device *dev) data->valid = 1; } - up(&data->update_lock); + mutex_unlock(&data->update_lock); return data; } @@ -472,10 +473,10 @@ static ssize_t set_fan_status(struct i2c_client *client, struct fscher_data *dat /* bits 0..1, 3..7 reserved => mask with 0x04 */ unsigned 
long v = simple_strtoul(buf, NULL, 10) & 0x04; - down(&data->update_lock); + mutex_lock(&data->update_lock); data->fan_status[FAN_INDEX_FROM_NUM(nr)] &= ~v; fscher_write_value(client, reg, v); - up(&data->update_lock); + mutex_unlock(&data->update_lock); return count; } @@ -490,10 +491,10 @@ static ssize_t set_pwm(struct i2c_client *client, struct fscher_data *data, { unsigned long v = simple_strtoul(buf, NULL, 10); - down(&data->update_lock); + mutex_lock(&data->update_lock); data->fan_min[FAN_INDEX_FROM_NUM(nr)] = v > 0xff ? 0xff : v; fscher_write_value(client, reg, data->fan_min[FAN_INDEX_FROM_NUM(nr)]); - up(&data->update_lock); + mutex_unlock(&data->update_lock); return count; } @@ -518,14 +519,14 @@ static ssize_t set_fan_div(struct i2c_client *client, struct fscher_data *data, return -EINVAL; } - down(&data->update_lock); + mutex_lock(&data->update_lock); /* bits 2..7 reserved => mask with 0x03 */ data->fan_ripple[FAN_INDEX_FROM_NUM(nr)] &= ~0x03; data->fan_ripple[FAN_INDEX_FROM_NUM(nr)] |= v; fscher_write_value(client, reg, data->fan_ripple[FAN_INDEX_FROM_NUM(nr)]); - up(&data->update_lock); + mutex_unlock(&data->update_lock); return count; } @@ -552,10 +553,10 @@ static ssize_t set_temp_status(struct i2c_client *client, struct fscher_data *da /* bits 2..7 reserved, 0 read only => mask with 0x02 */ unsigned long v = simple_strtoul(buf, NULL, 10) & 0x02; - down(&data->update_lock); + mutex_lock(&data->update_lock); data->temp_status[TEMP_INDEX_FROM_NUM(nr)] &= ~v; fscher_write_value(client, reg, v); - up(&data->update_lock); + mutex_unlock(&data->update_lock); return count; } @@ -609,10 +610,10 @@ static ssize_t set_control(struct i2c_client *client, struct fscher_data *data, /* bits 1..7 reserved => mask with 0x01 */ unsigned long v = simple_strtoul(buf, NULL, 10) & 0x01; - down(&data->update_lock); + mutex_lock(&data->update_lock); data->global_control &= ~v; fscher_write_value(client, reg, v); - up(&data->update_lock); + mutex_unlock(&data->update_lock); return count; } @@ -631,11 +632,11 @@ static ssize_t set_watchdog_control(struct i2c_client *client, struct /* bits 0..3 reserved => mask with 0xf0 */ unsigned long v = simple_strtoul(buf, NULL, 10) & 0xf0; - down(&data->update_lock); + mutex_lock(&data->update_lock); data->watchdog[2] &= ~0xf0; data->watchdog[2] |= v; fscher_write_value(client, reg, data->watchdog[2]); - up(&data->update_lock); + mutex_unlock(&data->update_lock); return count; } @@ -651,10 +652,10 @@ static ssize_t set_watchdog_status(struct i2c_client *client, struct fscher_data /* bits 0, 2..7 reserved => mask with 0x02 */ unsigned long v = simple_strtoul(buf, NULL, 10) & 0x02; - down(&data->update_lock); + mutex_lock(&data->update_lock); data->watchdog[1] &= ~v; fscher_write_value(client, reg, v); - up(&data->update_lock); + mutex_unlock(&data->update_lock); return count; } @@ -669,10 +670,10 @@ static ssize_t set_watchdog_preset(struct i2c_client *client, struct fscher_data { unsigned long v = simple_strtoul(buf, NULL, 10) & 0xff; - down(&data->update_lock); + mutex_lock(&data->update_lock); data->watchdog[0] = v; fscher_write_value(client, reg, data->watchdog[0]); - up(&data->update_lock); + mutex_unlock(&data->update_lock); return count; } diff --git a/drivers/hwmon/fscpos.c b/drivers/hwmon/fscpos.c index 6d0146b5702..6dc4846b9ee 100644 --- a/drivers/hwmon/fscpos.c +++ b/drivers/hwmon/fscpos.c @@ -37,6 +37,7 @@ #include <linux/init.h> #include <linux/hwmon.h> #include <linux/err.h> +#include <linux/mutex.h> /* * Addresses to scan @@ -89,8 +90,8 @@ static 
int fscpos_attach_adapter(struct i2c_adapter *adapter); static int fscpos_detect(struct i2c_adapter *adapter, int address, int kind); static int fscpos_detach_client(struct i2c_client *client); -static int fscpos_read_value(struct i2c_client *client, u8 register); -static int fscpos_write_value(struct i2c_client *client, u8 register, u8 value); +static int fscpos_read_value(struct i2c_client *client, u8 reg); +static int fscpos_write_value(struct i2c_client *client, u8 reg, u8 value); static struct fscpos_data *fscpos_update_device(struct device *dev); static void fscpos_init_client(struct i2c_client *client); @@ -114,7 +115,7 @@ static struct i2c_driver fscpos_driver = { struct fscpos_data { struct i2c_client client; struct class_device *class_dev; - struct semaphore update_lock; + struct mutex update_lock; char valid; /* 0 until following fields are valid */ unsigned long last_updated; /* In jiffies */ @@ -208,13 +209,13 @@ static ssize_t set_fan_ripple(struct i2c_client *client, struct fscpos_data return -EINVAL; } - down(&data->update_lock); + mutex_lock(&data->update_lock); /* bits 2..7 reserved => mask with 0x03 */ data->fan_ripple[nr - 1] &= ~0x03; data->fan_ripple[nr - 1] |= v; fscpos_write_value(client, reg, data->fan_ripple[nr - 1]); - up(&data->update_lock); + mutex_unlock(&data->update_lock); return count; } @@ -232,10 +233,10 @@ static ssize_t set_pwm(struct i2c_client *client, struct fscpos_data *data, if (v < 0) v = 0; if (v > 255) v = 255; - down(&data->update_lock); + mutex_lock(&data->update_lock); data->pwm[nr - 1] = v; fscpos_write_value(client, reg, data->pwm[nr - 1]); - up(&data->update_lock); + mutex_unlock(&data->update_lock); return count; } @@ -278,11 +279,11 @@ static ssize_t set_wdog_control(struct i2c_client *client, struct fscpos_data /* bits 0..3 reserved => mask with 0xf0 */ unsigned long v = simple_strtoul(buf, NULL, 10) & 0xf0; - down(&data->update_lock); + mutex_lock(&data->update_lock); data->wdog_control &= ~0xf0; data->wdog_control |= v; fscpos_write_value(client, reg, data->wdog_control); - up(&data->update_lock); + mutex_unlock(&data->update_lock); return count; } @@ -304,10 +305,10 @@ static ssize_t set_wdog_state(struct i2c_client *client, struct fscpos_data return -EINVAL; } - down(&data->update_lock); + mutex_lock(&data->update_lock); data->wdog_state &= ~v; fscpos_write_value(client, reg, v); - up(&data->update_lock); + mutex_unlock(&data->update_lock); return count; } @@ -321,10 +322,10 @@ static ssize_t set_wdog_preset(struct i2c_client *client, struct fscpos_data { unsigned long v = simple_strtoul(buf, NULL, 10) & 0xff; - down(&data->update_lock); + mutex_lock(&data->update_lock); data->wdog_preset = v; fscpos_write_value(client, reg, data->wdog_preset); - up(&data->update_lock); + mutex_unlock(&data->update_lock); return count; } @@ -483,7 +484,7 @@ static int fscpos_detect(struct i2c_adapter *adapter, int address, int kind) strlcpy(new_client->name, "fscpos", I2C_NAME_SIZE); data->valid = 0; - init_MUTEX(&data->update_lock); + mutex_init(&data->update_lock); /* Tell the I2C layer a new client has arrived */ if ((err = i2c_attach_client(new_client))) @@ -579,7 +580,7 @@ static struct fscpos_data *fscpos_update_device(struct device *dev) struct i2c_client *client = to_i2c_client(dev); struct fscpos_data *data = i2c_get_clientdata(client); - down(&data->update_lock); + mutex_lock(&data->update_lock); if (time_after(jiffies, data->last_updated + 2 * HZ) || !data->valid) { int i; @@ -625,7 +626,7 @@ static struct fscpos_data 
*fscpos_update_device(struct device *dev) data->last_updated = jiffies; data->valid = 1; } - up(&data->update_lock); + mutex_unlock(&data->update_lock); return data; } diff --git a/drivers/hwmon/gl518sm.c b/drivers/hwmon/gl518sm.c index 9e685e3a3bc..6606aabdb49 100644 --- a/drivers/hwmon/gl518sm.c +++ b/drivers/hwmon/gl518sm.c @@ -43,6 +43,7 @@ #include <linux/i2c.h> #include <linux/hwmon.h> #include <linux/err.h> +#include <linux/mutex.h> /* Addresses to scan */ static unsigned short normal_i2c[] = { 0x2c, 0x2d, I2C_CLIENT_END }; @@ -120,7 +121,7 @@ struct gl518_data { struct class_device *class_dev; enum chips type; - struct semaphore update_lock; + struct mutex update_lock; char valid; /* !=0 if following fields are valid */ unsigned long last_updated; /* In jiffies */ @@ -212,10 +213,10 @@ static ssize_t set_##suffix(struct device *dev, struct device_attribute *attr, c struct gl518_data *data = i2c_get_clientdata(client); \ long val = simple_strtol(buf, NULL, 10); \ \ - down(&data->update_lock); \ + mutex_lock(&data->update_lock); \ data->value = type##_TO_REG(val); \ gl518_write_value(client, reg, data->value); \ - up(&data->update_lock); \ + mutex_unlock(&data->update_lock); \ return count; \ } @@ -228,12 +229,12 @@ static ssize_t set_##suffix(struct device *dev, struct device_attribute *attr, c int regvalue; \ unsigned long val = simple_strtoul(buf, NULL, 10); \ \ - down(&data->update_lock); \ + mutex_lock(&data->update_lock); \ regvalue = gl518_read_value(client, reg); \ data->value = type##_TO_REG(val); \ regvalue = (regvalue & ~mask) | (data->value << shift); \ gl518_write_value(client, reg, regvalue); \ - up(&data->update_lock); \ + mutex_unlock(&data->update_lock); \ return count; \ } @@ -265,7 +266,7 @@ static ssize_t set_fan_min1(struct device *dev, struct device_attribute *attr, c int regvalue; unsigned long val = simple_strtoul(buf, NULL, 10); - down(&data->update_lock); + mutex_lock(&data->update_lock); regvalue = gl518_read_value(client, GL518_REG_FAN_LIMIT); data->fan_min[0] = FAN_TO_REG(val, DIV_FROM_REG(data->fan_div[0])); @@ -280,7 +281,7 @@ static ssize_t set_fan_min1(struct device *dev, struct device_attribute *attr, c data->beep_mask &= data->alarm_mask; gl518_write_value(client, GL518_REG_ALARM, data->beep_mask); - up(&data->update_lock); + mutex_unlock(&data->update_lock); return count; } @@ -291,7 +292,7 @@ static ssize_t set_fan_min2(struct device *dev, struct device_attribute *attr, c int regvalue; unsigned long val = simple_strtoul(buf, NULL, 10); - down(&data->update_lock); + mutex_lock(&data->update_lock); regvalue = gl518_read_value(client, GL518_REG_FAN_LIMIT); data->fan_min[1] = FAN_TO_REG(val, DIV_FROM_REG(data->fan_div[1])); @@ -306,7 +307,7 @@ static ssize_t set_fan_min2(struct device *dev, struct device_attribute *attr, c data->beep_mask &= data->alarm_mask; gl518_write_value(client, GL518_REG_ALARM, data->beep_mask); - up(&data->update_lock); + mutex_unlock(&data->update_lock); return count; } @@ -407,7 +408,7 @@ static int gl518_detect(struct i2c_adapter *adapter, int address, int kind) strlcpy(new_client->name, "gl518sm", I2C_NAME_SIZE); data->type = kind; data->valid = 0; - init_MUTEX(&data->update_lock); + mutex_init(&data->update_lock); /* Tell the I2C layer a new client has arrived */ if ((err = i2c_attach_client(new_client))) @@ -525,7 +526,7 @@ static struct gl518_data *gl518_update_device(struct device *dev) struct gl518_data *data = i2c_get_clientdata(client); int val; - down(&data->update_lock); + mutex_lock(&data->update_lock); if 
(time_after(jiffies, data->last_updated + HZ + HZ / 2) || !data->valid) { @@ -586,7 +587,7 @@ static struct gl518_data *gl518_update_device(struct device *dev) data->valid = 1; } - up(&data->update_lock); + mutex_unlock(&data->update_lock); return data; } diff --git a/drivers/hwmon/gl520sm.c b/drivers/hwmon/gl520sm.c index baee60e44b5..47b4d49f75c 100644 --- a/drivers/hwmon/gl520sm.c +++ b/drivers/hwmon/gl520sm.c @@ -29,6 +29,7 @@ #include <linux/hwmon.h> #include <linux/hwmon-vid.h> #include <linux/err.h> +#include <linux/mutex.h> /* Type of the extra sensor */ static unsigned short extra_sensor_type; @@ -121,7 +122,7 @@ static struct i2c_driver gl520_driver = { struct gl520_data { struct i2c_client client; struct class_device *class_dev; - struct semaphore update_lock; + struct mutex update_lock; char valid; /* zero until the following fields are valid */ unsigned long last_updated; /* in jiffies */ @@ -303,7 +304,7 @@ static ssize_t set_in_min(struct i2c_client *client, struct gl520_data *data, co long v = simple_strtol(buf, NULL, 10); u8 r; - down(&data->update_lock); + mutex_lock(&data->update_lock); if (n == 0) r = VDD_TO_REG(v); @@ -317,7 +318,7 @@ static ssize_t set_in_min(struct i2c_client *client, struct gl520_data *data, co else gl520_write_value(client, reg, r); - up(&data->update_lock); + mutex_unlock(&data->update_lock); return count; } @@ -331,7 +332,7 @@ static ssize_t set_in_max(struct i2c_client *client, struct gl520_data *data, co else r = IN_TO_REG(v); - down(&data->update_lock); + mutex_lock(&data->update_lock); data->in_max[n] = r; @@ -340,7 +341,7 @@ static ssize_t set_in_max(struct i2c_client *client, struct gl520_data *data, co else gl520_write_value(client, reg, r); - up(&data->update_lock); + mutex_unlock(&data->update_lock); return count; } @@ -373,7 +374,7 @@ static ssize_t set_fan_min(struct i2c_client *client, struct gl520_data *data, c unsigned long v = simple_strtoul(buf, NULL, 10); u8 r; - down(&data->update_lock); + mutex_lock(&data->update_lock); r = FAN_TO_REG(v, data->fan_div[n - 1]); data->fan_min[n - 1] = r; @@ -390,7 +391,7 @@ static ssize_t set_fan_min(struct i2c_client *client, struct gl520_data *data, c data->beep_mask &= data->alarm_mask; gl520_write_value(client, GL520_REG_BEEP_MASK, data->beep_mask); - up(&data->update_lock); + mutex_unlock(&data->update_lock); return count; } @@ -409,7 +410,7 @@ static ssize_t set_fan_div(struct i2c_client *client, struct gl520_data *data, c return -EINVAL; } - down(&data->update_lock); + mutex_lock(&data->update_lock); data->fan_div[n - 1] = r; if (n == 1) @@ -417,7 +418,7 @@ static ssize_t set_fan_div(struct i2c_client *client, struct gl520_data *data, c else gl520_write_value(client, reg, (gl520_read_value(client, reg) & ~0x30) | (r << 4)); - up(&data->update_lock); + mutex_unlock(&data->update_lock); return count; } @@ -425,10 +426,10 @@ static ssize_t set_fan_off(struct i2c_client *client, struct gl520_data *data, c { u8 r = simple_strtoul(buf, NULL, 10)?1:0; - down(&data->update_lock); + mutex_lock(&data->update_lock); data->fan_off = r; gl520_write_value(client, reg, (gl520_read_value(client, reg) & ~0x0c) | (r << 2)); - up(&data->update_lock); + mutex_unlock(&data->update_lock); return count; } @@ -454,10 +455,10 @@ static ssize_t set_temp_max(struct i2c_client *client, struct gl520_data *data, { long v = simple_strtol(buf, NULL, 10); - down(&data->update_lock); + mutex_lock(&data->update_lock); data->temp_max[n - 1] = TEMP_TO_REG(v);; gl520_write_value(client, reg, data->temp_max[n - 1]); - 
up(&data->update_lock); + mutex_unlock(&data->update_lock); return count; } @@ -465,10 +466,10 @@ static ssize_t set_temp_max_hyst(struct i2c_client *client, struct gl520_data *d { long v = simple_strtol(buf, NULL, 10); - down(&data->update_lock); + mutex_lock(&data->update_lock); data->temp_max_hyst[n - 1] = TEMP_TO_REG(v); gl520_write_value(client, reg, data->temp_max_hyst[n - 1]); - up(&data->update_lock); + mutex_unlock(&data->update_lock); return count; } @@ -491,10 +492,10 @@ static ssize_t set_beep_enable(struct i2c_client *client, struct gl520_data *dat { u8 r = simple_strtoul(buf, NULL, 10)?0:1; - down(&data->update_lock); + mutex_lock(&data->update_lock); data->beep_enable = !r; gl520_write_value(client, reg, (gl520_read_value(client, reg) & ~0x04) | (r << 2)); - up(&data->update_lock); + mutex_unlock(&data->update_lock); return count; } @@ -502,11 +503,11 @@ static ssize_t set_beep_mask(struct i2c_client *client, struct gl520_data *data, { u8 r = simple_strtoul(buf, NULL, 10); - down(&data->update_lock); + mutex_lock(&data->update_lock); r &= data->alarm_mask; data->beep_mask = r; gl520_write_value(client, reg, r); - up(&data->update_lock); + mutex_unlock(&data->update_lock); return count; } @@ -561,7 +562,7 @@ static int gl520_detect(struct i2c_adapter *adapter, int address, int kind) /* Fill in the remaining client fields */ strlcpy(new_client->name, "gl520sm", I2C_NAME_SIZE); data->valid = 0; - init_MUTEX(&data->update_lock); + mutex_init(&data->update_lock); /* Tell the I2C layer a new client has arrived */ if ((err = i2c_attach_client(new_client))) @@ -685,7 +686,7 @@ static struct gl520_data *gl520_update_device(struct device *dev) struct gl520_data *data = i2c_get_clientdata(client); int val; - down(&data->update_lock); + mutex_lock(&data->update_lock); if (time_after(jiffies, data->last_updated + 2 * HZ) || !data->valid) { @@ -750,7 +751,7 @@ static struct gl520_data *gl520_update_device(struct device *dev) data->valid = 1; } - up(&data->update_lock); + mutex_unlock(&data->update_lock); return data; } diff --git a/drivers/hwmon/hdaps.c b/drivers/hwmon/hdaps.c index 23a9e1ea8e3..7636c1a58f9 100644 --- a/drivers/hwmon/hdaps.c +++ b/drivers/hwmon/hdaps.c @@ -33,6 +33,7 @@ #include <linux/module.h> #include <linux/timer.h> #include <linux/dmi.h> +#include <linux/mutex.h> #include <asm/io.h> #define HDAPS_LOW_PORT 0x1600 /* first port used by hdaps */ @@ -70,10 +71,10 @@ static u8 km_activity; static int rest_x; static int rest_y; -static DECLARE_MUTEX(hdaps_sem); +static DEFINE_MUTEX(hdaps_mutex); /* - * __get_latch - Get the value from a given port. Callers must hold hdaps_sem. + * __get_latch - Get the value from a given port. Callers must hold hdaps_mutex. */ static inline u8 __get_latch(u16 port) { @@ -82,7 +83,7 @@ static inline u8 __get_latch(u16 port) /* * __check_latch - Check a port latch for a given value. Returns zero if the - * port contains the given value. Callers must hold hdaps_sem. + * port contains the given value. Callers must hold hdaps_mutex. */ static inline int __check_latch(u16 port, u8 val) { @@ -93,7 +94,7 @@ static inline int __check_latch(u16 port, u8 val) /* * __wait_latch - Wait up to 100us for a port latch to get a certain value, - * returning zero if the value is obtained. Callers must hold hdaps_sem. + * returning zero if the value is obtained. Callers must hold hdaps_mutex. 
*/ static int __wait_latch(u16 port, u8 val) { @@ -110,7 +111,7 @@ static int __wait_latch(u16 port, u8 val) /* * __device_refresh - request a refresh from the accelerometer. Does not wait - * for refresh to complete. Callers must hold hdaps_sem. + * for refresh to complete. Callers must hold hdaps_mutex. */ static void __device_refresh(void) { @@ -124,7 +125,7 @@ static void __device_refresh(void) /* * __device_refresh_sync - request a synchronous refresh from the * accelerometer. We wait for the refresh to complete. Returns zero if - * successful and nonzero on error. Callers must hold hdaps_sem. + * successful and nonzero on error. Callers must hold hdaps_mutex. */ static int __device_refresh_sync(void) { @@ -134,7 +135,7 @@ static int __device_refresh_sync(void) /* * __device_complete - indicate to the accelerometer that we are done reading - * data, and then initiate an async refresh. Callers must hold hdaps_sem. + * data, and then initiate an async refresh. Callers must hold hdaps_mutex. */ static inline void __device_complete(void) { @@ -152,7 +153,7 @@ static int hdaps_readb_one(unsigned int port, u8 *val) { int ret; - down(&hdaps_sem); + mutex_lock(&hdaps_mutex); /* do a sync refresh -- we need to be sure that we read fresh data */ ret = __device_refresh_sync(); @@ -163,7 +164,7 @@ static int hdaps_readb_one(unsigned int port, u8 *val) __device_complete(); out: - up(&hdaps_sem); + mutex_unlock(&hdaps_mutex); return ret; } @@ -198,9 +199,9 @@ static int hdaps_read_pair(unsigned int port1, unsigned int port2, { int ret; - down(&hdaps_sem); + mutex_lock(&hdaps_mutex); ret = __hdaps_read_pair(port1, port2, val1, val2); - up(&hdaps_sem); + mutex_unlock(&hdaps_mutex); return ret; } @@ -213,7 +214,7 @@ static int hdaps_device_init(void) { int total, ret = -ENXIO; - down(&hdaps_sem); + mutex_lock(&hdaps_mutex); outb(0x13, 0x1610); outb(0x01, 0x161f); @@ -279,7 +280,7 @@ static int hdaps_device_init(void) } out: - up(&hdaps_sem); + mutex_unlock(&hdaps_mutex); return ret; } @@ -313,7 +314,7 @@ static struct platform_driver hdaps_driver = { }; /* - * hdaps_calibrate - Set our "resting" values. Callers must hold hdaps_sem. + * hdaps_calibrate - Set our "resting" values. Callers must hold hdaps_mutex. */ static void hdaps_calibrate(void) { @@ -325,7 +326,7 @@ static void hdaps_mousedev_poll(unsigned long unused) int x, y; /* Cannot sleep. Try nonblockingly. If we fail, try again later. */ - if (down_trylock(&hdaps_sem)) { + if (!mutex_trylock(&hdaps_mutex)) { mod_timer(&hdaps_timer,jiffies + HDAPS_POLL_PERIOD); return; } @@ -340,7 +341,7 @@ static void hdaps_mousedev_poll(unsigned long unused) mod_timer(&hdaps_timer, jiffies + HDAPS_POLL_PERIOD); out: - up(&hdaps_sem); + mutex_unlock(&hdaps_mutex); } @@ -420,9 +421,9 @@ static ssize_t hdaps_calibrate_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { - down(&hdaps_sem); + mutex_lock(&hdaps_mutex); hdaps_calibrate(); - up(&hdaps_sem); + mutex_unlock(&hdaps_mutex); return count; } diff --git a/drivers/hwmon/hwmon-vid.c b/drivers/hwmon/hwmon-vid.c index e497274916c..a74a44f16f5 100644 --- a/drivers/hwmon/hwmon-vid.c +++ b/drivers/hwmon/hwmon-vid.c @@ -54,6 +54,10 @@ (IMVP-II). You can find more information in the datasheet of Max1718 http://www.maxim-ic.com/quick_view2.cfm/qv_pk/2452 + The 13 specification corresponds to the Intel Pentium M series. There + doesn't seem to be any named specification for these. 
The conversion + tables are detailed directly in the various Pentium M datasheets: + http://www.intel.com/design/intarch/pentiumm/docs_pentiumm.htm */ /* vrm is the VRM/VRD document version multiplied by 10. @@ -100,6 +104,8 @@ int vid_from_reg(int val, u8 vrm) case 17: /* Intel IMVP-II */ return(val & 0x10 ? 975 - (val & 0xF) * 25 : 1750 - val * 50); + case 13: + return(1708 - (val & 0x3f) * 16); default: /* report 0 for unknown */ printk(KERN_INFO "hwmon-vid: requested unknown VRM version\n"); return 0; @@ -129,8 +135,9 @@ struct vrm_model { static struct vrm_model vrm_models[] = { {X86_VENDOR_AMD, 0x6, ANY, ANY, 90}, /* Athlon Duron etc */ {X86_VENDOR_AMD, 0xF, ANY, ANY, 24}, /* Athlon 64, Opteron and above VRM 24 */ - {X86_VENDOR_INTEL, 0x6, 0x9, ANY, 85}, /* 0.13um too */ + {X86_VENDOR_INTEL, 0x6, 0x9, ANY, 13}, /* Pentium M (130 nm) */ {X86_VENDOR_INTEL, 0x6, 0xB, ANY, 85}, /* Tualatin */ + {X86_VENDOR_INTEL, 0x6, 0xD, ANY, 13}, /* Pentium M (90 nm) */ {X86_VENDOR_INTEL, 0x6, ANY, ANY, 82}, /* any P6 */ {X86_VENDOR_INTEL, 0x7, ANY, ANY, 0}, /* Itanium */ {X86_VENDOR_INTEL, 0xF, 0x0, ANY, 90}, /* P4 */ diff --git a/drivers/hwmon/hwmon.c b/drivers/hwmon/hwmon.c index dddd3eb9b38..106fa01cdb6 100644 --- a/drivers/hwmon/hwmon.c +++ b/drivers/hwmon/hwmon.c @@ -17,6 +17,7 @@ #include <linux/idr.h> #include <linux/hwmon.h> #include <linux/gfp.h> +#include <linux/spinlock.h> #define HWMON_ID_PREFIX "hwmon" #define HWMON_ID_FORMAT HWMON_ID_PREFIX "%d" @@ -24,6 +25,7 @@ static struct class *hwmon_class; static DEFINE_IDR(hwmon_idr); +static DEFINE_SPINLOCK(idr_lock); /** * hwmon_device_register - register w/ hwmon sysfs class @@ -37,20 +39,30 @@ static DEFINE_IDR(hwmon_idr); struct class_device *hwmon_device_register(struct device *dev) { struct class_device *cdev; - int id; + int id, err; - if (idr_pre_get(&hwmon_idr, GFP_KERNEL) == 0) +again: + if (unlikely(idr_pre_get(&hwmon_idr, GFP_KERNEL) == 0)) return ERR_PTR(-ENOMEM); - if (idr_get_new(&hwmon_idr, NULL, &id) < 0) - return ERR_PTR(-ENOMEM); + spin_lock(&idr_lock); + err = idr_get_new(&hwmon_idr, NULL, &id); + spin_unlock(&idr_lock); + + if (unlikely(err == -EAGAIN)) + goto again; + else if (unlikely(err)) + return ERR_PTR(err); id = id & MAX_ID_MASK; cdev = class_device_create(hwmon_class, NULL, MKDEV(0,0), dev, HWMON_ID_FORMAT, id); - if (IS_ERR(cdev)) + if (IS_ERR(cdev)) { + spin_lock(&idr_lock); idr_remove(&hwmon_idr, id); + spin_unlock(&idr_lock); + } return cdev; } @@ -64,9 +76,11 @@ void hwmon_device_unregister(struct class_device *cdev) { int id; - if (sscanf(cdev->class_id, HWMON_ID_FORMAT, &id) == 1) { + if (likely(sscanf(cdev->class_id, HWMON_ID_FORMAT, &id) == 1)) { class_device_unregister(cdev); + spin_lock(&idr_lock); idr_remove(&hwmon_idr, id); + spin_unlock(&idr_lock); } else dev_dbg(cdev->dev, "hwmon_device_unregister() failed: bad class ID!\n"); diff --git a/drivers/hwmon/it87.c b/drivers/hwmon/it87.c index d7a9401600b..06df92b3ee4 100644 --- a/drivers/hwmon/it87.c +++ b/drivers/hwmon/it87.c @@ -41,6 +41,7 @@ #include <linux/hwmon-sysfs.h> #include <linux/hwmon-vid.h> #include <linux/err.h> +#include <linux/mutex.h> #include <asm/io.h> @@ -194,10 +195,10 @@ static int DIV_TO_REG(int val) struct it87_data { struct i2c_client client; struct class_device *class_dev; - struct semaphore lock; + struct mutex lock; enum chips type; - struct semaphore update_lock; + struct mutex update_lock; char valid; /* !=0 if following fields are valid */ unsigned long last_updated; /* In jiffies */ @@ -224,9 +225,8 @@ static int 
it87_isa_attach_adapter(struct i2c_adapter *adapter); static int it87_detect(struct i2c_adapter *adapter, int address, int kind); static int it87_detach_client(struct i2c_client *client); -static int it87_read_value(struct i2c_client *client, u8 register); -static int it87_write_value(struct i2c_client *client, u8 register, - u8 value); +static int it87_read_value(struct i2c_client *client, u8 reg); +static int it87_write_value(struct i2c_client *client, u8 reg, u8 value); static struct it87_data *it87_update_device(struct device *dev); static int it87_check_pwm(struct i2c_client *client); static void it87_init_client(struct i2c_client *client, struct it87_data *data); @@ -290,11 +290,11 @@ static ssize_t set_in_min(struct device *dev, struct device_attribute *attr, struct it87_data *data = i2c_get_clientdata(client); unsigned long val = simple_strtoul(buf, NULL, 10); - down(&data->update_lock); + mutex_lock(&data->update_lock); data->in_min[nr] = IN_TO_REG(val); it87_write_value(client, IT87_REG_VIN_MIN(nr), data->in_min[nr]); - up(&data->update_lock); + mutex_unlock(&data->update_lock); return count; } static ssize_t set_in_max(struct device *dev, struct device_attribute *attr, @@ -307,11 +307,11 @@ static ssize_t set_in_max(struct device *dev, struct device_attribute *attr, struct it87_data *data = i2c_get_clientdata(client); unsigned long val = simple_strtoul(buf, NULL, 10); - down(&data->update_lock); + mutex_lock(&data->update_lock); data->in_max[nr] = IN_TO_REG(val); it87_write_value(client, IT87_REG_VIN_MAX(nr), data->in_max[nr]); - up(&data->update_lock); + mutex_unlock(&data->update_lock); return count; } @@ -381,10 +381,10 @@ static ssize_t set_temp_max(struct device *dev, struct device_attribute *attr, struct it87_data *data = i2c_get_clientdata(client); int val = simple_strtol(buf, NULL, 10); - down(&data->update_lock); + mutex_lock(&data->update_lock); data->temp_high[nr] = TEMP_TO_REG(val); it87_write_value(client, IT87_REG_TEMP_HIGH(nr), data->temp_high[nr]); - up(&data->update_lock); + mutex_unlock(&data->update_lock); return count; } static ssize_t set_temp_min(struct device *dev, struct device_attribute *attr, @@ -397,10 +397,10 @@ static ssize_t set_temp_min(struct device *dev, struct device_attribute *attr, struct it87_data *data = i2c_get_clientdata(client); int val = simple_strtol(buf, NULL, 10); - down(&data->update_lock); + mutex_lock(&data->update_lock); data->temp_low[nr] = TEMP_TO_REG(val); it87_write_value(client, IT87_REG_TEMP_LOW(nr), data->temp_low[nr]); - up(&data->update_lock); + mutex_unlock(&data->update_lock); return count; } #define show_temp_offset(offset) \ @@ -440,7 +440,7 @@ static ssize_t set_sensor(struct device *dev, struct device_attribute *attr, struct it87_data *data = i2c_get_clientdata(client); int val = simple_strtol(buf, NULL, 10); - down(&data->update_lock); + mutex_lock(&data->update_lock); data->sensor &= ~(1 << nr); data->sensor &= ~(8 << nr); @@ -450,11 +450,11 @@ static ssize_t set_sensor(struct device *dev, struct device_attribute *attr, else if (val == 2) data->sensor |= 8 << nr; else if (val != 0) { - up(&data->update_lock); + mutex_unlock(&data->update_lock); return -EINVAL; } it87_write_value(client, IT87_REG_TEMP_ENABLE, data->sensor); - up(&data->update_lock); + mutex_unlock(&data->update_lock); return count; } #define show_sensor_offset(offset) \ @@ -524,7 +524,7 @@ static ssize_t set_fan_min(struct device *dev, struct device_attribute *attr, int val = simple_strtol(buf, NULL, 10); u8 reg = it87_read_value(client, 
IT87_REG_FAN_DIV); - down(&data->update_lock); + mutex_lock(&data->update_lock); switch (nr) { case 0: data->fan_div[nr] = reg & 0x07; break; case 1: data->fan_div[nr] = (reg >> 3) & 0x07; break; @@ -533,7 +533,7 @@ static ssize_t set_fan_min(struct device *dev, struct device_attribute *attr, data->fan_min[nr] = FAN_TO_REG(val, DIV_FROM_REG(data->fan_div[nr])); it87_write_value(client, IT87_REG_FAN_MIN(nr), data->fan_min[nr]); - up(&data->update_lock); + mutex_unlock(&data->update_lock); return count; } static ssize_t set_fan_div(struct device *dev, struct device_attribute *attr, @@ -548,7 +548,7 @@ static ssize_t set_fan_div(struct device *dev, struct device_attribute *attr, int i, min[3]; u8 old; - down(&data->update_lock); + mutex_lock(&data->update_lock); old = it87_read_value(client, IT87_REG_FAN_DIV); for (i = 0; i < 3; i++) @@ -576,7 +576,7 @@ static ssize_t set_fan_div(struct device *dev, struct device_attribute *attr, data->fan_min[i]=FAN_TO_REG(min[i], DIV_FROM_REG(data->fan_div[i])); it87_write_value(client, IT87_REG_FAN_MIN(i), data->fan_min[i]); } - up(&data->update_lock); + mutex_unlock(&data->update_lock); return count; } static ssize_t set_pwm_enable(struct device *dev, @@ -589,7 +589,7 @@ static ssize_t set_pwm_enable(struct device *dev, struct it87_data *data = i2c_get_clientdata(client); int val = simple_strtol(buf, NULL, 10); - down(&data->update_lock); + mutex_lock(&data->update_lock); if (val == 0) { int tmp; @@ -606,11 +606,11 @@ static ssize_t set_pwm_enable(struct device *dev, /* set saved pwm value, clear FAN_CTLX PWM mode bit */ it87_write_value(client, IT87_REG_PWM(nr), PWM_TO_REG(data->manual_pwm_ctl[nr])); } else { - up(&data->update_lock); + mutex_unlock(&data->update_lock); return -EINVAL; } - up(&data->update_lock); + mutex_unlock(&data->update_lock); return count; } static ssize_t set_pwm(struct device *dev, struct device_attribute *attr, @@ -626,11 +626,11 @@ static ssize_t set_pwm(struct device *dev, struct device_attribute *attr, if (val < 0 || val > 255) return -EINVAL; - down(&data->update_lock); + mutex_lock(&data->update_lock); data->manual_pwm_ctl[nr] = val; if (data->fan_main_ctrl & (1 << nr)) it87_write_value(client, IT87_REG_PWM(nr), PWM_TO_REG(data->manual_pwm_ctl[nr])); - up(&data->update_lock); + mutex_unlock(&data->update_lock); return count; } @@ -776,7 +776,7 @@ static int it87_detect(struct i2c_adapter *adapter, int address, int kind) new_client = &data->client; if (is_isa) - init_MUTEX(&data->lock); + mutex_init(&data->lock); i2c_set_clientdata(new_client, data); new_client->addr = address; new_client->adapter = adapter; @@ -823,7 +823,7 @@ static int it87_detect(struct i2c_adapter *adapter, int address, int kind) strlcpy(new_client->name, name, I2C_NAME_SIZE); data->type = kind; data->valid = 0; - init_MUTEX(&data->update_lock); + mutex_init(&data->update_lock); /* Tell the I2C layer a new client has arrived */ if ((err = i2c_attach_client(new_client))) @@ -950,10 +950,10 @@ static int it87_read_value(struct i2c_client *client, u8 reg) int res; if (i2c_is_isa_client(client)) { - down(&data->lock); + mutex_lock(&data->lock); outb_p(reg, client->addr + IT87_ADDR_REG_OFFSET); res = inb_p(client->addr + IT87_DATA_REG_OFFSET); - up(&data->lock); + mutex_unlock(&data->lock); return res; } else return i2c_smbus_read_byte_data(client, reg); @@ -969,10 +969,10 @@ static int it87_write_value(struct i2c_client *client, u8 reg, u8 value) struct it87_data *data = i2c_get_clientdata(client); if (i2c_is_isa_client(client)) { - down(&data->lock); + 
mutex_lock(&data->lock); outb_p(reg, client->addr + IT87_ADDR_REG_OFFSET); outb_p(value, client->addr + IT87_DATA_REG_OFFSET); - up(&data->lock); + mutex_unlock(&data->lock); return 0; } else return i2c_smbus_write_byte_data(client, reg, value); @@ -1098,7 +1098,7 @@ static struct it87_data *it87_update_device(struct device *dev) struct it87_data *data = i2c_get_clientdata(client); int i; - down(&data->update_lock); + mutex_lock(&data->update_lock); if (time_after(jiffies, data->last_updated + HZ + HZ / 2) || !data->valid) { @@ -1160,7 +1160,7 @@ static struct it87_data *it87_update_device(struct device *dev) data->valid = 1; } - up(&data->update_lock); + mutex_unlock(&data->update_lock); return data; } diff --git a/drivers/hwmon/lm63.c b/drivers/hwmon/lm63.c index 6b1aa7ef552..071f0fc6ade 100644 --- a/drivers/hwmon/lm63.c +++ b/drivers/hwmon/lm63.c @@ -45,6 +45,7 @@ #include <linux/hwmon-sysfs.h> #include <linux/hwmon.h> #include <linux/err.h> +#include <linux/mutex.h> /* * Addresses to scan @@ -153,7 +154,7 @@ static struct i2c_driver lm63_driver = { struct lm63_data { struct i2c_client client; struct class_device *class_dev; - struct semaphore update_lock; + struct mutex update_lock; char valid; /* zero until following fields are valid */ unsigned long last_updated; /* in jiffies */ @@ -192,13 +193,13 @@ static ssize_t set_fan(struct device *dev, struct device_attribute *dummy, struct lm63_data *data = i2c_get_clientdata(client); unsigned long val = simple_strtoul(buf, NULL, 10); - down(&data->update_lock); + mutex_lock(&data->update_lock); data->fan[1] = FAN_TO_REG(val); i2c_smbus_write_byte_data(client, LM63_REG_TACH_LIMIT_LSB, data->fan[1] & 0xFF); i2c_smbus_write_byte_data(client, LM63_REG_TACH_LIMIT_MSB, data->fan[1] >> 8); - up(&data->update_lock); + mutex_unlock(&data->update_lock); return count; } @@ -222,12 +223,12 @@ static ssize_t set_pwm1(struct device *dev, struct device_attribute *dummy, return -EPERM; val = simple_strtoul(buf, NULL, 10); - down(&data->update_lock); + mutex_lock(&data->update_lock); data->pwm1_value = val <= 0 ? 0 : val >= 255 ? 
2 * data->pwm1_freq : (val * data->pwm1_freq * 2 + 127) / 255; i2c_smbus_write_byte_data(client, LM63_REG_PWM_VALUE, data->pwm1_value); - up(&data->update_lock); + mutex_unlock(&data->update_lock); return count; } @@ -253,10 +254,10 @@ static ssize_t set_temp8(struct device *dev, struct device_attribute *dummy, struct lm63_data *data = i2c_get_clientdata(client); long val = simple_strtol(buf, NULL, 10); - down(&data->update_lock); + mutex_lock(&data->update_lock); data->temp8[1] = TEMP8_TO_REG(val); i2c_smbus_write_byte_data(client, LM63_REG_LOCAL_HIGH, data->temp8[1]); - up(&data->update_lock); + mutex_unlock(&data->update_lock); return count; } @@ -284,13 +285,13 @@ static ssize_t set_temp11(struct device *dev, struct device_attribute *devattr, long val = simple_strtol(buf, NULL, 10); int nr = attr->index; - down(&data->update_lock); + mutex_lock(&data->update_lock); data->temp11[nr] = TEMP11_TO_REG(val); i2c_smbus_write_byte_data(client, reg[(nr - 1) * 2], data->temp11[nr] >> 8); i2c_smbus_write_byte_data(client, reg[(nr - 1) * 2 + 1], data->temp11[nr] & 0xff); - up(&data->update_lock); + mutex_unlock(&data->update_lock); return count; } @@ -314,11 +315,11 @@ static ssize_t set_temp2_crit_hyst(struct device *dev, struct device_attribute * long val = simple_strtol(buf, NULL, 10); long hyst; - down(&data->update_lock); + mutex_lock(&data->update_lock); hyst = TEMP8_FROM_REG(data->temp8[2]) - val; i2c_smbus_write_byte_data(client, LM63_REG_REMOTE_TCRIT_HYST, HYST_TO_REG(hyst)); - up(&data->update_lock); + mutex_unlock(&data->update_lock); return count; } @@ -427,7 +428,7 @@ static int lm63_detect(struct i2c_adapter *adapter, int address, int kind) strlcpy(new_client->name, "lm63", I2C_NAME_SIZE); data->valid = 0; - init_MUTEX(&data->update_lock); + mutex_init(&data->update_lock); /* Tell the I2C layer a new client has arrived */ if ((err = i2c_attach_client(new_client))) @@ -530,7 +531,7 @@ static struct lm63_data *lm63_update_device(struct device *dev) struct i2c_client *client = to_i2c_client(dev); struct lm63_data *data = i2c_get_clientdata(client); - down(&data->update_lock); + mutex_lock(&data->update_lock); if (time_after(jiffies, data->last_updated + HZ) || !data->valid) { if (data->config & 0x04) { /* tachometer enabled */ @@ -582,7 +583,7 @@ static struct lm63_data *lm63_update_device(struct device *dev) data->valid = 1; } - up(&data->update_lock); + mutex_unlock(&data->update_lock); return data; } diff --git a/drivers/hwmon/lm75.c b/drivers/hwmon/lm75.c index 74ca2c8c61c..fc25b90ec24 100644 --- a/drivers/hwmon/lm75.c +++ b/drivers/hwmon/lm75.c @@ -25,6 +25,7 @@ #include <linux/i2c.h> #include <linux/hwmon.h> #include <linux/err.h> +#include <linux/mutex.h> #include "lm75.h" @@ -47,7 +48,7 @@ I2C_CLIENT_INSMOD_1(lm75); struct lm75_data { struct i2c_client client; struct class_device *class_dev; - struct semaphore update_lock; + struct mutex update_lock; char valid; /* !=0 if following fields are valid */ unsigned long last_updated; /* In jiffies */ u16 temp_input; /* Register values */ @@ -91,10 +92,10 @@ static ssize_t set_##value(struct device *dev, struct device_attribute *attr, co struct lm75_data *data = i2c_get_clientdata(client); \ int temp = simple_strtoul(buf, NULL, 10); \ \ - down(&data->update_lock); \ + mutex_lock(&data->update_lock); \ data->value = LM75_TEMP_TO_REG(temp); \ lm75_write_value(client, reg, data->value); \ - up(&data->update_lock); \ + mutex_unlock(&data->update_lock); \ return count; \ } set(temp_max, LM75_REG_TEMP_OS); @@ -188,7 +189,7 @@ static int 
lm75_detect(struct i2c_adapter *adapter, int address, int kind) /* Fill in the remaining client fields and put it into the global list */ strlcpy(new_client->name, name, I2C_NAME_SIZE); data->valid = 0; - init_MUTEX(&data->update_lock); + mutex_init(&data->update_lock); /* Tell the I2C layer a new client has arrived */ if ((err = i2c_attach_client(new_client))) @@ -264,7 +265,7 @@ static struct lm75_data *lm75_update_device(struct device *dev) struct i2c_client *client = to_i2c_client(dev); struct lm75_data *data = i2c_get_clientdata(client); - down(&data->update_lock); + mutex_lock(&data->update_lock); if (time_after(jiffies, data->last_updated + HZ + HZ / 2) || !data->valid) { @@ -277,7 +278,7 @@ static struct lm75_data *lm75_update_device(struct device *dev) data->valid = 1; } - up(&data->update_lock); + mutex_unlock(&data->update_lock); return data; } diff --git a/drivers/hwmon/lm77.c b/drivers/hwmon/lm77.c index df9e02aaa70..459cc977380 100644 --- a/drivers/hwmon/lm77.c +++ b/drivers/hwmon/lm77.c @@ -32,6 +32,7 @@ #include <linux/i2c.h> #include <linux/hwmon.h> #include <linux/err.h> +#include <linux/mutex.h> /* Addresses to scan */ static unsigned short normal_i2c[] = { 0x48, 0x49, 0x4a, 0x4b, I2C_CLIENT_END }; @@ -51,7 +52,7 @@ I2C_CLIENT_INSMOD_1(lm77); struct lm77_data { struct i2c_client client; struct class_device *class_dev; - struct semaphore update_lock; + struct mutex update_lock; char valid; unsigned long last_updated; /* In jiffies */ int temp_input; /* Temperatures */ @@ -139,10 +140,10 @@ static ssize_t set_##value(struct device *dev, struct device_attribute *attr, co struct lm77_data *data = i2c_get_clientdata(client); \ long val = simple_strtoul(buf, NULL, 10); \ \ - down(&data->update_lock); \ + mutex_lock(&data->update_lock); \ data->value = val; \ lm77_write_value(client, reg, LM77_TEMP_TO_REG(data->value)); \ - up(&data->update_lock); \ + mutex_unlock(&data->update_lock); \ return count; \ } @@ -157,11 +158,11 @@ static ssize_t set_temp_crit_hyst(struct device *dev, struct device_attribute *a struct lm77_data *data = i2c_get_clientdata(client); unsigned long val = simple_strtoul(buf, NULL, 10); - down(&data->update_lock); + mutex_lock(&data->update_lock); data->temp_hyst = data->temp_crit - val; lm77_write_value(client, LM77_REG_TEMP_HYST, LM77_TEMP_TO_REG(data->temp_hyst)); - up(&data->update_lock); + mutex_unlock(&data->update_lock); return count; } @@ -173,7 +174,7 @@ static ssize_t set_temp_crit(struct device *dev, struct device_attribute *attr, long val = simple_strtoul(buf, NULL, 10); int oldcrithyst; - down(&data->update_lock); + mutex_lock(&data->update_lock); oldcrithyst = data->temp_crit - data->temp_hyst; data->temp_crit = val; data->temp_hyst = data->temp_crit - oldcrithyst; @@ -181,7 +182,7 @@ static ssize_t set_temp_crit(struct device *dev, struct device_attribute *attr, LM77_TEMP_TO_REG(data->temp_crit)); lm77_write_value(client, LM77_REG_TEMP_HYST, LM77_TEMP_TO_REG(data->temp_hyst)); - up(&data->update_lock); + mutex_unlock(&data->update_lock); return count; } @@ -306,7 +307,7 @@ static int lm77_detect(struct i2c_adapter *adapter, int address, int kind) /* Fill in the remaining client fields and put it into the global list */ strlcpy(new_client->name, name, I2C_NAME_SIZE); data->valid = 0; - init_MUTEX(&data->update_lock); + mutex_init(&data->update_lock); /* Tell the I2C layer a new client has arrived */ if ((err = i2c_attach_client(new_client))) @@ -380,7 +381,7 @@ static struct lm77_data *lm77_update_device(struct device *dev) struct i2c_client 
*client = to_i2c_client(dev); struct lm77_data *data = i2c_get_clientdata(client); - down(&data->update_lock); + mutex_lock(&data->update_lock); if (time_after(jiffies, data->last_updated + HZ + HZ / 2) || !data->valid) { @@ -406,7 +407,7 @@ static struct lm77_data *lm77_update_device(struct device *dev) data->valid = 1; } - up(&data->update_lock); + mutex_unlock(&data->update_lock); return data; } diff --git a/drivers/hwmon/lm78.c b/drivers/hwmon/lm78.c index e404001e20d..94be3d797e6 100644 --- a/drivers/hwmon/lm78.c +++ b/drivers/hwmon/lm78.c @@ -27,6 +27,7 @@ #include <linux/hwmon.h> #include <linux/hwmon-vid.h> #include <linux/err.h> +#include <linux/mutex.h> #include <asm/io.h> /* Addresses to scan */ @@ -131,10 +132,10 @@ static inline int TEMP_FROM_REG(s8 val) struct lm78_data { struct i2c_client client; struct class_device *class_dev; - struct semaphore lock; + struct mutex lock; enum chips type; - struct semaphore update_lock; + struct mutex update_lock; char valid; /* !=0 if following fields are valid */ unsigned long last_updated; /* In jiffies */ @@ -157,8 +158,8 @@ static int lm78_isa_attach_adapter(struct i2c_adapter *adapter); static int lm78_detect(struct i2c_adapter *adapter, int address, int kind); static int lm78_detach_client(struct i2c_client *client); -static int lm78_read_value(struct i2c_client *client, u8 register); -static int lm78_write_value(struct i2c_client *client, u8 register, u8 value); +static int lm78_read_value(struct i2c_client *client, u8 reg); +static int lm78_write_value(struct i2c_client *client, u8 reg, u8 value); static struct lm78_data *lm78_update_device(struct device *dev); static void lm78_init_client(struct i2c_client *client); @@ -207,10 +208,10 @@ static ssize_t set_in_min(struct device *dev, const char *buf, struct lm78_data *data = i2c_get_clientdata(client); unsigned long val = simple_strtoul(buf, NULL, 10); - down(&data->update_lock); + mutex_lock(&data->update_lock); data->in_min[nr] = IN_TO_REG(val); lm78_write_value(client, LM78_REG_IN_MIN(nr), data->in_min[nr]); - up(&data->update_lock); + mutex_unlock(&data->update_lock); return count; } @@ -221,10 +222,10 @@ static ssize_t set_in_max(struct device *dev, const char *buf, struct lm78_data *data = i2c_get_clientdata(client); unsigned long val = simple_strtoul(buf, NULL, 10); - down(&data->update_lock); + mutex_lock(&data->update_lock); data->in_max[nr] = IN_TO_REG(val); lm78_write_value(client, LM78_REG_IN_MAX(nr), data->in_max[nr]); - up(&data->update_lock); + mutex_unlock(&data->update_lock); return count; } @@ -288,10 +289,10 @@ static ssize_t set_temp_over(struct device *dev, struct device_attribute *attr, struct lm78_data *data = i2c_get_clientdata(client); long val = simple_strtol(buf, NULL, 10); - down(&data->update_lock); + mutex_lock(&data->update_lock); data->temp_over = TEMP_TO_REG(val); lm78_write_value(client, LM78_REG_TEMP_OVER, data->temp_over); - up(&data->update_lock); + mutex_unlock(&data->update_lock); return count; } @@ -307,10 +308,10 @@ static ssize_t set_temp_hyst(struct device *dev, struct device_attribute *attr, struct lm78_data *data = i2c_get_clientdata(client); long val = simple_strtol(buf, NULL, 10); - down(&data->update_lock); + mutex_lock(&data->update_lock); data->temp_hyst = TEMP_TO_REG(val); lm78_write_value(client, LM78_REG_TEMP_HYST, data->temp_hyst); - up(&data->update_lock); + mutex_unlock(&data->update_lock); return count; } @@ -342,10 +343,10 @@ static ssize_t set_fan_min(struct device *dev, const char *buf, struct lm78_data *data = 
i2c_get_clientdata(client); unsigned long val = simple_strtoul(buf, NULL, 10); - down(&data->update_lock); + mutex_lock(&data->update_lock); data->fan_min[nr] = FAN_TO_REG(val, DIV_FROM_REG(data->fan_div[nr])); lm78_write_value(client, LM78_REG_FAN_MIN(nr), data->fan_min[nr]); - up(&data->update_lock); + mutex_unlock(&data->update_lock); return count; } @@ -368,7 +369,7 @@ static ssize_t set_fan_div(struct device *dev, const char *buf, unsigned long min; u8 reg; - down(&data->update_lock); + mutex_lock(&data->update_lock); min = FAN_FROM_REG(data->fan_min[nr], DIV_FROM_REG(data->fan_div[nr])); @@ -380,7 +381,7 @@ static ssize_t set_fan_div(struct device *dev, const char *buf, default: dev_err(&client->dev, "fan_div value %ld not " "supported. Choose one of 1, 2, 4 or 8!\n", val); - up(&data->update_lock); + mutex_unlock(&data->update_lock); return -EINVAL; } @@ -398,7 +399,7 @@ static ssize_t set_fan_div(struct device *dev, const char *buf, data->fan_min[nr] = FAN_TO_REG(min, DIV_FROM_REG(data->fan_div[nr])); lm78_write_value(client, LM78_REG_FAN_MIN(nr), data->fan_min[nr]); - up(&data->update_lock); + mutex_unlock(&data->update_lock); return count; } @@ -548,7 +549,7 @@ static int lm78_detect(struct i2c_adapter *adapter, int address, int kind) new_client = &data->client; if (is_isa) - init_MUTEX(&data->lock); + mutex_init(&data->lock); i2c_set_clientdata(new_client, data); new_client->addr = address; new_client->adapter = adapter; @@ -598,7 +599,7 @@ static int lm78_detect(struct i2c_adapter *adapter, int address, int kind) data->type = kind; data->valid = 0; - init_MUTEX(&data->update_lock); + mutex_init(&data->update_lock); /* Tell the I2C layer a new client has arrived */ if ((err = i2c_attach_client(new_client))) @@ -697,10 +698,10 @@ static int lm78_read_value(struct i2c_client *client, u8 reg) int res; if (i2c_is_isa_client(client)) { struct lm78_data *data = i2c_get_clientdata(client); - down(&data->lock); + mutex_lock(&data->lock); outb_p(reg, client->addr + LM78_ADDR_REG_OFFSET); res = inb_p(client->addr + LM78_DATA_REG_OFFSET); - up(&data->lock); + mutex_unlock(&data->lock); return res; } else return i2c_smbus_read_byte_data(client, reg); @@ -717,10 +718,10 @@ static int lm78_write_value(struct i2c_client *client, u8 reg, u8 value) { if (i2c_is_isa_client(client)) { struct lm78_data *data = i2c_get_clientdata(client); - down(&data->lock); + mutex_lock(&data->lock); outb_p(reg, client->addr + LM78_ADDR_REG_OFFSET); outb_p(value, client->addr + LM78_DATA_REG_OFFSET); - up(&data->lock); + mutex_unlock(&data->lock); return 0; } else return i2c_smbus_write_byte_data(client, reg, value); @@ -742,7 +743,7 @@ static struct lm78_data *lm78_update_device(struct device *dev) struct lm78_data *data = i2c_get_clientdata(client); int i; - down(&data->update_lock); + mutex_lock(&data->update_lock); if (time_after(jiffies, data->last_updated + HZ + HZ / 2) || !data->valid) { @@ -786,7 +787,7 @@ static struct lm78_data *lm78_update_device(struct device *dev) data->fan_div[2] = 1; } - up(&data->update_lock); + mutex_unlock(&data->update_lock); return data; } diff --git a/drivers/hwmon/lm80.c b/drivers/hwmon/lm80.c index c9a7cdea7bd..f72120d08c4 100644 --- a/drivers/hwmon/lm80.c +++ b/drivers/hwmon/lm80.c @@ -28,6 +28,7 @@ #include <linux/i2c.h> #include <linux/hwmon.h> #include <linux/err.h> +#include <linux/mutex.h> /* Addresses to scan */ static unsigned short normal_i2c[] = { 0x28, 0x29, 0x2a, 0x2b, 0x2c, @@ -108,7 +109,7 @@ static inline long TEMP_FROM_REG(u16 temp) struct lm80_data { struct 
i2c_client client; struct class_device *class_dev; - struct semaphore update_lock; + struct mutex update_lock; char valid; /* !=0 if following fields are valid */ unsigned long last_updated; /* In jiffies */ @@ -191,10 +192,10 @@ static ssize_t set_in_##suffix(struct device *dev, struct device_attribute *attr struct lm80_data *data = i2c_get_clientdata(client); \ long val = simple_strtol(buf, NULL, 10); \ \ - down(&data->update_lock);\ + mutex_lock(&data->update_lock);\ data->value = IN_TO_REG(val); \ lm80_write_value(client, reg, data->value); \ - up(&data->update_lock);\ + mutex_unlock(&data->update_lock);\ return count; \ } set_in(min0, in_min[0], LM80_REG_IN_MIN(0)); @@ -241,10 +242,10 @@ static ssize_t set_fan_##suffix(struct device *dev, struct device_attribute *att struct lm80_data *data = i2c_get_clientdata(client); \ long val = simple_strtoul(buf, NULL, 10); \ \ - down(&data->update_lock);\ + mutex_lock(&data->update_lock);\ data->value = FAN_TO_REG(val, DIV_FROM_REG(data->div)); \ lm80_write_value(client, reg, data->value); \ - up(&data->update_lock);\ + mutex_unlock(&data->update_lock);\ return count; \ } set_fan(min1, fan_min[0], LM80_REG_FAN_MIN(1), fan_div[0]); @@ -263,7 +264,7 @@ static ssize_t set_fan_div(struct device *dev, const char *buf, u8 reg; /* Save fan_min */ - down(&data->update_lock); + mutex_lock(&data->update_lock); min = FAN_FROM_REG(data->fan_min[nr], DIV_FROM_REG(data->fan_div[nr])); @@ -275,7 +276,7 @@ static ssize_t set_fan_div(struct device *dev, const char *buf, default: dev_err(&client->dev, "fan_div value %ld not " "supported. Choose one of 1, 2, 4 or 8!\n", val); - up(&data->update_lock); + mutex_unlock(&data->update_lock); return -EINVAL; } @@ -286,7 +287,7 @@ static ssize_t set_fan_div(struct device *dev, const char *buf, /* Restore fan_min */ data->fan_min[nr] = FAN_TO_REG(min, DIV_FROM_REG(data->fan_div[nr])); lm80_write_value(client, LM80_REG_FAN_MIN(nr + 1), data->fan_min[nr]); - up(&data->update_lock); + mutex_unlock(&data->update_lock); return count; } @@ -325,10 +326,10 @@ static ssize_t set_temp_##suffix(struct device *dev, struct device_attribute *at struct lm80_data *data = i2c_get_clientdata(client); \ long val = simple_strtoul(buf, NULL, 10); \ \ - down(&data->update_lock); \ + mutex_lock(&data->update_lock); \ data->value = TEMP_LIMIT_TO_REG(val); \ lm80_write_value(client, reg, data->value); \ - up(&data->update_lock); \ + mutex_unlock(&data->update_lock); \ return count; \ } set_temp(hot_max, temp_hot_max, LM80_REG_TEMP_HOT_MAX); @@ -437,7 +438,7 @@ static int lm80_detect(struct i2c_adapter *adapter, int address, int kind) /* Fill in the remaining client fields and put it into the global list */ strlcpy(new_client->name, name, I2C_NAME_SIZE); data->valid = 0; - init_MUTEX(&data->update_lock); + mutex_init(&data->update_lock); /* Tell the I2C layer a new client has arrived */ if ((err = i2c_attach_client(new_client))) @@ -545,7 +546,7 @@ static struct lm80_data *lm80_update_device(struct device *dev) struct lm80_data *data = i2c_get_clientdata(client); int i; - down(&data->update_lock); + mutex_lock(&data->update_lock); if (time_after(jiffies, data->last_updated + 2 * HZ) || !data->valid) { dev_dbg(&client->dev, "Starting lm80 update\n"); @@ -585,7 +586,7 @@ static struct lm80_data *lm80_update_device(struct device *dev) data->valid = 1; } - up(&data->update_lock); + mutex_unlock(&data->update_lock); return data; } diff --git a/drivers/hwmon/lm83.c b/drivers/hwmon/lm83.c index 26dfa9e216c..aac4ec2bf69 100644 --- a/drivers/hwmon/lm83.c 
+++ b/drivers/hwmon/lm83.c @@ -35,6 +35,7 @@ #include <linux/hwmon-sysfs.h> #include <linux/hwmon.h> #include <linux/err.h> +#include <linux/mutex.h> /* * Addresses to scan @@ -139,7 +140,7 @@ static struct i2c_driver lm83_driver = { struct lm83_data { struct i2c_client client; struct class_device *class_dev; - struct semaphore update_lock; + struct mutex update_lock; char valid; /* zero until following fields are valid */ unsigned long last_updated; /* in jiffies */ @@ -171,11 +172,11 @@ static ssize_t set_temp(struct device *dev, struct device_attribute *devattr, long val = simple_strtol(buf, NULL, 10); int nr = attr->index; - down(&data->update_lock); + mutex_lock(&data->update_lock); data->temp[nr] = TEMP_TO_REG(val); i2c_smbus_write_byte_data(client, LM83_REG_W_HIGH[nr - 4], data->temp[nr]); - up(&data->update_lock); + mutex_unlock(&data->update_lock); return count; } @@ -300,7 +301,7 @@ static int lm83_detect(struct i2c_adapter *adapter, int address, int kind) /* We can fill in the remaining client fields */ strlcpy(new_client->name, name, I2C_NAME_SIZE); data->valid = 0; - init_MUTEX(&data->update_lock); + mutex_init(&data->update_lock); /* Tell the I2C layer a new client has arrived */ if ((err = i2c_attach_client(new_client))) @@ -373,7 +374,7 @@ static struct lm83_data *lm83_update_device(struct device *dev) struct i2c_client *client = to_i2c_client(dev); struct lm83_data *data = i2c_get_clientdata(client); - down(&data->update_lock); + mutex_lock(&data->update_lock); if (time_after(jiffies, data->last_updated + HZ * 2) || !data->valid) { int nr; @@ -393,7 +394,7 @@ static struct lm83_data *lm83_update_device(struct device *dev) data->valid = 1; } - up(&data->update_lock); + mutex_unlock(&data->update_lock); return data; } diff --git a/drivers/hwmon/lm85.c b/drivers/hwmon/lm85.c index 7389a012754..342e9663119 100644 --- a/drivers/hwmon/lm85.c +++ b/drivers/hwmon/lm85.c @@ -31,6 +31,7 @@ #include <linux/hwmon.h> #include <linux/hwmon-vid.h> #include <linux/err.h> +#include <linux/mutex.h> /* Addresses to scan */ static unsigned short normal_i2c[] = { 0x2c, 0x2d, 0x2e, I2C_CLIENT_END }; @@ -331,10 +332,10 @@ struct lm85_autofan { struct lm85_data { struct i2c_client client; struct class_device *class_dev; - struct semaphore lock; + struct mutex lock; enum chips type; - struct semaphore update_lock; + struct mutex update_lock; int valid; /* !=0 if following fields are valid */ unsigned long last_reading; /* In jiffies */ unsigned long last_config; /* In jiffies */ @@ -373,8 +374,8 @@ static int lm85_detect(struct i2c_adapter *adapter, int address, int kind); static int lm85_detach_client(struct i2c_client *client); -static int lm85_read_value(struct i2c_client *client, u8 register); -static int lm85_write_value(struct i2c_client *client, u8 register, int value); +static int lm85_read_value(struct i2c_client *client, u8 reg); +static int lm85_write_value(struct i2c_client *client, u8 reg, int value); static struct lm85_data *lm85_update_device(struct device *dev); static void lm85_init_client(struct i2c_client *client); @@ -407,10 +408,10 @@ static ssize_t set_fan_min(struct device *dev, const char *buf, struct lm85_data *data = i2c_get_clientdata(client); long val = simple_strtol(buf, NULL, 10); - down(&data->update_lock); + mutex_lock(&data->update_lock); data->fan_min[nr] = FAN_TO_REG(val); lm85_write_value(client, LM85_REG_FAN_MIN(nr), data->fan_min[nr]); - up(&data->update_lock); + mutex_unlock(&data->update_lock); return count; } @@ -499,10 +500,10 @@ static ssize_t 
set_pwm(struct device *dev, const char *buf, struct lm85_data *data = i2c_get_clientdata(client); long val = simple_strtol(buf, NULL, 10); - down(&data->update_lock); + mutex_lock(&data->update_lock); data->pwm[nr] = PWM_TO_REG(val); lm85_write_value(client, LM85_REG_PWM(nr), data->pwm[nr]); - up(&data->update_lock); + mutex_unlock(&data->update_lock); return count; } static ssize_t show_pwm_enable(struct device *dev, char *buf, int nr) @@ -559,10 +560,10 @@ static ssize_t set_in_min(struct device *dev, const char *buf, struct lm85_data *data = i2c_get_clientdata(client); long val = simple_strtol(buf, NULL, 10); - down(&data->update_lock); + mutex_lock(&data->update_lock); data->in_min[nr] = INS_TO_REG(nr, val); lm85_write_value(client, LM85_REG_IN_MIN(nr), data->in_min[nr]); - up(&data->update_lock); + mutex_unlock(&data->update_lock); return count; } static ssize_t show_in_max(struct device *dev, char *buf, int nr) @@ -577,10 +578,10 @@ static ssize_t set_in_max(struct device *dev, const char *buf, struct lm85_data *data = i2c_get_clientdata(client); long val = simple_strtol(buf, NULL, 10); - down(&data->update_lock); + mutex_lock(&data->update_lock); data->in_max[nr] = INS_TO_REG(nr, val); lm85_write_value(client, LM85_REG_IN_MAX(nr), data->in_max[nr]); - up(&data->update_lock); + mutex_unlock(&data->update_lock); return count; } #define show_in_reg(offset) \ @@ -640,10 +641,10 @@ static ssize_t set_temp_min(struct device *dev, const char *buf, struct lm85_data *data = i2c_get_clientdata(client); long val = simple_strtol(buf, NULL, 10); - down(&data->update_lock); + mutex_lock(&data->update_lock); data->temp_min[nr] = TEMP_TO_REG(val); lm85_write_value(client, LM85_REG_TEMP_MIN(nr), data->temp_min[nr]); - up(&data->update_lock); + mutex_unlock(&data->update_lock); return count; } static ssize_t show_temp_max(struct device *dev, char *buf, int nr) @@ -658,10 +659,10 @@ static ssize_t set_temp_max(struct device *dev, const char *buf, struct lm85_data *data = i2c_get_clientdata(client); long val = simple_strtol(buf, NULL, 10); - down(&data->update_lock); + mutex_lock(&data->update_lock); data->temp_max[nr] = TEMP_TO_REG(val); lm85_write_value(client, LM85_REG_TEMP_MAX(nr), data->temp_max[nr]); - up(&data->update_lock); + mutex_unlock(&data->update_lock); return count; } #define show_temp_reg(offset) \ @@ -713,12 +714,12 @@ static ssize_t set_pwm_auto_channels(struct device *dev, const char *buf, struct lm85_data *data = i2c_get_clientdata(client); long val = simple_strtol(buf, NULL, 10); - down(&data->update_lock); + mutex_lock(&data->update_lock); data->autofan[nr].config = (data->autofan[nr].config & (~0xe0)) | ZONE_TO_REG(val) ; lm85_write_value(client, LM85_REG_AFAN_CONFIG(nr), data->autofan[nr].config); - up(&data->update_lock); + mutex_unlock(&data->update_lock); return count; } static ssize_t show_pwm_auto_pwm_min(struct device *dev, char *buf, int nr) @@ -733,11 +734,11 @@ static ssize_t set_pwm_auto_pwm_min(struct device *dev, const char *buf, struct lm85_data *data = i2c_get_clientdata(client); long val = simple_strtol(buf, NULL, 10); - down(&data->update_lock); + mutex_lock(&data->update_lock); data->autofan[nr].min_pwm = PWM_TO_REG(val); lm85_write_value(client, LM85_REG_AFAN_MINPWM(nr), data->autofan[nr].min_pwm); - up(&data->update_lock); + mutex_unlock(&data->update_lock); return count; } static ssize_t show_pwm_auto_pwm_minctl(struct device *dev, char *buf, int nr) @@ -752,7 +753,7 @@ static ssize_t set_pwm_auto_pwm_minctl(struct device *dev, const char *buf, struct 
lm85_data *data = i2c_get_clientdata(client); long val = simple_strtol(buf, NULL, 10); - down(&data->update_lock); + mutex_lock(&data->update_lock); data->autofan[nr].min_off = val; lm85_write_value(client, LM85_REG_AFAN_SPIKE1, data->smooth[0] | data->syncpwm3 @@ -760,7 +761,7 @@ static ssize_t set_pwm_auto_pwm_minctl(struct device *dev, const char *buf, | (data->autofan[1].min_off ? 0x40 : 0) | (data->autofan[2].min_off ? 0x80 : 0) ); - up(&data->update_lock); + mutex_unlock(&data->update_lock); return count; } static ssize_t show_pwm_auto_pwm_freq(struct device *dev, char *buf, int nr) @@ -775,13 +776,13 @@ static ssize_t set_pwm_auto_pwm_freq(struct device *dev, const char *buf, struct lm85_data *data = i2c_get_clientdata(client); long val = simple_strtol(buf, NULL, 10); - down(&data->update_lock); + mutex_lock(&data->update_lock); data->autofan[nr].freq = FREQ_TO_REG(val); lm85_write_value(client, LM85_REG_AFAN_RANGE(nr), (data->zone[nr].range << 4) | data->autofan[nr].freq ); - up(&data->update_lock); + mutex_unlock(&data->update_lock); return count; } #define pwm_auto(offset) \ @@ -857,7 +858,7 @@ static ssize_t set_temp_auto_temp_off(struct device *dev, const char *buf, int min; long val = simple_strtol(buf, NULL, 10); - down(&data->update_lock); + mutex_lock(&data->update_lock); min = TEMP_FROM_REG(data->zone[nr].limit); data->zone[nr].off_desired = TEMP_TO_REG(val); data->zone[nr].hyst = HYST_TO_REG(min - val); @@ -871,7 +872,7 @@ static ssize_t set_temp_auto_temp_off(struct device *dev, const char *buf, (data->zone[2].hyst << 4) ); } - up(&data->update_lock); + mutex_unlock(&data->update_lock); return count; } static ssize_t show_temp_auto_temp_min(struct device *dev, char *buf, int nr) @@ -886,7 +887,7 @@ static ssize_t set_temp_auto_temp_min(struct device *dev, const char *buf, struct lm85_data *data = i2c_get_clientdata(client); long val = simple_strtol(buf, NULL, 10); - down(&data->update_lock); + mutex_lock(&data->update_lock); data->zone[nr].limit = TEMP_TO_REG(val); lm85_write_value(client, LM85_REG_AFAN_LIMIT(nr), data->zone[nr].limit); @@ -913,7 +914,7 @@ static ssize_t set_temp_auto_temp_min(struct device *dev, const char *buf, (data->zone[2].hyst << 4) ); } - up(&data->update_lock); + mutex_unlock(&data->update_lock); return count; } static ssize_t show_temp_auto_temp_max(struct device *dev, char *buf, int nr) @@ -930,7 +931,7 @@ static ssize_t set_temp_auto_temp_max(struct device *dev, const char *buf, int min; long val = simple_strtol(buf, NULL, 10); - down(&data->update_lock); + mutex_lock(&data->update_lock); min = TEMP_FROM_REG(data->zone[nr].limit); data->zone[nr].max_desired = TEMP_TO_REG(val); data->zone[nr].range = RANGE_TO_REG( @@ -938,7 +939,7 @@ static ssize_t set_temp_auto_temp_max(struct device *dev, const char *buf, lm85_write_value(client, LM85_REG_AFAN_RANGE(nr), ((data->zone[nr].range & 0x0f) << 4) | (data->autofan[nr].freq & 0x07)); - up(&data->update_lock); + mutex_unlock(&data->update_lock); return count; } static ssize_t show_temp_auto_temp_crit(struct device *dev, char *buf, int nr) @@ -953,11 +954,11 @@ static ssize_t set_temp_auto_temp_crit(struct device *dev, const char *buf, struct lm85_data *data = i2c_get_clientdata(client); long val = simple_strtol(buf, NULL, 10); - down(&data->update_lock); + mutex_lock(&data->update_lock); data->zone[nr].critical = TEMP_TO_REG(val); lm85_write_value(client, LM85_REG_AFAN_CRITICAL(nr), data->zone[nr].critical); - up(&data->update_lock); + mutex_unlock(&data->update_lock); return count; } #define 
temp_auto(offset) \ @@ -1149,7 +1150,7 @@ static int lm85_detect(struct i2c_adapter *adapter, int address, /* Fill in the remaining client fields */ data->type = kind; data->valid = 0; - init_MUTEX(&data->update_lock); + mutex_init(&data->update_lock); /* Tell the I2C layer a new client has arrived */ if ((err = i2c_attach_client(new_client))) @@ -1368,7 +1369,7 @@ static struct lm85_data *lm85_update_device(struct device *dev) struct lm85_data *data = i2c_get_clientdata(client); int i; - down(&data->update_lock); + mutex_lock(&data->update_lock); if ( !data->valid || time_after(jiffies, data->last_reading + LM85_DATA_INTERVAL) ) { @@ -1571,7 +1572,7 @@ static struct lm85_data *lm85_update_device(struct device *dev) data->valid = 1; - up(&data->update_lock); + mutex_unlock(&data->update_lock); return data; } diff --git a/drivers/hwmon/lm87.c b/drivers/hwmon/lm87.c index 6ba34c302d8..e229daf666d 100644 --- a/drivers/hwmon/lm87.c +++ b/drivers/hwmon/lm87.c @@ -60,6 +60,7 @@ #include <linux/hwmon.h> #include <linux/hwmon-vid.h> #include <linux/err.h> +#include <linux/mutex.h> /* * Addresses to scan @@ -176,7 +177,7 @@ static struct i2c_driver lm87_driver = { struct lm87_data { struct i2c_client client; struct class_device *class_dev; - struct semaphore update_lock; + struct mutex update_lock; char valid; /* zero until following fields are valid */ unsigned long last_updated; /* In jiffies */ @@ -253,11 +254,11 @@ static void set_in_min(struct device *dev, const char *buf, int nr) struct lm87_data *data = i2c_get_clientdata(client); long val = simple_strtol(buf, NULL, 10); - down(&data->update_lock); + mutex_lock(&data->update_lock); data->in_min[nr] = IN_TO_REG(val, data->in_scale[nr]); lm87_write_value(client, nr<6 ? LM87_REG_IN_MIN(nr) : LM87_REG_AIN_MIN(nr-6), data->in_min[nr]); - up(&data->update_lock); + mutex_unlock(&data->update_lock); } static void set_in_max(struct device *dev, const char *buf, int nr) @@ -266,11 +267,11 @@ static void set_in_max(struct device *dev, const char *buf, int nr) struct lm87_data *data = i2c_get_clientdata(client); long val = simple_strtol(buf, NULL, 10); - down(&data->update_lock); + mutex_lock(&data->update_lock); data->in_max[nr] = IN_TO_REG(val, data->in_scale[nr]); lm87_write_value(client, nr<6 ? 
LM87_REG_IN_MAX(nr) : LM87_REG_AIN_MAX(nr-6), data->in_max[nr]); - up(&data->update_lock); + mutex_unlock(&data->update_lock); } #define set_in(offset) \ @@ -327,10 +328,10 @@ static void set_temp_low(struct device *dev, const char *buf, int nr) struct lm87_data *data = i2c_get_clientdata(client); long val = simple_strtol(buf, NULL, 10); - down(&data->update_lock); + mutex_lock(&data->update_lock); data->temp_low[nr] = TEMP_TO_REG(val); lm87_write_value(client, LM87_REG_TEMP_LOW[nr], data->temp_low[nr]); - up(&data->update_lock); + mutex_unlock(&data->update_lock); } static void set_temp_high(struct device *dev, const char *buf, int nr) @@ -339,10 +340,10 @@ static void set_temp_high(struct device *dev, const char *buf, int nr) struct lm87_data *data = i2c_get_clientdata(client); long val = simple_strtol(buf, NULL, 10); - down(&data->update_lock); + mutex_lock(&data->update_lock); data->temp_high[nr] = TEMP_TO_REG(val); lm87_write_value(client, LM87_REG_TEMP_HIGH[nr], data->temp_high[nr]); - up(&data->update_lock); + mutex_unlock(&data->update_lock); } #define set_temp(offset) \ @@ -411,11 +412,11 @@ static void set_fan_min(struct device *dev, const char *buf, int nr) struct lm87_data *data = i2c_get_clientdata(client); long val = simple_strtol(buf, NULL, 10); - down(&data->update_lock); + mutex_lock(&data->update_lock); data->fan_min[nr] = FAN_TO_REG(val, FAN_DIV_FROM_REG(data->fan_div[nr])); lm87_write_value(client, LM87_REG_FAN_MIN(nr), data->fan_min[nr]); - up(&data->update_lock); + mutex_unlock(&data->update_lock); } /* Note: we save and restore the fan minimum here, because its value is @@ -431,7 +432,7 @@ static ssize_t set_fan_div(struct device *dev, const char *buf, unsigned long min; u8 reg; - down(&data->update_lock); + mutex_lock(&data->update_lock); min = FAN_FROM_REG(data->fan_min[nr], FAN_DIV_FROM_REG(data->fan_div[nr])); @@ -441,7 +442,7 @@ static ssize_t set_fan_div(struct device *dev, const char *buf, case 4: data->fan_div[nr] = 2; break; case 8: data->fan_div[nr] = 3; break; default: - up(&data->update_lock); + mutex_unlock(&data->update_lock); return -EINVAL; } @@ -459,7 +460,7 @@ static ssize_t set_fan_div(struct device *dev, const char *buf, data->fan_min[nr] = FAN_TO_REG(min, val); lm87_write_value(client, LM87_REG_FAN_MIN(nr), data->fan_min[nr]); - up(&data->update_lock); + mutex_unlock(&data->update_lock); return count; } @@ -522,10 +523,10 @@ static ssize_t set_aout(struct device *dev, struct device_attribute *attr, const struct lm87_data *data = i2c_get_clientdata(client); long val = simple_strtol(buf, NULL, 10); - down(&data->update_lock); + mutex_lock(&data->update_lock); data->aout = AOUT_TO_REG(val); lm87_write_value(client, LM87_REG_AOUT, data->aout); - up(&data->update_lock); + mutex_unlock(&data->update_lock); return count; } static DEVICE_ATTR(aout_output, S_IRUGO | S_IWUSR, show_aout, set_aout); @@ -589,7 +590,7 @@ static int lm87_detect(struct i2c_adapter *adapter, int address, int kind) /* We can fill in the remaining client fields */ strlcpy(new_client->name, "lm87", I2C_NAME_SIZE); data->valid = 0; - init_MUTEX(&data->update_lock); + mutex_init(&data->update_lock); /* Tell the I2C layer a new client has arrived */ if ((err = i2c_attach_client(new_client))) @@ -744,7 +745,7 @@ static struct lm87_data *lm87_update_device(struct device *dev) struct i2c_client *client = to_i2c_client(dev); struct lm87_data *data = i2c_get_clientdata(client); - down(&data->update_lock); + mutex_lock(&data->update_lock); if (time_after(jiffies, data->last_updated + HZ) || 
!data->valid) { int i, j; @@ -813,7 +814,7 @@ static struct lm87_data *lm87_update_device(struct device *dev) data->valid = 1; } - up(&data->update_lock); + mutex_unlock(&data->update_lock); return data; } diff --git a/drivers/hwmon/lm90.c b/drivers/hwmon/lm90.c index 5679464447c..d9eeaf7585b 100644 --- a/drivers/hwmon/lm90.c +++ b/drivers/hwmon/lm90.c @@ -78,6 +78,7 @@ #include <linux/hwmon-sysfs.h> #include <linux/hwmon.h> #include <linux/err.h> +#include <linux/mutex.h> /* * Addresses to scan @@ -201,7 +202,7 @@ static struct i2c_driver lm90_driver = { struct lm90_data { struct i2c_client client; struct class_device *class_dev; - struct semaphore update_lock; + struct mutex update_lock; char valid; /* zero until following fields are valid */ unsigned long last_updated; /* in jiffies */ int kind; @@ -247,13 +248,13 @@ static ssize_t set_temp8(struct device *dev, struct device_attribute *devattr, long val = simple_strtol(buf, NULL, 10); int nr = attr->index; - down(&data->update_lock); + mutex_lock(&data->update_lock); if (data->kind == adt7461) data->temp8[nr] = TEMP1_TO_REG_ADT7461(val); else data->temp8[nr] = TEMP1_TO_REG(val); i2c_smbus_write_byte_data(client, reg[nr - 1], data->temp8[nr]); - up(&data->update_lock); + mutex_unlock(&data->update_lock); return count; } @@ -281,7 +282,7 @@ static ssize_t set_temp11(struct device *dev, struct device_attribute *devattr, long val = simple_strtol(buf, NULL, 10); int nr = attr->index; - down(&data->update_lock); + mutex_lock(&data->update_lock); if (data->kind == adt7461) data->temp11[nr] = TEMP2_TO_REG_ADT7461(val); else @@ -290,7 +291,7 @@ static ssize_t set_temp11(struct device *dev, struct device_attribute *devattr, data->temp11[nr] >> 8); i2c_smbus_write_byte_data(client, reg[(nr - 1) * 2 + 1], data->temp11[nr] & 0xff); - up(&data->update_lock); + mutex_unlock(&data->update_lock); return count; } @@ -311,11 +312,11 @@ static ssize_t set_temphyst(struct device *dev, struct device_attribute *dummy, long val = simple_strtol(buf, NULL, 10); long hyst; - down(&data->update_lock); + mutex_lock(&data->update_lock); hyst = TEMP1_FROM_REG(data->temp8[3]) - val; i2c_smbus_write_byte_data(client, LM90_REG_W_TCRIT_HYST, HYST_TO_REG(hyst)); - up(&data->update_lock); + mutex_unlock(&data->update_lock); return count; } @@ -558,7 +559,7 @@ static int lm90_detect(struct i2c_adapter *adapter, int address, int kind) strlcpy(new_client->name, name, I2C_NAME_SIZE); data->valid = 0; data->kind = kind; - init_MUTEX(&data->update_lock); + mutex_init(&data->update_lock); /* Tell the I2C layer a new client has arrived */ if ((err = i2c_attach_client(new_client))) @@ -646,7 +647,7 @@ static struct lm90_data *lm90_update_device(struct device *dev) struct i2c_client *client = to_i2c_client(dev); struct lm90_data *data = i2c_get_clientdata(client); - down(&data->update_lock); + mutex_lock(&data->update_lock); if (time_after(jiffies, data->last_updated + HZ * 2) || !data->valid) { u8 oldh, newh, l; @@ -692,7 +693,7 @@ static struct lm90_data *lm90_update_device(struct device *dev) data->valid = 1; } - up(&data->update_lock); + mutex_unlock(&data->update_lock); return data; } diff --git a/drivers/hwmon/lm92.c b/drivers/hwmon/lm92.c index b0c4cb730a7..197f77226dc 100644 --- a/drivers/hwmon/lm92.c +++ b/drivers/hwmon/lm92.c @@ -46,6 +46,7 @@ #include <linux/i2c.h> #include <linux/hwmon.h> #include <linux/err.h> +#include <linux/mutex.h> /* The LM92 and MAX6635 have 2 two-state pins for address selection, resulting in 4 possible addresses. 
*/ @@ -96,7 +97,7 @@ static struct i2c_driver lm92_driver; struct lm92_data { struct i2c_client client; struct class_device *class_dev; - struct semaphore update_lock; + struct mutex update_lock; char valid; /* zero until following fields are valid */ unsigned long last_updated; /* in jiffies */ @@ -114,7 +115,7 @@ static struct lm92_data *lm92_update_device(struct device *dev) struct i2c_client *client = to_i2c_client(dev); struct lm92_data *data = i2c_get_clientdata(client); - down(&data->update_lock); + mutex_lock(&data->update_lock); if (time_after(jiffies, data->last_updated + HZ) || !data->valid) { @@ -134,7 +135,7 @@ static struct lm92_data *lm92_update_device(struct device *dev) data->valid = 1; } - up(&data->update_lock); + mutex_unlock(&data->update_lock); return data; } @@ -158,10 +159,10 @@ static ssize_t set_##value(struct device *dev, struct device_attribute *attr, co struct lm92_data *data = i2c_get_clientdata(client); \ long val = simple_strtol(buf, NULL, 10); \ \ - down(&data->update_lock); \ + mutex_lock(&data->update_lock); \ data->value = TEMP_TO_REG(val); \ i2c_smbus_write_word_data(client, reg, swab16(data->value)); \ - up(&data->update_lock); \ + mutex_unlock(&data->update_lock); \ return count; \ } set_temp(temp1_crit, LM92_REG_TEMP_CRIT); @@ -194,11 +195,11 @@ static ssize_t set_temp1_crit_hyst(struct device *dev, struct device_attribute * struct lm92_data *data = i2c_get_clientdata(client); long val = simple_strtol(buf, NULL, 10); - down(&data->update_lock); + mutex_lock(&data->update_lock); data->temp1_hyst = TEMP_FROM_REG(data->temp1_crit) - val; i2c_smbus_write_word_data(client, LM92_REG_TEMP_HYST, swab16(TEMP_TO_REG(data->temp1_hyst))); - up(&data->update_lock); + mutex_unlock(&data->update_lock); return count; } @@ -348,7 +349,7 @@ static int lm92_detect(struct i2c_adapter *adapter, int address, int kind) /* Fill in the remaining client fields */ strlcpy(new_client->name, name, I2C_NAME_SIZE); data->valid = 0; - init_MUTEX(&data->update_lock); + mutex_init(&data->update_lock); /* Tell the i2c subsystem a new client has arrived */ if ((err = i2c_attach_client(new_client))) diff --git a/drivers/hwmon/max1619.c b/drivers/hwmon/max1619.c index 3abe330b22c..b4135b5971f 100644 --- a/drivers/hwmon/max1619.c +++ b/drivers/hwmon/max1619.c @@ -33,6 +33,7 @@ #include <linux/i2c.h> #include <linux/hwmon.h> #include <linux/err.h> +#include <linux/mutex.h> static unsigned short normal_i2c[] = { 0x18, 0x19, 0x1a, 0x29, 0x2a, 0x2b, @@ -104,7 +105,7 @@ static struct i2c_driver max1619_driver = { struct max1619_data { struct i2c_client client; struct class_device *class_dev; - struct semaphore update_lock; + struct mutex update_lock; char valid; /* zero until following fields are valid */ unsigned long last_updated; /* in jiffies */ @@ -141,10 +142,10 @@ static ssize_t set_##value(struct device *dev, struct device_attribute *attr, co struct max1619_data *data = i2c_get_clientdata(client); \ long val = simple_strtol(buf, NULL, 10); \ \ - down(&data->update_lock); \ + mutex_lock(&data->update_lock); \ data->value = TEMP_TO_REG(val); \ i2c_smbus_write_byte_data(client, reg, data->value); \ - up(&data->update_lock); \ + mutex_unlock(&data->update_lock); \ return count; \ } @@ -262,7 +263,7 @@ static int max1619_detect(struct i2c_adapter *adapter, int address, int kind) /* We can fill in the remaining client fields */ strlcpy(new_client->name, name, I2C_NAME_SIZE); data->valid = 0; - init_MUTEX(&data->update_lock); + mutex_init(&data->update_lock); /* Tell the I2C layer a new 
client has arrived */ if ((err = i2c_attach_client(new_client))) @@ -330,7 +331,7 @@ static struct max1619_data *max1619_update_device(struct device *dev) struct i2c_client *client = to_i2c_client(dev); struct max1619_data *data = i2c_get_clientdata(client); - down(&data->update_lock); + mutex_lock(&data->update_lock); if (time_after(jiffies, data->last_updated + HZ * 2) || !data->valid) { dev_dbg(&client->dev, "Updating max1619 data.\n"); @@ -353,7 +354,7 @@ static struct max1619_data *max1619_update_device(struct device *dev) data->valid = 1; } - up(&data->update_lock); + mutex_unlock(&data->update_lock); return data; } diff --git a/drivers/hwmon/pc87360.c b/drivers/hwmon/pc87360.c index f161e88e3bb..ae05e483a77 100644 --- a/drivers/hwmon/pc87360.c +++ b/drivers/hwmon/pc87360.c @@ -43,6 +43,7 @@ #include <linux/hwmon-sysfs.h> #include <linux/hwmon-vid.h> #include <linux/err.h> +#include <linux/mutex.h> #include <asm/io.h> static u8 devid; @@ -183,8 +184,8 @@ static inline u8 PWM_TO_REG(int val, int inv) struct pc87360_data { struct i2c_client client; struct class_device *class_dev; - struct semaphore lock; - struct semaphore update_lock; + struct mutex lock; + struct mutex update_lock; char valid; /* !=0 if following fields are valid */ unsigned long last_updated; /* In jiffies */ @@ -283,7 +284,7 @@ static ssize_t set_fan_min(struct device *dev, struct device_attribute *devattr, struct pc87360_data *data = i2c_get_clientdata(client); long fan_min = simple_strtol(buf, NULL, 10); - down(&data->update_lock); + mutex_lock(&data->update_lock); fan_min = FAN_TO_REG(fan_min, FAN_DIV_FROM_REG(data->fan_status[attr->index])); /* If it wouldn't fit, change clock divisor */ @@ -300,23 +301,31 @@ static ssize_t set_fan_min(struct device *dev, struct device_attribute *devattr, /* Write new divider, preserve alarm bits */ pc87360_write_value(data, LD_FAN, NO_BANK, PC87360_REG_FAN_STATUS(attr->index), data->fan_status[attr->index] & 0xF9); - up(&data->update_lock); + mutex_unlock(&data->update_lock); return count; } -#define show_and_set_fan(offset) \ -static SENSOR_DEVICE_ATTR(fan##offset##_input, S_IRUGO, \ - show_fan_input, NULL, offset-1); \ -static SENSOR_DEVICE_ATTR(fan##offset##_min, S_IWUSR | S_IRUGO, \ - show_fan_min, set_fan_min, offset-1); \ -static SENSOR_DEVICE_ATTR(fan##offset##_div, S_IRUGO, \ - show_fan_div, NULL, offset-1); \ -static SENSOR_DEVICE_ATTR(fan##offset##_status, S_IRUGO, \ - show_fan_status, NULL, offset-1); -show_and_set_fan(1) -show_and_set_fan(2) -show_and_set_fan(3) +static struct sensor_device_attribute fan_input[] = { + SENSOR_ATTR(fan1_input, S_IRUGO, show_fan_input, NULL, 0), + SENSOR_ATTR(fan2_input, S_IRUGO, show_fan_input, NULL, 1), + SENSOR_ATTR(fan3_input, S_IRUGO, show_fan_input, NULL, 2), +}; +static struct sensor_device_attribute fan_status[] = { + SENSOR_ATTR(fan1_status, S_IRUGO, show_fan_status, NULL, 0), + SENSOR_ATTR(fan2_status, S_IRUGO, show_fan_status, NULL, 1), + SENSOR_ATTR(fan3_status, S_IRUGO, show_fan_status, NULL, 2), +}; +static struct sensor_device_attribute fan_div[] = { + SENSOR_ATTR(fan1_div, S_IRUGO, show_fan_div, NULL, 0), + SENSOR_ATTR(fan2_div, S_IRUGO, show_fan_div, NULL, 1), + SENSOR_ATTR(fan3_div, S_IRUGO, show_fan_div, NULL, 2), +}; +static struct sensor_device_attribute fan_min[] = { + SENSOR_ATTR(fan1_min, S_IWUSR | S_IRUGO, show_fan_min, set_fan_min, 0), + SENSOR_ATTR(fan2_min, S_IWUSR | S_IRUGO, show_fan_min, set_fan_min, 1), + SENSOR_ATTR(fan3_min, S_IWUSR | S_IRUGO, show_fan_min, set_fan_min, 2), +}; static ssize_t 
show_pwm(struct device *dev, struct device_attribute *devattr, char *buf) { @@ -335,21 +344,20 @@ static ssize_t set_pwm(struct device *dev, struct device_attribute *devattr, con struct pc87360_data *data = i2c_get_clientdata(client); long val = simple_strtol(buf, NULL, 10); - down(&data->update_lock); + mutex_lock(&data->update_lock); data->pwm[attr->index] = PWM_TO_REG(val, FAN_CONFIG_INVERT(data->fan_conf, attr->index)); pc87360_write_value(data, LD_FAN, NO_BANK, PC87360_REG_PWM(attr->index), data->pwm[attr->index]); - up(&data->update_lock); + mutex_unlock(&data->update_lock); return count; } -#define show_and_set_pwm(offset) \ -static SENSOR_DEVICE_ATTR(pwm##offset, S_IWUSR | S_IRUGO, \ - show_pwm, set_pwm, offset-1); -show_and_set_pwm(1) -show_and_set_pwm(2) -show_and_set_pwm(3) +static struct sensor_device_attribute pwm[] = { + SENSOR_ATTR(pwm1, S_IWUSR | S_IRUGO, show_pwm, set_pwm, 0), + SENSOR_ATTR(pwm2, S_IWUSR | S_IRUGO, show_pwm, set_pwm, 1), + SENSOR_ATTR(pwm3, S_IWUSR | S_IRUGO, show_pwm, set_pwm, 2), +}; static ssize_t show_in_input(struct device *dev, struct device_attribute *devattr, char *buf) { @@ -386,11 +394,11 @@ static ssize_t set_in_min(struct device *dev, struct device_attribute *devattr, struct pc87360_data *data = i2c_get_clientdata(client); long val = simple_strtol(buf, NULL, 10); - down(&data->update_lock); + mutex_lock(&data->update_lock); data->in_min[attr->index] = IN_TO_REG(val, data->in_vref); pc87360_write_value(data, LD_IN, attr->index, PC87365_REG_IN_MIN, data->in_min[attr->index]); - up(&data->update_lock); + mutex_unlock(&data->update_lock); return count; } static ssize_t set_in_max(struct device *dev, struct device_attribute *devattr, const char *buf, @@ -401,35 +409,67 @@ static ssize_t set_in_max(struct device *dev, struct device_attribute *devattr, struct pc87360_data *data = i2c_get_clientdata(client); long val = simple_strtol(buf, NULL, 10); - down(&data->update_lock); + mutex_lock(&data->update_lock); data->in_max[attr->index] = IN_TO_REG(val, data->in_vref); pc87360_write_value(data, LD_IN, attr->index, PC87365_REG_IN_MAX, data->in_max[attr->index]); - up(&data->update_lock); + mutex_unlock(&data->update_lock); return count; } -#define show_and_set_in(offset) \ -static SENSOR_DEVICE_ATTR(in##offset##_input, S_IRUGO, \ - show_in_input, NULL, offset); \ -static SENSOR_DEVICE_ATTR(in##offset##_min, S_IWUSR | S_IRUGO, \ - show_in_min, set_in_min, offset); \ -static SENSOR_DEVICE_ATTR(in##offset##_max, S_IWUSR | S_IRUGO, \ - show_in_max, set_in_max, offset); \ -static SENSOR_DEVICE_ATTR(in##offset##_status, S_IRUGO, \ - show_in_status, NULL, offset); -show_and_set_in(0) -show_and_set_in(1) -show_and_set_in(2) -show_and_set_in(3) -show_and_set_in(4) -show_and_set_in(5) -show_and_set_in(6) -show_and_set_in(7) -show_and_set_in(8) -show_and_set_in(9) -show_and_set_in(10) +static struct sensor_device_attribute in_input[] = { + SENSOR_ATTR(in0_input, S_IRUGO, show_in_input, NULL, 0), + SENSOR_ATTR(in1_input, S_IRUGO, show_in_input, NULL, 1), + SENSOR_ATTR(in2_input, S_IRUGO, show_in_input, NULL, 2), + SENSOR_ATTR(in3_input, S_IRUGO, show_in_input, NULL, 3), + SENSOR_ATTR(in4_input, S_IRUGO, show_in_input, NULL, 4), + SENSOR_ATTR(in5_input, S_IRUGO, show_in_input, NULL, 5), + SENSOR_ATTR(in6_input, S_IRUGO, show_in_input, NULL, 6), + SENSOR_ATTR(in7_input, S_IRUGO, show_in_input, NULL, 7), + SENSOR_ATTR(in8_input, S_IRUGO, show_in_input, NULL, 8), + SENSOR_ATTR(in9_input, S_IRUGO, show_in_input, NULL, 9), + SENSOR_ATTR(in10_input, S_IRUGO, show_in_input, 
NULL, 10), +}; +static struct sensor_device_attribute in_status[] = { + SENSOR_ATTR(in0_status, S_IRUGO, show_in_status, NULL, 0), + SENSOR_ATTR(in1_status, S_IRUGO, show_in_status, NULL, 1), + SENSOR_ATTR(in2_status, S_IRUGO, show_in_status, NULL, 2), + SENSOR_ATTR(in3_status, S_IRUGO, show_in_status, NULL, 3), + SENSOR_ATTR(in4_status, S_IRUGO, show_in_status, NULL, 4), + SENSOR_ATTR(in5_status, S_IRUGO, show_in_status, NULL, 5), + SENSOR_ATTR(in6_status, S_IRUGO, show_in_status, NULL, 6), + SENSOR_ATTR(in7_status, S_IRUGO, show_in_status, NULL, 7), + SENSOR_ATTR(in8_status, S_IRUGO, show_in_status, NULL, 8), + SENSOR_ATTR(in9_status, S_IRUGO, show_in_status, NULL, 9), + SENSOR_ATTR(in10_status, S_IRUGO, show_in_status, NULL, 10), +}; +static struct sensor_device_attribute in_min[] = { + SENSOR_ATTR(in0_min, S_IWUSR | S_IRUGO, show_in_min, set_in_min, 0), + SENSOR_ATTR(in1_min, S_IWUSR | S_IRUGO, show_in_min, set_in_min, 1), + SENSOR_ATTR(in2_min, S_IWUSR | S_IRUGO, show_in_min, set_in_min, 2), + SENSOR_ATTR(in3_min, S_IWUSR | S_IRUGO, show_in_min, set_in_min, 3), + SENSOR_ATTR(in4_min, S_IWUSR | S_IRUGO, show_in_min, set_in_min, 4), + SENSOR_ATTR(in5_min, S_IWUSR | S_IRUGO, show_in_min, set_in_min, 5), + SENSOR_ATTR(in6_min, S_IWUSR | S_IRUGO, show_in_min, set_in_min, 6), + SENSOR_ATTR(in7_min, S_IWUSR | S_IRUGO, show_in_min, set_in_min, 7), + SENSOR_ATTR(in8_min, S_IWUSR | S_IRUGO, show_in_min, set_in_min, 8), + SENSOR_ATTR(in9_min, S_IWUSR | S_IRUGO, show_in_min, set_in_min, 9), + SENSOR_ATTR(in10_min, S_IWUSR | S_IRUGO, show_in_min, set_in_min, 10), +}; +static struct sensor_device_attribute in_max[] = { + SENSOR_ATTR(in0_max, S_IWUSR | S_IRUGO, show_in_max, set_in_max, 0), + SENSOR_ATTR(in1_max, S_IWUSR | S_IRUGO, show_in_max, set_in_max, 1), + SENSOR_ATTR(in2_max, S_IWUSR | S_IRUGO, show_in_max, set_in_max, 2), + SENSOR_ATTR(in3_max, S_IWUSR | S_IRUGO, show_in_max, set_in_max, 3), + SENSOR_ATTR(in4_max, S_IWUSR | S_IRUGO, show_in_max, set_in_max, 4), + SENSOR_ATTR(in5_max, S_IWUSR | S_IRUGO, show_in_max, set_in_max, 5), + SENSOR_ATTR(in6_max, S_IWUSR | S_IRUGO, show_in_max, set_in_max, 6), + SENSOR_ATTR(in7_max, S_IWUSR | S_IRUGO, show_in_max, set_in_max, 7), + SENSOR_ATTR(in8_max, S_IWUSR | S_IRUGO, show_in_max, set_in_max, 8), + SENSOR_ATTR(in9_max, S_IWUSR | S_IRUGO, show_in_max, set_in_max, 9), + SENSOR_ATTR(in10_max, S_IWUSR | S_IRUGO, show_in_max, set_in_max, 10), +}; static ssize_t show_therm_input(struct device *dev, struct device_attribute *devattr, char *buf) { @@ -473,11 +513,11 @@ static ssize_t set_therm_min(struct device *dev, struct device_attribute *devatt struct pc87360_data *data = i2c_get_clientdata(client); long val = simple_strtol(buf, NULL, 10); - down(&data->update_lock); + mutex_lock(&data->update_lock); data->in_min[attr->index] = IN_TO_REG(val, data->in_vref); pc87360_write_value(data, LD_IN, attr->index, PC87365_REG_TEMP_MIN, data->in_min[attr->index]); - up(&data->update_lock); + mutex_unlock(&data->update_lock); return count; } static ssize_t set_therm_max(struct device *dev, struct device_attribute *devattr, const char *buf, @@ -488,11 +528,11 @@ static ssize_t set_therm_max(struct device *dev, struct device_attribute *devatt struct pc87360_data *data = i2c_get_clientdata(client); long val = simple_strtol(buf, NULL, 10); - down(&data->update_lock); + mutex_lock(&data->update_lock); data->in_max[attr->index] = IN_TO_REG(val, data->in_vref); pc87360_write_value(data, LD_IN, attr->index, PC87365_REG_TEMP_MAX, data->in_max[attr->index]); - 
up(&data->update_lock); + mutex_unlock(&data->update_lock); return count; } static ssize_t set_therm_crit(struct device *dev, struct device_attribute *devattr, const char *buf, @@ -503,28 +543,51 @@ static ssize_t set_therm_crit(struct device *dev, struct device_attribute *devat struct pc87360_data *data = i2c_get_clientdata(client); long val = simple_strtol(buf, NULL, 10); - down(&data->update_lock); + mutex_lock(&data->update_lock); data->in_crit[attr->index-11] = IN_TO_REG(val, data->in_vref); pc87360_write_value(data, LD_IN, attr->index, PC87365_REG_TEMP_CRIT, data->in_crit[attr->index-11]); - up(&data->update_lock); + mutex_unlock(&data->update_lock); return count; } -#define show_and_set_therm(offset) \ -static SENSOR_DEVICE_ATTR(temp##offset##_input, S_IRUGO, \ - show_therm_input, NULL, 11+offset-4); \ -static SENSOR_DEVICE_ATTR(temp##offset##_min, S_IWUSR | S_IRUGO, \ - show_therm_min, set_therm_min, 11+offset-4); \ -static SENSOR_DEVICE_ATTR(temp##offset##_max, S_IWUSR | S_IRUGO, \ - show_therm_max, set_therm_max, 11+offset-4); \ -static SENSOR_DEVICE_ATTR(temp##offset##_crit, S_IWUSR | S_IRUGO, \ - show_therm_crit, set_therm_crit, 11+offset-4); \ -static SENSOR_DEVICE_ATTR(temp##offset##_status, S_IRUGO, \ - show_therm_status, NULL, 11+offset-4); -show_and_set_therm(4) -show_and_set_therm(5) -show_and_set_therm(6) +/* the +11 term below reflects the fact that VLM units 11,12,13 are + used in the chip to measure voltage across the thermistors +*/ +static struct sensor_device_attribute therm_input[] = { + SENSOR_ATTR(temp4_input, S_IRUGO, show_therm_input, NULL, 0+11), + SENSOR_ATTR(temp5_input, S_IRUGO, show_therm_input, NULL, 1+11), + SENSOR_ATTR(temp6_input, S_IRUGO, show_therm_input, NULL, 2+11), +}; +static struct sensor_device_attribute therm_status[] = { + SENSOR_ATTR(temp4_status, S_IRUGO, show_therm_status, NULL, 0+11), + SENSOR_ATTR(temp5_status, S_IRUGO, show_therm_status, NULL, 1+11), + SENSOR_ATTR(temp6_status, S_IRUGO, show_therm_status, NULL, 2+11), +}; +static struct sensor_device_attribute therm_min[] = { + SENSOR_ATTR(temp4_min, S_IRUGO | S_IWUSR, + show_therm_min, set_therm_min, 0+11), + SENSOR_ATTR(temp5_min, S_IRUGO | S_IWUSR, + show_therm_min, set_therm_min, 1+11), + SENSOR_ATTR(temp6_min, S_IRUGO | S_IWUSR, + show_therm_min, set_therm_min, 2+11), +}; +static struct sensor_device_attribute therm_max[] = { + SENSOR_ATTR(temp4_max, S_IRUGO | S_IWUSR, + show_therm_max, set_therm_max, 0+11), + SENSOR_ATTR(temp5_max, S_IRUGO | S_IWUSR, + show_therm_max, set_therm_max, 1+11), + SENSOR_ATTR(temp6_max, S_IRUGO | S_IWUSR, + show_therm_max, set_therm_max, 2+11), +}; +static struct sensor_device_attribute therm_crit[] = { + SENSOR_ATTR(temp4_crit, S_IRUGO | S_IWUSR, + show_therm_crit, set_therm_crit, 0+11), + SENSOR_ATTR(temp5_crit, S_IRUGO | S_IWUSR, + show_therm_crit, set_therm_crit, 1+11), + SENSOR_ATTR(temp6_crit, S_IRUGO | S_IWUSR, + show_therm_crit, set_therm_crit, 2+11), +}; static ssize_t show_vid(struct device *dev, struct device_attribute *attr, char *buf) { @@ -592,11 +655,11 @@ static ssize_t set_temp_min(struct device *dev, struct device_attribute *devattr struct pc87360_data *data = i2c_get_clientdata(client); long val = simple_strtol(buf, NULL, 10); - down(&data->update_lock); + mutex_lock(&data->update_lock); data->temp_min[attr->index] = TEMP_TO_REG(val); pc87360_write_value(data, LD_TEMP, attr->index, PC87365_REG_TEMP_MIN, data->temp_min[attr->index]); - up(&data->update_lock); + mutex_unlock(&data->update_lock); return count; } static ssize_t 
set_temp_max(struct device *dev, struct device_attribute *devattr, const char *buf, @@ -607,11 +670,11 @@ static ssize_t set_temp_max(struct device *dev, struct device_attribute *devattr struct pc87360_data *data = i2c_get_clientdata(client); long val = simple_strtol(buf, NULL, 10); - down(&data->update_lock); + mutex_lock(&data->update_lock); data->temp_max[attr->index] = TEMP_TO_REG(val); pc87360_write_value(data, LD_TEMP, attr->index, PC87365_REG_TEMP_MAX, data->temp_max[attr->index]); - up(&data->update_lock); + mutex_unlock(&data->update_lock); return count; } static ssize_t set_temp_crit(struct device *dev, struct device_attribute *devattr, const char *buf, @@ -622,28 +685,48 @@ static ssize_t set_temp_crit(struct device *dev, struct device_attribute *devatt struct pc87360_data *data = i2c_get_clientdata(client); long val = simple_strtol(buf, NULL, 10); - down(&data->update_lock); + mutex_lock(&data->update_lock); data->temp_crit[attr->index] = TEMP_TO_REG(val); pc87360_write_value(data, LD_TEMP, attr->index, PC87365_REG_TEMP_CRIT, data->temp_crit[attr->index]); - up(&data->update_lock); + mutex_unlock(&data->update_lock); return count; } -#define show_and_set_temp(offset) \ -static SENSOR_DEVICE_ATTR(temp##offset##_input, S_IRUGO, \ - show_temp_input, NULL, offset-1); \ -static SENSOR_DEVICE_ATTR(temp##offset##_min, S_IWUSR | S_IRUGO, \ - show_temp_min, set_temp_min, offset-1); \ -static SENSOR_DEVICE_ATTR(temp##offset##_max, S_IWUSR | S_IRUGO, \ - show_temp_max, set_temp_max, offset-1); \ -static SENSOR_DEVICE_ATTR(temp##offset##_crit, S_IWUSR | S_IRUGO, \ - show_temp_crit, set_temp_crit, offset-1); \ -static SENSOR_DEVICE_ATTR(temp##offset##_status, S_IRUGO, \ - show_temp_status, NULL, offset-1); -show_and_set_temp(1) -show_and_set_temp(2) -show_and_set_temp(3) +static struct sensor_device_attribute temp_input[] = { + SENSOR_ATTR(temp1_input, S_IRUGO, show_temp_input, NULL, 0), + SENSOR_ATTR(temp2_input, S_IRUGO, show_temp_input, NULL, 1), + SENSOR_ATTR(temp3_input, S_IRUGO, show_temp_input, NULL, 2), +}; +static struct sensor_device_attribute temp_status[] = { + SENSOR_ATTR(temp1_status, S_IRUGO, show_temp_status, NULL, 0), + SENSOR_ATTR(temp2_status, S_IRUGO, show_temp_status, NULL, 1), + SENSOR_ATTR(temp3_status, S_IRUGO, show_temp_status, NULL, 2), +}; +static struct sensor_device_attribute temp_min[] = { + SENSOR_ATTR(temp1_min, S_IRUGO | S_IWUSR, + show_temp_min, set_temp_min, 0), + SENSOR_ATTR(temp2_min, S_IRUGO | S_IWUSR, + show_temp_min, set_temp_min, 1), + SENSOR_ATTR(temp3_min, S_IRUGO | S_IWUSR, + show_temp_min, set_temp_min, 2), +}; +static struct sensor_device_attribute temp_max[] = { + SENSOR_ATTR(temp1_max, S_IRUGO | S_IWUSR, + show_temp_max, set_temp_max, 0), + SENSOR_ATTR(temp2_max, S_IRUGO | S_IWUSR, + show_temp_max, set_temp_max, 1), + SENSOR_ATTR(temp3_max, S_IRUGO | S_IWUSR, + show_temp_max, set_temp_max, 2), +}; +static struct sensor_device_attribute temp_crit[] = { + SENSOR_ATTR(temp1_crit, S_IRUGO | S_IWUSR, + show_temp_crit, set_temp_crit, 0), + SENSOR_ATTR(temp2_crit, S_IRUGO | S_IWUSR, + show_temp_crit, set_temp_crit, 1), + SENSOR_ATTR(temp3_crit, S_IRUGO | S_IWUSR, + show_temp_crit, set_temp_crit, 2), +}; static ssize_t show_temp_alarms(struct device *dev, struct device_attribute *attr, char *buf) { @@ -749,22 +832,24 @@ static int __init pc87360_find(int sioaddr, u8 *devid, unsigned short *addresses static int pc87360_detect(struct i2c_adapter *adapter) { int i; - struct i2c_client *new_client; + struct i2c_client *client; struct pc87360_data *data; 
int err = 0; const char *name = "pc87360"; int use_thermistors = 0; + struct device *dev; if (!(data = kzalloc(sizeof(struct pc87360_data), GFP_KERNEL))) return -ENOMEM; - new_client = &data->client; - i2c_set_clientdata(new_client, data); - new_client->addr = address; - init_MUTEX(&data->lock); - new_client->adapter = adapter; - new_client->driver = &pc87360_driver; - new_client->flags = 0; + client = &data->client; + dev = &client->dev; + i2c_set_clientdata(client, data); + client->addr = address; + mutex_init(&data->lock); + client->adapter = adapter; + client->driver = &pc87360_driver; + client->flags = 0; data->fannr = 2; data->innr = 0; @@ -792,15 +877,15 @@ static int pc87360_detect(struct i2c_adapter *adapter) break; } - strcpy(new_client->name, name); + strlcpy(client->name, name, sizeof(client->name)); data->valid = 0; - init_MUTEX(&data->update_lock); + mutex_init(&data->update_lock); for (i = 0; i < 3; i++) { if (((data->address[i] = extra_isa[i])) && !request_region(extra_isa[i], PC87360_EXTENT, pc87360_driver.driver.name)) { - dev_err(&new_client->dev, "Region 0x%x-0x%x already " + dev_err(&client->dev, "Region 0x%x-0x%x already " "in use!\n", extra_isa[i], extra_isa[i]+PC87360_EXTENT-1); for (i--; i >= 0; i--) @@ -814,7 +899,7 @@ static int pc87360_detect(struct i2c_adapter *adapter) if (data->fannr) data->fan_conf = confreg[0] | (confreg[1] << 8); - if ((err = i2c_attach_client(new_client))) + if ((err = i2c_attach_client(client))) goto ERROR2; /* Use the correct reference voltage @@ -828,7 +913,7 @@ static int pc87360_detect(struct i2c_adapter *adapter) PC87365_REG_TEMP_CONFIG); } data->in_vref = (i&0x02) ? 3025 : 2966; - dev_dbg(&new_client->dev, "Using %s reference voltage\n", + dev_dbg(&client->dev, "Using %s reference voltage\n", (i&0x02) ? 
"external" : "internal"); data->vid_conf = confreg[3]; @@ -847,154 +932,64 @@ static int pc87360_detect(struct i2c_adapter *adapter) if (devid == 0xe9 && data->address[1]) /* PC87366 */ use_thermistors = confreg[2] & 0x40; - pc87360_init_client(new_client, use_thermistors); + pc87360_init_client(client, use_thermistors); } /* Register sysfs hooks */ - data->class_dev = hwmon_device_register(&new_client->dev); + data->class_dev = hwmon_device_register(&client->dev); if (IS_ERR(data->class_dev)) { err = PTR_ERR(data->class_dev); goto ERROR3; } if (data->innr) { - device_create_file(&new_client->dev, &sensor_dev_attr_in0_input.dev_attr); - device_create_file(&new_client->dev, &sensor_dev_attr_in1_input.dev_attr); - device_create_file(&new_client->dev, &sensor_dev_attr_in2_input.dev_attr); - device_create_file(&new_client->dev, &sensor_dev_attr_in3_input.dev_attr); - device_create_file(&new_client->dev, &sensor_dev_attr_in4_input.dev_attr); - device_create_file(&new_client->dev, &sensor_dev_attr_in5_input.dev_attr); - device_create_file(&new_client->dev, &sensor_dev_attr_in6_input.dev_attr); - device_create_file(&new_client->dev, &sensor_dev_attr_in7_input.dev_attr); - device_create_file(&new_client->dev, &sensor_dev_attr_in8_input.dev_attr); - device_create_file(&new_client->dev, &sensor_dev_attr_in9_input.dev_attr); - device_create_file(&new_client->dev, &sensor_dev_attr_in10_input.dev_attr); - device_create_file(&new_client->dev, &sensor_dev_attr_in0_min.dev_attr); - device_create_file(&new_client->dev, &sensor_dev_attr_in1_min.dev_attr); - device_create_file(&new_client->dev, &sensor_dev_attr_in2_min.dev_attr); - device_create_file(&new_client->dev, &sensor_dev_attr_in3_min.dev_attr); - device_create_file(&new_client->dev, &sensor_dev_attr_in4_min.dev_attr); - device_create_file(&new_client->dev, &sensor_dev_attr_in5_min.dev_attr); - device_create_file(&new_client->dev, &sensor_dev_attr_in6_min.dev_attr); - device_create_file(&new_client->dev, &sensor_dev_attr_in7_min.dev_attr); - device_create_file(&new_client->dev, &sensor_dev_attr_in8_min.dev_attr); - device_create_file(&new_client->dev, &sensor_dev_attr_in9_min.dev_attr); - device_create_file(&new_client->dev, &sensor_dev_attr_in10_min.dev_attr); - device_create_file(&new_client->dev, &sensor_dev_attr_in0_max.dev_attr); - device_create_file(&new_client->dev, &sensor_dev_attr_in1_max.dev_attr); - device_create_file(&new_client->dev, &sensor_dev_attr_in2_max.dev_attr); - device_create_file(&new_client->dev, &sensor_dev_attr_in3_max.dev_attr); - device_create_file(&new_client->dev, &sensor_dev_attr_in4_max.dev_attr); - device_create_file(&new_client->dev, &sensor_dev_attr_in5_max.dev_attr); - device_create_file(&new_client->dev, &sensor_dev_attr_in6_max.dev_attr); - device_create_file(&new_client->dev, &sensor_dev_attr_in7_max.dev_attr); - device_create_file(&new_client->dev, &sensor_dev_attr_in8_max.dev_attr); - device_create_file(&new_client->dev, &sensor_dev_attr_in9_max.dev_attr); - device_create_file(&new_client->dev, &sensor_dev_attr_in10_max.dev_attr); - device_create_file(&new_client->dev, &sensor_dev_attr_in0_status.dev_attr); - device_create_file(&new_client->dev, &sensor_dev_attr_in1_status.dev_attr); - device_create_file(&new_client->dev, &sensor_dev_attr_in2_status.dev_attr); - device_create_file(&new_client->dev, &sensor_dev_attr_in3_status.dev_attr); - device_create_file(&new_client->dev, &sensor_dev_attr_in4_status.dev_attr); - device_create_file(&new_client->dev, &sensor_dev_attr_in5_status.dev_attr); - 
device_create_file(&new_client->dev, &sensor_dev_attr_in6_status.dev_attr); - device_create_file(&new_client->dev, &sensor_dev_attr_in7_status.dev_attr); - device_create_file(&new_client->dev, &sensor_dev_attr_in8_status.dev_attr); - device_create_file(&new_client->dev, &sensor_dev_attr_in9_status.dev_attr); - device_create_file(&new_client->dev, &sensor_dev_attr_in10_status.dev_attr); - - device_create_file(&new_client->dev, &dev_attr_cpu0_vid); - device_create_file(&new_client->dev, &dev_attr_vrm); - device_create_file(&new_client->dev, &dev_attr_alarms_in); + for (i = 0; i < 11; i++) { + device_create_file(dev, &in_input[i].dev_attr); + device_create_file(dev, &in_min[i].dev_attr); + device_create_file(dev, &in_max[i].dev_attr); + device_create_file(dev, &in_status[i].dev_attr); + } + device_create_file(dev, &dev_attr_cpu0_vid); + device_create_file(dev, &dev_attr_vrm); + device_create_file(dev, &dev_attr_alarms_in); } if (data->tempnr) { - device_create_file(&new_client->dev, &sensor_dev_attr_temp1_input.dev_attr); - device_create_file(&new_client->dev, &sensor_dev_attr_temp2_input.dev_attr); - device_create_file(&new_client->dev, &sensor_dev_attr_temp1_min.dev_attr); - device_create_file(&new_client->dev, &sensor_dev_attr_temp2_min.dev_attr); - device_create_file(&new_client->dev, &sensor_dev_attr_temp1_max.dev_attr); - device_create_file(&new_client->dev, &sensor_dev_attr_temp2_max.dev_attr); - device_create_file(&new_client->dev, &sensor_dev_attr_temp1_crit.dev_attr); - device_create_file(&new_client->dev, &sensor_dev_attr_temp2_crit.dev_attr); - device_create_file(&new_client->dev, &sensor_dev_attr_temp1_status.dev_attr); - device_create_file(&new_client->dev, &sensor_dev_attr_temp2_status.dev_attr); - - device_create_file(&new_client->dev, &dev_attr_alarms_temp); - } - if (data->tempnr == 3) { - device_create_file(&new_client->dev, &sensor_dev_attr_temp3_input.dev_attr); - device_create_file(&new_client->dev, &sensor_dev_attr_temp3_min.dev_attr); - device_create_file(&new_client->dev, &sensor_dev_attr_temp3_max.dev_attr); - device_create_file(&new_client->dev, &sensor_dev_attr_temp3_crit.dev_attr); - device_create_file(&new_client->dev, &sensor_dev_attr_temp3_status.dev_attr); - } - if (data->innr == 14) { - device_create_file(&new_client->dev, &sensor_dev_attr_temp4_input.dev_attr); - device_create_file(&new_client->dev, &sensor_dev_attr_temp5_input.dev_attr); - device_create_file(&new_client->dev, &sensor_dev_attr_temp6_input.dev_attr); - device_create_file(&new_client->dev, &sensor_dev_attr_temp4_min.dev_attr); - device_create_file(&new_client->dev, &sensor_dev_attr_temp5_min.dev_attr); - device_create_file(&new_client->dev, &sensor_dev_attr_temp6_min.dev_attr); - device_create_file(&new_client->dev, &sensor_dev_attr_temp4_max.dev_attr); - device_create_file(&new_client->dev, &sensor_dev_attr_temp5_max.dev_attr); - device_create_file(&new_client->dev, &sensor_dev_attr_temp6_max.dev_attr); - device_create_file(&new_client->dev, &sensor_dev_attr_temp4_crit.dev_attr); - device_create_file(&new_client->dev, &sensor_dev_attr_temp5_crit.dev_attr); - device_create_file(&new_client->dev, &sensor_dev_attr_temp6_crit.dev_attr); - device_create_file(&new_client->dev, &sensor_dev_attr_temp4_status.dev_attr); - device_create_file(&new_client->dev, &sensor_dev_attr_temp5_status.dev_attr); - device_create_file(&new_client->dev, &sensor_dev_attr_temp6_status.dev_attr); - } - - if (data->fannr) { - if (FAN_CONFIG_MONITOR(data->fan_conf, 0)) { - device_create_file(&new_client->dev, - 
&sensor_dev_attr_fan1_input.dev_attr); - device_create_file(&new_client->dev, - &sensor_dev_attr_fan1_min.dev_attr); - device_create_file(&new_client->dev, - &sensor_dev_attr_fan1_div.dev_attr); - device_create_file(&new_client->dev, - &sensor_dev_attr_fan1_status.dev_attr); + for (i = 0; i < data->tempnr; i++) { + device_create_file(dev, &temp_input[i].dev_attr); + device_create_file(dev, &temp_min[i].dev_attr); + device_create_file(dev, &temp_max[i].dev_attr); + device_create_file(dev, &temp_crit[i].dev_attr); + device_create_file(dev, &temp_status[i].dev_attr); } + device_create_file(dev, &dev_attr_alarms_temp); + } - if (FAN_CONFIG_MONITOR(data->fan_conf, 1)) { - device_create_file(&new_client->dev, - &sensor_dev_attr_fan2_input.dev_attr); - device_create_file(&new_client->dev, - &sensor_dev_attr_fan2_min.dev_attr); - device_create_file(&new_client->dev, - &sensor_dev_attr_fan2_div.dev_attr); - device_create_file(&new_client->dev, - &sensor_dev_attr_fan2_status.dev_attr); + if (data->innr == 14) { + for (i = 0; i < 3; i++) { + device_create_file(dev, &therm_input[i].dev_attr); + device_create_file(dev, &therm_min[i].dev_attr); + device_create_file(dev, &therm_max[i].dev_attr); + device_create_file(dev, &therm_crit[i].dev_attr); + device_create_file(dev, &therm_status[i].dev_attr); } - - if (FAN_CONFIG_CONTROL(data->fan_conf, 0)) - device_create_file(&new_client->dev, &sensor_dev_attr_pwm1.dev_attr); - if (FAN_CONFIG_CONTROL(data->fan_conf, 1)) - device_create_file(&new_client->dev, &sensor_dev_attr_pwm2.dev_attr); } - if (data->fannr == 3) { - if (FAN_CONFIG_MONITOR(data->fan_conf, 2)) { - device_create_file(&new_client->dev, - &sensor_dev_attr_fan3_input.dev_attr); - device_create_file(&new_client->dev, - &sensor_dev_attr_fan3_min.dev_attr); - device_create_file(&new_client->dev, - &sensor_dev_attr_fan3_div.dev_attr); - device_create_file(&new_client->dev, - &sensor_dev_attr_fan3_status.dev_attr); - } - if (FAN_CONFIG_CONTROL(data->fan_conf, 2)) - device_create_file(&new_client->dev, &sensor_dev_attr_pwm3.dev_attr); + for (i = 0; i < data->fannr; i++) { + if (FAN_CONFIG_MONITOR(data->fan_conf, i)) { + device_create_file(dev, &fan_input[i].dev_attr); + device_create_file(dev, &fan_min[i].dev_attr); + device_create_file(dev, &fan_div[i].dev_attr); + device_create_file(dev, &fan_status[i].dev_attr); + } + if (FAN_CONFIG_CONTROL(data->fan_conf, i)) + device_create_file(dev, &pwm[i].dev_attr); } return 0; ERROR3: - i2c_detach_client(new_client); + i2c_detach_client(client); ERROR2: for (i = 0; i < 3; i++) { if (data->address[i]) { @@ -1033,11 +1028,11 @@ static int pc87360_read_value(struct pc87360_data *data, u8 ldi, u8 bank, { int res; - down(&(data->lock)); + mutex_lock(&(data->lock)); if (bank != NO_BANK) outb_p(bank, data->address[ldi] + PC87365_REG_BANK); res = inb_p(data->address[ldi] + reg); - up(&(data->lock)); + mutex_unlock(&(data->lock)); return res; } @@ -1045,11 +1040,11 @@ static int pc87360_read_value(struct pc87360_data *data, u8 ldi, u8 bank, static void pc87360_write_value(struct pc87360_data *data, u8 ldi, u8 bank, u8 reg, u8 value) { - down(&(data->lock)); + mutex_lock(&(data->lock)); if (bank != NO_BANK) outb_p(bank, data->address[ldi] + PC87365_REG_BANK); outb_p(value, data->address[ldi] + reg); - up(&(data->lock)); + mutex_unlock(&(data->lock)); } static void pc87360_init_client(struct i2c_client *client, int use_thermistors) @@ -1071,7 +1066,7 @@ static void pc87360_init_client(struct i2c_client *client, int use_thermistors) } nr = data->innr < 11 ? 
data->innr : 11; - for (i=0; i<nr; i++) { + for (i = 0; i < nr; i++) { if (init >= init_in[i]) { /* Forcibly enable voltage channel */ reg = pc87360_read_value(data, LD_IN, i, @@ -1088,14 +1083,14 @@ static void pc87360_init_client(struct i2c_client *client, int use_thermistors) /* We can't blindly trust the Super-I/O space configuration bit, most BIOS won't set it properly */ - for (i=11; i<data->innr; i++) { + for (i = 11; i < data->innr; i++) { reg = pc87360_read_value(data, LD_IN, i, PC87365_REG_TEMP_STATUS); use_thermistors = use_thermistors || (reg & 0x01); } i = use_thermistors ? 2 : 0; - for (; i<data->tempnr; i++) { + for (; i < data->tempnr; i++) { if (init >= init_temp[i]) { /* Forcibly enable temperature channel */ reg = pc87360_read_value(data, LD_TEMP, i, @@ -1111,7 +1106,7 @@ static void pc87360_init_client(struct i2c_client *client, int use_thermistors) } if (use_thermistors) { - for (i=11; i<data->innr; i++) { + for (i = 11; i < data->innr; i++) { if (init >= init_in[i]) { /* The pin may already be used by thermal diodes */ @@ -1221,7 +1216,7 @@ static struct pc87360_data *pc87360_update_device(struct device *dev) struct pc87360_data *data = i2c_get_clientdata(client); u8 i; - down(&data->update_lock); + mutex_lock(&data->update_lock); if (time_after(jiffies, data->last_updated + HZ * 2) || !data->valid) { dev_dbg(&client->dev, "Data update\n"); @@ -1321,7 +1316,7 @@ static struct pc87360_data *pc87360_update_device(struct device *dev) data->valid = 1; } - up(&data->update_lock); + mutex_unlock(&data->update_lock); return data; } diff --git a/drivers/hwmon/sis5595.c b/drivers/hwmon/sis5595.c index 8be5189d9bd..6f3fda73f70 100644 --- a/drivers/hwmon/sis5595.c +++ b/drivers/hwmon/sis5595.c @@ -60,6 +60,7 @@ #include <linux/err.h> #include <linux/init.h> #include <linux/jiffies.h> +#include <linux/mutex.h> #include <asm/io.h> @@ -167,9 +168,9 @@ static inline u8 DIV_TO_REG(int val) struct sis5595_data { struct i2c_client client; struct class_device *class_dev; - struct semaphore lock; + struct mutex lock; - struct semaphore update_lock; + struct mutex update_lock; char valid; /* !=0 if following fields are valid */ unsigned long last_updated; /* In jiffies */ char maxins; /* == 3 if temp enabled, otherwise == 4 */ @@ -192,8 +193,8 @@ static struct pci_dev *s_bridge; /* pointer to the (only) sis5595 */ static int sis5595_detect(struct i2c_adapter *adapter); static int sis5595_detach_client(struct i2c_client *client); -static int sis5595_read_value(struct i2c_client *client, u8 register); -static int sis5595_write_value(struct i2c_client *client, u8 register, u8 value); +static int sis5595_read_value(struct i2c_client *client, u8 reg); +static int sis5595_write_value(struct i2c_client *client, u8 reg, u8 value); static struct sis5595_data *sis5595_update_device(struct device *dev); static void sis5595_init_client(struct i2c_client *client); @@ -231,10 +232,10 @@ static ssize_t set_in_min(struct device *dev, const char *buf, struct sis5595_data *data = i2c_get_clientdata(client); unsigned long val = simple_strtoul(buf, NULL, 10); - down(&data->update_lock); + mutex_lock(&data->update_lock); data->in_min[nr] = IN_TO_REG(val); sis5595_write_value(client, SIS5595_REG_IN_MIN(nr), data->in_min[nr]); - up(&data->update_lock); + mutex_unlock(&data->update_lock); return count; } @@ -245,10 +246,10 @@ static ssize_t set_in_max(struct device *dev, const char *buf, struct sis5595_data *data = i2c_get_clientdata(client); unsigned long val = simple_strtoul(buf, NULL, 10); - 
down(&data->update_lock); + mutex_lock(&data->update_lock); data->in_max[nr] = IN_TO_REG(val); sis5595_write_value(client, SIS5595_REG_IN_MAX(nr), data->in_max[nr]); - up(&data->update_lock); + mutex_unlock(&data->update_lock); return count; } @@ -310,10 +311,10 @@ static ssize_t set_temp_over(struct device *dev, struct device_attribute *attr, struct sis5595_data *data = i2c_get_clientdata(client); long val = simple_strtol(buf, NULL, 10); - down(&data->update_lock); + mutex_lock(&data->update_lock); data->temp_over = TEMP_TO_REG(val); sis5595_write_value(client, SIS5595_REG_TEMP_OVER, data->temp_over); - up(&data->update_lock); + mutex_unlock(&data->update_lock); return count; } @@ -329,10 +330,10 @@ static ssize_t set_temp_hyst(struct device *dev, struct device_attribute *attr, struct sis5595_data *data = i2c_get_clientdata(client); long val = simple_strtol(buf, NULL, 10); - down(&data->update_lock); + mutex_lock(&data->update_lock); data->temp_hyst = TEMP_TO_REG(val); sis5595_write_value(client, SIS5595_REG_TEMP_HYST, data->temp_hyst); - up(&data->update_lock); + mutex_unlock(&data->update_lock); return count; } @@ -364,10 +365,10 @@ static ssize_t set_fan_min(struct device *dev, const char *buf, struct sis5595_data *data = i2c_get_clientdata(client); unsigned long val = simple_strtoul(buf, NULL, 10); - down(&data->update_lock); + mutex_lock(&data->update_lock); data->fan_min[nr] = FAN_TO_REG(val, DIV_FROM_REG(data->fan_div[nr])); sis5595_write_value(client, SIS5595_REG_FAN_MIN(nr), data->fan_min[nr]); - up(&data->update_lock); + mutex_unlock(&data->update_lock); return count; } @@ -390,7 +391,7 @@ static ssize_t set_fan_div(struct device *dev, const char *buf, unsigned long val = simple_strtoul(buf, NULL, 10); int reg; - down(&data->update_lock); + mutex_lock(&data->update_lock); min = FAN_FROM_REG(data->fan_min[nr], DIV_FROM_REG(data->fan_div[nr])); reg = sis5595_read_value(client, SIS5595_REG_FANDIV); @@ -403,7 +404,7 @@ static ssize_t set_fan_div(struct device *dev, const char *buf, default: dev_err(&client->dev, "fan_div value %ld not " "supported. 
Choose one of 1, 2, 4 or 8!\n", val); - up(&data->update_lock); + mutex_unlock(&data->update_lock); return -EINVAL; } @@ -419,7 +420,7 @@ static ssize_t set_fan_div(struct device *dev, const char *buf, data->fan_min[nr] = FAN_TO_REG(min, DIV_FROM_REG(data->fan_div[nr])); sis5595_write_value(client, SIS5595_REG_FAN_MIN(nr), data->fan_min[nr]); - up(&data->update_lock); + mutex_unlock(&data->update_lock); return count; } @@ -527,7 +528,7 @@ static int sis5595_detect(struct i2c_adapter *adapter) new_client = &data->client; new_client->addr = address; - init_MUTEX(&data->lock); + mutex_init(&data->lock); i2c_set_clientdata(new_client, data); new_client->adapter = adapter; new_client->driver = &sis5595_driver; @@ -548,7 +549,7 @@ static int sis5595_detect(struct i2c_adapter *adapter) strlcpy(new_client->name, "sis5595", I2C_NAME_SIZE); data->valid = 0; - init_MUTEX(&data->update_lock); + mutex_init(&data->update_lock); /* Tell the I2C layer a new client has arrived */ if ((err = i2c_attach_client(new_client))) @@ -635,20 +636,20 @@ static int sis5595_read_value(struct i2c_client *client, u8 reg) int res; struct sis5595_data *data = i2c_get_clientdata(client); - down(&data->lock); + mutex_lock(&data->lock); outb_p(reg, client->addr + SIS5595_ADDR_REG_OFFSET); res = inb_p(client->addr + SIS5595_DATA_REG_OFFSET); - up(&data->lock); + mutex_unlock(&data->lock); return res; } static int sis5595_write_value(struct i2c_client *client, u8 reg, u8 value) { struct sis5595_data *data = i2c_get_clientdata(client); - down(&data->lock); + mutex_lock(&data->lock); outb_p(reg, client->addr + SIS5595_ADDR_REG_OFFSET); outb_p(value, client->addr + SIS5595_DATA_REG_OFFSET); - up(&data->lock); + mutex_unlock(&data->lock); return 0; } @@ -667,7 +668,7 @@ static struct sis5595_data *sis5595_update_device(struct device *dev) struct sis5595_data *data = i2c_get_clientdata(client); int i; - down(&data->update_lock); + mutex_lock(&data->update_lock); if (time_after(jiffies, data->last_updated + HZ + HZ / 2) || !data->valid) { @@ -707,7 +708,7 @@ static struct sis5595_data *sis5595_update_device(struct device *dev) data->valid = 1; } - up(&data->update_lock); + mutex_unlock(&data->update_lock); return data; } diff --git a/drivers/hwmon/smsc47b397.c b/drivers/hwmon/smsc47b397.c index 8663bbbe97f..b6086186d22 100644 --- a/drivers/hwmon/smsc47b397.c +++ b/drivers/hwmon/smsc47b397.c @@ -35,6 +35,7 @@ #include <linux/hwmon.h> #include <linux/err.h> #include <linux/init.h> +#include <linux/mutex.h> #include <asm/io.h> /* Address is autodetected, there is no default value */ @@ -92,9 +93,9 @@ static u8 smsc47b397_reg_temp[] = {0x25, 0x26, 0x27, 0x80}; struct smsc47b397_data { struct i2c_client client; struct class_device *class_dev; - struct semaphore lock; + struct mutex lock; - struct semaphore update_lock; + struct mutex update_lock; unsigned long last_updated; /* in jiffies */ int valid; @@ -108,10 +109,10 @@ static int smsc47b397_read_value(struct i2c_client *client, u8 reg) struct smsc47b397_data *data = i2c_get_clientdata(client); int res; - down(&data->lock); + mutex_lock(&data->lock); outb(reg, client->addr); res = inb_p(client->addr + 1); - up(&data->lock); + mutex_unlock(&data->lock); return res; } @@ -121,7 +122,7 @@ static struct smsc47b397_data *smsc47b397_update_device(struct device *dev) struct smsc47b397_data *data = i2c_get_clientdata(client); int i; - down(&data->update_lock); + mutex_lock(&data->update_lock); if (time_after(jiffies, data->last_updated + HZ) || !data->valid) { dev_dbg(&client->dev, "starting 
device update...\n"); @@ -144,7 +145,7 @@ static struct smsc47b397_data *smsc47b397_update_device(struct device *dev) dev_dbg(&client->dev, "... device update complete\n"); } - up(&data->update_lock); + mutex_unlock(&data->update_lock); return data; } @@ -254,14 +255,14 @@ static int smsc47b397_detect(struct i2c_adapter *adapter) new_client = &data->client; i2c_set_clientdata(new_client, data); new_client->addr = address; - init_MUTEX(&data->lock); + mutex_init(&data->lock); new_client->adapter = adapter; new_client->driver = &smsc47b397_driver; new_client->flags = 0; strlcpy(new_client->name, "smsc47b397", I2C_NAME_SIZE); - init_MUTEX(&data->update_lock); + mutex_init(&data->update_lock); if ((err = i2c_attach_client(new_client))) goto error_free; diff --git a/drivers/hwmon/smsc47m1.c b/drivers/hwmon/smsc47m1.c index d1e3ec0fe4d..7732aec5459 100644 --- a/drivers/hwmon/smsc47m1.c +++ b/drivers/hwmon/smsc47m1.c @@ -34,6 +34,7 @@ #include <linux/hwmon.h> #include <linux/err.h> #include <linux/init.h> +#include <linux/mutex.h> #include <asm/io.h> /* Address is autodetected, there is no default value */ @@ -102,9 +103,9 @@ superio_exit(void) struct smsc47m1_data { struct i2c_client client; struct class_device *class_dev; - struct semaphore lock; + struct mutex lock; - struct semaphore update_lock; + struct mutex update_lock; unsigned long last_updated; /* In jiffies */ u8 fan[2]; /* Register value */ @@ -188,18 +189,18 @@ static ssize_t set_fan_min(struct device *dev, const char *buf, struct smsc47m1_data *data = i2c_get_clientdata(client); long rpmdiv, val = simple_strtol(buf, NULL, 10); - down(&data->update_lock); + mutex_lock(&data->update_lock); rpmdiv = val * DIV_FROM_REG(data->fan_div[nr]); if (983040 > 192 * rpmdiv || 2 * rpmdiv > 983040) { - up(&data->update_lock); + mutex_unlock(&data->update_lock); return -EINVAL; } data->fan_preload[nr] = 192 - ((983040 + rpmdiv / 2) / rpmdiv); smsc47m1_write_value(client, SMSC47M1_REG_FAN_PRELOAD(nr), data->fan_preload[nr]); - up(&data->update_lock); + mutex_unlock(&data->update_lock); return count; } @@ -220,14 +221,14 @@ static ssize_t set_fan_div(struct device *dev, const char *buf, if (new_div == old_div) /* No change */ return count; - down(&data->update_lock); + mutex_lock(&data->update_lock); switch (new_div) { case 1: data->fan_div[nr] = 0; break; case 2: data->fan_div[nr] = 1; break; case 4: data->fan_div[nr] = 2; break; case 8: data->fan_div[nr] = 3; break; default: - up(&data->update_lock); + mutex_unlock(&data->update_lock); return -EINVAL; } @@ -241,7 +242,7 @@ static ssize_t set_fan_div(struct device *dev, const char *buf, data->fan_preload[nr] = SENSORS_LIMIT(tmp, 0, 191); smsc47m1_write_value(client, SMSC47M1_REG_FAN_PRELOAD(nr), data->fan_preload[nr]); - up(&data->update_lock); + mutex_unlock(&data->update_lock); return count; } @@ -257,12 +258,12 @@ static ssize_t set_pwm(struct device *dev, const char *buf, if (val < 0 || val > 255) return -EINVAL; - down(&data->update_lock); + mutex_lock(&data->update_lock); data->pwm[nr] &= 0x81; /* Preserve additional bits */ data->pwm[nr] |= PWM_TO_REG(val); smsc47m1_write_value(client, SMSC47M1_REG_PWM(nr), data->pwm[nr]); - up(&data->update_lock); + mutex_unlock(&data->update_lock); return count; } @@ -278,12 +279,12 @@ static ssize_t set_pwm_en(struct device *dev, const char *buf, if (val != 0 && val != 1) return -EINVAL; - down(&data->update_lock); + mutex_lock(&data->update_lock); data->pwm[nr] &= 0xFE; /* preserve the other bits */ data->pwm[nr] |= !val; smsc47m1_write_value(client, 
SMSC47M1_REG_PWM(nr), data->pwm[nr]); - up(&data->update_lock); + mutex_unlock(&data->update_lock); return count; } @@ -408,13 +409,13 @@ static int smsc47m1_detect(struct i2c_adapter *adapter) new_client = &data->client; i2c_set_clientdata(new_client, data); new_client->addr = address; - init_MUTEX(&data->lock); + mutex_init(&data->lock); new_client->adapter = adapter; new_client->driver = &smsc47m1_driver; new_client->flags = 0; strlcpy(new_client->name, "smsc47m1", I2C_NAME_SIZE); - init_MUTEX(&data->update_lock); + mutex_init(&data->update_lock); /* If no function is properly configured, there's no point in actually registering the chip. */ @@ -512,17 +513,17 @@ static int smsc47m1_read_value(struct i2c_client *client, u8 reg) { int res; - down(&((struct smsc47m1_data *) i2c_get_clientdata(client))->lock); + mutex_lock(&((struct smsc47m1_data *) i2c_get_clientdata(client))->lock); res = inb_p(client->addr + reg); - up(&((struct smsc47m1_data *) i2c_get_clientdata(client))->lock); + mutex_unlock(&((struct smsc47m1_data *) i2c_get_clientdata(client))->lock); return res; } static void smsc47m1_write_value(struct i2c_client *client, u8 reg, u8 value) { - down(&((struct smsc47m1_data *) i2c_get_clientdata(client))->lock); + mutex_lock(&((struct smsc47m1_data *) i2c_get_clientdata(client))->lock); outb_p(value, client->addr + reg); - up(&((struct smsc47m1_data *) i2c_get_clientdata(client))->lock); + mutex_unlock(&((struct smsc47m1_data *) i2c_get_clientdata(client))->lock); } static struct smsc47m1_data *smsc47m1_update_device(struct device *dev, @@ -531,7 +532,7 @@ static struct smsc47m1_data *smsc47m1_update_device(struct device *dev, struct i2c_client *client = to_i2c_client(dev); struct smsc47m1_data *data = i2c_get_clientdata(client); - down(&data->update_lock); + mutex_lock(&data->update_lock); if (time_after(jiffies, data->last_updated + HZ + HZ / 2) || init) { int i; @@ -558,7 +559,7 @@ static struct smsc47m1_data *smsc47m1_update_device(struct device *dev, data->last_updated = jiffies; } - up(&data->update_lock); + mutex_unlock(&data->update_lock); return data; } diff --git a/drivers/hwmon/via686a.c b/drivers/hwmon/via686a.c index cb01848729b..166298f1f19 100644 --- a/drivers/hwmon/via686a.c +++ b/drivers/hwmon/via686a.c @@ -39,6 +39,7 @@ #include <linux/hwmon.h> #include <linux/err.h> #include <linux/init.h> +#include <linux/mutex.h> #include <asm/io.h> @@ -296,7 +297,7 @@ static inline long TEMP_FROM_REG10(u16 val) struct via686a_data { struct i2c_client client; struct class_device *class_dev; - struct semaphore update_lock; + struct mutex update_lock; char valid; /* !=0 if following fields are valid */ unsigned long last_updated; /* In jiffies */ @@ -355,11 +356,11 @@ static ssize_t set_in_min(struct device *dev, const char *buf, struct via686a_data *data = i2c_get_clientdata(client); unsigned long val = simple_strtoul(buf, NULL, 10); - down(&data->update_lock); + mutex_lock(&data->update_lock); data->in_min[nr] = IN_TO_REG(val, nr); via686a_write_value(client, VIA686A_REG_IN_MIN(nr), data->in_min[nr]); - up(&data->update_lock); + mutex_unlock(&data->update_lock); return count; } static ssize_t set_in_max(struct device *dev, const char *buf, @@ -368,11 +369,11 @@ static ssize_t set_in_max(struct device *dev, const char *buf, struct via686a_data *data = i2c_get_clientdata(client); unsigned long val = simple_strtoul(buf, NULL, 10); - down(&data->update_lock); + mutex_lock(&data->update_lock); data->in_max[nr] = IN_TO_REG(val, nr); via686a_write_value(client, 
VIA686A_REG_IN_MAX(nr), data->in_max[nr]); - up(&data->update_lock); + mutex_unlock(&data->update_lock); return count; } #define show_in_offset(offset) \ @@ -432,11 +433,11 @@ static ssize_t set_temp_over(struct device *dev, const char *buf, struct via686a_data *data = i2c_get_clientdata(client); int val = simple_strtol(buf, NULL, 10); - down(&data->update_lock); + mutex_lock(&data->update_lock); data->temp_over[nr] = TEMP_TO_REG(val); via686a_write_value(client, VIA686A_REG_TEMP_OVER[nr], data->temp_over[nr]); - up(&data->update_lock); + mutex_unlock(&data->update_lock); return count; } static ssize_t set_temp_hyst(struct device *dev, const char *buf, @@ -445,11 +446,11 @@ static ssize_t set_temp_hyst(struct device *dev, const char *buf, struct via686a_data *data = i2c_get_clientdata(client); int val = simple_strtol(buf, NULL, 10); - down(&data->update_lock); + mutex_lock(&data->update_lock); data->temp_hyst[nr] = TEMP_TO_REG(val); via686a_write_value(client, VIA686A_REG_TEMP_HYST[nr], data->temp_hyst[nr]); - up(&data->update_lock); + mutex_unlock(&data->update_lock); return count; } #define show_temp_offset(offset) \ @@ -508,10 +509,10 @@ static ssize_t set_fan_min(struct device *dev, const char *buf, struct via686a_data *data = i2c_get_clientdata(client); int val = simple_strtol(buf, NULL, 10); - down(&data->update_lock); + mutex_lock(&data->update_lock); data->fan_min[nr] = FAN_TO_REG(val, DIV_FROM_REG(data->fan_div[nr])); via686a_write_value(client, VIA686A_REG_FAN_MIN(nr+1), data->fan_min[nr]); - up(&data->update_lock); + mutex_unlock(&data->update_lock); return count; } static ssize_t set_fan_div(struct device *dev, const char *buf, @@ -521,12 +522,12 @@ static ssize_t set_fan_div(struct device *dev, const char *buf, int val = simple_strtol(buf, NULL, 10); int old; - down(&data->update_lock); + mutex_lock(&data->update_lock); old = via686a_read_value(client, VIA686A_REG_FANDIV); data->fan_div[nr] = DIV_TO_REG(val); old = (old & 0x0f) | (data->fan_div[1] << 6) | (data->fan_div[0] << 4); via686a_write_value(client, VIA686A_REG_FANDIV, old); - up(&data->update_lock); + mutex_unlock(&data->update_lock); return count; } @@ -639,7 +640,7 @@ static int via686a_detect(struct i2c_adapter *adapter) strlcpy(new_client->name, client_name, I2C_NAME_SIZE); data->valid = 0; - init_MUTEX(&data->update_lock); + mutex_init(&data->update_lock); /* Tell the I2C layer a new client has arrived */ if ((err = i2c_attach_client(new_client))) goto exit_free; @@ -733,7 +734,7 @@ static struct via686a_data *via686a_update_device(struct device *dev) struct via686a_data *data = i2c_get_clientdata(client); int i; - down(&data->update_lock); + mutex_lock(&data->update_lock); if (time_after(jiffies, data->last_updated + HZ + HZ / 2) || !data->valid) { @@ -788,7 +789,7 @@ static struct via686a_data *via686a_update_device(struct device *dev) data->valid = 1; } - up(&data->update_lock); + mutex_unlock(&data->update_lock); return data; } diff --git a/drivers/hwmon/vt8231.c b/drivers/hwmon/vt8231.c index 271e9cb9532..686f3deb309 100644 --- a/drivers/hwmon/vt8231.c +++ b/drivers/hwmon/vt8231.c @@ -35,6 +35,7 @@ #include <linux/hwmon-sysfs.h> #include <linux/hwmon-vid.h> #include <linux/err.h> +#include <linux/mutex.h> #include <asm/io.h> static int force_addr; @@ -148,7 +149,7 @@ static inline u8 FAN_TO_REG(long rpm, int div) struct vt8231_data { struct i2c_client client; - struct semaphore update_lock; + struct mutex update_lock; struct class_device *class_dev; char valid; /* !=0 if following fields are valid */ 
unsigned long last_updated; /* In jiffies */ @@ -223,10 +224,10 @@ static ssize_t set_in_min(struct device *dev, struct device_attribute *attr, struct vt8231_data *data = i2c_get_clientdata(client); unsigned long val = simple_strtoul(buf, NULL, 10); - down(&data->update_lock); + mutex_lock(&data->update_lock); data->in_min[nr] = SENSORS_LIMIT(((val * 958) / 10000) + 3, 0, 255); vt8231_write_value(client, regvoltmin[nr], data->in_min[nr]); - up(&data->update_lock); + mutex_unlock(&data->update_lock); return count; } @@ -239,10 +240,10 @@ static ssize_t set_in_max(struct device *dev, struct device_attribute *attr, struct vt8231_data *data = i2c_get_clientdata(client); unsigned long val = simple_strtoul(buf, NULL, 10); - down(&data->update_lock); + mutex_lock(&data->update_lock); data->in_max[nr] = SENSORS_LIMIT(((val * 958) / 10000) + 3, 0, 255); vt8231_write_value(client, regvoltmax[nr], data->in_max[nr]); - up(&data->update_lock); + mutex_unlock(&data->update_lock); return count; } @@ -281,11 +282,11 @@ static ssize_t set_in5_min(struct device *dev, struct device_attribute *attr, struct vt8231_data *data = i2c_get_clientdata(client); unsigned long val = simple_strtoul(buf, NULL, 10); - down(&data->update_lock); + mutex_lock(&data->update_lock); data->in_min[5] = SENSORS_LIMIT(((val * 958 * 34) / (10000 * 54)) + 3, 0, 255); vt8231_write_value(client, regvoltmin[5], data->in_min[5]); - up(&data->update_lock); + mutex_unlock(&data->update_lock); return count; } @@ -296,11 +297,11 @@ static ssize_t set_in5_max(struct device *dev, struct device_attribute *attr, struct vt8231_data *data = i2c_get_clientdata(client); unsigned long val = simple_strtoul(buf, NULL, 10); - down(&data->update_lock); + mutex_lock(&data->update_lock); data->in_max[5] = SENSORS_LIMIT(((val * 958 * 34) / (10000 * 54)) + 3, 0, 255); vt8231_write_value(client, regvoltmax[5], data->in_max[5]); - up(&data->update_lock); + mutex_unlock(&data->update_lock); return count; } @@ -351,10 +352,10 @@ static ssize_t set_temp0_max(struct device *dev, struct device_attribute *attr, struct vt8231_data *data = i2c_get_clientdata(client); int val = simple_strtol(buf, NULL, 10); - down(&data->update_lock); + mutex_lock(&data->update_lock); data->temp_max[0] = SENSORS_LIMIT((val + 500) / 1000, 0, 255); vt8231_write_value(client, regtempmax[0], data->temp_max[0]); - up(&data->update_lock); + mutex_unlock(&data->update_lock); return count; } static ssize_t set_temp0_min(struct device *dev, struct device_attribute *attr, @@ -364,10 +365,10 @@ static ssize_t set_temp0_min(struct device *dev, struct device_attribute *attr, struct vt8231_data *data = i2c_get_clientdata(client); int val = simple_strtol(buf, NULL, 10); - down(&data->update_lock); + mutex_lock(&data->update_lock); data->temp_min[0] = SENSORS_LIMIT((val + 500) / 1000, 0, 255); vt8231_write_value(client, regtempmin[0], data->temp_min[0]); - up(&data->update_lock); + mutex_unlock(&data->update_lock); return count; } @@ -407,10 +408,10 @@ static ssize_t set_temp_max(struct device *dev, struct device_attribute *attr, struct vt8231_data *data = i2c_get_clientdata(client); int val = simple_strtol(buf, NULL, 10); - down(&data->update_lock); + mutex_lock(&data->update_lock); data->temp_max[nr] = SENSORS_LIMIT(TEMP_MAXMIN_TO_REG(val), 0, 255); vt8231_write_value(client, regtempmax[nr], data->temp_max[nr]); - up(&data->update_lock); + mutex_unlock(&data->update_lock); return count; } static ssize_t set_temp_min(struct device *dev, struct device_attribute *attr, @@ -422,10 +423,10 @@ static 
ssize_t set_temp_min(struct device *dev, struct device_attribute *attr, struct vt8231_data *data = i2c_get_clientdata(client); int val = simple_strtol(buf, NULL, 10); - down(&data->update_lock); + mutex_lock(&data->update_lock); data->temp_min[nr] = SENSORS_LIMIT(TEMP_MAXMIN_TO_REG(val), 0, 255); vt8231_write_value(client, regtempmin[nr], data->temp_min[nr]); - up(&data->update_lock); + mutex_unlock(&data->update_lock); return count; } @@ -520,10 +521,10 @@ static ssize_t set_fan_min(struct device *dev, struct device_attribute *attr, struct vt8231_data *data = i2c_get_clientdata(client); int val = simple_strtoul(buf, NULL, 10); - down(&data->update_lock); + mutex_lock(&data->update_lock); data->fan_min[nr] = FAN_TO_REG(val, DIV_FROM_REG(data->fan_div[nr])); vt8231_write_value(client, VT8231_REG_FAN_MIN(nr), data->fan_min[nr]); - up(&data->update_lock); + mutex_unlock(&data->update_lock); return count; } @@ -539,7 +540,7 @@ static ssize_t set_fan_div(struct device *dev, struct device_attribute *attr, long min = FAN_FROM_REG(data->fan_min[nr], DIV_FROM_REG(data->fan_div[nr])); - down(&data->update_lock); + mutex_lock(&data->update_lock); switch (val) { case 1: data->fan_div[nr] = 0; break; case 2: data->fan_div[nr] = 1; break; @@ -548,7 +549,7 @@ static ssize_t set_fan_div(struct device *dev, struct device_attribute *attr, default: dev_err(&client->dev, "fan_div value %ld not supported." "Choose one of 1, 2, 4 or 8!\n", val); - up(&data->update_lock); + mutex_unlock(&data->update_lock); return -EINVAL; } @@ -558,7 +559,7 @@ static ssize_t set_fan_div(struct device *dev, struct device_attribute *attr, old = (old & 0x0f) | (data->fan_div[1] << 6) | (data->fan_div[0] << 4); vt8231_write_value(client, VT8231_REG_FANDIV, old); - up(&data->update_lock); + mutex_unlock(&data->update_lock); return count; } @@ -660,7 +661,7 @@ int vt8231_detect(struct i2c_adapter *adapter) /* Fill in the remaining client fields and put into the global list */ strlcpy(client->name, "vt8231", I2C_NAME_SIZE); - init_MUTEX(&data->update_lock); + mutex_init(&data->update_lock); /* Tell the I2C layer a new client has arrived */ if ((err = i2c_attach_client(client))) @@ -745,7 +746,7 @@ static struct vt8231_data *vt8231_update_device(struct device *dev) int i; u16 low; - down(&data->update_lock); + mutex_lock(&data->update_lock); if (time_after(jiffies, data->last_updated + HZ + HZ / 2) || !data->valid) { @@ -804,7 +805,7 @@ static struct vt8231_data *vt8231_update_device(struct device *dev) data->valid = 1; } - up(&data->update_lock); + mutex_unlock(&data->update_lock); return data; } diff --git a/drivers/hwmon/w83627ehf.c b/drivers/hwmon/w83627ehf.c index 12d79f5e490..b6bd5685fd3 100644 --- a/drivers/hwmon/w83627ehf.c +++ b/drivers/hwmon/w83627ehf.c @@ -42,7 +42,9 @@ #include <linux/i2c.h> #include <linux/i2c-isa.h> #include <linux/hwmon.h> +#include <linux/hwmon-sysfs.h> #include <linux/err.h> +#include <linux/mutex.h> #include <asm/io.h> #include "lm75.h" @@ -177,9 +179,9 @@ temp1_to_reg(int temp) struct w83627ehf_data { struct i2c_client client; struct class_device *class_dev; - struct semaphore lock; + struct mutex lock; - struct semaphore update_lock; + struct mutex update_lock; char valid; /* !=0 if following fields are valid */ unsigned long last_updated; /* In jiffies */ @@ -230,7 +232,7 @@ static u16 w83627ehf_read_value(struct i2c_client *client, u16 reg) struct w83627ehf_data *data = i2c_get_clientdata(client); int res, word_sized = is_word_sized(reg); - down(&data->lock); + mutex_lock(&data->lock); 
w83627ehf_set_bank(client, reg); outb_p(reg & 0xff, client->addr + ADDR_REG_OFFSET); @@ -242,7 +244,7 @@ static u16 w83627ehf_read_value(struct i2c_client *client, u16 reg) } w83627ehf_reset_bank(client, reg); - up(&data->lock); + mutex_unlock(&data->lock); return res; } @@ -252,7 +254,7 @@ static int w83627ehf_write_value(struct i2c_client *client, u16 reg, u16 value) struct w83627ehf_data *data = i2c_get_clientdata(client); int word_sized = is_word_sized(reg); - down(&data->lock); + mutex_lock(&data->lock); w83627ehf_set_bank(client, reg); outb_p(reg & 0xff, client->addr + ADDR_REG_OFFSET); @@ -264,7 +266,7 @@ static int w83627ehf_write_value(struct i2c_client *client, u16 reg, u16 value) outb_p(value & 0xff, client->addr + DATA_REG_OFFSET); w83627ehf_reset_bank(client, reg); - up(&data->lock); + mutex_unlock(&data->lock); return 0; } @@ -322,7 +324,7 @@ static struct w83627ehf_data *w83627ehf_update_device(struct device *dev) struct w83627ehf_data *data = i2c_get_clientdata(client); int i; - down(&data->update_lock); + mutex_lock(&data->update_lock); if (time_after(jiffies, data->last_updated + HZ) || !data->valid) { @@ -397,7 +399,7 @@ static struct w83627ehf_data *w83627ehf_update_device(struct device *dev) data->valid = 1; } - up(&data->update_lock); + mutex_unlock(&data->update_lock); return data; } @@ -407,9 +409,12 @@ static struct w83627ehf_data *w83627ehf_update_device(struct device *dev) #define show_fan_reg(reg) \ static ssize_t \ -show_##reg(struct device *dev, char *buf, int nr) \ +show_##reg(struct device *dev, struct device_attribute *attr, \ + char *buf) \ { \ struct w83627ehf_data *data = w83627ehf_update_device(dev); \ + struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr); \ + int nr = sensor_attr->index; \ return sprintf(buf, "%d\n", \ fan_from_reg(data->reg[nr], \ div_from_reg(data->fan_div[nr]))); \ @@ -418,23 +423,28 @@ show_fan_reg(fan); show_fan_reg(fan_min); static ssize_t -show_fan_div(struct device *dev, char *buf, int nr) +show_fan_div(struct device *dev, struct device_attribute *attr, + char *buf) { struct w83627ehf_data *data = w83627ehf_update_device(dev); - return sprintf(buf, "%u\n", - div_from_reg(data->fan_div[nr])); + struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr); + int nr = sensor_attr->index; + return sprintf(buf, "%u\n", div_from_reg(data->fan_div[nr])); } static ssize_t -store_fan_min(struct device *dev, const char *buf, size_t count, int nr) +store_fan_min(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) { struct i2c_client *client = to_i2c_client(dev); struct w83627ehf_data *data = i2c_get_clientdata(client); + struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr); + int nr = sensor_attr->index; unsigned int val = simple_strtoul(buf, NULL, 10); unsigned int reg; u8 new_div; - down(&data->update_lock); + mutex_lock(&data->update_lock); if (!val) { /* No min limit, alarm disabled */ data->fan_min[nr] = 255; @@ -482,63 +492,46 @@ store_fan_min(struct device *dev, const char *buf, size_t count, int nr) } w83627ehf_write_value(client, W83627EHF_REG_FAN_MIN[nr], data->fan_min[nr]); - up(&data->update_lock); + mutex_unlock(&data->update_lock); return count; } -#define sysfs_fan_offset(offset) \ -static ssize_t \ -show_reg_fan_##offset(struct device *dev, struct device_attribute *attr, \ - char *buf) \ -{ \ - return show_fan(dev, buf, offset-1); \ -} \ -static DEVICE_ATTR(fan##offset##_input, S_IRUGO, \ - show_reg_fan_##offset, NULL); +static struct 
sensor_device_attribute sda_fan_input[] = { + SENSOR_ATTR(fan1_input, S_IRUGO, show_fan, NULL, 0), + SENSOR_ATTR(fan2_input, S_IRUGO, show_fan, NULL, 1), + SENSOR_ATTR(fan3_input, S_IRUGO, show_fan, NULL, 2), + SENSOR_ATTR(fan4_input, S_IRUGO, show_fan, NULL, 3), + SENSOR_ATTR(fan5_input, S_IRUGO, show_fan, NULL, 4), +}; -#define sysfs_fan_min_offset(offset) \ -static ssize_t \ -show_reg_fan##offset##_min(struct device *dev, struct device_attribute *attr, \ - char *buf) \ -{ \ - return show_fan_min(dev, buf, offset-1); \ -} \ -static ssize_t \ -store_reg_fan##offset##_min(struct device *dev, struct device_attribute *attr, \ - const char *buf, size_t count) \ -{ \ - return store_fan_min(dev, buf, count, offset-1); \ -} \ -static DEVICE_ATTR(fan##offset##_min, S_IRUGO | S_IWUSR, \ - show_reg_fan##offset##_min, \ - store_reg_fan##offset##_min); +static struct sensor_device_attribute sda_fan_min[] = { + SENSOR_ATTR(fan1_min, S_IWUSR | S_IRUGO, show_fan_min, + store_fan_min, 0), + SENSOR_ATTR(fan2_min, S_IWUSR | S_IRUGO, show_fan_min, + store_fan_min, 1), + SENSOR_ATTR(fan3_min, S_IWUSR | S_IRUGO, show_fan_min, + store_fan_min, 2), + SENSOR_ATTR(fan4_min, S_IWUSR | S_IRUGO, show_fan_min, + store_fan_min, 3), + SENSOR_ATTR(fan5_min, S_IWUSR | S_IRUGO, show_fan_min, + store_fan_min, 4), +}; -#define sysfs_fan_div_offset(offset) \ -static ssize_t \ -show_reg_fan##offset##_div(struct device *dev, struct device_attribute *attr, \ - char *buf) \ -{ \ - return show_fan_div(dev, buf, offset - 1); \ -} \ -static DEVICE_ATTR(fan##offset##_div, S_IRUGO, \ - show_reg_fan##offset##_div, NULL); - -sysfs_fan_offset(1); -sysfs_fan_min_offset(1); -sysfs_fan_div_offset(1); -sysfs_fan_offset(2); -sysfs_fan_min_offset(2); -sysfs_fan_div_offset(2); -sysfs_fan_offset(3); -sysfs_fan_min_offset(3); -sysfs_fan_div_offset(3); -sysfs_fan_offset(4); -sysfs_fan_min_offset(4); -sysfs_fan_div_offset(4); -sysfs_fan_offset(5); -sysfs_fan_min_offset(5); -sysfs_fan_div_offset(5); +static struct sensor_device_attribute sda_fan_div[] = { + SENSOR_ATTR(fan1_div, S_IRUGO, show_fan_div, NULL, 0), + SENSOR_ATTR(fan2_div, S_IRUGO, show_fan_div, NULL, 1), + SENSOR_ATTR(fan3_div, S_IRUGO, show_fan_div, NULL, 2), + SENSOR_ATTR(fan4_div, S_IRUGO, show_fan_div, NULL, 3), + SENSOR_ATTR(fan5_div, S_IRUGO, show_fan_div, NULL, 4), +}; + +static void device_create_file_fan(struct device *dev, int i) +{ + device_create_file(dev, &sda_fan_input[i].dev_attr); + device_create_file(dev, &sda_fan_div[i].dev_attr); + device_create_file(dev, &sda_fan_min[i].dev_attr); +} #define show_temp1_reg(reg) \ static ssize_t \ @@ -561,27 +554,24 @@ store_temp1_##reg(struct device *dev, struct device_attribute *attr, \ struct w83627ehf_data *data = i2c_get_clientdata(client); \ u32 val = simple_strtoul(buf, NULL, 10); \ \ - down(&data->update_lock); \ + mutex_lock(&data->update_lock); \ data->temp1_##reg = temp1_to_reg(val); \ w83627ehf_write_value(client, W83627EHF_REG_TEMP1_##REG, \ data->temp1_##reg); \ - up(&data->update_lock); \ + mutex_unlock(&data->update_lock); \ return count; \ } store_temp1_reg(OVER, max); store_temp1_reg(HYST, max_hyst); -static DEVICE_ATTR(temp1_input, S_IRUGO, show_temp1, NULL); -static DEVICE_ATTR(temp1_max, S_IRUGO| S_IWUSR, - show_temp1_max, store_temp1_max); -static DEVICE_ATTR(temp1_max_hyst, S_IRUGO| S_IWUSR, - show_temp1_max_hyst, store_temp1_max_hyst); - #define show_temp_reg(reg) \ static ssize_t \ -show_##reg (struct device *dev, char *buf, int nr) \ +show_##reg(struct device *dev, struct device_attribute *attr, \ + char 
*buf) \ { \ struct w83627ehf_data *data = w83627ehf_update_device(dev); \ + struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr); \ + int nr = sensor_attr->index; \ return sprintf(buf, "%d\n", \ LM75_TEMP_FROM_REG(data->reg[nr])); \ } @@ -591,55 +581,42 @@ show_temp_reg(temp_max_hyst); #define store_temp_reg(REG, reg) \ static ssize_t \ -store_##reg (struct device *dev, const char *buf, size_t count, int nr) \ +store_##reg(struct device *dev, struct device_attribute *attr, \ + const char *buf, size_t count) \ { \ struct i2c_client *client = to_i2c_client(dev); \ struct w83627ehf_data *data = i2c_get_clientdata(client); \ + struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr); \ + int nr = sensor_attr->index; \ u32 val = simple_strtoul(buf, NULL, 10); \ \ - down(&data->update_lock); \ + mutex_lock(&data->update_lock); \ data->reg[nr] = LM75_TEMP_TO_REG(val); \ w83627ehf_write_value(client, W83627EHF_REG_TEMP_##REG[nr], \ data->reg[nr]); \ - up(&data->update_lock); \ + mutex_unlock(&data->update_lock); \ return count; \ } store_temp_reg(OVER, temp_max); store_temp_reg(HYST, temp_max_hyst); -#define sysfs_temp_offset(offset) \ -static ssize_t \ -show_reg_temp##offset (struct device *dev, struct device_attribute *attr, \ - char *buf) \ -{ \ - return show_temp(dev, buf, offset - 2); \ -} \ -static DEVICE_ATTR(temp##offset##_input, S_IRUGO, \ - show_reg_temp##offset, NULL); - -#define sysfs_temp_reg_offset(reg, offset) \ -static ssize_t \ -show_reg_temp##offset##_##reg(struct device *dev, struct device_attribute *attr, \ - char *buf) \ -{ \ - return show_temp_##reg(dev, buf, offset - 2); \ -} \ -static ssize_t \ -store_reg_temp##offset##_##reg(struct device *dev, struct device_attribute *attr, \ - const char *buf, size_t count) \ -{ \ - return store_temp_##reg(dev, buf, count, offset - 2); \ -} \ -static DEVICE_ATTR(temp##offset##_##reg, S_IRUGO| S_IWUSR, \ - show_reg_temp##offset##_##reg, \ - store_reg_temp##offset##_##reg); - -sysfs_temp_offset(2); -sysfs_temp_reg_offset(max, 2); -sysfs_temp_reg_offset(max_hyst, 2); -sysfs_temp_offset(3); -sysfs_temp_reg_offset(max, 3); -sysfs_temp_reg_offset(max_hyst, 3); +static struct sensor_device_attribute sda_temp[] = { + SENSOR_ATTR(temp1_input, S_IRUGO, show_temp1, NULL, 0), + SENSOR_ATTR(temp2_input, S_IRUGO, show_temp, NULL, 0), + SENSOR_ATTR(temp3_input, S_IRUGO, show_temp, NULL, 1), + SENSOR_ATTR(temp1_max, S_IRUGO | S_IWUSR, show_temp1_max, + store_temp1_max, 0), + SENSOR_ATTR(temp2_max, S_IRUGO | S_IWUSR, show_temp_max, + store_temp_max, 0), + SENSOR_ATTR(temp3_max, S_IRUGO | S_IWUSR, show_temp_max, + store_temp_max, 1), + SENSOR_ATTR(temp1_max_hyst, S_IRUGO | S_IWUSR, show_temp1_max_hyst, + store_temp1_max_hyst, 0), + SENSOR_ATTR(temp2_max_hyst, S_IRUGO | S_IWUSR, show_temp_max_hyst, + store_temp_max_hyst, 0), + SENSOR_ATTR(temp3_max_hyst, S_IRUGO | S_IWUSR, show_temp_max_hyst, + store_temp_max_hyst, 1), +}; /* * Driver and client management @@ -673,6 +650,7 @@ static int w83627ehf_detect(struct i2c_adapter *adapter) { struct i2c_client *client; struct w83627ehf_data *data; + struct device *dev; int i, err = 0; if (!request_region(address + REGION_OFFSET, REGION_LENGTH, @@ -689,14 +667,15 @@ static int w83627ehf_detect(struct i2c_adapter *adapter) client = &data->client; i2c_set_clientdata(client, data); client->addr = address; - init_MUTEX(&data->lock); + mutex_init(&data->lock); client->adapter = adapter; client->driver = &w83627ehf_driver; client->flags = 0; + dev = &client->dev; strlcpy(client->name, 
"w83627ehf", I2C_NAME_SIZE); data->valid = 0; - init_MUTEX(&data->update_lock); + mutex_init(&data->update_lock); /* Tell the i2c layer a new client has arrived */ if ((err = i2c_attach_client(client))) @@ -720,42 +699,18 @@ static int w83627ehf_detect(struct i2c_adapter *adapter) data->has_fan |= (1 << 4); /* Register sysfs hooks */ - data->class_dev = hwmon_device_register(&client->dev); + data->class_dev = hwmon_device_register(dev); if (IS_ERR(data->class_dev)) { err = PTR_ERR(data->class_dev); goto exit_detach; } - device_create_file(&client->dev, &dev_attr_fan1_input); - device_create_file(&client->dev, &dev_attr_fan1_min); - device_create_file(&client->dev, &dev_attr_fan1_div); - device_create_file(&client->dev, &dev_attr_fan2_input); - device_create_file(&client->dev, &dev_attr_fan2_min); - device_create_file(&client->dev, &dev_attr_fan2_div); - device_create_file(&client->dev, &dev_attr_fan3_input); - device_create_file(&client->dev, &dev_attr_fan3_min); - device_create_file(&client->dev, &dev_attr_fan3_div); - - if (data->has_fan & (1 << 3)) { - device_create_file(&client->dev, &dev_attr_fan4_input); - device_create_file(&client->dev, &dev_attr_fan4_min); - device_create_file(&client->dev, &dev_attr_fan4_div); - } - if (data->has_fan & (1 << 4)) { - device_create_file(&client->dev, &dev_attr_fan5_input); - device_create_file(&client->dev, &dev_attr_fan5_min); - device_create_file(&client->dev, &dev_attr_fan5_div); + for (i = 0; i < 5; i++) { + if (data->has_fan & (1 << i)) + device_create_file_fan(dev, i); } - - device_create_file(&client->dev, &dev_attr_temp1_input); - device_create_file(&client->dev, &dev_attr_temp1_max); - device_create_file(&client->dev, &dev_attr_temp1_max_hyst); - device_create_file(&client->dev, &dev_attr_temp2_input); - device_create_file(&client->dev, &dev_attr_temp2_max); - device_create_file(&client->dev, &dev_attr_temp2_max_hyst); - device_create_file(&client->dev, &dev_attr_temp3_input); - device_create_file(&client->dev, &dev_attr_temp3_max); - device_create_file(&client->dev, &dev_attr_temp3_max_hyst); + for (i = 0; i < ARRAY_SIZE(sda_temp); i++) + device_create_file(dev, &sda_temp[i].dev_attr); return 0; diff --git a/drivers/hwmon/w83627hf.c b/drivers/hwmon/w83627hf.c index 7ea441d4da6..71fb7f1af8f 100644 --- a/drivers/hwmon/w83627hf.c +++ b/drivers/hwmon/w83627hf.c @@ -28,6 +28,7 @@ w83627hf 9 3 2 3 0x20 0x5ca3 no yes(LPC) w83627thf 7 3 3 3 0x90 0x5ca3 no yes(LPC) w83637hf 7 3 3 3 0x80 0x5ca3 no yes(LPC) + w83687thf 7 3 3 3 0x90 0x5ca3 no yes(LPC) w83697hf 8 2 2 2 0x60 0x5ca3 no yes(LPC) For other winbond chips, and for i2c support in the above chips, @@ -46,6 +47,7 @@ #include <linux/hwmon.h> #include <linux/hwmon-vid.h> #include <linux/err.h> +#include <linux/mutex.h> #include <asm/io.h> #include "lm75.h" @@ -62,7 +64,7 @@ MODULE_PARM_DESC(force_i2c, static unsigned short address; /* Insmod parameters */ -enum chips { any_chip, w83627hf, w83627thf, w83697hf, w83637hf }; +enum chips { any_chip, w83627hf, w83627thf, w83697hf, w83637hf, w83687thf }; static int reset; module_param(reset, bool, 0); @@ -100,6 +102,10 @@ static int VAL; /* The value to read/write */ #define W83627THF_GPIO5_IOSR 0xf3 /* w83627thf only */ #define W83627THF_GPIO5_DR 0xf4 /* w83627thf only */ +#define W83687THF_VID_EN 0x29 /* w83687thf only */ +#define W83687THF_VID_CFG 0xF0 /* w83687thf only */ +#define W83687THF_VID_DATA 0xF1 /* w83687thf only */ + static inline void superio_outb(int reg, int val) { @@ -138,6 +144,7 @@ superio_exit(void) #define W627THF_DEVID 0x82 
#define W697_DEVID 0x60 #define W637_DEVID 0x70 +#define W687THF_DEVID 0x85 #define WINB_ACT_REG 0x30 #define WINB_BASE_REG 0x60 /* Constants specified below */ @@ -201,11 +208,11 @@ superio_exit(void) #define W83627HF_REG_PWM1 0x5A #define W83627HF_REG_PWM2 0x5B -#define W83627THF_REG_PWM1 0x01 /* 697HF and 637HF too */ -#define W83627THF_REG_PWM2 0x03 /* 697HF and 637HF too */ -#define W83627THF_REG_PWM3 0x11 /* 637HF too */ +#define W83627THF_REG_PWM1 0x01 /* 697HF/637HF/687THF too */ +#define W83627THF_REG_PWM2 0x03 /* 697HF/637HF/687THF too */ +#define W83627THF_REG_PWM3 0x11 /* 637HF/687THF too */ -#define W83627THF_REG_VRM_OVT_CFG 0x18 /* 637HF too */ +#define W83627THF_REG_VRM_OVT_CFG 0x18 /* 637HF/687THF too */ static const u8 regpwm_627hf[] = { W83627HF_REG_PWM1, W83627HF_REG_PWM2 }; static const u8 regpwm[] = { W83627THF_REG_PWM1, W83627THF_REG_PWM2, @@ -285,10 +292,10 @@ static inline u8 DIV_TO_REG(long val) struct w83627hf_data { struct i2c_client client; struct class_device *class_dev; - struct semaphore lock; + struct mutex lock; enum chips type; - struct semaphore update_lock; + struct mutex update_lock; char valid; /* !=0 if following fields are valid */ unsigned long last_updated; /* In jiffies */ @@ -318,16 +325,15 @@ struct w83627hf_data { Default = 3435. Other Betas unimplemented */ u8 vrm; - u8 vrm_ovt; /* Register value, 627thf & 637hf only */ + u8 vrm_ovt; /* Register value, 627THF/637HF/687THF only */ }; static int w83627hf_detect(struct i2c_adapter *adapter); static int w83627hf_detach_client(struct i2c_client *client); -static int w83627hf_read_value(struct i2c_client *client, u16 register); -static int w83627hf_write_value(struct i2c_client *client, u16 register, - u16 value); +static int w83627hf_read_value(struct i2c_client *client, u16 reg); +static int w83627hf_write_value(struct i2c_client *client, u16 reg, u16 value); static struct w83627hf_data *w83627hf_update_device(struct device *dev); static void w83627hf_init_client(struct i2c_client *client); @@ -360,12 +366,12 @@ store_in_##reg (struct device *dev, const char *buf, size_t count, int nr) \ \ val = simple_strtoul(buf, NULL, 10); \ \ - down(&data->update_lock); \ + mutex_lock(&data->update_lock); \ data->in_##reg[nr] = IN_TO_REG(val); \ w83627hf_write_value(client, W83781D_REG_IN_##REG(nr), \ data->in_##reg[nr]); \ \ - up(&data->update_lock); \ + mutex_unlock(&data->update_lock); \ return count; \ } store_in_reg(MIN, min) @@ -413,7 +419,8 @@ static ssize_t show_in_0(struct w83627hf_data *data, char *buf, u8 reg) long in0; if ((data->vrm_ovt & 0x01) && - (w83627thf == data->type || w83637hf == data->type)) + (w83627thf == data->type || w83637hf == data->type + || w83687thf == data->type)) /* use VRM9 calculation */ in0 = (long)((reg * 488 + 70000 + 50) / 100); @@ -451,10 +458,11 @@ static ssize_t store_regs_in_min0(struct device *dev, struct device_attribute *a val = simple_strtoul(buf, NULL, 10); - down(&data->update_lock); + mutex_lock(&data->update_lock); if ((data->vrm_ovt & 0x01) && - (w83627thf == data->type || w83637hf == data->type)) + (w83627thf == data->type || w83637hf == data->type + || w83687thf == data->type)) /* use VRM9 calculation */ data->in_min[0] = @@ -465,7 +473,7 @@ static ssize_t store_regs_in_min0(struct device *dev, struct device_attribute *a data->in_min[0] = IN_TO_REG(val); w83627hf_write_value(client, W83781D_REG_IN_MIN(0), data->in_min[0]); - up(&data->update_lock); + mutex_unlock(&data->update_lock); return count; } @@ -478,10 +486,11 @@ static ssize_t 
store_regs_in_max0(struct device *dev, struct device_attribute *a val = simple_strtoul(buf, NULL, 10); - down(&data->update_lock); + mutex_lock(&data->update_lock); if ((data->vrm_ovt & 0x01) && - (w83627thf == data->type || w83637hf == data->type)) + (w83627thf == data->type || w83637hf == data->type + || w83687thf == data->type)) /* use VRM9 calculation */ data->in_max[0] = @@ -492,7 +501,7 @@ static ssize_t store_regs_in_max0(struct device *dev, struct device_attribute *a data->in_max[0] = IN_TO_REG(val); w83627hf_write_value(client, W83781D_REG_IN_MAX(0), data->in_max[0]); - up(&data->update_lock); + mutex_unlock(&data->update_lock); return count; } @@ -529,13 +538,13 @@ store_fan_min(struct device *dev, const char *buf, size_t count, int nr) val = simple_strtoul(buf, NULL, 10); - down(&data->update_lock); + mutex_lock(&data->update_lock); data->fan_min[nr - 1] = FAN_TO_REG(val, DIV_FROM_REG(data->fan_div[nr - 1])); w83627hf_write_value(client, W83781D_REG_FAN_MIN(nr), data->fan_min[nr - 1]); - up(&data->update_lock); + mutex_unlock(&data->update_lock); return count; } @@ -597,7 +606,7 @@ store_temp_##reg (struct device *dev, const char *buf, size_t count, int nr) \ \ val = simple_strtoul(buf, NULL, 10); \ \ - down(&data->update_lock); \ + mutex_lock(&data->update_lock); \ \ if (nr >= 2) { /* TEMP2 and TEMP3 */ \ data->temp_##reg##_add[nr-2] = LM75_TEMP_TO_REG(val); \ @@ -609,7 +618,7 @@ store_temp_##reg (struct device *dev, const char *buf, size_t count, int nr) \ data->temp_##reg); \ } \ \ - up(&data->update_lock); \ + mutex_unlock(&data->update_lock); \ return count; \ } store_temp_reg(OVER, max); @@ -718,7 +727,7 @@ store_beep_reg(struct device *dev, const char *buf, size_t count, val = simple_strtoul(buf, NULL, 10); - down(&data->update_lock); + mutex_lock(&data->update_lock); if (update_mask == BEEP_MASK) { /* We are storing beep_mask */ data->beep_mask = BEEP_MASK_TO_REG(val); @@ -736,7 +745,7 @@ store_beep_reg(struct device *dev, const char *buf, size_t count, w83627hf_write_value(client, W83781D_REG_BEEP_INTS2, val2 | data->beep_enable << 7); - up(&data->update_lock); + mutex_unlock(&data->update_lock); return count; } @@ -783,7 +792,7 @@ store_fan_div_reg(struct device *dev, const char *buf, size_t count, int nr) u8 reg; unsigned long val = simple_strtoul(buf, NULL, 10); - down(&data->update_lock); + mutex_lock(&data->update_lock); /* Save fan_min */ min = FAN_FROM_REG(data->fan_min[nr], @@ -805,7 +814,7 @@ store_fan_div_reg(struct device *dev, const char *buf, size_t count, int nr) data->fan_min[nr] = FAN_TO_REG(min, DIV_FROM_REG(data->fan_div[nr])); w83627hf_write_value(client, W83781D_REG_FAN_MIN(nr+1), data->fan_min[nr]); - up(&data->update_lock); + mutex_unlock(&data->update_lock); return count; } @@ -848,7 +857,7 @@ store_pwm_reg(struct device *dev, const char *buf, size_t count, int nr) val = simple_strtoul(buf, NULL, 10); - down(&data->update_lock); + mutex_lock(&data->update_lock); if (data->type == w83627thf) { /* bits 0-3 are reserved in 627THF */ @@ -865,7 +874,7 @@ store_pwm_reg(struct device *dev, const char *buf, size_t count, int nr) data->pwm[nr - 1]); } - up(&data->update_lock); + mutex_unlock(&data->update_lock); return count; } @@ -907,7 +916,7 @@ store_sensor_reg(struct device *dev, const char *buf, size_t count, int nr) val = simple_strtoul(buf, NULL, 10); - down(&data->update_lock); + mutex_lock(&data->update_lock); switch (val) { case 1: /* PII/Celeron diode */ @@ -941,7 +950,7 @@ store_sensor_reg(struct device *dev, const char *buf, size_t count, int 
nr) break; } - up(&data->update_lock); + mutex_unlock(&data->update_lock); return count; } @@ -980,7 +989,8 @@ static int __init w83627hf_find(int sioaddr, unsigned short *addr) if(val != W627_DEVID && val != W627THF_DEVID && val != W697_DEVID && - val != W637_DEVID) { + val != W637_DEVID && + val != W687THF_DEVID) { superio_exit(); return -ENODEV; } @@ -1034,6 +1044,8 @@ static int w83627hf_detect(struct i2c_adapter *adapter) kind = w83627thf; else if(val == W637_DEVID) kind = w83637hf; + else if (val == W687THF_DEVID) + kind = w83687thf; else { dev_info(&adapter->dev, "Unsupported chip (dev_id=0x%02X).\n", val); @@ -1057,7 +1069,7 @@ static int w83627hf_detect(struct i2c_adapter *adapter) new_client = &data->client; i2c_set_clientdata(new_client, data); new_client->addr = address; - init_MUTEX(&data->lock); + mutex_init(&data->lock); new_client->adapter = adapter; new_client->driver = &w83627hf_driver; new_client->flags = 0; @@ -1071,13 +1083,15 @@ static int w83627hf_detect(struct i2c_adapter *adapter) client_name = "w83697hf"; } else if (kind == w83637hf) { client_name = "w83637hf"; + } else if (kind == w83687thf) { + client_name = "w83687thf"; } /* Fill in the remaining client fields and put into the global list */ strlcpy(new_client->name, client_name, I2C_NAME_SIZE); data->type = kind; data->valid = 0; - init_MUTEX(&data->update_lock); + mutex_init(&data->update_lock); /* Tell the I2C layer a new client has arrived */ if ((err = i2c_attach_client(new_client))) @@ -1106,7 +1120,7 @@ static int w83627hf_detect(struct i2c_adapter *adapter) device_create_file_in(new_client, 2); device_create_file_in(new_client, 3); device_create_file_in(new_client, 4); - if (kind != w83627thf && kind != w83637hf) { + if (kind == w83627hf || kind == w83697hf) { device_create_file_in(new_client, 5); device_create_file_in(new_client, 6); } @@ -1139,7 +1153,7 @@ static int w83627hf_detect(struct i2c_adapter *adapter) device_create_file_pwm(new_client, 1); device_create_file_pwm(new_client, 2); - if (kind == w83627thf || kind == w83637hf) + if (kind == w83627thf || kind == w83637hf || kind == w83687thf) device_create_file_pwm(new_client, 3); device_create_file_sensor(new_client, 1); @@ -1187,7 +1201,7 @@ static int w83627hf_read_value(struct i2c_client *client, u16 reg) struct w83627hf_data *data = i2c_get_clientdata(client); int res, word_sized; - down(&data->lock); + mutex_lock(&data->lock); word_sized = (((reg & 0xff00) == 0x100) || ((reg & 0xff00) == 0x200)) && (((reg & 0x00ff) == 0x50) @@ -1213,7 +1227,7 @@ static int w83627hf_read_value(struct i2c_client *client, u16 reg) client->addr + W83781D_ADDR_REG_OFFSET); outb_p(0, client->addr + W83781D_DATA_REG_OFFSET); } - up(&data->lock); + mutex_unlock(&data->lock); return res; } @@ -1247,12 +1261,39 @@ exit: return res; } +static int w83687thf_read_vid(struct i2c_client *client) +{ + int res = 0xff; + + superio_enter(); + superio_select(W83627HF_LD_HWM); + + /* Make sure these GPIO pins are enabled */ + if (!(superio_inb(W83687THF_VID_EN) & (1 << 2))) { + dev_dbg(&client->dev, "VID disabled, no VID function\n"); + goto exit; + } + + /* Make sure the pins are configured for input */ + if (!(superio_inb(W83687THF_VID_CFG) & (1 << 4))) { + dev_dbg(&client->dev, "VID configured as output, " + "no VID function\n"); + goto exit; + } + + res = superio_inb(W83687THF_VID_DATA) & 0x3f; + +exit: + superio_exit(); + return res; +} + static int w83627hf_write_value(struct i2c_client *client, u16 reg, u16 value) { struct w83627hf_data *data = i2c_get_clientdata(client); 
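/*
 * Editorial aside, not part of the patch: the locking hunks just
 * above and below this point are instances of the tree-wide switch
 * from semaphores used as mutexes to the then-new mutex API.  A
 * minimal before/after sketch follows; the "demo" names are
 * hypothetical and only illustrate the conversion.
 */
#include <linux/mutex.h>

struct demo_data {
        struct mutex lock;              /* was: struct semaphore lock */
};

static void demo_init(struct demo_data *d)
{
        mutex_init(&d->lock);           /* was: init_MUTEX(&d->lock) */
}

static void demo_access(struct demo_data *d)
{
        mutex_lock(&d->lock);           /* was: down(&d->lock) */
        /* banked register access is serialized here */
        mutex_unlock(&d->lock);         /* was: up(&d->lock) */
}

/*
 * Mutexes keep the sleeping behaviour these register-access paths
 * rely on while adding owner tracking under CONFIG_DEBUG_MUTEXES,
 * which counting semaphores cannot offer.
 */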
int word_sized; - down(&data->lock); + mutex_lock(&data->lock); word_sized = (((reg & 0xff00) == 0x100) || ((reg & 0xff00) == 0x200)) && (((reg & 0x00ff) == 0x53) @@ -1277,7 +1318,7 @@ static int w83627hf_write_value(struct i2c_client *client, u16 reg, u16 value) client->addr + W83781D_ADDR_REG_OFFSET); outb_p(0, client->addr + W83781D_DATA_REG_OFFSET); } - up(&data->lock); + mutex_unlock(&data->lock); return 0; } @@ -1324,10 +1365,13 @@ static void w83627hf_init_client(struct i2c_client *client) data->vid = (lo & 0x0f) | ((hi & 0x01) << 4); } else if (w83627thf == data->type) { data->vid = w83627thf_read_gpio5(client); + } else if (w83687thf == data->type) { + data->vid = w83687thf_read_vid(client); } /* Read VRM & OVT Config only once */ - if (w83627thf == data->type || w83637hf == data->type) { + if (w83627thf == data->type || w83637hf == data->type + || w83687thf == data->type) { data->vrm_ovt = w83627hf_read_value(client, W83627THF_REG_VRM_OVT_CFG); } @@ -1387,14 +1431,14 @@ static struct w83627hf_data *w83627hf_update_device(struct device *dev) struct w83627hf_data *data = i2c_get_clientdata(client); int i; - down(&data->update_lock); + mutex_lock(&data->update_lock); if (time_after(jiffies, data->last_updated + HZ + HZ / 2) || !data->valid) { for (i = 0; i <= 8; i++) { /* skip missing sensors */ if (((data->type == w83697hf) && (i == 1)) || - ((data->type == w83627thf || data->type == w83637hf) + ((data->type != w83627hf && data->type != w83697hf) && (i == 5 || i == 6))) continue; data->in[i] = @@ -1470,7 +1514,7 @@ static struct w83627hf_data *w83627hf_update_device(struct device *dev) data->valid = 1; } - up(&data->update_lock); + mutex_unlock(&data->update_lock); return data; } diff --git a/drivers/hwmon/w83781d.c b/drivers/hwmon/w83781d.c index 64c1f8af5bb..e4c700356c4 100644 --- a/drivers/hwmon/w83781d.c +++ b/drivers/hwmon/w83781d.c @@ -42,6 +42,7 @@ #include <linux/hwmon.h> #include <linux/hwmon-vid.h> #include <linux/err.h> +#include <linux/mutex.h> #include <asm/io.h> #include "lm75.h" @@ -56,6 +57,10 @@ I2C_CLIENT_INSMOD_5(w83781d, w83782d, w83783s, w83627hf, as99127f); I2C_CLIENT_MODULE_PARM(force_subclients, "List of subclient addresses: " "{bus, clientaddr, subclientaddr1, subclientaddr2}"); +static int reset; +module_param(reset, bool, 0); +MODULE_PARM_DESC(reset, "Set to one to reset chip on load"); + static int init = 1; module_param(init, bool, 0); MODULE_PARM_DESC(init, "Set to zero to bypass chip initialization"); @@ -226,10 +231,10 @@ DIV_TO_REG(long val, enum chips type) struct w83781d_data { struct i2c_client client; struct class_device *class_dev; - struct semaphore lock; + struct mutex lock; enum chips type; - struct semaphore update_lock; + struct mutex update_lock; char valid; /* !=0 if following fields are valid */ unsigned long last_updated; /* In jiffies */ @@ -267,9 +272,8 @@ static int w83781d_isa_attach_adapter(struct i2c_adapter *adapter); static int w83781d_detect(struct i2c_adapter *adapter, int address, int kind); static int w83781d_detach_client(struct i2c_client *client); -static int w83781d_read_value(struct i2c_client *client, u16 register); -static int w83781d_write_value(struct i2c_client *client, u16 register, - u16 value); +static int w83781d_read_value(struct i2c_client *client, u16 reg); +static int w83781d_write_value(struct i2c_client *client, u16 reg, u16 value); static struct w83781d_data *w83781d_update_device(struct device *dev); static void w83781d_init_client(struct i2c_client *client); @@ -311,11 +315,11 @@ static ssize_t 
store_in_##reg (struct device *dev, const char *buf, size_t count \ val = simple_strtoul(buf, NULL, 10) / 10; \ \ - down(&data->update_lock); \ + mutex_lock(&data->update_lock); \ data->in_##reg[nr] = IN_TO_REG(val); \ w83781d_write_value(client, W83781D_REG_IN_##REG(nr), data->in_##reg[nr]); \ \ - up(&data->update_lock); \ + mutex_unlock(&data->update_lock); \ return count; \ } store_in_reg(MIN, min); @@ -381,13 +385,13 @@ store_fan_min(struct device *dev, const char *buf, size_t count, int nr) val = simple_strtoul(buf, NULL, 10); - down(&data->update_lock); + mutex_lock(&data->update_lock); data->fan_min[nr - 1] = FAN_TO_REG(val, DIV_FROM_REG(data->fan_div[nr - 1])); w83781d_write_value(client, W83781D_REG_FAN_MIN(nr), data->fan_min[nr - 1]); - up(&data->update_lock); + mutex_unlock(&data->update_lock); return count; } @@ -446,7 +450,7 @@ static ssize_t store_temp_##reg (struct device *dev, const char *buf, size_t cou \ val = simple_strtol(buf, NULL, 10); \ \ - down(&data->update_lock); \ + mutex_lock(&data->update_lock); \ \ if (nr >= 2) { /* TEMP2 and TEMP3 */ \ data->temp_##reg##_add[nr-2] = LM75_TEMP_TO_REG(val); \ @@ -458,7 +462,7 @@ static ssize_t store_temp_##reg (struct device *dev, const char *buf, size_t cou data->temp_##reg); \ } \ \ - up(&data->update_lock); \ + mutex_unlock(&data->update_lock); \ return count; \ } store_temp_reg(OVER, max); @@ -571,7 +575,7 @@ store_beep_reg(struct device *dev, const char *buf, size_t count, val = simple_strtoul(buf, NULL, 10); - down(&data->update_lock); + mutex_lock(&data->update_lock); if (update_mask == BEEP_MASK) { /* We are storing beep_mask */ data->beep_mask = BEEP_MASK_TO_REG(val, data->type); @@ -592,7 +596,7 @@ store_beep_reg(struct device *dev, const char *buf, size_t count, w83781d_write_value(client, W83781D_REG_BEEP_INTS2, val2 | data->beep_enable << 7); - up(&data->update_lock); + mutex_unlock(&data->update_lock); return count; } @@ -637,7 +641,7 @@ store_fan_div_reg(struct device *dev, const char *buf, size_t count, int nr) u8 reg; unsigned long val = simple_strtoul(buf, NULL, 10); - down(&data->update_lock); + mutex_lock(&data->update_lock); /* Save fan_min */ min = FAN_FROM_REG(data->fan_min[nr], @@ -662,7 +666,7 @@ store_fan_div_reg(struct device *dev, const char *buf, size_t count, int nr) data->fan_min[nr] = FAN_TO_REG(min, DIV_FROM_REG(data->fan_div[nr])); w83781d_write_value(client, W83781D_REG_FAN_MIN(nr+1), data->fan_min[nr]); - up(&data->update_lock); + mutex_unlock(&data->update_lock); return count; } @@ -709,10 +713,10 @@ store_pwm_reg(struct device *dev, const char *buf, size_t count, int nr) val = simple_strtoul(buf, NULL, 10); - down(&data->update_lock); + mutex_lock(&data->update_lock); data->pwm[nr - 1] = PWM_TO_REG(val); w83781d_write_value(client, W83781D_REG_PWM(nr), data->pwm[nr - 1]); - up(&data->update_lock); + mutex_unlock(&data->update_lock); return count; } @@ -725,7 +729,7 @@ store_pwmenable_reg(struct device *dev, const char *buf, size_t count, int nr) val = simple_strtoul(buf, NULL, 10); - down(&data->update_lock); + mutex_lock(&data->update_lock); switch (val) { case 0: @@ -742,11 +746,11 @@ store_pwmenable_reg(struct device *dev, const char *buf, size_t count, int nr) break; default: - up(&data->update_lock); + mutex_unlock(&data->update_lock); return -EINVAL; } - up(&data->update_lock); + mutex_unlock(&data->update_lock); return count; } @@ -808,7 +812,7 @@ store_sensor_reg(struct device *dev, const char *buf, size_t count, int nr) val = simple_strtoul(buf, NULL, 10); - 
down(&data->update_lock); + mutex_lock(&data->update_lock); switch (val) { case 1: /* PII/Celeron diode */ @@ -841,7 +845,7 @@ store_sensor_reg(struct device *dev, const char *buf, size_t count, int nr) break; } - up(&data->update_lock); + mutex_unlock(&data->update_lock); return count; } @@ -1073,7 +1077,7 @@ w83781d_detect(struct i2c_adapter *adapter, int address, int kind) new_client = &data->client; i2c_set_clientdata(new_client, data); new_client->addr = address; - init_MUTEX(&data->lock); + mutex_init(&data->lock); new_client->adapter = adapter; new_client->driver = is_isa ? &w83781d_isa_driver : &w83781d_driver; new_client->flags = 0; @@ -1178,7 +1182,7 @@ w83781d_detect(struct i2c_adapter *adapter, int address, int kind) data->type = kind; data->valid = 0; - init_MUTEX(&data->update_lock); + mutex_init(&data->update_lock); /* Tell the I2C layer a new client has arrived */ if ((err = i2c_attach_client(new_client))) @@ -1325,7 +1329,7 @@ w83781d_read_value(struct i2c_client *client, u16 reg) int res, word_sized, bank; struct i2c_client *cl; - down(&data->lock); + mutex_lock(&data->lock); if (i2c_is_isa_client(client)) { word_sized = (((reg & 0xff00) == 0x100) || ((reg & 0xff00) == 0x200)) @@ -1383,7 +1387,7 @@ w83781d_read_value(struct i2c_client *client, u16 reg) if (bank > 2) i2c_smbus_write_byte_data(client, W83781D_REG_BANK, 0); } - up(&data->lock); + mutex_unlock(&data->lock); return res; } @@ -1394,7 +1398,7 @@ w83781d_write_value(struct i2c_client *client, u16 reg, u16 value) int word_sized, bank; struct i2c_client *cl; - down(&data->lock); + mutex_lock(&data->lock); if (i2c_is_isa_client(client)) { word_sized = (((reg & 0xff00) == 0x100) || ((reg & 0xff00) == 0x200)) @@ -1447,7 +1451,7 @@ w83781d_write_value(struct i2c_client *client, u16 reg, u16 value) if (bank > 2) i2c_smbus_write_byte_data(client, W83781D_REG_BANK, 0); } - up(&data->lock); + mutex_unlock(&data->lock); return 0; } @@ -1459,8 +1463,17 @@ w83781d_init_client(struct i2c_client *client) int type = data->type; u8 tmp; - if (init && type != as99127f) { /* this resets registers we don't have + if (reset && type != as99127f) { /* this resets registers we don't have documentation for on the as99127f */ + /* Resetting the chip has been the default for a long time, + but it causes the BIOS initializations (fan clock dividers, + thermal sensor types...) to be lost, so it is now optional. + It might even go away if nobody reports it as being useful, + as I see very little reason why this would be needed at + all. */ + dev_info(&client->dev, "If reset=1 solved a problem you were " + "having, please report!\n"); + /* save these registers */ i = w83781d_read_value(client, W83781D_REG_BEEP_CONFIG); p = w83781d_read_value(client, W83781D_REG_PWMCLK12); @@ -1477,6 +1490,13 @@ w83781d_init_client(struct i2c_client *client) w83781d_write_value(client, W83781D_REG_BEEP_INTS2, 0); } + /* Disable power-on abnormal beep, as advised by the datasheet. + Already done if reset=1. 
*/ + if (init && !reset && type != as99127f) { + i = w83781d_read_value(client, W83781D_REG_BEEP_CONFIG); + w83781d_write_value(client, W83781D_REG_BEEP_CONFIG, i | 0x80); + } + data->vrm = vid_which_vrm(); if ((type != w83781d) && (type != as99127f)) { @@ -1533,7 +1553,7 @@ static struct w83781d_data *w83781d_update_device(struct device *dev) struct w83781d_data *data = i2c_get_clientdata(client); int i; - down(&data->update_lock); + mutex_lock(&data->update_lock); if (time_after(jiffies, data->last_updated + HZ + HZ / 2) || !data->valid) { @@ -1641,7 +1661,7 @@ static struct w83781d_data *w83781d_update_device(struct device *dev) data->valid = 1; } - up(&data->update_lock); + mutex_unlock(&data->update_lock); return data; } diff --git a/drivers/hwmon/w83792d.c b/drivers/hwmon/w83792d.c index a2f6bb67623..6865c64d8a5 100644 --- a/drivers/hwmon/w83792d.c +++ b/drivers/hwmon/w83792d.c @@ -43,6 +43,7 @@ #include <linux/hwmon.h> #include <linux/hwmon-sysfs.h> #include <linux/err.h> +#include <linux/mutex.h> /* Addresses to scan */ static unsigned short normal_i2c[] = { 0x2c, 0x2d, 0x2e, 0x2f, I2C_CLIENT_END }; @@ -271,7 +272,7 @@ struct w83792d_data { struct class_device *class_dev; enum chips type; - struct semaphore update_lock; + struct mutex update_lock; char valid; /* !=0 if following fields are valid */ unsigned long last_updated; /* In jiffies */ @@ -382,30 +383,40 @@ static ssize_t store_in_##reg (struct device *dev, \ store_in_reg(MIN, min); store_in_reg(MAX, max); -#define sysfs_in_reg(offset) \ -static SENSOR_DEVICE_ATTR(in##offset##_input, S_IRUGO, show_in, \ - NULL, offset); \ -static SENSOR_DEVICE_ATTR(in##offset##_min, S_IRUGO | S_IWUSR, \ - show_in_min, store_in_min, offset); \ -static SENSOR_DEVICE_ATTR(in##offset##_max, S_IRUGO | S_IWUSR, \ - show_in_max, store_in_max, offset); - -sysfs_in_reg(0); -sysfs_in_reg(1); -sysfs_in_reg(2); -sysfs_in_reg(3); -sysfs_in_reg(4); -sysfs_in_reg(5); -sysfs_in_reg(6); -sysfs_in_reg(7); -sysfs_in_reg(8); - -#define device_create_file_in(client, offset) \ -do { \ -device_create_file(&client->dev, &sensor_dev_attr_in##offset##_input.dev_attr); \ -device_create_file(&client->dev, &sensor_dev_attr_in##offset##_max.dev_attr); \ -device_create_file(&client->dev, &sensor_dev_attr_in##offset##_min.dev_attr); \ -} while (0) +static struct sensor_device_attribute sda_in_input[] = { + SENSOR_ATTR(in0_input, S_IRUGO, show_in, NULL, 0), + SENSOR_ATTR(in1_input, S_IRUGO, show_in, NULL, 1), + SENSOR_ATTR(in2_input, S_IRUGO, show_in, NULL, 2), + SENSOR_ATTR(in3_input, S_IRUGO, show_in, NULL, 3), + SENSOR_ATTR(in4_input, S_IRUGO, show_in, NULL, 4), + SENSOR_ATTR(in5_input, S_IRUGO, show_in, NULL, 5), + SENSOR_ATTR(in6_input, S_IRUGO, show_in, NULL, 6), + SENSOR_ATTR(in7_input, S_IRUGO, show_in, NULL, 7), + SENSOR_ATTR(in8_input, S_IRUGO, show_in, NULL, 8), +}; +static struct sensor_device_attribute sda_in_min[] = { + SENSOR_ATTR(in0_min, S_IWUSR | S_IRUGO, show_in_min, store_in_min, 0), + SENSOR_ATTR(in1_min, S_IWUSR | S_IRUGO, show_in_min, store_in_min, 1), + SENSOR_ATTR(in2_min, S_IWUSR | S_IRUGO, show_in_min, store_in_min, 2), + SENSOR_ATTR(in3_min, S_IWUSR | S_IRUGO, show_in_min, store_in_min, 3), + SENSOR_ATTR(in4_min, S_IWUSR | S_IRUGO, show_in_min, store_in_min, 4), + SENSOR_ATTR(in5_min, S_IWUSR | S_IRUGO, show_in_min, store_in_min, 5), + SENSOR_ATTR(in6_min, S_IWUSR | S_IRUGO, show_in_min, store_in_min, 6), + SENSOR_ATTR(in7_min, S_IWUSR | S_IRUGO, show_in_min, store_in_min, 7), + SENSOR_ATTR(in8_min, S_IWUSR | S_IRUGO, show_in_min, store_in_min, 
8), +}; +static struct sensor_device_attribute sda_in_max[] = { + SENSOR_ATTR(in0_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 0), + SENSOR_ATTR(in1_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 1), + SENSOR_ATTR(in2_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 2), + SENSOR_ATTR(in3_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 3), + SENSOR_ATTR(in4_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 4), + SENSOR_ATTR(in5_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 5), + SENSOR_ATTR(in6_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 6), + SENSOR_ATTR(in7_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 7), + SENSOR_ATTR(in8_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 8), +}; + #define show_fan_reg(reg) \ static ssize_t show_##reg (struct device *dev, struct device_attribute *attr, \ @@ -486,28 +497,33 @@ store_fan_div(struct device *dev, struct device_attribute *attr, return count; } -#define sysfs_fan(offset) \ -static SENSOR_DEVICE_ATTR(fan##offset##_input, S_IRUGO, show_fan, NULL, \ - offset); \ -static SENSOR_DEVICE_ATTR(fan##offset##_div, S_IRUGO | S_IWUSR, \ - show_fan_div, store_fan_div, offset); \ -static SENSOR_DEVICE_ATTR(fan##offset##_min, S_IRUGO | S_IWUSR, \ - show_fan_min, store_fan_min, offset); - -sysfs_fan(1); -sysfs_fan(2); -sysfs_fan(3); -sysfs_fan(4); -sysfs_fan(5); -sysfs_fan(6); -sysfs_fan(7); - -#define device_create_file_fan(client, offset) \ -do { \ -device_create_file(&client->dev, &sensor_dev_attr_fan##offset##_input.dev_attr); \ -device_create_file(&client->dev, &sensor_dev_attr_fan##offset##_div.dev_attr); \ -device_create_file(&client->dev, &sensor_dev_attr_fan##offset##_min.dev_attr); \ -} while (0) +static struct sensor_device_attribute sda_fan_input[] = { + SENSOR_ATTR(fan1_input, S_IRUGO, show_fan, NULL, 1), + SENSOR_ATTR(fan2_input, S_IRUGO, show_fan, NULL, 2), + SENSOR_ATTR(fan3_input, S_IRUGO, show_fan, NULL, 3), + SENSOR_ATTR(fan4_input, S_IRUGO, show_fan, NULL, 4), + SENSOR_ATTR(fan5_input, S_IRUGO, show_fan, NULL, 5), + SENSOR_ATTR(fan6_input, S_IRUGO, show_fan, NULL, 6), + SENSOR_ATTR(fan7_input, S_IRUGO, show_fan, NULL, 7), +}; +static struct sensor_device_attribute sda_fan_min[] = { + SENSOR_ATTR(fan1_min, S_IWUSR | S_IRUGO, show_fan_min, store_fan_min, 1), + SENSOR_ATTR(fan2_min, S_IWUSR | S_IRUGO, show_fan_min, store_fan_min, 2), + SENSOR_ATTR(fan3_min, S_IWUSR | S_IRUGO, show_fan_min, store_fan_min, 3), + SENSOR_ATTR(fan4_min, S_IWUSR | S_IRUGO, show_fan_min, store_fan_min, 4), + SENSOR_ATTR(fan5_min, S_IWUSR | S_IRUGO, show_fan_min, store_fan_min, 5), + SENSOR_ATTR(fan6_min, S_IWUSR | S_IRUGO, show_fan_min, store_fan_min, 6), + SENSOR_ATTR(fan7_min, S_IWUSR | S_IRUGO, show_fan_min, store_fan_min, 7), +}; +static struct sensor_device_attribute sda_fan_div[] = { + SENSOR_ATTR(fan1_div, S_IWUSR | S_IRUGO, show_fan_div, store_fan_div, 1), + SENSOR_ATTR(fan2_div, S_IWUSR | S_IRUGO, show_fan_div, store_fan_div, 2), + SENSOR_ATTR(fan3_div, S_IWUSR | S_IRUGO, show_fan_div, store_fan_div, 3), + SENSOR_ATTR(fan4_div, S_IWUSR | S_IRUGO, show_fan_div, store_fan_div, 4), + SENSOR_ATTR(fan5_div, S_IWUSR | S_IRUGO, show_fan_div, store_fan_div, 5), + SENSOR_ATTR(fan6_div, S_IWUSR | S_IRUGO, show_fan_div, store_fan_div, 6), + SENSOR_ATTR(fan7_div, S_IWUSR | S_IRUGO, show_fan_div, store_fan_div, 7), +}; /* read/write the temperature1, includes measured value and limits */ @@ -539,21 +555,6 @@ static ssize_t store_temp1(struct device *dev, struct device_attribute *attr, return count; } - -static 
SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, show_temp1, NULL, 0); -static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO | S_IWUSR, show_temp1, - store_temp1, 1); -static SENSOR_DEVICE_ATTR(temp1_max_hyst, S_IRUGO | S_IWUSR, show_temp1, - store_temp1, 2); - -#define device_create_file_temp1(client) \ -do { \ -device_create_file(&client->dev, &sensor_dev_attr_temp1_input.dev_attr); \ -device_create_file(&client->dev, &sensor_dev_attr_temp1_max.dev_attr); \ -device_create_file(&client->dev, &sensor_dev_attr_temp1_max_hyst.dev_attr); \ -} while (0) - - /* read/write the temperature2-3, includes measured value and limits */ static ssize_t show_temp23(struct device *dev, struct device_attribute *attr, @@ -590,25 +591,23 @@ static ssize_t store_temp23(struct device *dev, struct device_attribute *attr, return count; } -#define sysfs_temp23(name,idx) \ -static SENSOR_DEVICE_ATTR_2(name##_input, S_IRUGO, show_temp23, NULL, \ - idx, 0); \ -static SENSOR_DEVICE_ATTR_2(name##_max, S_IRUGO | S_IWUSR, \ - show_temp23, store_temp23, idx, 2); \ -static SENSOR_DEVICE_ATTR_2(name##_max_hyst, S_IRUGO | S_IWUSR, \ - show_temp23, store_temp23, idx, 4); - -sysfs_temp23(temp2,0) -sysfs_temp23(temp3,1) +static struct sensor_device_attribute_2 sda_temp_input[] = { + SENSOR_ATTR_2(temp1_input, S_IRUGO, show_temp1, NULL, 0, 0), + SENSOR_ATTR_2(temp2_input, S_IRUGO, show_temp23, NULL, 0, 0), + SENSOR_ATTR_2(temp3_input, S_IRUGO, show_temp23, NULL, 1, 0), +}; -#define device_create_file_temp_add(client, offset) \ -do { \ -device_create_file(&client->dev, &sensor_dev_attr_temp##offset##_input.dev_attr); \ -device_create_file(&client->dev, &sensor_dev_attr_temp##offset##_max.dev_attr); \ -device_create_file(&client->dev, \ -&sensor_dev_attr_temp##offset##_max_hyst.dev_attr); \ -} while (0) +static struct sensor_device_attribute_2 sda_temp_max[] = { + SENSOR_ATTR_2(temp1_max, S_IRUGO | S_IWUSR, show_temp1, store_temp1, 0, 1), + SENSOR_ATTR_2(temp2_max, S_IRUGO | S_IWUSR, show_temp23, store_temp23, 0, 2), + SENSOR_ATTR_2(temp3_max, S_IRUGO | S_IWUSR, show_temp23, store_temp23, 1, 2), +}; +static struct sensor_device_attribute_2 sda_temp_max_hyst[] = { + SENSOR_ATTR_2(temp1_max_hyst, S_IRUGO | S_IWUSR, show_temp1, store_temp1, 0, 2), + SENSOR_ATTR_2(temp2_max_hyst, S_IRUGO | S_IWUSR, show_temp23, store_temp23, 0, 4), + SENSOR_ATTR_2(temp3_max_hyst, S_IRUGO | S_IWUSR, show_temp23, store_temp23, 1, 4), +}; /* get reatime status of all sensors items: voltage, temp, fan */ static ssize_t @@ -620,10 +619,6 @@ show_alarms_reg(struct device *dev, struct device_attribute *attr, char *buf) static DEVICE_ATTR(alarms, S_IRUGO, show_alarms_reg, NULL); -#define device_create_file_alarms(client) \ -device_create_file(&client->dev, &dev_attr_alarms); - - static ssize_t show_pwm(struct device *dev, struct device_attribute *attr, @@ -711,26 +706,19 @@ store_pwmenable(struct device *dev, struct device_attribute *attr, return count; } -#define sysfs_pwm(offset) \ -static SENSOR_DEVICE_ATTR(pwm##offset, S_IRUGO | S_IWUSR, \ - show_pwm, store_pwm, offset); \ -static SENSOR_DEVICE_ATTR(pwm##offset##_enable, S_IRUGO | S_IWUSR, \ - show_pwmenable, store_pwmenable, offset); \ - -sysfs_pwm(1); -sysfs_pwm(2); -sysfs_pwm(3); - - -#define device_create_file_pwm(client, offset) \ -do { \ -device_create_file(&client->dev, &sensor_dev_attr_pwm##offset.dev_attr); \ -} while (0) - -#define device_create_file_pwmenable(client, offset) \ -do { \ -device_create_file(&client->dev, &sensor_dev_attr_pwm##offset##_enable.dev_attr); \ -} while (0) +static struct 
sensor_device_attribute sda_pwm[] = { + SENSOR_ATTR(pwm1, S_IWUSR | S_IRUGO, show_pwm, store_pwm, 1), + SENSOR_ATTR(pwm2, S_IWUSR | S_IRUGO, show_pwm, store_pwm, 2), + SENSOR_ATTR(pwm3, S_IWUSR | S_IRUGO, show_pwm, store_pwm, 3), +}; +static struct sensor_device_attribute sda_pwm_enable[] = { + SENSOR_ATTR(pwm1_enable, S_IWUSR | S_IRUGO, + show_pwmenable, store_pwmenable, 1), + SENSOR_ATTR(pwm2_enable, S_IWUSR | S_IRUGO, + show_pwmenable, store_pwmenable, 2), + SENSOR_ATTR(pwm3_enable, S_IWUSR | S_IRUGO, + show_pwmenable, store_pwmenable, 3), +}; static ssize_t @@ -764,18 +752,14 @@ store_pwm_mode(struct device *dev, struct device_attribute *attr, return count; } -#define sysfs_pwm_mode(offset) \ -static SENSOR_DEVICE_ATTR(pwm##offset##_mode, S_IRUGO | S_IWUSR, \ - show_pwm_mode, store_pwm_mode, offset); - -sysfs_pwm_mode(1); -sysfs_pwm_mode(2); -sysfs_pwm_mode(3); - -#define device_create_file_pwm_mode(client, offset) \ -do { \ -device_create_file(&client->dev, &sensor_dev_attr_pwm##offset##_mode.dev_attr); \ -} while (0) +static struct sensor_device_attribute sda_pwm_mode[] = { + SENSOR_ATTR(pwm1_mode, S_IWUSR | S_IRUGO, + show_pwm_mode, store_pwm_mode, 1), + SENSOR_ATTR(pwm2_mode, S_IWUSR | S_IRUGO, + show_pwm_mode, store_pwm_mode, 2), + SENSOR_ATTR(pwm3_mode, S_IWUSR | S_IRUGO, + show_pwm_mode, store_pwm_mode, 3), +}; static ssize_t @@ -788,12 +772,6 @@ show_regs_chassis(struct device *dev, struct device_attribute *attr, static DEVICE_ATTR(chassis, S_IRUGO, show_regs_chassis, NULL); -#define device_create_file_chassis(client) \ -do { \ -device_create_file(&client->dev, &dev_attr_chassis); \ -} while (0) - - static ssize_t show_chassis_clear(struct device *dev, struct device_attribute *attr, char *buf) { @@ -824,13 +802,6 @@ store_chassis_clear(struct device *dev, struct device_attribute *attr, static DEVICE_ATTR(chassis_clear, S_IRUGO | S_IWUSR, show_chassis_clear, store_chassis_clear); -#define device_create_file_chassis_clear(client) \ -do { \ -device_create_file(&client->dev, &dev_attr_chassis_clear); \ -} while (0) - - - /* For Smart Fan I / Thermal Cruise */ static ssize_t show_thermal_cruise(struct device *dev, struct device_attribute *attr, @@ -864,20 +835,14 @@ store_thermal_cruise(struct device *dev, struct device_attribute *attr, return count; } -#define sysfs_thermal_cruise(offset) \ -static SENSOR_DEVICE_ATTR(thermal_cruise##offset, S_IRUGO | S_IWUSR, \ - show_thermal_cruise, store_thermal_cruise, offset); - -sysfs_thermal_cruise(1); -sysfs_thermal_cruise(2); -sysfs_thermal_cruise(3); - -#define device_create_file_thermal_cruise(client, offset) \ -do { \ -device_create_file(&client->dev, \ -&sensor_dev_attr_thermal_cruise##offset.dev_attr); \ -} while (0) - +static struct sensor_device_attribute sda_thermal_cruise[] = { + SENSOR_ATTR(thermal_cruise1, S_IWUSR | S_IRUGO, + show_thermal_cruise, store_thermal_cruise, 1), + SENSOR_ATTR(thermal_cruise2, S_IWUSR | S_IRUGO, + show_thermal_cruise, store_thermal_cruise, 2), + SENSOR_ATTR(thermal_cruise3, S_IWUSR | S_IRUGO, + show_thermal_cruise, store_thermal_cruise, 3), +}; /* For Smart Fan I/Thermal Cruise and Smart Fan II */ static ssize_t @@ -916,19 +881,14 @@ store_tolerance(struct device *dev, struct device_attribute *attr, return count; } -#define sysfs_tolerance(offset) \ -static SENSOR_DEVICE_ATTR(tolerance##offset, S_IRUGO | S_IWUSR, \ - show_tolerance, store_tolerance, offset); - -sysfs_tolerance(1); -sysfs_tolerance(2); -sysfs_tolerance(3); - -#define device_create_file_tolerance(client, offset) \ -do { \ 
-device_create_file(&client->dev, &sensor_dev_attr_tolerance##offset.dev_attr); \ -} while (0) - +static struct sensor_device_attribute sda_tolerance[] = { + SENSOR_ATTR(tolerance1, S_IWUSR | S_IRUGO, + show_tolerance, store_tolerance, 1), + SENSOR_ATTR(tolerance2, S_IWUSR | S_IRUGO, + show_tolerance, store_tolerance, 2), + SENSOR_ATTR(tolerance3, S_IWUSR | S_IRUGO, + show_tolerance, store_tolerance, 3), +}; /* For Smart Fan II */ static ssize_t @@ -964,28 +924,34 @@ store_sf2_point(struct device *dev, struct device_attribute *attr, return count; } -#define sysfs_sf2_point(offset, index) \ -static SENSOR_DEVICE_ATTR_2(sf2_point##offset##_fan##index, S_IRUGO | S_IWUSR, \ - show_sf2_point, store_sf2_point, offset, index); - -sysfs_sf2_point(1, 1); /* Fan1 */ -sysfs_sf2_point(2, 1); /* Fan1 */ -sysfs_sf2_point(3, 1); /* Fan1 */ -sysfs_sf2_point(4, 1); /* Fan1 */ -sysfs_sf2_point(1, 2); /* Fan2 */ -sysfs_sf2_point(2, 2); /* Fan2 */ -sysfs_sf2_point(3, 2); /* Fan2 */ -sysfs_sf2_point(4, 2); /* Fan2 */ -sysfs_sf2_point(1, 3); /* Fan3 */ -sysfs_sf2_point(2, 3); /* Fan3 */ -sysfs_sf2_point(3, 3); /* Fan3 */ -sysfs_sf2_point(4, 3); /* Fan3 */ - -#define device_create_file_sf2_point(client, offset, index) \ -do { \ -device_create_file(&client->dev, \ -&sensor_dev_attr_sf2_point##offset##_fan##index.dev_attr); \ -} while (0) +static struct sensor_device_attribute_2 sda_sf2_point[] = { + SENSOR_ATTR_2(sf2_point1_fan1, S_IRUGO | S_IWUSR, + show_sf2_point, store_sf2_point, 1, 1), + SENSOR_ATTR_2(sf2_point2_fan1, S_IRUGO | S_IWUSR, + show_sf2_point, store_sf2_point, 2, 1), + SENSOR_ATTR_2(sf2_point3_fan1, S_IRUGO | S_IWUSR, + show_sf2_point, store_sf2_point, 3, 1), + SENSOR_ATTR_2(sf2_point4_fan1, S_IRUGO | S_IWUSR, + show_sf2_point, store_sf2_point, 4, 1), + + SENSOR_ATTR_2(sf2_point1_fan2, S_IRUGO | S_IWUSR, + show_sf2_point, store_sf2_point, 1, 2), + SENSOR_ATTR_2(sf2_point2_fan2, S_IRUGO | S_IWUSR, + show_sf2_point, store_sf2_point, 2, 2), + SENSOR_ATTR_2(sf2_point3_fan2, S_IRUGO | S_IWUSR, + show_sf2_point, store_sf2_point, 3, 2), + SENSOR_ATTR_2(sf2_point4_fan2, S_IRUGO | S_IWUSR, + show_sf2_point, store_sf2_point, 4, 2), + + SENSOR_ATTR_2(sf2_point1_fan3, S_IRUGO | S_IWUSR, + show_sf2_point, store_sf2_point, 1, 3), + SENSOR_ATTR_2(sf2_point2_fan3, S_IRUGO | S_IWUSR, + show_sf2_point, store_sf2_point, 2, 3), + SENSOR_ATTR_2(sf2_point3_fan3, S_IRUGO | S_IWUSR, + show_sf2_point, store_sf2_point, 3, 3), + SENSOR_ATTR_2(sf2_point4_fan3, S_IRUGO | S_IWUSR, + show_sf2_point, store_sf2_point, 4, 3), +}; static ssize_t @@ -1026,26 +992,28 @@ store_sf2_level(struct device *dev, struct device_attribute *attr, return count; } -#define sysfs_sf2_level(offset, index) \ -static SENSOR_DEVICE_ATTR_2(sf2_level##offset##_fan##index, S_IRUGO | S_IWUSR, \ - show_sf2_level, store_sf2_level, offset, index); - -sysfs_sf2_level(1, 1); /* Fan1 */ -sysfs_sf2_level(2, 1); /* Fan1 */ -sysfs_sf2_level(3, 1); /* Fan1 */ -sysfs_sf2_level(1, 2); /* Fan2 */ -sysfs_sf2_level(2, 2); /* Fan2 */ -sysfs_sf2_level(3, 2); /* Fan2 */ -sysfs_sf2_level(1, 3); /* Fan3 */ -sysfs_sf2_level(2, 3); /* Fan3 */ -sysfs_sf2_level(3, 3); /* Fan3 */ - -#define device_create_file_sf2_level(client, offset, index) \ -do { \ -device_create_file(&client->dev, \ -&sensor_dev_attr_sf2_level##offset##_fan##index.dev_attr); \ -} while (0) - +static struct sensor_device_attribute_2 sda_sf2_level[] = { + SENSOR_ATTR_2(sf2_level1_fan1, S_IRUGO | S_IWUSR, + show_sf2_level, store_sf2_level, 1, 1), + SENSOR_ATTR_2(sf2_level2_fan1, S_IRUGO | S_IWUSR, + 
show_sf2_level, store_sf2_level, 2, 1), + SENSOR_ATTR_2(sf2_level3_fan1, S_IRUGO | S_IWUSR, + show_sf2_level, store_sf2_level, 3, 1), + + SENSOR_ATTR_2(sf2_level1_fan2, S_IRUGO | S_IWUSR, + show_sf2_level, store_sf2_level, 1, 2), + SENSOR_ATTR_2(sf2_level2_fan2, S_IRUGO | S_IWUSR, + show_sf2_level, store_sf2_level, 2, 2), + SENSOR_ATTR_2(sf2_level3_fan2, S_IRUGO | S_IWUSR, + show_sf2_level, store_sf2_level, 3, 2), + + SENSOR_ATTR_2(sf2_level1_fan3, S_IRUGO | S_IWUSR, + show_sf2_level, store_sf2_level, 1, 3), + SENSOR_ATTR_2(sf2_level2_fan3, S_IRUGO | S_IWUSR, + show_sf2_level, store_sf2_level, 2, 3), + SENSOR_ATTR_2(sf2_level3_fan3, S_IRUGO | S_IWUSR, + show_sf2_level, store_sf2_level, 3, 3), +}; /* This function is called when: * w83792d_driver is inserted (when this module is loaded), for each @@ -1147,12 +1115,19 @@ ERROR_SC_0: return err; } +static void device_create_file_fan(struct device *dev, int i) +{ + device_create_file(dev, &sda_fan_input[i].dev_attr); + device_create_file(dev, &sda_fan_div[i].dev_attr); + device_create_file(dev, &sda_fan_min[i].dev_attr); +} static int w83792d_detect(struct i2c_adapter *adapter, int address, int kind) { int i = 0, val1 = 0, val2; - struct i2c_client *new_client; + struct i2c_client *client; + struct device *dev; struct w83792d_data *data; int err = 0; const char *client_name = ""; @@ -1170,12 +1145,13 @@ w83792d_detect(struct i2c_adapter *adapter, int address, int kind) goto ERROR0; } - new_client = &data->client; - i2c_set_clientdata(new_client, data); - new_client->addr = address; - new_client->adapter = adapter; - new_client->driver = &w83792d_driver; - new_client->flags = 0; + client = &data->client; + dev = &client->dev; + i2c_set_clientdata(client, data); + client->addr = address; + client->adapter = adapter; + client->driver = &w83792d_driver; + client->flags = 0; /* Now, we do the remaining detection. */ @@ -1184,13 +1160,12 @@ w83792d_detect(struct i2c_adapter *adapter, int address, int kind) force_*=... parameter, and the Winbond will be reset to the right bank. */ if (kind < 0) { - if (w83792d_read_value(new_client, W83792D_REG_CONFIG) & 0x80) { - dev_warn(&new_client->dev, "Detection failed at step " - "3\n"); + if (w83792d_read_value(client, W83792D_REG_CONFIG) & 0x80) { + dev_warn(dev, "Detection failed at step 3\n"); goto ERROR1; } - val1 = w83792d_read_value(new_client, W83792D_REG_BANK); - val2 = w83792d_read_value(new_client, W83792D_REG_CHIPMAN); + val1 = w83792d_read_value(client, W83792D_REG_BANK); + val2 = w83792d_read_value(client, W83792D_REG_CHIPMAN); /* Check for Winbond ID if in bank 0 */ if (!(val1 & 0x07)) { /* is Bank0 */ if (((!(val1 & 0x80)) && (val2 != 0xa3)) || @@ -1200,34 +1175,33 @@ w83792d_detect(struct i2c_adapter *adapter, int address, int kind) } /* If Winbond chip, address of chip and W83792D_REG_I2C_ADDR should match */ - if (w83792d_read_value(new_client, + if (w83792d_read_value(client, W83792D_REG_I2C_ADDR) != address) { - dev_warn(&new_client->dev, "Detection failed " - "at step 5\n"); + dev_warn(dev, "Detection failed at step 5\n"); goto ERROR1; } } /* We have either had a force parameter, or we have already detected the Winbond. Put it now into bank 0 and Vendor ID High Byte */ - w83792d_write_value(new_client, + w83792d_write_value(client, W83792D_REG_BANK, - (w83792d_read_value(new_client, + (w83792d_read_value(client, W83792D_REG_BANK) & 0x78) | 0x80); /* Determine the chip type. 
*/ if (kind <= 0) { /* get vendor ID */ - val2 = w83792d_read_value(new_client, W83792D_REG_CHIPMAN); + val2 = w83792d_read_value(client, W83792D_REG_CHIPMAN); if (val2 != 0x5c) { /* the vendor is NOT Winbond */ goto ERROR1; } - val1 = w83792d_read_value(new_client, W83792D_REG_WCHIPID); + val1 = w83792d_read_value(client, W83792D_REG_WCHIPID); if (val1 == 0x7a) { kind = w83792d; } else { if (kind == 0) - dev_warn(&new_client->dev, + dev_warn(dev, "w83792d: Ignoring 'force' parameter for" " unknown chip at adapter %d, address" " 0x%02x\n", i2c_adapter_id(adapter), @@ -1239,120 +1213,86 @@ w83792d_detect(struct i2c_adapter *adapter, int address, int kind) if (kind == w83792d) { client_name = "w83792d"; } else { - dev_err(&new_client->dev, "w83792d: Internal error: unknown" + dev_err(dev, "w83792d: Internal error: unknown" " kind (%d)?!?", kind); goto ERROR1; } /* Fill in the remaining client fields and put into the global list */ - strlcpy(new_client->name, client_name, I2C_NAME_SIZE); + strlcpy(client->name, client_name, I2C_NAME_SIZE); data->type = kind; data->valid = 0; - init_MUTEX(&data->update_lock); + mutex_init(&data->update_lock); /* Tell the I2C layer a new client has arrived */ - if ((err = i2c_attach_client(new_client))) + if ((err = i2c_attach_client(client))) goto ERROR1; if ((err = w83792d_detect_subclients(adapter, address, - kind, new_client))) + kind, client))) goto ERROR2; /* Initialize the chip */ - w83792d_init_client(new_client); + w83792d_init_client(client); /* A few vars need to be filled upon startup */ for (i = 0; i < 7; i++) { - data->fan_min[i] = w83792d_read_value(new_client, + data->fan_min[i] = w83792d_read_value(client, W83792D_REG_FAN_MIN[i]); } /* Register sysfs hooks */ - data->class_dev = hwmon_device_register(&new_client->dev); + data->class_dev = hwmon_device_register(dev); if (IS_ERR(data->class_dev)) { err = PTR_ERR(data->class_dev); goto ERROR3; } - device_create_file_in(new_client, 0); - device_create_file_in(new_client, 1); - device_create_file_in(new_client, 2); - device_create_file_in(new_client, 3); - device_create_file_in(new_client, 4); - device_create_file_in(new_client, 5); - device_create_file_in(new_client, 6); - device_create_file_in(new_client, 7); - device_create_file_in(new_client, 8); - - device_create_file_fan(new_client, 1); - device_create_file_fan(new_client, 2); - device_create_file_fan(new_client, 3); + for (i = 0; i < 9; i++) { + device_create_file(dev, &sda_in_input[i].dev_attr); + device_create_file(dev, &sda_in_max[i].dev_attr); + device_create_file(dev, &sda_in_min[i].dev_attr); + } + for (i = 0; i < 3; i++) + device_create_file_fan(dev, i); /* Read GPIO enable register to check if pins for fan 4,5 are used as GPIO */ - val1 = w83792d_read_value(new_client, W83792D_REG_GPIO_EN); + val1 = w83792d_read_value(client, W83792D_REG_GPIO_EN); if (!(val1 & 0x40)) - device_create_file_fan(new_client, 4); + device_create_file_fan(dev, 3); if (!(val1 & 0x20)) - device_create_file_fan(new_client, 5); + device_create_file_fan(dev, 4); - val1 = w83792d_read_value(new_client, W83792D_REG_PIN); + val1 = w83792d_read_value(client, W83792D_REG_PIN); if (val1 & 0x40) - device_create_file_fan(new_client, 6); + device_create_file_fan(dev, 5); if (val1 & 0x04) - device_create_file_fan(new_client, 7); - - device_create_file_temp1(new_client); /* Temp1 */ - device_create_file_temp_add(new_client, 2); /* Temp2 */ - device_create_file_temp_add(new_client, 3); /* Temp3 */ - - device_create_file_alarms(new_client); - - 
device_create_file_pwm(new_client, 1); - device_create_file_pwm(new_client, 2); - device_create_file_pwm(new_client, 3); - - device_create_file_pwmenable(new_client, 1); - device_create_file_pwmenable(new_client, 2); - device_create_file_pwmenable(new_client, 3); - - device_create_file_pwm_mode(new_client, 1); - device_create_file_pwm_mode(new_client, 2); - device_create_file_pwm_mode(new_client, 3); - - device_create_file_chassis(new_client); - device_create_file_chassis_clear(new_client); - - device_create_file_thermal_cruise(new_client, 1); - device_create_file_thermal_cruise(new_client, 2); - device_create_file_thermal_cruise(new_client, 3); - - device_create_file_tolerance(new_client, 1); - device_create_file_tolerance(new_client, 2); - device_create_file_tolerance(new_client, 3); - - device_create_file_sf2_point(new_client, 1, 1); /* Fan1 */ - device_create_file_sf2_point(new_client, 2, 1); /* Fan1 */ - device_create_file_sf2_point(new_client, 3, 1); /* Fan1 */ - device_create_file_sf2_point(new_client, 4, 1); /* Fan1 */ - device_create_file_sf2_point(new_client, 1, 2); /* Fan2 */ - device_create_file_sf2_point(new_client, 2, 2); /* Fan2 */ - device_create_file_sf2_point(new_client, 3, 2); /* Fan2 */ - device_create_file_sf2_point(new_client, 4, 2); /* Fan2 */ - device_create_file_sf2_point(new_client, 1, 3); /* Fan3 */ - device_create_file_sf2_point(new_client, 2, 3); /* Fan3 */ - device_create_file_sf2_point(new_client, 3, 3); /* Fan3 */ - device_create_file_sf2_point(new_client, 4, 3); /* Fan3 */ - - device_create_file_sf2_level(new_client, 1, 1); /* Fan1 */ - device_create_file_sf2_level(new_client, 2, 1); /* Fan1 */ - device_create_file_sf2_level(new_client, 3, 1); /* Fan1 */ - device_create_file_sf2_level(new_client, 1, 2); /* Fan2 */ - device_create_file_sf2_level(new_client, 2, 2); /* Fan2 */ - device_create_file_sf2_level(new_client, 3, 2); /* Fan2 */ - device_create_file_sf2_level(new_client, 1, 3); /* Fan3 */ - device_create_file_sf2_level(new_client, 2, 3); /* Fan3 */ - device_create_file_sf2_level(new_client, 3, 3); /* Fan3 */ + device_create_file_fan(dev, 6); + + for (i = 0; i < 3; i++) { + device_create_file(dev, &sda_temp_input[i].dev_attr); + device_create_file(dev, &sda_temp_max[i].dev_attr); + device_create_file(dev, &sda_temp_max_hyst[i].dev_attr); + device_create_file(dev, &sda_thermal_cruise[i].dev_attr); + device_create_file(dev, &sda_tolerance[i].dev_attr); + } + + for (i = 0; i < ARRAY_SIZE(sda_pwm); i++) { + device_create_file(dev, &sda_pwm[i].dev_attr); + device_create_file(dev, &sda_pwm_enable[i].dev_attr); + device_create_file(dev, &sda_pwm_mode[i].dev_attr); + } + + device_create_file(dev, &dev_attr_alarms); + device_create_file(dev, &dev_attr_chassis); + device_create_file(dev, &dev_attr_chassis_clear); + + for (i = 0; i < ARRAY_SIZE(sda_sf2_point); i++) + device_create_file(dev, &sda_sf2_point[i].dev_attr); + + for (i = 0; i < ARRAY_SIZE(sda_sf2_level); i++) + device_create_file(dev, &sda_sf2_level[i].dev_attr); return 0; @@ -1366,7 +1306,7 @@ ERROR3: kfree(data->lm75[1]); } ERROR2: - i2c_detach_client(new_client); + i2c_detach_client(client); ERROR1: kfree(data); ERROR0: @@ -1434,7 +1374,7 @@ static struct w83792d_data *w83792d_update_device(struct device *dev) int i, j; u8 reg_array_tmp[4], pwm_array_tmp[7], reg_tmp; - down(&data->update_lock); + mutex_lock(&data->update_lock); if (time_after (jiffies - data->last_updated, (unsigned long) (HZ * 3)) @@ -1545,7 +1485,7 @@ static struct w83792d_data *w83792d_update_device(struct device *dev) data->valid 
= 1; } - up(&data->update_lock); + mutex_unlock(&data->update_lock); #ifdef DEBUG w83792d_print_debug(data, dev); diff --git a/drivers/hwmon/w83l785ts.c b/drivers/hwmon/w83l785ts.c index f66c0cfdeda..3f2bac125fb 100644 --- a/drivers/hwmon/w83l785ts.c +++ b/drivers/hwmon/w83l785ts.c @@ -39,6 +39,7 @@ #include <linux/hwmon.h> #include <linux/hwmon-sysfs.h> #include <linux/err.h> +#include <linux/mutex.h> /* How many retries on register read error */ #define MAX_RETRIES 5 @@ -107,7 +108,7 @@ static struct i2c_driver w83l785ts_driver = { struct w83l785ts_data { struct i2c_client client; struct class_device *class_dev; - struct semaphore update_lock; + struct mutex update_lock; char valid; /* zero until following fields are valid */ unsigned long last_updated; /* in jiffies */ @@ -221,7 +222,7 @@ static int w83l785ts_detect(struct i2c_adapter *adapter, int address, int kind) /* We can fill in the remaining client fields. */ strlcpy(new_client->name, "w83l785ts", I2C_NAME_SIZE); data->valid = 0; - init_MUTEX(&data->update_lock); + mutex_init(&data->update_lock); /* Default values in case the first read fails (unlikely). */ data->temp[1] = data->temp[0] = 0; @@ -299,7 +300,7 @@ static struct w83l785ts_data *w83l785ts_update_device(struct device *dev) struct i2c_client *client = to_i2c_client(dev); struct w83l785ts_data *data = i2c_get_clientdata(client); - down(&data->update_lock); + mutex_lock(&data->update_lock); if (!data->valid || time_after(jiffies, data->last_updated + HZ * 2)) { dev_dbg(&client->dev, "Updating w83l785ts data.\n"); @@ -312,7 +313,7 @@ static struct w83l785ts_data *w83l785ts_update_device(struct device *dev) data->valid = 1; } - up(&data->update_lock); + mutex_unlock(&data->update_lock); return data; } diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig index ff92735c7c8..089c6f5b24d 100644 --- a/drivers/i2c/busses/Kconfig +++ b/drivers/i2c/busses/Kconfig @@ -168,12 +168,14 @@ config I2C_PIIX4 help If you say yes to this option, support will be included for the Intel PIIX4 family of mainboard I2C interfaces. Specifically, the following - versions of the chipset are supported: + versions of the chipset are supported (note that Serverworks is part + of Broadcom): Intel PIIX4 Intel 440MX Serverworks OSB4 Serverworks CSB5 Serverworks CSB6 + Serverworks HT-1000 SMSC Victory66 This driver can also be built as a module. If so, the module @@ -389,10 +391,11 @@ config SCx200_I2C_SDA also be specified with a module parameter. config SCx200_ACB - tristate "NatSemi SCx200 ACCESS.bus" - depends on I2C && PCI + tristate "Geode ACCESS.bus support" + depends on X86_32 && I2C && PCI help - Enable the use of the ACCESS.bus controllers of a SCx200 processor. + Enable the use of the ACCESS.bus controllers on the Geode SCx200 and + SC1100 processors and the CS5535 and CS5536 Geode companion devices. If you don't know what to do here, say N. diff --git a/drivers/i2c/busses/i2c-ali1535.c b/drivers/i2c/busses/i2c-ali1535.c index 3eb47890db4..d3ef46aeeb3 100644 --- a/drivers/i2c/busses/i2c-ali1535.c +++ b/drivers/i2c/busses/i2c-ali1535.c @@ -63,7 +63,6 @@ #include <linux/i2c.h> #include <linux/init.h> #include <asm/io.h> -#include <asm/semaphore.h> /* ALI1535 SMBus address offsets */ @@ -136,7 +135,6 @@ static struct pci_driver ali1535_driver; static unsigned short ali1535_smba; -static DECLARE_MUTEX(i2c_ali1535_sem); /* Detect whether a ALI1535 can be found, and initialize it, where necessary. 
Note the differences between kernels with the old PCI BIOS interface and @@ -345,7 +343,6 @@ static s32 ali1535_access(struct i2c_adapter *adap, u16 addr, int timeout; s32 result = 0; - down(&i2c_ali1535_sem); /* make sure SMBus is idle */ temp = inb_p(SMBHSTSTS); for (timeout = 0; @@ -460,7 +457,6 @@ static s32 ali1535_access(struct i2c_adapter *adap, u16 addr, break; } EXIT: - up(&i2c_ali1535_sem); return result; } diff --git a/drivers/i2c/busses/i2c-amd756-s4882.c b/drivers/i2c/busses/i2c-amd756-s4882.c index 56c7d987590..08e915730ca 100644 --- a/drivers/i2c/busses/i2c-amd756-s4882.c +++ b/drivers/i2c/busses/i2c-amd756-s4882.c @@ -38,6 +38,7 @@ #include <linux/slab.h> #include <linux/init.h> #include <linux/i2c.h> +#include <linux/mutex.h> extern struct i2c_adapter amd756_smbus; @@ -45,7 +46,7 @@ static struct i2c_adapter *s4882_adapter; static struct i2c_algorithm *s4882_algo; /* Wrapper access functions for multiplexed SMBus */ -static struct semaphore amd756_lock; +static DEFINE_MUTEX(amd756_lock); static s32 amd756_access_virt0(struct i2c_adapter * adap, u16 addr, unsigned short flags, char read_write, @@ -59,12 +60,12 @@ static s32 amd756_access_virt0(struct i2c_adapter * adap, u16 addr, || addr == 0x18) return -1; - down(&amd756_lock); + mutex_lock(&amd756_lock); error = amd756_smbus.algo->smbus_xfer(adap, addr, flags, read_write, command, size, data); - up(&amd756_lock); + mutex_unlock(&amd756_lock); return error; } @@ -87,7 +88,7 @@ static inline s32 amd756_access_channel(struct i2c_adapter * adap, u16 addr, if (addr != 0x4c && (addr & 0xfc) != 0x50 && (addr & 0xfc) != 0x30) return -1; - down(&amd756_lock); + mutex_lock(&amd756_lock); if (last_channels != channels) { union i2c_smbus_data mplxdata; @@ -105,7 +106,7 @@ static inline s32 amd756_access_channel(struct i2c_adapter * adap, u16 addr, command, size, data); UNLOCK: - up(&amd756_lock); + mutex_unlock(&amd756_lock); return error; } @@ -166,8 +167,6 @@ static int __init amd756_s4882_init(void) } printk(KERN_INFO "Enabling SMBus multiplexing for Tyan S4882\n"); - init_MUTEX(&amd756_lock); - /* Define the 5 virtual adapters and algorithms structures */ if (!(s4882_adapter = kzalloc(5 * sizeof(struct i2c_adapter), GFP_KERNEL))) { diff --git a/drivers/i2c/busses/i2c-frodo.c b/drivers/i2c/busses/i2c-frodo.c deleted file mode 100644 index b6f52f5a413..00000000000 --- a/drivers/i2c/busses/i2c-frodo.c +++ /dev/null @@ -1,85 +0,0 @@ - -/* - * linux/drivers/i2c/i2c-frodo.c - * - * Author: Abraham van der Merwe <abraham@2d3d.co.za> - * - * An I2C adapter driver for the 2d3D, Inc. StrongARM SA-1110 - * Development board (Frodo). - * - * This source code is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * version 2 as published by the Free Software Foundation. 
- */ - -#include <linux/module.h> -#include <linux/kernel.h> -#include <linux/init.h> -#include <linux/delay.h> -#include <linux/i2c.h> -#include <linux/i2c-algo-bit.h> -#include <asm/hardware.h> - - -static void frodo_setsda (void *data,int state) -{ - if (state) - FRODO_CPLD_I2C |= FRODO_I2C_SDA_OUT; - else - FRODO_CPLD_I2C &= ~FRODO_I2C_SDA_OUT; -} - -static void frodo_setscl (void *data,int state) -{ - if (state) - FRODO_CPLD_I2C |= FRODO_I2C_SCL_OUT; - else - FRODO_CPLD_I2C &= ~FRODO_I2C_SCL_OUT; -} - -static int frodo_getsda (void *data) -{ - return ((FRODO_CPLD_I2C & FRODO_I2C_SDA_IN) != 0); -} - -static int frodo_getscl (void *data) -{ - return ((FRODO_CPLD_I2C & FRODO_I2C_SCL_IN) != 0); -} - -static struct i2c_algo_bit_data bit_frodo_data = { - .setsda = frodo_setsda, - .setscl = frodo_setscl, - .getsda = frodo_getsda, - .getscl = frodo_getscl, - .udelay = 80, - .mdelay = 80, - .timeout = HZ -}; - -static struct i2c_adapter frodo_ops = { - .owner = THIS_MODULE, - .id = I2C_HW_B_FRODO, - .algo_data = &bit_frodo_data, - .dev = { - .name = "Frodo adapter driver", - }, -}; - -static int __init i2c_frodo_init (void) -{ - return i2c_bit_add_bus(&frodo_ops); -} - -static void __exit i2c_frodo_exit (void) -{ - i2c_bit_del_bus(&frodo_ops); -} - -MODULE_AUTHOR ("Abraham van der Merwe <abraham@2d3d.co.za>"); -MODULE_DESCRIPTION ("I2C-Bus adapter routines for Frodo"); -MODULE_LICENSE ("GPL"); - -module_init (i2c_frodo_init); -module_exit (i2c_frodo_exit); - diff --git a/drivers/i2c/busses/i2c-isa.c b/drivers/i2c/busses/i2c-isa.c index 4344ae6b1fc..c3e1d3e888d 100644 --- a/drivers/i2c/busses/i2c-isa.c +++ b/drivers/i2c/busses/i2c-isa.c @@ -125,7 +125,7 @@ int i2c_isa_del_driver(struct i2c_driver *driver) static int __init i2c_isa_init(void) { - init_MUTEX(&isa_adapter.clist_lock); + mutex_init(&isa_adapter.clist_lock); INIT_LIST_HEAD(&isa_adapter.clients); isa_adapter.nr = ANY_I2C_ISA_BUS; diff --git a/drivers/i2c/busses/i2c-ite.c b/drivers/i2c/busses/i2c-ite.c index 5f5d2944808..d82e6dae840 100644 --- a/drivers/i2c/busses/i2c-ite.c +++ b/drivers/i2c/busses/i2c-ite.c @@ -200,9 +200,7 @@ static struct i2c_adapter iic_ite_ops = { .owner = THIS_MODULE, .id = I2C_HW_I_IIC, .algo_data = &iic_ite_data, - .dev = { - .name = "ITE IIC adapter", - }, + .name = "ITE IIC adapter", }; /* Called when the module is loaded. This function starts the diff --git a/drivers/i2c/busses/i2c-ixp4xx.c b/drivers/i2c/busses/i2c-ixp4xx.c index e422d8b2d4d..2ed07112d68 100644 --- a/drivers/i2c/busses/i2c-ixp4xx.c +++ b/drivers/i2c/busses/i2c-ixp4xx.c @@ -126,6 +126,7 @@ static int ixp4xx_i2c_probe(struct platform_device *plat_dev) drv_data->algo_data.timeout = 100; drv_data->adapter.id = I2C_HW_B_IXP4XX; + drv_data->adapter.class = I2C_CLASS_HWMON; strlcpy(drv_data->adapter.name, plat_dev->dev.driver->name, I2C_NAME_SIZE); drv_data->adapter.algo_data = &drv_data->algo_data; diff --git a/drivers/i2c/busses/i2c-piix4.c b/drivers/i2c/busses/i2c-piix4.c index 692f4734548..d9c7c00e71f 100644 --- a/drivers/i2c/busses/i2c-piix4.c +++ b/drivers/i2c/busses/i2c-piix4.c @@ -22,7 +22,7 @@ /* Supports: Intel PIIX4, 440MX - Serverworks OSB4, CSB5, CSB6 + Serverworks OSB4, CSB5, CSB6, HT-1000 SMSC Victory66 Note: we assume there can only be one device, with one SMBus interface. 
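[Editor's note] The i2c-piix4 hunks above and below extend the driver's supported-chip list with the ServerWorks HT-1000 southbridge; adding a new chip to a PCI driver usually amounts to one more entry in its pci_device_id table. A minimal sketch of that pattern follows; the driver name and hex device IDs are hypothetical, not the real piix4 table.

/* Sketch only: a hypothetical PCI driver's ID table, following the usual
 * <linux/pci.h> conventions. IDs and names here are illustrative. */
#include <linux/pci.h>
#include <linux/module.h>

static struct pci_device_id example_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x1234),		/* existing device */
	  .driver_data = 0 },
	{ PCI_DEVICE(PCI_VENDOR_ID_SERVERWORKS, 0x5678),	/* newly added device */
	  .driver_data = 0 },
	{ }	/* terminating NULL entry */
};
MODULE_DEVICE_TABLE(pci, example_ids);

static struct pci_driver example_driver = {
	.name		= "example",
	.id_table	= example_ids,
	/* .probe / .remove omitted in this sketch */
};

MODULE_DEVICE_TABLE() is what lets userspace module loaders autoload the driver when a matching device appears, which is why a new-device patch touches only the table and not the probe path.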
@@ -419,6 +419,8 @@ static struct pci_device_id piix4_ids[] = { .driver_data = 0 }, { PCI_DEVICE(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_CSB6), .driver_data = 0 }, + { PCI_DEVICE(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_HT1000SB), + .driver_data = 0 }, { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443MX_3), .driver_data = 3 }, { PCI_DEVICE(PCI_VENDOR_ID_EFAR, PCI_DEVICE_ID_EFAR_SLC90E66_3), diff --git a/drivers/i2c/busses/i2c-pxa.c b/drivers/i2c/busses/i2c-pxa.c index 7579f4b256a..5155010b455 100644 --- a/drivers/i2c/busses/i2c-pxa.c +++ b/drivers/i2c/busses/i2c-pxa.c @@ -647,7 +647,7 @@ static inline void i2c_pxa_start_message(struct pxa_i2c *i2c) } /* - * We are protected by the adapter bus semaphore. + * We are protected by the adapter bus mutex. */ static int i2c_pxa_do_xfer(struct pxa_i2c *i2c, struct i2c_msg *msg, int num) { diff --git a/drivers/i2c/busses/scx200_acb.c b/drivers/i2c/busses/scx200_acb.c index d3478e08452..8bd305e47f0 100644 --- a/drivers/i2c/busses/scx200_acb.c +++ b/drivers/i2c/busses/scx200_acb.c @@ -1,27 +1,26 @@ -/* linux/drivers/i2c/scx200_acb.c - +/* Copyright (c) 2001,2002 Christer Weinigel <wingel@nano-system.com> National Semiconductor SCx200 ACCESS.bus support - + Also supports the AMD CS5535 and AMD CS5536 + Based on i2c-keywest.c which is: Copyright (c) 2001 Benjamin Herrenschmidt <benh@kernel.crashing.org> Copyright (c) 2000 Philip Edelbrock <phil@stimpy.netroedge.com> - + This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. - + This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. - + You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. - */ #include <linux/module.h> @@ -32,7 +31,9 @@ #include <linux/smp_lock.h> #include <linux/pci.h> #include <linux/delay.h> +#include <linux/mutex.h> #include <asm/io.h> +#include <asm/msr.h> #include <linux/scx200.h> @@ -47,16 +48,7 @@ static int base[MAX_DEVICES] = { 0x820, 0x840 }; module_param_array(base, int, NULL, 0); MODULE_PARM_DESC(base, "Base addresses for the ACCESS.bus controllers"); -#ifdef DEBUG -#define DBG(x...) printk(KERN_DEBUG NAME ": " x) -#else -#define DBG(x...) -#endif - -/* The hardware supports interrupt driven mode too, but I haven't - implemented that. 
*/ -#define POLLED_MODE 1 -#define POLL_TIMEOUT (HZ) +#define POLL_TIMEOUT (HZ/5) enum scx200_acb_state { state_idle, @@ -79,12 +71,11 @@ static const char *scx200_acb_state_name[] = { }; /* Physical interface */ -struct scx200_acb_iface -{ +struct scx200_acb_iface { struct scx200_acb_iface *next; struct i2c_adapter adapter; unsigned base; - struct semaphore sem; + struct mutex mutex; /* State machine data */ enum scx200_acb_state state; @@ -100,7 +91,7 @@ struct scx200_acb_iface #define ACBSDA (iface->base + 0) #define ACBST (iface->base + 1) #define ACBST_SDAST 0x40 /* SDA Status */ -#define ACBST_BER 0x20 +#define ACBST_BER 0x20 #define ACBST_NEGACK 0x10 /* Negative Acknowledge */ #define ACBST_STASTR 0x08 /* Stall After Start */ #define ACBST_MASTER 0x02 @@ -109,9 +100,9 @@ struct scx200_acb_iface #define ACBCTL1 (iface->base + 3) #define ACBCTL1_STASTRE 0x80 #define ACBCTL1_NMINTE 0x40 -#define ACBCTL1_ACK 0x10 -#define ACBCTL1_STOP 0x02 -#define ACBCTL1_START 0x01 +#define ACBCTL1_ACK 0x10 +#define ACBCTL1_STOP 0x02 +#define ACBCTL1_START 0x01 #define ACBADDR (iface->base + 4) #define ACBCTL2 (iface->base + 5) #define ACBCTL2_ENABLE 0x01 @@ -122,8 +113,8 @@ static void scx200_acb_machine(struct scx200_acb_iface *iface, u8 status) { const char *errmsg; - DBG("state %s, status = 0x%02x\n", - scx200_acb_state_name[iface->state], status); + dev_dbg(&iface->adapter.dev, "state %s, status = 0x%02x\n", + scx200_acb_state_name[iface->state], status); if (status & ACBST_BER) { errmsg = "bus error"; @@ -133,8 +124,17 @@ static void scx200_acb_machine(struct scx200_acb_iface *iface, u8 status) errmsg = "not master"; goto error; } - if (status & ACBST_NEGACK) - goto negack; + if (status & ACBST_NEGACK) { + dev_dbg(&iface->adapter.dev, "negative ack in state %s\n", + scx200_acb_state_name[iface->state]); + + iface->state = state_idle; + iface->result = -ENXIO; + + outb(inb(ACBCTL1) | ACBCTL1_STOP, ACBCTL1); + outb(ACBST_STASTR | ACBST_NEGACK, ACBST); + return; + } switch (iface->state) { case state_idle: @@ -160,10 +160,10 @@ static void scx200_acb_machine(struct scx200_acb_iface *iface, u8 status) case state_repeat_start: outb(inb(ACBCTL1) | ACBCTL1_START, ACBCTL1); /* fallthrough */ - + case state_quick: if (iface->address_byte & 1) { - if (iface->len == 1) + if (iface->len == 1) outb(inb(ACBCTL1) | ACBCTL1_ACK, ACBCTL1); else outb(inb(ACBCTL1) & ~ACBCTL1_ACK, ACBCTL1); @@ -202,26 +202,15 @@ static void scx200_acb_machine(struct scx200_acb_iface *iface, u8 status) outb(inb(ACBCTL1) | ACBCTL1_STOP, ACBCTL1); break; } - + outb(*iface->ptr++, ACBSDA); --iface->len; - + break; } return; - negack: - DBG("negative acknowledge in state %s\n", - scx200_acb_state_name[iface->state]); - - iface->state = state_idle; - iface->result = -ENXIO; - - outb(inb(ACBCTL1) | ACBCTL1_STOP, ACBCTL1); - outb(ACBST_STASTR | ACBST_NEGACK, ACBST); - return; - error: dev_err(&iface->adapter.dev, "%s in state %s\n", errmsg, scx200_acb_state_name[iface->state]); @@ -231,20 +220,9 @@ static void scx200_acb_machine(struct scx200_acb_iface *iface, u8 status) iface->needs_reset = 1; } -static void scx200_acb_timeout(struct scx200_acb_iface *iface) -{ - dev_err(&iface->adapter.dev, "timeout in state %s\n", - scx200_acb_state_name[iface->state]); - - iface->state = state_idle; - iface->result = -EIO; - iface->needs_reset = 1; -} - -#ifdef POLLED_MODE static void scx200_acb_poll(struct scx200_acb_iface *iface) { - u8 status = 0; + u8 status; unsigned long timeout; timeout = jiffies + POLL_TIMEOUT; @@ -254,17 +232,21 @@ static void 
scx200_acb_poll(struct scx200_acb_iface *iface) scx200_acb_machine(iface, status); return; } - msleep(10); + yield(); } - scx200_acb_timeout(iface); + dev_err(&iface->adapter.dev, "timeout in state %s\n", + scx200_acb_state_name[iface->state]); + + iface->state = state_idle; + iface->result = -EIO; + iface->needs_reset = 1; } -#endif /* POLLED_MODE */ static void scx200_acb_reset(struct scx200_acb_iface *iface) { /* Disable the ACCESS.bus device and Configure the SCL - frequency: 16 clock cycles */ + frequency: 16 clock cycles */ outb(0x70, ACBCTL2); /* Polling mode */ outb(0, ACBCTL1); @@ -283,9 +265,9 @@ static void scx200_acb_reset(struct scx200_acb_iface *iface) } static s32 scx200_acb_smbus_xfer(struct i2c_adapter *adapter, - u16 address, unsigned short flags, - char rw, u8 command, int size, - union i2c_smbus_data *data) + u16 address, unsigned short flags, + char rw, u8 command, int size, + union i2c_smbus_data *data) { struct scx200_acb_iface *iface = i2c_get_adapdata(adapter); int len; @@ -295,53 +277,47 @@ static s32 scx200_acb_smbus_xfer(struct i2c_adapter *adapter, switch (size) { case I2C_SMBUS_QUICK: - len = 0; - buffer = NULL; - break; + len = 0; + buffer = NULL; + break; + case I2C_SMBUS_BYTE: - if (rw == I2C_SMBUS_READ) { - len = 1; - buffer = &data->byte; - } else { - len = 1; - buffer = &command; - } - break; + len = 1; + buffer = rw ? &data->byte : &command; + break; + case I2C_SMBUS_BYTE_DATA: - len = 1; - buffer = &data->byte; - break; + len = 1; + buffer = &data->byte; + break; + case I2C_SMBUS_WORD_DATA: len = 2; - cur_word = cpu_to_le16(data->word); - buffer = (u8 *)&cur_word; + cur_word = cpu_to_le16(data->word); + buffer = (u8 *)&cur_word; break; + case I2C_SMBUS_BLOCK_DATA: - len = data->block[0]; - buffer = &data->block[1]; + len = data->block[0]; + buffer = &data->block[1]; break; + default: - return -EINVAL; + return -EINVAL; } - DBG("size=%d, address=0x%x, command=0x%x, len=%d, read=%d\n", - size, address, command, len, rw == I2C_SMBUS_READ); + dev_dbg(&adapter->dev, + "size=%d, address=0x%x, command=0x%x, len=%d, read=%d\n", + size, address, command, len, rw); if (!len && rw == I2C_SMBUS_READ) { - dev_warn(&adapter->dev, "zero length read\n"); + dev_dbg(&adapter->dev, "zero length read\n"); return -EINVAL; } - if (len && !buffer) { - dev_warn(&adapter->dev, "nonzero length but no buffer\n"); - return -EFAULT; - } - - down(&iface->sem); + mutex_lock(&iface->mutex); - iface->address_byte = address<<1; - if (rw == I2C_SMBUS_READ) - iface->address_byte |= 1; + iface->address_byte = (address << 1) | rw; iface->command = command; iface->ptr = buffer; iface->len = len; @@ -355,25 +331,21 @@ static s32 scx200_acb_smbus_xfer(struct i2c_adapter *adapter, else iface->state = state_address; -#ifdef POLLED_MODE while (iface->state != state_idle) scx200_acb_poll(iface); -#else /* POLLED_MODE */ -#error Interrupt driven mode not implemented -#endif /* POLLED_MODE */ if (iface->needs_reset) scx200_acb_reset(iface); rc = iface->result; - up(&iface->sem); + mutex_unlock(&iface->mutex); if (rc == 0 && size == I2C_SMBUS_WORD_DATA && rw == I2C_SMBUS_READ) - data->word = le16_to_cpu(cur_word); + data->word = le16_to_cpu(cur_word); #ifdef DEBUG - DBG(": transfer done, result: %d", rc); + dev_dbg(&adapter->dev, "transfer done, result: %d", rc); if (buffer) { int i; printk(" data:"); @@ -400,17 +372,18 @@ static struct i2c_algorithm scx200_acb_algorithm = { }; static struct scx200_acb_iface *scx200_acb_list; +static DECLARE_MUTEX(scx200_acb_list_mutex); static int 
scx200_acb_probe(struct scx200_acb_iface *iface) { u8 val; /* Disable the ACCESS.bus device and Configure the SCL - frequency: 16 clock cycles */ + frequency: 16 clock cycles */ outb(0x70, ACBCTL2); if (inb(ACBCTL2) != 0x70) { - DBG("ACBCTL2 readback failed\n"); + pr_debug(NAME ": ACBCTL2 readback failed\n"); return -ENXIO; } @@ -418,7 +391,8 @@ static int scx200_acb_probe(struct scx200_acb_iface *iface) val = inb(ACBCTL1); if (val) { - DBG("disabled, but ACBCTL1=0x%02x\n", val); + pr_debug(NAME ": disabled, but ACBCTL1=0x%02x\n", + val); return -ENXIO; } @@ -428,18 +402,19 @@ static int scx200_acb_probe(struct scx200_acb_iface *iface) val = inb(ACBCTL1); if ((val & ACBCTL1_NMINTE) != ACBCTL1_NMINTE) { - DBG("enabled, but NMINTE won't be set, ACBCTL1=0x%02x\n", val); + pr_debug(NAME ": enabled, but NMINTE won't be set, " + "ACBCTL1=0x%02x\n", val); return -ENXIO; } return 0; } -static int __init scx200_acb_create(int base, int index) +static int __init scx200_acb_create(const char *text, int base, int index) { struct scx200_acb_iface *iface; struct i2c_adapter *adapter; - int rc = 0; + int rc; char description[64]; iface = kzalloc(sizeof(*iface), GFP_KERNEL); @@ -451,50 +426,51 @@ static int __init scx200_acb_create(int base, int index) adapter = &iface->adapter; i2c_set_adapdata(adapter, iface); - snprintf(adapter->name, I2C_NAME_SIZE, "SCx200 ACB%d", index); + snprintf(adapter->name, I2C_NAME_SIZE, "%s ACB%d", text, index); adapter->owner = THIS_MODULE; adapter->id = I2C_HW_SMBUS_SCX200; adapter->algo = &scx200_acb_algorithm; adapter->class = I2C_CLASS_HWMON; - init_MUTEX(&iface->sem); + mutex_init(&iface->mutex); + + snprintf(description, sizeof(description), "%s ACCESS.bus [%s]", + text, adapter->name); - snprintf(description, sizeof(description), "NatSemi SCx200 ACCESS.bus [%s]", adapter->name); if (request_region(base, 8, description) == 0) { - dev_err(&adapter->dev, "can't allocate io 0x%x-0x%x\n", + printk(KERN_ERR NAME ": can't allocate io 0x%x-0x%x\n", base, base + 8-1); rc = -EBUSY; - goto errout; + goto errout_free; } iface->base = base; rc = scx200_acb_probe(iface); if (rc) { - dev_warn(&adapter->dev, "probe failed\n"); - goto errout; + printk(KERN_WARNING NAME ": probe failed\n"); + goto errout_release; } scx200_acb_reset(iface); if (i2c_add_adapter(adapter) < 0) { - dev_err(&adapter->dev, "failed to register\n"); + printk(KERN_ERR NAME ": failed to register\n"); rc = -ENODEV; - goto errout; + goto errout_release; } - lock_kernel(); + down(&scx200_acb_list_mutex); iface->next = scx200_acb_list; scx200_acb_list = iface; - unlock_kernel(); + up(&scx200_acb_list_mutex); return 0; + errout_release: + release_region(iface->base, 8); + errout_free: + kfree(iface); errout: - if (iface) { - if (iface->base) - release_region(iface->base, 8); - kfree(iface); - } return rc; } @@ -504,50 +480,69 @@ static struct pci_device_id scx200[] = { { }, }; +static struct pci_device_id divil_pci[] = { + { PCI_DEVICE(PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_CS5535_ISA) }, + { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CS5536_ISA) }, + { } /* NULL entry */ +}; + +#define MSR_LBAR_SMB 0x5140000B + +static int scx200_add_cs553x(void) +{ + u32 low, hi; + u32 smb_base; + + /* Grab & reserve the SMB I/O range */ + rdmsr(MSR_LBAR_SMB, low, hi); + + /* Check the IO mask and whether SMB is enabled */ + if (hi != 0x0000F001) { + printk(KERN_WARNING NAME ": SMBus not enabled\n"); + return -ENODEV; + } + + /* SMBus IO size is 8 bytes */ + smb_base = low & 0x0000FFF8; + + return scx200_acb_create("CS5535", 
smb_base, 0); +} + static int __init scx200_acb_init(void) { int i; - int rc; + int rc = -ENODEV; pr_debug(NAME ": NatSemi SCx200 ACCESS.bus Driver\n"); /* Verify that this really is a SCx200 processor */ - if (pci_dev_present(scx200) == 0) - return -ENODEV; + if (pci_dev_present(scx200)) { + for (i = 0; i < MAX_DEVICES; ++i) { + if (base[i] > 0) + rc = scx200_acb_create("SCx200", base[i], i); + } + } else if (pci_dev_present(divil_pci)) + rc = scx200_add_cs553x(); - rc = -ENXIO; - for (i = 0; i < MAX_DEVICES; ++i) { - if (base[i] > 0) - rc = scx200_acb_create(base[i], i); - } - if (scx200_acb_list) - return 0; return rc; } static void __exit scx200_acb_cleanup(void) { struct scx200_acb_iface *iface; - lock_kernel(); + + down(&scx200_acb_list_mutex); while ((iface = scx200_acb_list) != NULL) { scx200_acb_list = iface->next; - unlock_kernel(); + up(&scx200_acb_list_mutex); i2c_del_adapter(&iface->adapter); release_region(iface->base, 8); kfree(iface); - lock_kernel(); + down(&scx200_acb_list_mutex); } - unlock_kernel(); + up(&scx200_acb_list_mutex); } module_init(scx200_acb_init); module_exit(scx200_acb_cleanup); - -/* - Local variables: - compile-command: "make -k -C ../.. SUBDIRS=drivers/i2c modules" - c-basic-offset: 8 - End: -*/ - diff --git a/drivers/i2c/chips/ds1374.c b/drivers/i2c/chips/ds1374.c index 0710b9da9d5..03d09ed5ec2 100644 --- a/drivers/i2c/chips/ds1374.c +++ b/drivers/i2c/chips/ds1374.c @@ -26,6 +26,7 @@ #include <linux/i2c.h> #include <linux/rtc.h> #include <linux/bcd.h> +#include <linux/mutex.h> #define DS1374_REG_TOD0 0x00 #define DS1374_REG_TOD1 0x01 @@ -41,7 +42,7 @@ #define DS1374_DRV_NAME "ds1374" -static DECLARE_MUTEX(ds1374_mutex); +static DEFINE_MUTEX(ds1374_mutex); static struct i2c_driver ds1374_driver; static struct i2c_client *save_client; @@ -114,7 +115,7 @@ ulong ds1374_get_rtc_time(void) ulong t1, t2; int limit = 10; /* arbitrary retry limit */ - down(&ds1374_mutex); + mutex_lock(&ds1374_mutex); /* * Since the reads are being performed one byte at a time using @@ -127,7 +128,7 @@ ulong ds1374_get_rtc_time(void) t2 = ds1374_read_rtc(); } while (t1 != t2 && limit--); - up(&ds1374_mutex); + mutex_unlock(&ds1374_mutex); if (t1 != t2) { dev_warn(&save_client->dev, @@ -145,7 +146,7 @@ static void ds1374_set_tlet(ulong arg) t1 = *(ulong *) arg; - down(&ds1374_mutex); + mutex_lock(&ds1374_mutex); /* * Since the writes are being performed one byte at a time using @@ -158,7 +159,7 @@ static void ds1374_set_tlet(ulong arg) t2 = ds1374_read_rtc(); } while (t1 != t2 && limit--); - up(&ds1374_mutex); + mutex_unlock(&ds1374_mutex); if (t1 != t2) dev_warn(&save_client->dev, diff --git a/drivers/i2c/chips/eeprom.c b/drivers/i2c/chips/eeprom.c index 41116b7947f..13c108269a6 100644 --- a/drivers/i2c/chips/eeprom.c +++ b/drivers/i2c/chips/eeprom.c @@ -33,6 +33,7 @@ #include <linux/sched.h> #include <linux/jiffies.h> #include <linux/i2c.h> +#include <linux/mutex.h> /* Addresses to scan */ static unsigned short normal_i2c[] = { 0x50, 0x51, 0x52, 0x53, 0x54, @@ -54,7 +55,7 @@ enum eeprom_nature { /* Each client has this additional data */ struct eeprom_data { struct i2c_client client; - struct semaphore update_lock; + struct mutex update_lock; u8 valid; /* bitfield, bit!=0 if slice is valid */ unsigned long last_updated[8]; /* In jiffies, 8 slices */ u8 data[EEPROM_SIZE]; /* Register values */ @@ -81,7 +82,7 @@ static void eeprom_update_client(struct i2c_client *client, u8 slice) struct eeprom_data *data = i2c_get_clientdata(client); int i, j; - down(&data->update_lock); + 
mutex_lock(&data->update_lock); if (!(data->valid & (1 << slice)) || time_after(jiffies, data->last_updated[slice] + 300 * HZ)) { @@ -107,7 +108,7 @@ static void eeprom_update_client(struct i2c_client *client, u8 slice) data->valid |= (1 << slice); } exit: - up(&data->update_lock); + mutex_unlock(&data->update_lock); } static ssize_t eeprom_read(struct kobject *kobj, char *buf, loff_t off, size_t count) @@ -187,7 +188,7 @@ static int eeprom_detect(struct i2c_adapter *adapter, int address, int kind) /* Fill in the remaining client fields */ strlcpy(new_client->name, "eeprom", I2C_NAME_SIZE); data->valid = 0; - init_MUTEX(&data->update_lock); + mutex_init(&data->update_lock); data->nature = UNKNOWN; /* Tell the I2C layer a new client has arrived */ diff --git a/drivers/i2c/chips/isp1301_omap.c b/drivers/i2c/chips/isp1301_omap.c index 1251c7fc18d..e6f1ab7b913 100644 --- a/drivers/i2c/chips/isp1301_omap.c +++ b/drivers/i2c/chips/isp1301_omap.c @@ -1635,8 +1635,6 @@ static struct i2c_driver isp1301_driver = { .driver = { .name = "isp1301_omap", }, - .id = 1301, /* FIXME "official", i2c-ids.h */ - .class = I2C_CLASS_HWMON, .attach_adapter = isp1301_scan_bus, .detach_client = isp1301_detach_client, }; diff --git a/drivers/i2c/chips/m41t00.c b/drivers/i2c/chips/m41t00.c index 2dc3d48375f..b5aabe7cf79 100644 --- a/drivers/i2c/chips/m41t00.c +++ b/drivers/i2c/chips/m41t00.c @@ -24,13 +24,14 @@ #include <linux/i2c.h> #include <linux/rtc.h> #include <linux/bcd.h> +#include <linux/mutex.h> #include <asm/time.h> #include <asm/rtc.h> #define M41T00_DRV_NAME "m41t00" -static DECLARE_MUTEX(m41t00_mutex); +static DEFINE_MUTEX(m41t00_mutex); static struct i2c_driver m41t00_driver; static struct i2c_client *save_client; @@ -54,7 +55,7 @@ m41t00_get_rtc_time(void) sec = min = hour = day = mon = year = 0; sec1 = min1 = hour1 = day1 = mon1 = year1 = 0; - down(&m41t00_mutex); + mutex_lock(&m41t00_mutex); do { if (((sec = i2c_smbus_read_byte_data(save_client, 0)) >= 0) && ((min = i2c_smbus_read_byte_data(save_client, 1)) @@ -80,7 +81,7 @@ m41t00_get_rtc_time(void) mon1 = mon; year1 = year; } while (--limit > 0); - up(&m41t00_mutex); + mutex_unlock(&m41t00_mutex); if (limit == 0) { dev_warn(&save_client->dev, @@ -125,7 +126,7 @@ m41t00_set_tlet(ulong arg) BIN_TO_BCD(tm.tm_mday); BIN_TO_BCD(tm.tm_year); - down(&m41t00_mutex); + mutex_lock(&m41t00_mutex); if ((i2c_smbus_write_byte_data(save_client, 0, tm.tm_sec & 0x7f) < 0) || (i2c_smbus_write_byte_data(save_client, 1, tm.tm_min & 0x7f) < 0) @@ -140,7 +141,7 @@ m41t00_set_tlet(ulong arg) dev_warn(&save_client->dev,"m41t00: can't write to rtc chip\n"); - up(&m41t00_mutex); + mutex_unlock(&m41t00_mutex); return; } diff --git a/drivers/i2c/chips/max6875.c b/drivers/i2c/chips/max6875.c index 6d3ff584155..88d2ddee449 100644 --- a/drivers/i2c/chips/max6875.c +++ b/drivers/i2c/chips/max6875.c @@ -31,7 +31,7 @@ #include <linux/module.h> #include <linux/slab.h> #include <linux/i2c.h> -#include <asm/semaphore.h> +#include <linux/mutex.h> /* Do not scan - the MAX6875 access method will write to some EEPROM chips */ static unsigned short normal_i2c[] = {I2C_CLIENT_END}; @@ -54,7 +54,7 @@ I2C_CLIENT_INSMOD_1(max6875); /* Each client has this additional data */ struct max6875_data { struct i2c_client client; - struct semaphore update_lock; + struct mutex update_lock; u32 valid; u8 data[USER_EEPROM_SIZE]; @@ -83,7 +83,7 @@ static void max6875_update_slice(struct i2c_client *client, int slice) if (slice >= USER_EEPROM_SLICES) return; - down(&data->update_lock); + 
mutex_lock(&data->update_lock); buf = &data->data[slice << SLICE_BITS]; @@ -122,7 +122,7 @@ static void max6875_update_slice(struct i2c_client *client, int slice) data->valid |= (1 << slice); } exit_up: - up(&data->update_lock); + mutex_unlock(&data->update_lock); } static ssize_t max6875_read(struct kobject *kobj, char *buf, loff_t off, @@ -196,7 +196,7 @@ static int max6875_detect(struct i2c_adapter *adapter, int address, int kind) real_client->driver = &max6875_driver; real_client->flags = 0; strlcpy(real_client->name, "max6875", I2C_NAME_SIZE); - init_MUTEX(&data->update_lock); + mutex_init(&data->update_lock); /* Init fake client data */ /* set the client data to the i2c_client so that it will get freed */ diff --git a/drivers/i2c/chips/pcf8591.c b/drivers/i2c/chips/pcf8591.c index 36cff09c678..925a6b371fd 100644 --- a/drivers/i2c/chips/pcf8591.c +++ b/drivers/i2c/chips/pcf8591.c @@ -24,6 +24,7 @@ #include <linux/init.h> #include <linux/slab.h> #include <linux/i2c.h> +#include <linux/mutex.h> /* Addresses to scan */ static unsigned short normal_i2c[] = { 0x48, 0x49, 0x4a, 0x4b, 0x4c, @@ -74,7 +75,7 @@ MODULE_PARM_DESC(input_mode, struct pcf8591_data { struct i2c_client client; - struct semaphore update_lock; + struct mutex update_lock; u8 control; u8 aout; @@ -144,13 +145,13 @@ static ssize_t set_out0_enable(struct device *dev, struct device_attribute *attr struct pcf8591_data *data = i2c_get_clientdata(client); unsigned long val = simple_strtoul(buf, NULL, 10); - down(&data->update_lock); + mutex_lock(&data->update_lock); if (val) data->control |= PCF8591_CONTROL_AOEF; else data->control &= ~PCF8591_CONTROL_AOEF; i2c_smbus_write_byte(client, data->control); - up(&data->update_lock); + mutex_unlock(&data->update_lock); return count; } @@ -200,7 +201,7 @@ static int pcf8591_detect(struct i2c_adapter *adapter, int address, int kind) /* Fill in the remaining client fields and put it into the global list */ strlcpy(new_client->name, "pcf8591", I2C_NAME_SIZE); - init_MUTEX(&data->update_lock); + mutex_init(&data->update_lock); /* Tell the I2C layer a new client has arrived */ if ((err = i2c_attach_client(new_client))) @@ -265,7 +266,7 @@ static int pcf8591_read_channel(struct device *dev, int channel) struct i2c_client *client = to_i2c_client(dev); struct pcf8591_data *data = i2c_get_clientdata(client); - down(&data->update_lock); + mutex_lock(&data->update_lock); if ((data->control & PCF8591_CONTROL_AICH_MASK) != channel) { data->control = (data->control & ~PCF8591_CONTROL_AICH_MASK) @@ -278,7 +279,7 @@ static int pcf8591_read_channel(struct device *dev, int channel) } value = i2c_smbus_read_byte(client); - up(&data->update_lock); + mutex_unlock(&data->update_lock); if ((channel == 2 && input_mode == 2) || (channel != 3 && (input_mode == 1 || input_mode == 3))) diff --git a/drivers/i2c/chips/tps65010.c b/drivers/i2c/chips/tps65010.c index 1af3dfbb808..179b1e022d8 100644 --- a/drivers/i2c/chips/tps65010.c +++ b/drivers/i2c/chips/tps65010.c @@ -32,6 +32,7 @@ #include <linux/suspend.h> #include <linux/debugfs.h> #include <linux/seq_file.h> +#include <linux/mutex.h> #include <asm/irq.h> #include <asm/mach-types.h> @@ -81,7 +82,7 @@ enum tps_model { struct tps65010 { struct i2c_client client; - struct semaphore lock; + struct mutex lock; int irq; struct work_struct work; struct dentry *file; @@ -218,7 +219,7 @@ static int dbg_show(struct seq_file *s, void *_) seq_printf(s, "driver %s\nversion %s\nchip %s\n\n", DRIVER_NAME, DRIVER_VERSION, chip); - down(&tps->lock); + mutex_lock(&tps->lock); /* 
FIXME how can we tell whether a battery is present? * likely involves a charge gauging chip (like BQ26501). @@ -300,7 +301,7 @@ static int dbg_show(struct seq_file *s, void *_) (v2 & (1 << (4 + i))) ? "rising" : "falling"); } - up(&tps->lock); + mutex_unlock(&tps->lock); return 0; } @@ -416,7 +417,7 @@ static void tps65010_work(void *_tps) { struct tps65010 *tps = _tps; - down(&tps->lock); + mutex_lock(&tps->lock); tps65010_interrupt(tps); @@ -444,7 +445,7 @@ static void tps65010_work(void *_tps) if (test_and_clear_bit(FLAG_IRQ_ENABLE, &tps->flags)) enable_irq(tps->irq); - up(&tps->lock); + mutex_unlock(&tps->lock); } static irqreturn_t tps65010_irq(int irq, void *_tps, struct pt_regs *regs) @@ -505,7 +506,7 @@ tps65010_probe(struct i2c_adapter *bus, int address, int kind) if (!tps) return 0; - init_MUTEX(&tps->lock); + mutex_init(&tps->lock); INIT_WORK(&tps->work, tps65010_work, tps); tps->irq = -1; tps->client.addr = address; @@ -695,7 +696,7 @@ int tps65010_set_gpio_out_value(unsigned gpio, unsigned value) if ((gpio < GPIO1) || (gpio > GPIO4)) return -EINVAL; - down(&the_tps->lock); + mutex_lock(&the_tps->lock); defgpio = i2c_smbus_read_byte_data(&the_tps->client, TPS_DEFGPIO); @@ -720,7 +721,7 @@ int tps65010_set_gpio_out_value(unsigned gpio, unsigned value) gpio, value ? "high" : "low", i2c_smbus_read_byte_data(&the_tps->client, TPS_DEFGPIO)); - up(&the_tps->lock); + mutex_unlock(&the_tps->lock); return status; } EXPORT_SYMBOL(tps65010_set_gpio_out_value); @@ -745,7 +746,7 @@ int tps65010_set_led(unsigned led, unsigned mode) led = LED2; } - down(&the_tps->lock); + mutex_lock(&the_tps->lock); pr_debug("%s: led%i_on 0x%02x\n", DRIVER_NAME, led, i2c_smbus_read_byte_data(&the_tps->client, @@ -771,7 +772,7 @@ int tps65010_set_led(unsigned led, unsigned mode) default: printk(KERN_ERR "%s: Wrong mode parameter for set_led()\n", DRIVER_NAME); - up(&the_tps->lock); + mutex_unlock(&the_tps->lock); return -EINVAL; } @@ -781,7 +782,7 @@ int tps65010_set_led(unsigned led, unsigned mode) if (status != 0) { printk(KERN_ERR "%s: Failed to write led%i_on register\n", DRIVER_NAME, led); - up(&the_tps->lock); + mutex_unlock(&the_tps->lock); return status; } @@ -794,7 +795,7 @@ int tps65010_set_led(unsigned led, unsigned mode) if (status != 0) { printk(KERN_ERR "%s: Failed to write led%i_per register\n", DRIVER_NAME, led); - up(&the_tps->lock); + mutex_unlock(&the_tps->lock); return status; } @@ -802,7 +803,7 @@ int tps65010_set_led(unsigned led, unsigned mode) i2c_smbus_read_byte_data(&the_tps->client, TPS_LED1_PER + offs)); - up(&the_tps->lock); + mutex_unlock(&the_tps->lock); return status; } @@ -820,7 +821,7 @@ int tps65010_set_vib(unsigned value) if (!the_tps) return -ENODEV; - down(&the_tps->lock); + mutex_lock(&the_tps->lock); vdcdc2 = i2c_smbus_read_byte_data(&the_tps->client, TPS_VDCDC2); vdcdc2 &= ~(1 << 1); @@ -831,7 +832,7 @@ int tps65010_set_vib(unsigned value) pr_debug("%s: vibrator %s\n", DRIVER_NAME, value ? "on" : "off"); - up(&the_tps->lock); + mutex_unlock(&the_tps->lock); return status; } EXPORT_SYMBOL(tps65010_set_vib); @@ -848,7 +849,7 @@ int tps65010_set_low_pwr(unsigned mode) if (!the_tps) return -ENODEV; - down(&the_tps->lock); + mutex_lock(&the_tps->lock); pr_debug("%s: %s low_pwr, vdcdc1 0x%02x\n", DRIVER_NAME, mode ? 
"enable" : "disable", @@ -876,7 +877,7 @@ int tps65010_set_low_pwr(unsigned mode) pr_debug("%s: vdcdc1 0x%02x\n", DRIVER_NAME, i2c_smbus_read_byte_data(&the_tps->client, TPS_VDCDC1)); - up(&the_tps->lock); + mutex_unlock(&the_tps->lock); return status; } @@ -894,7 +895,7 @@ int tps65010_config_vregs1(unsigned value) if (!the_tps) return -ENODEV; - down(&the_tps->lock); + mutex_lock(&the_tps->lock); pr_debug("%s: vregs1 0x%02x\n", DRIVER_NAME, i2c_smbus_read_byte_data(&the_tps->client, TPS_VREGS1)); @@ -909,7 +910,7 @@ int tps65010_config_vregs1(unsigned value) pr_debug("%s: vregs1 0x%02x\n", DRIVER_NAME, i2c_smbus_read_byte_data(&the_tps->client, TPS_VREGS1)); - up(&the_tps->lock); + mutex_unlock(&the_tps->lock); return status; } @@ -931,7 +932,7 @@ int tps65013_set_low_pwr(unsigned mode) if (!the_tps || the_tps->por) return -ENODEV; - down(&the_tps->lock); + mutex_lock(&the_tps->lock); pr_debug("%s: %s low_pwr, chgconfig 0x%02x vdcdc1 0x%02x\n", DRIVER_NAME, @@ -959,7 +960,7 @@ int tps65013_set_low_pwr(unsigned mode) if (status != 0) { printk(KERN_ERR "%s: Failed to write chconfig register\n", DRIVER_NAME); - up(&the_tps->lock); + mutex_unlock(&the_tps->lock); return status; } @@ -977,7 +978,7 @@ int tps65013_set_low_pwr(unsigned mode) pr_debug("%s: vdcdc1 0x%02x\n", DRIVER_NAME, i2c_smbus_read_byte_data(&the_tps->client, TPS_VDCDC1)); - up(&the_tps->lock); + mutex_unlock(&the_tps->lock); return status; } diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c index 1a2c9ab5d9e..45e2cdf5473 100644 --- a/drivers/i2c/i2c-core.c +++ b/drivers/i2c/i2c-core.c @@ -31,12 +31,13 @@ #include <linux/idr.h> #include <linux/seq_file.h> #include <linux/platform_device.h> +#include <linux/mutex.h> #include <asm/uaccess.h> static LIST_HEAD(adapters); static LIST_HEAD(drivers); -static DECLARE_MUTEX(core_lists); +static DEFINE_MUTEX(core_lists); static DEFINE_IDR(i2c_adapter_idr); /* match always succeeds, as we want the probe() to tell if we really accept this match */ @@ -153,7 +154,7 @@ int i2c_add_adapter(struct i2c_adapter *adap) struct list_head *item; struct i2c_driver *driver; - down(&core_lists); + mutex_lock(&core_lists); if (idr_pre_get(&i2c_adapter_idr, GFP_KERNEL) == 0) { res = -ENOMEM; @@ -168,8 +169,8 @@ int i2c_add_adapter(struct i2c_adapter *adap) } adap->nr = id & MAX_ID_MASK; - init_MUTEX(&adap->bus_lock); - init_MUTEX(&adap->clist_lock); + mutex_init(&adap->bus_lock); + mutex_init(&adap->clist_lock); list_add_tail(&adap->list,&adapters); INIT_LIST_HEAD(&adap->clients); @@ -203,7 +204,7 @@ int i2c_add_adapter(struct i2c_adapter *adap) } out_unlock: - up(&core_lists); + mutex_unlock(&core_lists); return res; } @@ -216,7 +217,7 @@ int i2c_del_adapter(struct i2c_adapter *adap) struct i2c_client *client; int res = 0; - down(&core_lists); + mutex_lock(&core_lists); /* First make sure that this adapter was ever added */ list_for_each_entry(adap_from_list, &adapters, list) { @@ -272,7 +273,7 @@ int i2c_del_adapter(struct i2c_adapter *adap) dev_dbg(&adap->dev, "adapter [%s] unregistered\n", adap->name); out_unlock: - up(&core_lists); + mutex_unlock(&core_lists); return res; } @@ -287,9 +288,7 @@ int i2c_register_driver(struct module *owner, struct i2c_driver *driver) { struct list_head *item; struct i2c_adapter *adapter; - int res = 0; - - down(&core_lists); + int res; /* add the driver to the list of i2c drivers in the driver core */ driver->driver.owner = owner; @@ -297,8 +296,10 @@ int i2c_register_driver(struct module *owner, struct i2c_driver *driver) res = 
driver_register(&driver->driver); if (res) - goto out_unlock; + return res; + mutex_lock(&core_lists); + list_add_tail(&driver->list,&drivers); pr_debug("i2c-core: driver [%s] registered\n", driver->driver.name); @@ -310,9 +311,8 @@ int i2c_register_driver(struct module *owner, struct i2c_driver *driver) } } - out_unlock: - up(&core_lists); - return res; + mutex_unlock(&core_lists); + return 0; } EXPORT_SYMBOL(i2c_register_driver); @@ -324,7 +324,7 @@ int i2c_del_driver(struct i2c_driver *driver) int res = 0; - down(&core_lists); + mutex_lock(&core_lists); /* Have a look at each adapter, if clients of this driver are still * attached. If so, detach them to be able to kill the driver @@ -363,7 +363,7 @@ int i2c_del_driver(struct i2c_driver *driver) pr_debug("i2c-core: driver [%s] unregistered\n", driver->driver.name); out_unlock: - up(&core_lists); + mutex_unlock(&core_lists); return 0; } @@ -384,9 +384,9 @@ int i2c_check_addr(struct i2c_adapter *adapter, int addr) { int rval; - down(&adapter->clist_lock); + mutex_lock(&adapter->clist_lock); rval = __i2c_check_addr(adapter, addr); - up(&adapter->clist_lock); + mutex_unlock(&adapter->clist_lock); return rval; } @@ -395,13 +395,13 @@ int i2c_attach_client(struct i2c_client *client) { struct i2c_adapter *adapter = client->adapter; - down(&adapter->clist_lock); + mutex_lock(&adapter->clist_lock); if (__i2c_check_addr(client->adapter, client->addr)) { - up(&adapter->clist_lock); + mutex_unlock(&adapter->clist_lock); return -EBUSY; } list_add_tail(&client->list,&adapter->clients); - up(&adapter->clist_lock); + mutex_unlock(&adapter->clist_lock); if (adapter->client_register) { if (adapter->client_register(client)) { @@ -450,12 +450,12 @@ int i2c_detach_client(struct i2c_client *client) } } - down(&adapter->clist_lock); + mutex_lock(&adapter->clist_lock); list_del(&client->list); init_completion(&client->released); device_remove_file(&client->dev, &dev_attr_client_name); device_unregister(&client->dev); - up(&adapter->clist_lock); + mutex_unlock(&adapter->clist_lock); wait_for_completion(&client->released); out: @@ -513,19 +513,19 @@ void i2c_clients_command(struct i2c_adapter *adap, unsigned int cmd, void *arg) struct list_head *item; struct i2c_client *client; - down(&adap->clist_lock); + mutex_lock(&adap->clist_lock); list_for_each(item,&adap->clients) { client = list_entry(item, struct i2c_client, list); if (!try_module_get(client->driver->driver.owner)) continue; if (NULL != client->driver->command) { - up(&adap->clist_lock); + mutex_unlock(&adap->clist_lock); client->driver->command(client,cmd,arg); - down(&adap->clist_lock); + mutex_lock(&adap->clist_lock); } module_put(client->driver->driver.owner); } - up(&adap->clist_lock); + mutex_unlock(&adap->clist_lock); } static int __init i2c_init(void) @@ -569,9 +569,9 @@ int i2c_transfer(struct i2c_adapter * adap, struct i2c_msg *msgs, int num) } #endif - down(&adap->bus_lock); + mutex_lock(&adap->bus_lock); ret = adap->algo->master_xfer(adap,msgs,num); - up(&adap->bus_lock); + mutex_unlock(&adap->bus_lock); return ret; } else { @@ -779,12 +779,12 @@ struct i2c_adapter* i2c_get_adapter(int id) { struct i2c_adapter *adapter; - down(&core_lists); + mutex_lock(&core_lists); adapter = (struct i2c_adapter *)idr_find(&i2c_adapter_idr, id); if (adapter && !try_module_get(adapter->owner)) adapter = NULL; - up(&core_lists); + mutex_unlock(&core_lists); return adapter; } @@ -919,12 +919,11 @@ s32 i2c_smbus_write_block_data(struct i2c_client *client, u8 command, u8 length, u8 *values) { union i2c_smbus_data 
data; - int i; + if (length > I2C_SMBUS_BLOCK_MAX) length = I2C_SMBUS_BLOCK_MAX; - for (i = 1; i <= length; i++) - data.block[i] = values[i-1]; data.block[0] = length; + memcpy(&data.block[1], values, length); return i2c_smbus_xfer(client->adapter,client->addr,client->flags, I2C_SMBUS_WRITE,command, I2C_SMBUS_BLOCK_DATA,&data); @@ -934,16 +933,14 @@ s32 i2c_smbus_write_block_data(struct i2c_client *client, u8 command, s32 i2c_smbus_read_i2c_block_data(struct i2c_client *client, u8 command, u8 *values) { union i2c_smbus_data data; - int i; + if (i2c_smbus_xfer(client->adapter,client->addr,client->flags, I2C_SMBUS_READ,command, I2C_SMBUS_I2C_BLOCK_DATA,&data)) return -1; - else { - for (i = 1; i <= data.block[0]; i++) - values[i-1] = data.block[i]; - return data.block[0]; - } + + memcpy(values, &data.block[1], data.block[0]); + return data.block[0]; } s32 i2c_smbus_write_i2c_block_data(struct i2c_client *client, u8 command, @@ -1118,10 +1115,10 @@ s32 i2c_smbus_xfer(struct i2c_adapter * adapter, u16 addr, unsigned short flags, flags &= I2C_M_TEN | I2C_CLIENT_PEC; if (adapter->algo->smbus_xfer) { - down(&adapter->bus_lock); + mutex_lock(&adapter->bus_lock); res = adapter->algo->smbus_xfer(adapter,addr,flags,read_write, command,size,data); - up(&adapter->bus_lock); + mutex_unlock(&adapter->bus_lock); } else res = i2c_smbus_xfer_emulated(adapter,addr,flags,read_write, command,size,data); diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c index 3325660f724..c7671e18801 100644 --- a/drivers/ide/ide-cd.c +++ b/drivers/ide/ide-cd.c @@ -313,6 +313,7 @@ #include <linux/cdrom.h> #include <linux/ide.h> #include <linux/completion.h> +#include <linux/mutex.h> #include <scsi/scsi.h> /* For SCSI -> ATAPI command conversion */ @@ -324,7 +325,7 @@ #include "ide-cd.h" -static DECLARE_MUTEX(idecd_ref_sem); +static DEFINE_MUTEX(idecd_ref_mutex); #define to_ide_cd(obj) container_of(obj, struct cdrom_info, kref) @@ -335,11 +336,11 @@ static struct cdrom_info *ide_cd_get(struct gendisk *disk) { struct cdrom_info *cd = NULL; - down(&idecd_ref_sem); + mutex_lock(&idecd_ref_mutex); cd = ide_cd_g(disk); if (cd) kref_get(&cd->kref); - up(&idecd_ref_sem); + mutex_unlock(&idecd_ref_mutex); return cd; } @@ -347,9 +348,9 @@ static void ide_cd_release(struct kref *); static void ide_cd_put(struct cdrom_info *cd) { - down(&idecd_ref_sem); + mutex_lock(&idecd_ref_mutex); kref_put(&cd->kref, ide_cd_release); - up(&idecd_ref_sem); + mutex_unlock(&idecd_ref_mutex); } /**************************************************************************** @@ -2471,52 +2472,6 @@ static int ide_cdrom_packet(struct cdrom_device_info *cdi, } static -int ide_cdrom_dev_ioctl (struct cdrom_device_info *cdi, - unsigned int cmd, unsigned long arg) -{ - struct packet_command cgc; - char buffer[16]; - int stat; - - init_cdrom_command(&cgc, buffer, sizeof(buffer), CGC_DATA_UNKNOWN); - - /* These will be moved into the Uniform layer shortly... 
*/ - switch (cmd) { - case CDROMSETSPINDOWN: { - char spindown; - - if (copy_from_user(&spindown, (void __user *) arg, sizeof(char))) - return -EFAULT; - - if ((stat = cdrom_mode_sense(cdi, &cgc, GPMODE_CDROM_PAGE, 0))) - return stat; - - buffer[11] = (buffer[11] & 0xf0) | (spindown & 0x0f); - - return cdrom_mode_select(cdi, &cgc); - } - - case CDROMGETSPINDOWN: { - char spindown; - - if ((stat = cdrom_mode_sense(cdi, &cgc, GPMODE_CDROM_PAGE, 0))) - return stat; - - spindown = buffer[11] & 0x0f; - - if (copy_to_user((void __user *) arg, &spindown, sizeof (char))) - return -EFAULT; - - return 0; - } - - default: - return -EINVAL; - } - -} - -static int ide_cdrom_audio_ioctl (struct cdrom_device_info *cdi, unsigned int cmd, void *arg) @@ -2852,12 +2807,11 @@ static struct cdrom_device_ops ide_cdrom_dops = { .get_mcn = ide_cdrom_get_mcn, .reset = ide_cdrom_reset, .audio_ioctl = ide_cdrom_audio_ioctl, - .dev_ioctl = ide_cdrom_dev_ioctl, .capability = CDC_CLOSE_TRAY | CDC_OPEN_TRAY | CDC_LOCK | CDC_SELECT_SPEED | CDC_SELECT_DISC | CDC_MULTI_SESSION | CDC_MCN | CDC_MEDIA_CHANGED | CDC_PLAY_AUDIO | CDC_RESET | - CDC_IOCTLS | CDC_DRIVE_STATUS | CDC_CD_R | + CDC_DRIVE_STATUS | CDC_CD_R | CDC_CD_RW | CDC_DVD | CDC_DVD_R| CDC_DVD_RAM | CDC_GENERIC_PACKET | CDC_MO_DRIVE | CDC_MRW | CDC_MRW_W | CDC_RAM, @@ -3367,6 +3321,45 @@ static int idecd_release(struct inode * inode, struct file * file) return 0; } +static int idecd_set_spindown(struct cdrom_device_info *cdi, unsigned long arg) +{ + struct packet_command cgc; + char buffer[16]; + int stat; + char spindown; + + if (copy_from_user(&spindown, (void __user *)arg, sizeof(char))) + return -EFAULT; + + init_cdrom_command(&cgc, buffer, sizeof(buffer), CGC_DATA_UNKNOWN); + + stat = cdrom_mode_sense(cdi, &cgc, GPMODE_CDROM_PAGE, 0); + if (stat) + return stat; + + buffer[11] = (buffer[11] & 0xf0) | (spindown & 0x0f); + return cdrom_mode_select(cdi, &cgc); +} + +static int idecd_get_spindown(struct cdrom_device_info *cdi, unsigned long arg) +{ + struct packet_command cgc; + char buffer[16]; + int stat; + char spindown; + + init_cdrom_command(&cgc, buffer, sizeof(buffer), CGC_DATA_UNKNOWN); + + stat = cdrom_mode_sense(cdi, &cgc, GPMODE_CDROM_PAGE, 0); + if (stat) + return stat; + + spindown = buffer[11] & 0x0f; + if (copy_to_user((void __user *)arg, &spindown, sizeof (char))) + return -EFAULT; + return 0; +} + static int idecd_ioctl (struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg) { @@ -3374,7 +3367,16 @@ static int idecd_ioctl (struct inode *inode, struct file *file, struct cdrom_info *info = ide_cd_g(bdev->bd_disk); int err; - err = generic_ide_ioctl(info->drive, file, bdev, cmd, arg); + switch (cmd) { + case CDROMSETSPINDOWN: + return idecd_set_spindown(&info->devinfo, arg); + case CDROMGETSPINDOWN: + return idecd_get_spindown(&info->devinfo, arg); + default: + break; + } + + err = generic_ide_ioctl(info->drive, file, bdev, cmd, arg); if (err == -EINVAL) err = cdrom_ioctl(file, &info->devinfo, inode, cmd, arg); diff --git a/drivers/ide/ide-disk.c b/drivers/ide/ide-disk.c index 09086b8b648..e238b7da824 100644 --- a/drivers/ide/ide-disk.c +++ b/drivers/ide/ide-disk.c @@ -60,6 +60,7 @@ #include <linux/genhd.h> #include <linux/slab.h> #include <linux/delay.h> +#include <linux/mutex.h> #define _IDE_DISK @@ -78,7 +79,7 @@ struct ide_disk_obj { struct kref kref; }; -static DECLARE_MUTEX(idedisk_ref_sem); +static DEFINE_MUTEX(idedisk_ref_mutex); #define to_ide_disk(obj) container_of(obj, struct ide_disk_obj, kref) @@ -89,11 +90,11 @@ 
static struct ide_disk_obj *ide_disk_get(struct gendisk *disk) { struct ide_disk_obj *idkp = NULL; - down(&idedisk_ref_sem); + mutex_lock(&idedisk_ref_mutex); idkp = ide_disk_g(disk); if (idkp) kref_get(&idkp->kref); - up(&idedisk_ref_sem); + mutex_unlock(&idedisk_ref_mutex); return idkp; } @@ -101,9 +102,9 @@ static void ide_disk_release(struct kref *); static void ide_disk_put(struct ide_disk_obj *idkp) { - down(&idedisk_ref_sem); + mutex_lock(&idedisk_ref_mutex); kref_put(&idkp->kref, ide_disk_release); - up(&idedisk_ref_sem); + mutex_unlock(&idedisk_ref_mutex); } /* diff --git a/drivers/ide/ide-floppy.c b/drivers/ide/ide-floppy.c index 1f8db9ac05d..a53e3ce4a14 100644 --- a/drivers/ide/ide-floppy.c +++ b/drivers/ide/ide-floppy.c @@ -98,6 +98,7 @@ #include <linux/cdrom.h> #include <linux/ide.h> #include <linux/bitops.h> +#include <linux/mutex.h> #include <asm/byteorder.h> #include <asm/irq.h> @@ -517,7 +518,7 @@ typedef struct { u8 reserved[4]; } idefloppy_mode_parameter_header_t; -static DECLARE_MUTEX(idefloppy_ref_sem); +static DEFINE_MUTEX(idefloppy_ref_mutex); #define to_ide_floppy(obj) container_of(obj, struct ide_floppy_obj, kref) @@ -528,11 +529,11 @@ static struct ide_floppy_obj *ide_floppy_get(struct gendisk *disk) { struct ide_floppy_obj *floppy = NULL; - down(&idefloppy_ref_sem); + mutex_lock(&idefloppy_ref_mutex); floppy = ide_floppy_g(disk); if (floppy) kref_get(&floppy->kref); - up(&idefloppy_ref_sem); + mutex_unlock(&idefloppy_ref_mutex); return floppy; } @@ -540,9 +541,9 @@ static void ide_floppy_release(struct kref *); static void ide_floppy_put(struct ide_floppy_obj *floppy) { - down(&idefloppy_ref_sem); + mutex_lock(&idefloppy_ref_mutex); kref_put(&floppy->kref, ide_floppy_release); - up(&idefloppy_ref_sem); + mutex_unlock(&idefloppy_ref_mutex); } /* diff --git a/drivers/ide/ide-tape.c b/drivers/ide/ide-tape.c index 0101d0def7c..ebc59064b47 100644 --- a/drivers/ide/ide-tape.c +++ b/drivers/ide/ide-tape.c @@ -443,6 +443,7 @@ #include <linux/smp_lock.h> #include <linux/completion.h> #include <linux/bitops.h> +#include <linux/mutex.h> #include <asm/byteorder.h> #include <asm/irq.h> @@ -1011,7 +1012,7 @@ typedef struct ide_tape_obj { int debug_level; } idetape_tape_t; -static DECLARE_MUTEX(idetape_ref_sem); +static DEFINE_MUTEX(idetape_ref_mutex); static struct class *idetape_sysfs_class; @@ -1024,11 +1025,11 @@ static struct ide_tape_obj *ide_tape_get(struct gendisk *disk) { struct ide_tape_obj *tape = NULL; - down(&idetape_ref_sem); + mutex_lock(&idetape_ref_mutex); tape = ide_tape_g(disk); if (tape) kref_get(&tape->kref); - up(&idetape_ref_sem); + mutex_unlock(&idetape_ref_mutex); return tape; } @@ -1036,9 +1037,9 @@ static void ide_tape_release(struct kref *); static void ide_tape_put(struct ide_tape_obj *tape) { - down(&idetape_ref_sem); + mutex_lock(&idetape_ref_mutex); kref_put(&tape->kref, ide_tape_release); - up(&idetape_ref_sem); + mutex_unlock(&idetape_ref_mutex); } /* @@ -1290,11 +1291,11 @@ static struct ide_tape_obj *ide_tape_chrdev_get(unsigned int i) { struct ide_tape_obj *tape = NULL; - down(&idetape_ref_sem); + mutex_lock(&idetape_ref_mutex); tape = idetape_devs[i]; if (tape) kref_get(&tape->kref); - up(&idetape_ref_sem); + mutex_unlock(&idetape_ref_mutex); return tape; } @@ -4870,11 +4871,11 @@ static int ide_tape_probe(ide_drive_t *drive) drive->driver_data = tape; - down(&idetape_ref_sem); + mutex_lock(&idetape_ref_mutex); for (minor = 0; idetape_devs[minor]; minor++) ; idetape_devs[minor] = tape; - up(&idetape_ref_sem); + 
mutex_unlock(&idetape_ref_mutex); idetape_setup(drive, tape, minor); diff --git a/drivers/isdn/capi/kcapi.c b/drivers/isdn/capi/kcapi.c index feec40cf590..8c4fcb9027b 100644 --- a/drivers/isdn/capi/kcapi.c +++ b/drivers/isdn/capi/kcapi.c @@ -32,6 +32,7 @@ #ifdef CONFIG_AVMB1_COMPAT #include <linux/b1lli.h> #endif +#include <linux/mutex.h> static char *revision = "$Revision: 1.1.2.8 $"; @@ -66,7 +67,7 @@ LIST_HEAD(capi_drivers); DEFINE_RWLOCK(capi_drivers_list_lock); static DEFINE_RWLOCK(application_lock); -static DECLARE_MUTEX(controller_sem); +static DEFINE_MUTEX(controller_mutex); struct capi20_appl *capi_applications[CAPI_MAXAPPL]; struct capi_ctr *capi_cards[CAPI_MAXCONTR]; @@ -395,20 +396,20 @@ attach_capi_ctr(struct capi_ctr *card) { int i; - down(&controller_sem); + mutex_lock(&controller_mutex); for (i = 0; i < CAPI_MAXCONTR; i++) { if (capi_cards[i] == NULL) break; } if (i == CAPI_MAXCONTR) { - up(&controller_sem); + mutex_unlock(&controller_mutex); printk(KERN_ERR "kcapi: out of controller slots\n"); return -EBUSY; } capi_cards[i] = card; - up(&controller_sem); + mutex_unlock(&controller_mutex); card->nrecvctlpkt = 0; card->nrecvdatapkt = 0; @@ -531,13 +532,13 @@ u16 capi20_register(struct capi20_appl *ap) write_unlock_irqrestore(&application_lock, flags); - down(&controller_sem); + mutex_lock(&controller_mutex); for (i = 0; i < CAPI_MAXCONTR; i++) { if (!capi_cards[i] || capi_cards[i]->cardstate != CARD_RUNNING) continue; register_appl(capi_cards[i], applid, &ap->rparam); } - up(&controller_sem); + mutex_unlock(&controller_mutex); if (showcapimsgs & 1) { printk(KERN_DEBUG "kcapi: appl %d up\n", applid); @@ -560,13 +561,13 @@ u16 capi20_release(struct capi20_appl *ap) capi_applications[ap->applid - 1] = NULL; write_unlock_irqrestore(&application_lock, flags); - down(&controller_sem); + mutex_lock(&controller_mutex); for (i = 0; i < CAPI_MAXCONTR; i++) { if (!capi_cards[i] || capi_cards[i]->cardstate != CARD_RUNNING) continue; release_appl(capi_cards[i], ap->applid); } - up(&controller_sem); + mutex_unlock(&controller_mutex); flush_scheduled_work(); skb_queue_purge(&ap->recv_queue); diff --git a/drivers/isdn/hisax/config.c b/drivers/isdn/hisax/config.c index df9d6520181..27332506f9f 100644 --- a/drivers/isdn/hisax/config.c +++ b/drivers/isdn/hisax/config.c @@ -25,7 +25,6 @@ #include <linux/workqueue.h> #include <linux/interrupt.h> #define HISAX_STATUS_BUFSIZE 4096 -#define INCLUDE_INLINE_FUNCS /* * This structure array contains one entry per card. 
An entry looks diff --git a/drivers/isdn/hisax/elsa.c b/drivers/isdn/hisax/elsa.c index 110e9fd669c..f8ca4b32333 100644 --- a/drivers/isdn/hisax/elsa.c +++ b/drivers/isdn/hisax/elsa.c @@ -108,7 +108,6 @@ static const char *ITACVer[] = #define ELSA_ASSIGN 4 #define RS_ISR_PASS_LIMIT 256 -#define _INLINE_ inline #define FLG_MODEM_ACTIVE 1 /* IPAC AUX */ #define ELSA_IPAC_LINE_LED 0x40 /* Bit 6 Gelbe LED */ diff --git a/drivers/media/video/adv7170.c b/drivers/media/video/adv7170.c index d18bf9097dc..671e36db224 100644 --- a/drivers/media/video/adv7170.c +++ b/drivers/media/video/adv7170.c @@ -53,7 +53,6 @@ MODULE_AUTHOR("Maxim Yevtyushkin"); MODULE_LICENSE("GPL"); #include <linux/i2c.h> -#include <linux/i2c-dev.h> #define I2C_NAME(x) (x)->name diff --git a/drivers/media/video/adv7175.c b/drivers/media/video/adv7175.c index 1a2b111897d..085e8863cac 100644 --- a/drivers/media/video/adv7175.c +++ b/drivers/media/video/adv7175.c @@ -49,7 +49,6 @@ MODULE_AUTHOR("Dave Perks"); MODULE_LICENSE("GPL"); #include <linux/i2c.h> -#include <linux/i2c-dev.h> #define I2C_NAME(s) (s)->name diff --git a/drivers/media/video/bt819.c b/drivers/media/video/bt819.c index c2c450fbf68..d8a18a6a5be 100644 --- a/drivers/media/video/bt819.c +++ b/drivers/media/video/bt819.c @@ -53,7 +53,6 @@ MODULE_AUTHOR("Mike Bernson & Dave Perks"); MODULE_LICENSE("GPL"); #include <linux/i2c.h> -#include <linux/i2c-dev.h> #define I2C_NAME(s) (s)->name diff --git a/drivers/media/video/bt856.c b/drivers/media/video/bt856.c index 7c68937853b..4d47a0a0e97 100644 --- a/drivers/media/video/bt856.c +++ b/drivers/media/video/bt856.c @@ -53,7 +53,6 @@ MODULE_AUTHOR("Mike Bernson & Dave Perks"); MODULE_LICENSE("GPL"); #include <linux/i2c.h> -#include <linux/i2c-dev.h> #define I2C_NAME(s) (s)->name diff --git a/drivers/media/video/saa7110.c b/drivers/media/video/saa7110.c index 36123fa0b63..e18ea268384 100644 --- a/drivers/media/video/saa7110.c +++ b/drivers/media/video/saa7110.c @@ -39,7 +39,6 @@ MODULE_AUTHOR("Pauline Middelink"); MODULE_LICENSE("GPL"); #include <linux/i2c.h> -#include <linux/i2c-dev.h> #define I2C_NAME(s) (s)->name diff --git a/drivers/media/video/saa7111.c b/drivers/media/video/saa7111.c index cb27917c376..f9ba0c943ad 100644 --- a/drivers/media/video/saa7111.c +++ b/drivers/media/video/saa7111.c @@ -52,7 +52,6 @@ MODULE_AUTHOR("Dave Perks"); MODULE_LICENSE("GPL"); #include <linux/i2c.h> -#include <linux/i2c-dev.h> #define I2C_NAME(s) (s)->name diff --git a/drivers/media/video/saa7114.c b/drivers/media/video/saa7114.c index 849b54cd5c2..4a1f841d0c7 100644 --- a/drivers/media/video/saa7114.c +++ b/drivers/media/video/saa7114.c @@ -55,7 +55,6 @@ MODULE_AUTHOR("Maxim Yevtyushkin"); MODULE_LICENSE("GPL"); #include <linux/i2c.h> -#include <linux/i2c-dev.h> #define I2C_NAME(x) (x)->name diff --git a/drivers/media/video/saa711x.c b/drivers/media/video/saa711x.c index 6c161f2f5e2..708fae51e8e 100644 --- a/drivers/media/video/saa711x.c +++ b/drivers/media/video/saa711x.c @@ -45,7 +45,6 @@ MODULE_AUTHOR("Dave Perks, Jose Ignacio Gijon, Joerg Heckenbach, Mark McClelland MODULE_LICENSE("GPL"); #include <linux/i2c.h> -#include <linux/i2c-dev.h> #define I2C_NAME(s) (s)->name diff --git a/drivers/media/video/saa7185.c b/drivers/media/video/saa7185.c index 67dfa718645..9f99ee1303e 100644 --- a/drivers/media/video/saa7185.c +++ b/drivers/media/video/saa7185.c @@ -49,7 +49,6 @@ MODULE_AUTHOR("Dave Perks"); MODULE_LICENSE("GPL"); #include <linux/i2c.h> -#include <linux/i2c-dev.h> #define I2C_NAME(s) (s)->name diff --git 
a/drivers/media/video/vpx3220.c b/drivers/media/video/vpx3220.c index d0a1e72ea8c..4cd57996748 100644 --- a/drivers/media/video/vpx3220.c +++ b/drivers/media/video/vpx3220.c @@ -30,7 +30,6 @@ #include <asm/uaccess.h> #include <linux/i2c.h> -#include <linux/i2c-dev.h> #define I2C_NAME(x) (x)->name diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c index 7d213707008..2671da20a49 100644 --- a/drivers/net/bnx2.c +++ b/drivers/net/bnx2.c @@ -9,13 +9,54 @@ * Written by: Michael Chan (mchan@broadcom.com) */ +#include <linux/config.h> + +#include <linux/module.h> +#include <linux/moduleparam.h> + +#include <linux/kernel.h> +#include <linux/timer.h> +#include <linux/errno.h> +#include <linux/ioport.h> +#include <linux/slab.h> +#include <linux/vmalloc.h> +#include <linux/interrupt.h> +#include <linux/pci.h> +#include <linux/init.h> +#include <linux/netdevice.h> +#include <linux/etherdevice.h> +#include <linux/skbuff.h> +#include <linux/dma-mapping.h> +#include <asm/bitops.h> +#include <asm/io.h> +#include <asm/irq.h> +#include <linux/delay.h> +#include <asm/byteorder.h> +#include <linux/time.h> +#include <linux/ethtool.h> +#include <linux/mii.h> +#ifdef NETIF_F_HW_VLAN_TX +#include <linux/if_vlan.h> +#define BCM_VLAN 1 +#endif +#ifdef NETIF_F_TSO +#include <net/ip.h> +#include <net/tcp.h> +#include <net/checksum.h> +#define BCM_TSO 1 +#endif +#include <linux/workqueue.h> +#include <linux/crc32.h> +#include <linux/prefetch.h> +#include <linux/cache.h> + #include "bnx2.h" #include "bnx2_fw.h" #define DRV_MODULE_NAME "bnx2" #define PFX DRV_MODULE_NAME ": " -#define DRV_MODULE_VERSION "1.4.38" -#define DRV_MODULE_RELDATE "February 10, 2006" +#define DRV_MODULE_VERSION "1.4.39" +#define DRV_MODULE_RELDATE "March 22, 2006" #define RUN_AT(x) (jiffies + (x)) @@ -313,8 +354,6 @@ bnx2_disable_int(struct bnx2 *bp) static void bnx2_enable_int(struct bnx2 *bp) { - u32 val; - REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | BNX2_PCICFG_INT_ACK_CMD_MASK_INT | bp->last_status_idx); @@ -322,8 +361,7 @@ bnx2_enable_int(struct bnx2 *bp) REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | bp->last_status_idx); - val = REG_RD(bp, BNX2_HC_COMMAND); - REG_WR(bp, BNX2_HC_COMMAND, val | BNX2_HC_COMMAND_COAL_NOW); + REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW); } static void @@ -362,15 +400,11 @@ bnx2_free_mem(struct bnx2 *bp) { int i; - if (bp->stats_blk) { - pci_free_consistent(bp->pdev, sizeof(struct statistics_block), - bp->stats_blk, bp->stats_blk_mapping); - bp->stats_blk = NULL; - } if (bp->status_blk) { - pci_free_consistent(bp->pdev, sizeof(struct status_block), + pci_free_consistent(bp->pdev, bp->status_stats_size, bp->status_blk, bp->status_blk_mapping); bp->status_blk = NULL; + bp->stats_blk = NULL; } if (bp->tx_desc_ring) { pci_free_consistent(bp->pdev, @@ -395,14 +429,13 @@ bnx2_free_mem(struct bnx2 *bp) static int bnx2_alloc_mem(struct bnx2 *bp) { - int i; + int i, status_blk_size; - bp->tx_buf_ring = kmalloc(sizeof(struct sw_bd) * TX_DESC_CNT, - GFP_KERNEL); + bp->tx_buf_ring = kzalloc(sizeof(struct sw_bd) * TX_DESC_CNT, + GFP_KERNEL); if (bp->tx_buf_ring == NULL) return -ENOMEM; - memset(bp->tx_buf_ring, 0, sizeof(struct sw_bd) * TX_DESC_CNT); bp->tx_desc_ring = pci_alloc_consistent(bp->pdev, sizeof(struct tx_bd) * TX_DESC_CNT, @@ -428,21 +461,22 @@ bnx2_alloc_mem(struct bnx2 *bp) } - bp->status_blk = pci_alloc_consistent(bp->pdev, - sizeof(struct status_block), + /* Combine status and statistics blocks into one allocation. 
*/ + status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block)); + bp->status_stats_size = status_blk_size + + sizeof(struct statistics_block); + + bp->status_blk = pci_alloc_consistent(bp->pdev, bp->status_stats_size, &bp->status_blk_mapping); if (bp->status_blk == NULL) goto alloc_mem_err; - memset(bp->status_blk, 0, sizeof(struct status_block)); + memset(bp->status_blk, 0, bp->status_stats_size); - bp->stats_blk = pci_alloc_consistent(bp->pdev, - sizeof(struct statistics_block), - &bp->stats_blk_mapping); - if (bp->stats_blk == NULL) - goto alloc_mem_err; + bp->stats_blk = (void *) ((unsigned long) bp->status_blk + + status_blk_size); - memset(bp->stats_blk, 0, sizeof(struct statistics_block)); + bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size; return 0; @@ -1926,6 +1960,13 @@ bnx2_poll(struct net_device *dev, int *budget) spin_lock(&bp->phy_lock); bnx2_phy_int(bp); spin_unlock(&bp->phy_lock); + + /* This is needed to take care of transient status + * during link changes. + */ + REG_WR(bp, BNX2_HC_COMMAND, + bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT); + REG_RD(bp, BNX2_HC_COMMAND); } if (bp->status_blk->status_tx_quick_consumer_index0 != bp->hw_tx_cons) @@ -3307,6 +3348,8 @@ bnx2_init_chip(struct bnx2 *bp) udelay(20); + bp->hc_cmd = REG_RD(bp, BNX2_HC_COMMAND); + return rc; } @@ -3746,7 +3789,6 @@ bnx2_run_loopback(struct bnx2 *bp, int loopback_mode) struct sk_buff *skb, *rx_skb; unsigned char *packet; u16 rx_start_idx, rx_idx; - u32 val; dma_addr_t map; struct tx_bd *txbd; struct sw_bd *rx_buf; @@ -3777,8 +3819,9 @@ bnx2_run_loopback(struct bnx2 *bp, int loopback_mode) map = pci_map_single(bp->pdev, skb->data, pkt_size, PCI_DMA_TODEVICE); - val = REG_RD(bp, BNX2_HC_COMMAND); - REG_WR(bp, BNX2_HC_COMMAND, val | BNX2_HC_COMMAND_COAL_NOW_WO_INT); + REG_WR(bp, BNX2_HC_COMMAND, + bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT); + REG_RD(bp, BNX2_HC_COMMAND); udelay(5); @@ -3802,8 +3845,9 @@ bnx2_run_loopback(struct bnx2 *bp, int loopback_mode) udelay(100); - val = REG_RD(bp, BNX2_HC_COMMAND); - REG_WR(bp, BNX2_HC_COMMAND, val | BNX2_HC_COMMAND_COAL_NOW_WO_INT); + REG_WR(bp, BNX2_HC_COMMAND, + bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT); + REG_RD(bp, BNX2_HC_COMMAND); udelay(5); @@ -3939,7 +3983,6 @@ static int bnx2_test_intr(struct bnx2 *bp) { int i; - u32 val; u16 status_idx; if (!netif_running(bp->dev)) @@ -3948,8 +3991,7 @@ bnx2_test_intr(struct bnx2 *bp) status_idx = REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff; /* This register is not touched during run-time. 
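Several bnx2 hunks in this range replace a read-modify-write of BNX2_HC_COMMAND with a write of the value cached in bp->hc_cmd at init time, and in most of them the write is immediately followed by a read of the same register. The dummy read forces the posted MMIO write out to the device before the udelay() that follows, so the "coalesce now" request actually reaches the chip in time. A hedged sketch of the idiom with placeholder names and offsets (CMD_REG and ONESHOT are not the real values):

#include <linux/io.h>

#define CMD_REG		0x130		/* placeholder register offset */
#define ONESHOT		0x00010000	/* placeholder "act now" bit   */

static inline void kick_command(void __iomem *regs, u32 cached_cmd)
{
	writel(cached_cmd | ONESHOT, regs + CMD_REG);
	readl(regs + CMD_REG);		/* flush the posted write */
}

Caching the command register once, as the bnx2_init_chip() hunk does with bp->hc_cmd, also saves an MMIO read on every invocation of the hot paths.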
*/ - val = REG_RD(bp, BNX2_HC_COMMAND); - REG_WR(bp, BNX2_HC_COMMAND, val | BNX2_HC_COMMAND_COAL_NOW); + REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW); REG_RD(bp, BNX2_HC_COMMAND); for (i = 0; i < 10; i++) { diff --git a/drivers/net/bnx2.h b/drivers/net/bnx2.h index fd4b7f2eb47..b87925f6a22 100644 --- a/drivers/net/bnx2.h +++ b/drivers/net/bnx2.h @@ -13,46 +13,6 @@ #ifndef BNX2_H #define BNX2_H -#include <linux/config.h> - -#include <linux/module.h> -#include <linux/moduleparam.h> - -#include <linux/kernel.h> -#include <linux/timer.h> -#include <linux/errno.h> -#include <linux/ioport.h> -#include <linux/slab.h> -#include <linux/vmalloc.h> -#include <linux/interrupt.h> -#include <linux/pci.h> -#include <linux/init.h> -#include <linux/netdevice.h> -#include <linux/etherdevice.h> -#include <linux/skbuff.h> -#include <linux/dma-mapping.h> -#include <asm/bitops.h> -#include <asm/io.h> -#include <asm/irq.h> -#include <linux/delay.h> -#include <asm/byteorder.h> -#include <linux/time.h> -#include <linux/ethtool.h> -#include <linux/mii.h> -#ifdef NETIF_F_HW_VLAN_TX -#include <linux/if_vlan.h> -#define BCM_VLAN 1 -#endif -#ifdef NETIF_F_TSO -#include <net/ip.h> -#include <net/tcp.h> -#include <net/checksum.h> -#define BCM_TSO 1 -#endif -#include <linux/workqueue.h> -#include <linux/crc32.h> -#include <linux/prefetch.h> - /* Hardware data structures and register definitions automatically * generated from RTL code. Do not modify. */ @@ -3917,15 +3877,17 @@ struct bnx2 { #define USING_MSI_FLAG 0x20 #define ASF_ENABLE_FLAG 0x40 - struct tx_bd *tx_desc_ring; - struct sw_bd *tx_buf_ring; - u32 tx_prod_bseq; - u16 tx_prod; - u16 tx_cons; - int tx_ring_size; + /* Put tx producer and consumer fields in separate cache lines. */ - u16 hw_tx_cons; - u16 hw_rx_cons; + u32 tx_prod_bseq __attribute__((aligned(L1_CACHE_BYTES))); + u16 tx_prod; + + struct tx_bd *tx_desc_ring; + struct sw_bd *tx_buf_ring; + int tx_ring_size; + + u16 tx_cons __attribute__((aligned(L1_CACHE_BYTES))); + u16 hw_tx_cons; #ifdef BCM_VLAN struct vlan_group *vlgrp; @@ -3939,6 +3901,7 @@ struct bnx2 { u32 rx_prod_bseq; u16 rx_prod; u16 rx_cons; + u16 hw_rx_cons; u32 rx_csum; @@ -4038,6 +4001,7 @@ struct bnx2 { struct statistics_block *stats_blk; dma_addr_t stats_blk_mapping; + u32 hc_cmd; u32 rx_mode; u16 req_line_speed; @@ -4082,6 +4046,8 @@ struct bnx2 { struct flash_spec *flash_info; u32 flash_size; + + int status_stats_size; }; static u32 bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset); diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c index 690a1aae0b3..0c13795dca3 100644 --- a/drivers/net/loopback.c +++ b/drivers/net/loopback.c @@ -172,11 +172,9 @@ static struct net_device_stats *get_stats(struct net_device *dev) memset(stats, 0, sizeof(struct net_device_stats)); - for (i=0; i < NR_CPUS; i++) { + for_each_cpu(i) { struct net_device_stats *lb_stats; - if (!cpu_possible(i)) - continue; lb_stats = &per_cpu(loopback_stats, i); stats->rx_bytes += lb_stats->rx_bytes; stats->tx_bytes += lb_stats->tx_bytes; diff --git a/drivers/net/ppp_generic.c b/drivers/net/ppp_generic.c index f608c12e3e8..b2073fce821 100644 --- a/drivers/net/ppp_generic.c +++ b/drivers/net/ppp_generic.c @@ -46,6 +46,7 @@ #include <linux/rwsem.h> #include <linux/stddef.h> #include <linux/device.h> +#include <linux/mutex.h> #include <net/slhc_vj.h> #include <asm/atomic.h> @@ -198,11 +199,11 @@ static unsigned int cardmap_find_first_free(struct cardmap *map); static void cardmap_destroy(struct cardmap **map); /* - * all_ppp_sem protects the 
all_ppp_units mapping. + * all_ppp_mutex protects the all_ppp_units mapping. * It also ensures that finding a ppp unit in the all_ppp_units map * and updating its file.refcnt field is atomic. */ -static DECLARE_MUTEX(all_ppp_sem); +static DEFINE_MUTEX(all_ppp_mutex); static struct cardmap *all_ppp_units; static atomic_t ppp_unit_count = ATOMIC_INIT(0); @@ -804,7 +805,7 @@ static int ppp_unattached_ioctl(struct ppp_file *pf, struct file *file, /* Attach to an existing ppp unit */ if (get_user(unit, p)) break; - down(&all_ppp_sem); + mutex_lock(&all_ppp_mutex); err = -ENXIO; ppp = ppp_find_unit(unit); if (ppp != 0) { @@ -812,7 +813,7 @@ static int ppp_unattached_ioctl(struct ppp_file *pf, struct file *file, file->private_data = &ppp->file; err = 0; } - up(&all_ppp_sem); + mutex_unlock(&all_ppp_mutex); break; case PPPIOCATTCHAN: @@ -2446,7 +2447,7 @@ ppp_create_interface(int unit, int *retp) dev->do_ioctl = ppp_net_ioctl; ret = -EEXIST; - down(&all_ppp_sem); + mutex_lock(&all_ppp_mutex); if (unit < 0) unit = cardmap_find_first_free(all_ppp_units); else if (cardmap_get(all_ppp_units, unit) != NULL) @@ -2465,12 +2466,12 @@ ppp_create_interface(int unit, int *retp) atomic_inc(&ppp_unit_count); cardmap_set(&all_ppp_units, unit, ppp); - up(&all_ppp_sem); + mutex_unlock(&all_ppp_mutex); *retp = 0; return ppp; out2: - up(&all_ppp_sem); + mutex_unlock(&all_ppp_mutex); free_netdev(dev); out1: kfree(ppp); @@ -2500,7 +2501,7 @@ static void ppp_shutdown_interface(struct ppp *ppp) { struct net_device *dev; - down(&all_ppp_sem); + mutex_lock(&all_ppp_mutex); ppp_lock(ppp); dev = ppp->dev; ppp->dev = NULL; @@ -2514,7 +2515,7 @@ static void ppp_shutdown_interface(struct ppp *ppp) ppp->file.dead = 1; ppp->owner = NULL; wake_up_interruptible(&ppp->file.rwait); - up(&all_ppp_sem); + mutex_unlock(&all_ppp_mutex); } /* @@ -2556,7 +2557,7 @@ static void ppp_destroy_interface(struct ppp *ppp) /* * Locate an existing ppp unit. - * The caller should have locked the all_ppp_sem. + * The caller should have locked the all_ppp_mutex. 
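The ppp_generic.c changes above are a straight semaphore-to-mutex conversion: the binary semaphore all_ppp_sem becomes all_ppp_mutex, <linux/mutex.h> is included, and every down()/up() pair becomes mutex_lock()/mutex_unlock(). A minimal sketch of the pattern, with an invented counter standing in for the protected state:

#include <linux/mutex.h>

static DEFINE_MUTEX(example_mutex);	/* was: static DECLARE_MUTEX(example_sem); */
static int example_units;		/* illustrative state guarded by the mutex */

static int example_add_unit(void)
{
	int n;

	mutex_lock(&example_mutex);	/* was: down(&example_sem); */
	n = ++example_units;
	mutex_unlock(&example_mutex);	/* was: up(&example_sem);   */
	return n;
}

The exclusion semantics are unchanged; mutexes simply enforce stricter ownership rules and have better debugging support, which is why sleeping-lock semaphores used as mutexes were being converted throughout the tree at this time.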
*/ static struct ppp * ppp_find_unit(int unit) @@ -2601,7 +2602,7 @@ ppp_connect_channel(struct channel *pch, int unit) int ret = -ENXIO; int hdrlen; - down(&all_ppp_sem); + mutex_lock(&all_ppp_mutex); ppp = ppp_find_unit(unit); if (ppp == 0) goto out; @@ -2626,7 +2627,7 @@ ppp_connect_channel(struct channel *pch, int unit) outl: write_unlock_bh(&pch->upl); out: - up(&all_ppp_sem); + mutex_unlock(&all_ppp_mutex); return ret; } diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c index 88829eb9568..b5473325bff 100644 --- a/drivers/net/tg3.c +++ b/drivers/net/tg3.c @@ -69,8 +69,8 @@ #define DRV_MODULE_NAME "tg3" #define PFX DRV_MODULE_NAME ": " -#define DRV_MODULE_VERSION "3.53" -#define DRV_MODULE_RELDATE "Mar 22, 2006" +#define DRV_MODULE_VERSION "3.54" +#define DRV_MODULE_RELDATE "Mar 23, 2006" #define TG3_DEF_MAC_MODE 0 #define TG3_DEF_RX_MODE 0 @@ -225,6 +225,10 @@ static struct pci_device_id tg3_pci_tbl[] = { PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, + { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, + { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M, @@ -4557,6 +4561,7 @@ static int tg3_chip_reset(struct tg3 *tp) } if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 || GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787) tw32(GRC_FASTBOOT_PC, 0); @@ -6152,6 +6157,9 @@ static int tg3_reset_hw(struct tg3 *tp) gpio_mask |= GRC_LCLCTRL_GPIO_OE3 | GRC_LCLCTRL_GPIO_OUTPUT3; + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755) + gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL; + tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask; /* GPIO1 must be driven high for eeprom write protect */ @@ -6191,7 +6199,8 @@ static int tg3_reset_hw(struct tg3 *tp) } /* Enable host coalescing bug fix */ - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787) + if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755) || + (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)) val |= (1 << 29); tw32_f(WDMAC_MODE, val); @@ -6249,6 +6258,9 @@ static int tg3_reset_hw(struct tg3 *tp) udelay(100); tp->rx_mode = RX_MODE_ENABLE; + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755) + tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE; + tw32_f(MAC_RX_MODE, tp->rx_mode); udelay(10); @@ -7907,7 +7919,8 @@ static int tg3_set_tx_csum(struct net_device *dev, u32 data) return 0; } - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787) + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787) ethtool_op_set_tx_hw_csum(dev, data); else ethtool_op_set_tx_csum(dev, data); @@ -8332,7 +8345,8 @@ static int tg3_test_memory(struct tg3 *tp) int i; if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) { - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787) + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787) mem_tbl = mem_tbl_5755; else mem_tbl = mem_tbl_5705; @@ -8924,6 +8938,47 @@ static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp) } } +static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp) +{ + u32 nvcfg1; + + nvcfg1 = tr32(NVRAM_CFG1); + + /* NVRAM protection for TPM */ + if (nvcfg1 & (1 << 27)) + tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM; + + 
switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) { + case FLASH_5755VENDOR_ATMEL_EEPROM_64KHZ: + case FLASH_5755VENDOR_ATMEL_EEPROM_376KHZ: + tp->nvram_jedecnum = JEDEC_ATMEL; + tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED; + tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE; + + nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS; + tw32(NVRAM_CFG1, nvcfg1); + break; + case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED: + case FLASH_5755VENDOR_ATMEL_FLASH_1: + case FLASH_5755VENDOR_ATMEL_FLASH_2: + case FLASH_5755VENDOR_ATMEL_FLASH_3: + case FLASH_5755VENDOR_ATMEL_FLASH_4: + tp->nvram_jedecnum = JEDEC_ATMEL; + tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED; + tp->tg3_flags2 |= TG3_FLG2_FLASH; + tp->nvram_pagesize = 264; + break; + case FLASH_5752VENDOR_ST_M45PE10: + case FLASH_5752VENDOR_ST_M45PE20: + case FLASH_5752VENDOR_ST_M45PE40: + tp->nvram_jedecnum = JEDEC_ST; + tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED; + tp->tg3_flags2 |= TG3_FLG2_FLASH; + tp->nvram_pagesize = 256; + break; + } +} + static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp) { u32 nvcfg1; @@ -8997,6 +9052,8 @@ static void __devinit tg3_nvram_init(struct tg3 *tp) if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752) tg3_get_5752_nvram_info(tp); + else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755) + tg3_get_5755_nvram_info(tp); else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787) tg3_get_5787_nvram_info(tp); else @@ -9310,6 +9367,7 @@ static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len, nvram_cmd |= NVRAM_CMD_LAST; if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752) && + (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5755) && (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5787) && (tp->nvram_jedecnum == JEDEC_ST) && (nvram_cmd & NVRAM_CMD_FIRST)) { @@ -10044,6 +10102,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp) if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 || GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 || GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 || (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) tp->tg3_flags2 |= TG3_FLG2_5750_PLUS; @@ -10053,7 +10112,8 @@ static int __devinit tg3_get_invariants(struct tg3 *tp) tp->tg3_flags2 |= TG3_FLG2_5705_PLUS; if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) { - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787) { + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787) { tp->tg3_flags2 |= TG3_FLG2_HW_TSO_2; tp->tg3_flags2 |= TG3_FLG2_1SHOT_MSI; } else @@ -10063,6 +10123,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp) if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 && GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750 && GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 && + GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5755 && GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5787) tp->tg3_flags2 |= TG3_FLG2_JUMBO_CAPABLE; @@ -10219,6 +10280,9 @@ static int __devinit tg3_get_invariants(struct tg3 *tp) else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752) tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3; + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755) + tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL; + /* Force the chip into D0. 
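Adding 5755 support means touching a long list of GET_ASIC_REV() comparisons, as the tg3 hunks above and below show; in most of them the 5755 is treated exactly like the 5787. The driver does not actually do this, but a small predicate could express that family in one place; a sketch, assuming only the GET_ASIC_REV() macro and ASIC_REV_* constants already defined in tg3.h:

/* Illustrative helper, not part of tg3.c: true for the chips that share
 * the 5787-style behavior added in the hunks above. */
static inline int tg3_is_5755_class(u32 chip_rev_id)
{
	u32 asic_rev = GET_ASIC_REV(chip_rev_id);

	return asic_rev == ASIC_REV_5755 || asic_rev == ASIC_REV_5787;
}

Call sites such as the WDMAC "host coalescing bug fix" test or the tx-checksum setup would then read if (tg3_is_5755_class(tp->pci_chip_rev_id)), keeping future family additions to one line.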
*/ err = tg3_set_power_state(tp, PCI_D0); if (err) { @@ -10274,6 +10338,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp) tp->tg3_flags2 |= TG3_FLG2_PHY_5704_A0_BUG; if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) && + (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5755) && (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5787)) tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG; @@ -10413,7 +10478,8 @@ static int __devinit tg3_get_invariants(struct tg3 *tp) /* All chips before 5787 can get confused if TX buffers * straddle the 4GB address boundary in some cases. */ - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787) + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787) tp->dev->hard_start_xmit = tg3_start_xmit; else tp->dev->hard_start_xmit = tg3_start_xmit_dma_bug; @@ -11002,6 +11068,7 @@ static char * __devinit tg3_phy_string(struct tg3 *tp) case PHY_ID_BCM5752: return "5752"; case PHY_ID_BCM5714: return "5714"; case PHY_ID_BCM5780: return "5780"; + case PHY_ID_BCM5755: return "5755"; case PHY_ID_BCM5787: return "5787"; case PHY_ID_BCM8002: return "8002/serdes"; case 0: return "serdes"; @@ -11350,7 +11417,8 @@ static int __devinit tg3_init_one(struct pci_dev *pdev, * checksumming. */ if ((tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) == 0) { - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787) + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787) dev->features |= NETIF_F_HW_CSUM; else dev->features |= NETIF_F_IP_CSUM; diff --git a/drivers/net/tg3.h b/drivers/net/tg3.h index baa34c4721d..c43cc326420 100644 --- a/drivers/net/tg3.h +++ b/drivers/net/tg3.h @@ -138,6 +138,7 @@ #define ASIC_REV_5752 0x06 #define ASIC_REV_5780 0x08 #define ASIC_REV_5714 0x09 +#define ASIC_REV_5755 0x0a #define ASIC_REV_5787 0x0b #define GET_CHIP_REV(CHIP_REV_ID) ((CHIP_REV_ID) >> 8) #define CHIPREV_5700_AX 0x70 @@ -456,6 +457,7 @@ #define RX_MODE_PROMISC 0x00000100 #define RX_MODE_NO_CRC_CHECK 0x00000200 #define RX_MODE_KEEP_VLAN_TAG 0x00000400 +#define RX_MODE_IPV6_CSUM_ENABLE 0x01000000 #define MAC_RX_STATUS 0x0000046c #define RX_STATUS_REMOTE_TX_XOFFED 0x00000001 #define RX_STATUS_XOFF_RCVD 0x00000002 @@ -1340,6 +1342,7 @@ #define GRC_LCLCTRL_CLEARINT 0x00000002 #define GRC_LCLCTRL_SETINT 0x00000004 #define GRC_LCLCTRL_INT_ON_ATTN 0x00000008 +#define GRC_LCLCTRL_GPIO_UART_SEL 0x00000010 /* 5755 only */ #define GRC_LCLCTRL_USE_SIG_DETECT 0x00000010 /* 5714/5780 only */ #define GRC_LCLCTRL_USE_EXT_SIG_DETECT 0x00000020 /* 5714/5780 only */ #define GRC_LCLCTRL_GPIO_INPUT3 0x00000020 @@ -1441,6 +1444,9 @@ #define FLASH_5755VENDOR_ATMEL_FLASH_1 0x03400001 #define FLASH_5755VENDOR_ATMEL_FLASH_2 0x03400002 #define FLASH_5755VENDOR_ATMEL_FLASH_3 0x03400000 +#define FLASH_5755VENDOR_ATMEL_FLASH_4 0x00000003 +#define FLASH_5755VENDOR_ATMEL_EEPROM_64KHZ 0x03c00003 +#define FLASH_5755VENDOR_ATMEL_EEPROM_376KHZ 0x03c00002 #define FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ 0x03000003 #define FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ 0x03000002 #define FLASH_5787VENDOR_MICRO_EEPROM_64KHZ 0x03000000 @@ -2259,6 +2265,7 @@ struct tg3 { #define PHY_ID_BCM5752 0x60008100 #define PHY_ID_BCM5714 0x60008340 #define PHY_ID_BCM5780 0x60008350 +#define PHY_ID_BCM5755 0xbc050cc0 #define PHY_ID_BCM5787 0xbc050ce0 #define PHY_ID_BCM8002 0x60010140 #define PHY_ID_INVALID 0xffffffff @@ -2286,7 +2293,7 @@ struct tg3 { (X) == PHY_ID_BCM5705 || (X) == PHY_ID_BCM5750 || \ (X) == PHY_ID_BCM5752 || (X) == PHY_ID_BCM5714 || \ (X) == 
PHY_ID_BCM5780 || (X) == PHY_ID_BCM5787 || \ - (X) == PHY_ID_BCM8002) + (X) == PHY_ID_BCM5755 || (X) == PHY_ID_BCM8002) struct tg3_hw_stats *hw_stats; dma_addr_t stats_mapping; diff --git a/drivers/oprofile/cpu_buffer.c b/drivers/oprofile/cpu_buffer.c index 78193e4bbdb..330d3869b41 100644 --- a/drivers/oprofile/cpu_buffer.c +++ b/drivers/oprofile/cpu_buffer.c @@ -38,9 +38,8 @@ void free_cpu_buffers(void) { int i; - for_each_online_cpu(i) { + for_each_online_cpu(i) vfree(cpu_buffer[i].buffer); - } } int alloc_cpu_buffers(void) diff --git a/drivers/pci/Kconfig b/drivers/pci/Kconfig index f187fd8aeed..4d762fc4878 100644 --- a/drivers/pci/Kconfig +++ b/drivers/pci/Kconfig @@ -11,24 +11,11 @@ config PCI_MSI generate an interrupt using an inbound Memory Write on its PCI bus instead of asserting a device IRQ pin. - If you don't know what to do here, say N. - -config PCI_LEGACY_PROC - bool "Legacy /proc/pci interface" - depends on PCI - ---help--- - This feature enables a procfs file -- /proc/pci -- that provides a - summary of PCI devices in the system. - - This feature has been deprecated as of v2.5.53, in favor of using the - tool lspci(8). This feature may be removed at a future date. + Use of PCI MSI interrupts can be disabled at kernel boot time + by using the 'pci=nomsi' option. This disables MSI for the + entire system. - lspci can provide the same data, as well as much more. lspci is a part of - the pci-utils package, which should be installed by your distribution. - See <file:Documentation/Changes> for information on where to get the latest - version. - - When in doubt, say N. + If you don't know what to do here, say N. config PCI_DEBUG bool "PCI Debugging" diff --git a/drivers/pci/hotplug/Makefile b/drivers/pci/hotplug/Makefile index 3c71e3077ff..421cfffb175 100644 --- a/drivers/pci/hotplug/Makefile +++ b/drivers/pci/hotplug/Makefile @@ -22,6 +22,9 @@ ifdef CONFIG_HOTPLUG_PCI_CPCI pci_hotplug-objs += cpci_hotplug_core.o \ cpci_hotplug_pci.o endif +ifdef CONFIG_ACPI +pci_hotplug-objs += acpi_pcihp.o +endif cpqphp-objs := cpqphp_core.o \ cpqphp_ctrl.o \ @@ -37,7 +40,8 @@ ibmphp-objs := ibmphp_core.o \ ibmphp_hpc.o acpiphp-objs := acpiphp_core.o \ - acpiphp_glue.o + acpiphp_glue.o \ + acpiphp_dock.o rpaphp-objs := rpaphp_core.o \ rpaphp_pci.o \ @@ -50,23 +54,9 @@ pciehp-objs := pciehp_core.o \ pciehp_ctrl.o \ pciehp_pci.o \ pciehp_hpc.o -ifdef CONFIG_ACPI - pciehp-objs += pciehprm_acpi.o -else - pciehp-objs += pciehprm_nonacpi.o -endif shpchp-objs := shpchp_core.o \ shpchp_ctrl.o \ shpchp_pci.o \ shpchp_sysfs.o \ shpchp_hpc.o -ifdef CONFIG_ACPI - shpchp-objs += shpchprm_acpi.o -else - ifdef CONFIG_HOTPLUG_PCI_SHPC_PHPRM_LEGACY - shpchp-objs += shpchprm_legacy.o - else - shpchp-objs += shpchprm_nonacpi.o - endif -endif diff --git a/drivers/pci/hotplug/shpchprm_acpi.c b/drivers/pci/hotplug/acpi_pcihp.c index 17145e52223..39af9c325f3 100644 --- a/drivers/pci/hotplug/shpchprm_acpi.c +++ b/drivers/pci/hotplug/acpi_pcihp.c @@ -1,7 +1,7 @@ /* - * SHPCHPRM ACPI: PHP Resource Manager for ACPI platform + * Common ACPI functions for hot plug platforms * - * Copyright (C) 2003-2004 Intel Corporation + * Copyright (C) 2006 Intel Corporation * * All rights reserved. 
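Two earlier hunks touch per-CPU iteration: loopback's get_stats() replaces an open-coded 0..NR_CPUS loop plus cpu_possible() test with for_each_cpu(), which walks only the CPUs that actually exist, and the oprofile free_cpu_buffers() hunk uses the same style of iterator, merely dropping redundant braces. A small sketch of aggregating a per-CPU counter with these helpers; the counter name is made up:

#include <linux/percpu.h>
#include <linux/cpumask.h>

static DEFINE_PER_CPU(unsigned long, example_rx_packets);	/* illustrative */

static unsigned long example_total_rx(void)
{
	unsigned long total = 0;
	int i;

	/* loopback's hunk uses for_each_cpu() here, which in this tree walks
	 * every possible CPU so counters of currently offline CPUs are kept. */
	for_each_online_cpu(i)
		total += per_cpu(example_rx_packets, i);
	return total;
}

The helper macros also keep the code correct on configurations with sparse CPU numbering, where a plain 0..NR_CPUS loop would index holes in the map.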
* @@ -31,26 +31,12 @@ #include <acpi/acpi.h> #include <acpi/acpi_bus.h> #include <acpi/actypes.h> -#include "shpchp.h" +#include "pci_hotplug.h" #define METHOD_NAME__SUN "_SUN" #define METHOD_NAME__HPP "_HPP" #define METHOD_NAME_OSHP "OSHP" -static u8 * acpi_path_name( acpi_handle handle) -{ - acpi_status status; - static u8 path_name[ACPI_PATHNAME_MAX]; - struct acpi_buffer ret_buf = { ACPI_PATHNAME_MAX, path_name }; - - memset(path_name, 0, sizeof (path_name)); - status = acpi_get_name(handle, ACPI_FULL_PATHNAME, &ret_buf); - - if (ACPI_FAILURE(status)) - return NULL; - else - return path_name; -} static acpi_status acpi_run_hpp(acpi_handle handle, struct hotplug_params *hpp) @@ -58,18 +44,21 @@ acpi_run_hpp(acpi_handle handle, struct hotplug_params *hpp) acpi_status status; u8 nui[4]; struct acpi_buffer ret_buf = { 0, NULL}; + struct acpi_buffer string = { ACPI_ALLOCATE_BUFFER, NULL }; union acpi_object *ext_obj, *package; - u8 *path_name = acpi_path_name(handle); int i, len = 0; + acpi_get_name(handle, ACPI_FULL_PATHNAME, &string); + /* get _hpp */ status = acpi_evaluate_object(handle, METHOD_NAME__HPP, NULL, &ret_buf); switch (status) { case AE_BUFFER_OVERFLOW: ret_buf.pointer = kmalloc (ret_buf.length, GFP_KERNEL); if (!ret_buf.pointer) { - err ("%s:%s alloc for _HPP fail\n", __FUNCTION__, - path_name); + printk(KERN_ERR "%s:%s alloc for _HPP fail\n", + __FUNCTION__, (char *)string.pointer); + acpi_os_free(string.pointer); return AE_NO_MEMORY; } status = acpi_evaluate_object(handle, METHOD_NAME__HPP, @@ -78,16 +67,17 @@ acpi_run_hpp(acpi_handle handle, struct hotplug_params *hpp) break; default: if (ACPI_FAILURE(status)) { - dbg("%s:%s _HPP fail=0x%x\n", __FUNCTION__, - path_name, status); + pr_debug("%s:%s _HPP fail=0x%x\n", __FUNCTION__, + (char *)string.pointer, status); + acpi_os_free(string.pointer); return status; } } ext_obj = (union acpi_object *) ret_buf.pointer; if (ext_obj->type != ACPI_TYPE_PACKAGE) { - err ("%s:%s _HPP obj not a package\n", __FUNCTION__, - path_name); + printk(KERN_ERR "%s:%s _HPP obj not a package\n", __FUNCTION__, + (char *)string.pointer); status = AE_ERROR; goto free_and_return; } @@ -101,8 +91,8 @@ acpi_run_hpp(acpi_handle handle, struct hotplug_params *hpp) nui[i] = (u8)ext_obj->integer.value; break; default: - err ("%s:%s _HPP obj type incorrect\n", __FUNCTION__, - path_name); + printk(KERN_ERR "%s:%s _HPP obj type incorrect\n", + __FUNCTION__, (char *)string.pointer); status = AE_ERROR; goto free_and_return; } @@ -113,54 +103,52 @@ acpi_run_hpp(acpi_handle handle, struct hotplug_params *hpp) hpp->enable_serr = nui[2]; hpp->enable_perr = nui[3]; - dbg(" _HPP: cache_line_size=0x%x\n", hpp->cache_line_size); - dbg(" _HPP: latency timer =0x%x\n", hpp->latency_timer); - dbg(" _HPP: enable SERR =0x%x\n", hpp->enable_serr); - dbg(" _HPP: enable PERR =0x%x\n", hpp->enable_perr); + pr_debug(" _HPP: cache_line_size=0x%x\n", hpp->cache_line_size); + pr_debug(" _HPP: latency timer =0x%x\n", hpp->latency_timer); + pr_debug(" _HPP: enable SERR =0x%x\n", hpp->enable_serr); + pr_debug(" _HPP: enable PERR =0x%x\n", hpp->enable_perr); free_and_return: - kfree(ret_buf.pointer); + acpi_os_free(string.pointer); + acpi_os_free(ret_buf.pointer); return status; } -static void acpi_run_oshp(acpi_handle handle) + + +/* acpi_run_oshp - get control of hotplug from the firmware + * + * @handle - the handle of the hotplug controller. 
+ */ +acpi_status acpi_run_oshp(acpi_handle handle) { acpi_status status; - u8 *path_name = acpi_path_name(handle); + struct acpi_buffer string = { ACPI_ALLOCATE_BUFFER, NULL }; + + acpi_get_name(handle, ACPI_FULL_PATHNAME, &string); /* run OSHP */ status = acpi_evaluate_object(handle, METHOD_NAME_OSHP, NULL, NULL); - if (ACPI_FAILURE(status)) { - err("%s:%s OSHP fails=0x%x\n", __FUNCTION__, path_name, - status); - } else { - dbg("%s:%s OSHP passes\n", __FUNCTION__, path_name); - } -} - -int shpchprm_get_physical_slot_number(struct controller *ctrl, u32 *sun, u8 busnum, u8 devnum) -{ - int offset = devnum - ctrl->slot_device_offset; + if (ACPI_FAILURE(status)) + printk(KERN_ERR "%s:%s OSHP fails=0x%x\n", __FUNCTION__, + (char *)string.pointer, status); + else + pr_debug("%s:%s OSHP passes\n", __FUNCTION__, + (char *)string.pointer); - dbg("%s: ctrl->slot_num_inc %d, offset %d\n", __FUNCTION__, ctrl->slot_num_inc, offset); - *sun = (u8) (ctrl->first_slot + ctrl->slot_num_inc *offset); - return 0; + acpi_os_free(string.pointer); + return status; } +EXPORT_SYMBOL_GPL(acpi_run_oshp); + -void get_hp_hw_control_from_firmware(struct pci_dev *dev) -{ - /* - * OSHP is an optional ACPI firmware control method. If present, - * we need to run it to inform BIOS that we will control SHPC - * hardware from now on. - */ - acpi_handle handle = DEVICE_ACPI_HANDLE(&(dev->dev)); - if (!handle) - return; - acpi_run_oshp(handle); -} -void get_hp_params_from_firmware(struct pci_dev *dev, +/* acpi_get_hp_params_from_firmware + * + * @dev - the pci_dev of the newly added device + * @hpp - allocated by the caller + */ +acpi_status acpi_get_hp_params_from_firmware(struct pci_dev *dev, struct hotplug_params *hpp) { acpi_status status = AE_NOT_FOUND; @@ -182,5 +170,42 @@ void get_hp_params_from_firmware(struct pci_dev *dev, /* Check if a parent object supports _HPP */ pdev = pdev->bus->parent->self; } + return status; } +EXPORT_SYMBOL_GPL(acpi_get_hp_params_from_firmware); + +/* acpi_root_bridge - check to see if this acpi object is a root bridge + * + * @handle - the acpi object in question. + */ +int acpi_root_bridge(acpi_handle handle) +{ + acpi_status status; + struct acpi_device_info *info; + struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL}; + int i; + + status = acpi_get_object_info(handle, &buffer); + if (ACPI_SUCCESS(status)) { + info = buffer.pointer; + if ((info->valid & ACPI_VALID_HID) && + !strcmp(PCI_ROOT_HID_STRING, + info->hardware_id.value)) { + acpi_os_free(buffer.pointer); + return 1; + } + if (info->valid & ACPI_VALID_CID) { + for (i=0; i < info->compatibility_id.count; i++) { + if (!strcmp(PCI_ROOT_HID_STRING, + info->compatibility_id.id[i].value)) { + acpi_os_free(buffer.pointer); + return 1; + } + } + } + acpi_os_free(buffer.pointer); + } + return 0; +} +EXPORT_SYMBOL_GPL(acpi_root_bridge); diff --git a/drivers/pci/hotplug/acpiphp.h b/drivers/pci/hotplug/acpiphp.h index 293603e1b7c..467ac70a46f 100644 --- a/drivers/pci/hotplug/acpiphp.h +++ b/drivers/pci/hotplug/acpiphp.h @@ -37,6 +37,7 @@ #include <linux/acpi.h> #include <linux/kobject.h> /* for KOBJ_NAME_LEN */ +#include <linux/mutex.h> #include "pci_hotplug.h" #define dbg(format, arg...) 
\ @@ -59,26 +60,10 @@ struct acpiphp_slot; * struct slot - slot information for each *physical* slot */ struct slot { - u8 number; struct hotplug_slot *hotplug_slot; - struct list_head slot_list; - struct acpiphp_slot *acpi_slot; }; -/** - * struct hpp_param - ACPI 2.0 _HPP Hot Plug Parameters - * @cache_line_size in DWORD - * @latency_timer in PCI clock - * @enable_SERR 0 or 1 - * @enable_PERR 0 or 1 - */ -struct hpp_param { - u8 cache_line_size; - u8 latency_timer; - u8 enable_SERR; - u8 enable_PERR; -}; /** @@ -102,7 +87,7 @@ struct acpiphp_bridge { struct pci_dev *pci_dev; /* ACPI 2.0 _HPP parameters */ - struct hpp_param hpp; + struct hotplug_params hpp; spinlock_t res_lock; }; @@ -118,9 +103,9 @@ struct acpiphp_slot { struct acpiphp_bridge *bridge; /* parent */ struct list_head funcs; /* one slot may have different objects (i.e. for each function) */ - struct semaphore crit_sect; + struct slot *slot; + struct mutex crit_sect; - u32 id; /* slot id (serial #) for hotplug core */ u8 device; /* pci device# */ u32 sun; /* ACPI _SUN (slot unique number) */ @@ -160,6 +145,25 @@ struct acpiphp_attention_info struct module *owner; }; + +struct dependent_device { + struct list_head device_list; + struct list_head pci_list; + acpi_handle handle; + struct acpiphp_func *func; +}; + + +struct acpiphp_dock_station { + acpi_handle handle; + u32 last_dock_time; + u32 flags; + struct acpiphp_func *dock_bridge; + struct list_head dependent_devices; + struct list_head pci_dependent_devices; +}; + + /* PCI bus bridge HID */ #define ACPI_PCI_HOST_HID "PNP0A03" @@ -197,19 +201,27 @@ struct acpiphp_attention_info #define FUNC_HAS_PS1 (0x00000020) #define FUNC_HAS_PS2 (0x00000040) #define FUNC_HAS_PS3 (0x00000080) +#define FUNC_HAS_DCK (0x00000100) +#define FUNC_IS_DD (0x00000200) + +/* dock station flags */ +#define DOCK_DOCKING (0x00000001) +#define DOCK_HAS_BRIDGE (0x00000002) /* function prototypes */ /* acpiphp_core.c */ extern int acpiphp_register_attention(struct acpiphp_attention_info*info); extern int acpiphp_unregister_attention(struct acpiphp_attention_info *info); +extern int acpiphp_register_hotplug_slot(struct acpiphp_slot *slot); +extern void acpiphp_unregister_hotplug_slot(struct acpiphp_slot *slot); /* acpiphp_glue.c */ extern int acpiphp_glue_init (void); extern void acpiphp_glue_exit (void); extern int acpiphp_get_num_slots (void); -extern struct acpiphp_slot *get_slot_from_id (int id); typedef int (*acpiphp_callback)(struct acpiphp_slot *slot, void *data); +void handle_hotplug_event_func(acpi_handle, u32, void*); extern int acpiphp_enable_slot (struct acpiphp_slot *slot); extern int acpiphp_disable_slot (struct acpiphp_slot *slot); @@ -219,6 +231,16 @@ extern u8 acpiphp_get_latch_status (struct acpiphp_slot *slot); extern u8 acpiphp_get_adapter_status (struct acpiphp_slot *slot); extern u32 acpiphp_get_address (struct acpiphp_slot *slot); +/* acpiphp_dock.c */ +extern int find_dock_station(void); +extern void remove_dock_station(void); +extern void add_dependent_device(struct dependent_device *new_dd); +extern void add_pci_dependent_device(struct dependent_device *new_dd); +extern struct dependent_device *get_dependent_device(acpi_handle handle); +extern int is_dependent_device(acpi_handle handle); +extern int detect_dependent_devices(acpi_handle *bridge_handle); +extern struct dependent_device *alloc_dependent_device(acpi_handle handle); + /* variables */ extern int acpiphp_debug; diff --git a/drivers/pci/hotplug/acpiphp_core.c b/drivers/pci/hotplug/acpiphp_core.c index 
60c4c38047a..4f1b0da8e47 100644 --- a/drivers/pci/hotplug/acpiphp_core.c +++ b/drivers/pci/hotplug/acpiphp_core.c @@ -44,8 +44,6 @@ #include "pci_hotplug.h" #include "acpiphp.h" -static LIST_HEAD(slot_list); - #define MY_NAME "acpiphp" static int debug; @@ -341,62 +339,53 @@ static void release_slot(struct hotplug_slot *hotplug_slot) kfree(slot); } -/** - * init_slots - initialize 'struct slot' structures for each slot - * - */ -static int __init init_slots(void) +/* callback routine to initialize 'struct slot' for each slot */ +int acpiphp_register_hotplug_slot(struct acpiphp_slot *acpiphp_slot) { struct slot *slot; + struct hotplug_slot *hotplug_slot; + struct hotplug_slot_info *hotplug_slot_info; int retval = -ENOMEM; - int i; - - for (i = 0; i < num_slots; ++i) { - slot = kmalloc(sizeof(struct slot), GFP_KERNEL); - if (!slot) - goto error; - memset(slot, 0, sizeof(struct slot)); - - slot->hotplug_slot = kmalloc(sizeof(struct hotplug_slot), GFP_KERNEL); - if (!slot->hotplug_slot) - goto error_slot; - memset(slot->hotplug_slot, 0, sizeof(struct hotplug_slot)); - - slot->hotplug_slot->info = kmalloc(sizeof(struct hotplug_slot_info), GFP_KERNEL); - if (!slot->hotplug_slot->info) - goto error_hpslot; - memset(slot->hotplug_slot->info, 0, sizeof(struct hotplug_slot_info)); - - slot->hotplug_slot->name = kmalloc(SLOT_NAME_SIZE, GFP_KERNEL); - if (!slot->hotplug_slot->name) - goto error_info; - - slot->number = i; - - slot->hotplug_slot->private = slot; - slot->hotplug_slot->release = &release_slot; - slot->hotplug_slot->ops = &acpi_hotplug_slot_ops; - - slot->acpi_slot = get_slot_from_id(i); - slot->hotplug_slot->info->power_status = acpiphp_get_power_status(slot->acpi_slot); - slot->hotplug_slot->info->attention_status = 0; - slot->hotplug_slot->info->latch_status = acpiphp_get_latch_status(slot->acpi_slot); - slot->hotplug_slot->info->adapter_status = acpiphp_get_adapter_status(slot->acpi_slot); - slot->hotplug_slot->info->max_bus_speed = PCI_SPEED_UNKNOWN; - slot->hotplug_slot->info->cur_bus_speed = PCI_SPEED_UNKNOWN; - - make_slot_name(slot); - - retval = pci_hp_register(slot->hotplug_slot); - if (retval) { - err("pci_hp_register failed with error %d\n", retval); - goto error_name; - } - - /* add slot to our internal list */ - list_add(&slot->slot_list, &slot_list); - info("Slot [%s] registered\n", slot->hotplug_slot->name); - } + + slot = kzalloc(sizeof(*slot), GFP_KERNEL); + if (!slot) + goto error; + + slot->hotplug_slot = kzalloc(sizeof(*hotplug_slot), GFP_KERNEL); + if (!slot->hotplug_slot) + goto error_slot; + + slot->hotplug_slot->info = kzalloc(sizeof(*hotplug_slot_info), + GFP_KERNEL); + if (!slot->hotplug_slot->info) + goto error_hpslot; + + slot->hotplug_slot->name = kzalloc(SLOT_NAME_SIZE, GFP_KERNEL); + if (!slot->hotplug_slot->name) + goto error_info; + + slot->hotplug_slot->private = slot; + slot->hotplug_slot->release = &release_slot; + slot->hotplug_slot->ops = &acpi_hotplug_slot_ops; + + slot->acpi_slot = acpiphp_slot; + slot->hotplug_slot->info->power_status = acpiphp_get_power_status(slot->acpi_slot); + slot->hotplug_slot->info->attention_status = 0; + slot->hotplug_slot->info->latch_status = acpiphp_get_latch_status(slot->acpi_slot); + slot->hotplug_slot->info->adapter_status = acpiphp_get_adapter_status(slot->acpi_slot); + slot->hotplug_slot->info->max_bus_speed = PCI_SPEED_UNKNOWN; + slot->hotplug_slot->info->cur_bus_speed = PCI_SPEED_UNKNOWN; + + acpiphp_slot->slot = slot; + make_slot_name(slot); + + retval = pci_hp_register(slot->hotplug_slot); + if (retval) { + 
err("pci_hp_register failed with error %d\n", retval); + goto error_name; + } + + info("Slot [%s] registered\n", slot->hotplug_slot->name); return 0; error_name: @@ -412,42 +401,51 @@ error: } -static void __exit cleanup_slots (void) +void acpiphp_unregister_hotplug_slot(struct acpiphp_slot *acpiphp_slot) { - struct list_head *tmp, *n; - struct slot *slot; + struct slot *slot = acpiphp_slot->slot; + int retval = 0; - list_for_each_safe (tmp, n, &slot_list) { - /* memory will be freed in release_slot callback */ - slot = list_entry(tmp, struct slot, slot_list); - list_del(&slot->slot_list); - pci_hp_deregister(slot->hotplug_slot); - } + info ("Slot [%s] unregistered\n", slot->hotplug_slot->name); + + retval = pci_hp_deregister(slot->hotplug_slot); + if (retval) + err("pci_hp_deregister failed with error %d\n", retval); } static int __init acpiphp_init(void) { int retval; + int docking_station; info(DRIVER_DESC " version: " DRIVER_VERSION "\n"); acpiphp_debug = debug; + docking_station = find_dock_station(); + /* read all the ACPI info from the system */ retval = init_acpi(); - if (retval) - return retval; - return init_slots(); + /* if we have found a docking station, we should + * go ahead and load even if init_acpi has found + * no slots. This handles the case when the _DCK + * method not defined under the actual dock bridge + */ + if (docking_station) + return 0; + else + return retval; } static void __exit acpiphp_exit(void) { - cleanup_slots(); /* deallocate internal data structures etc. */ acpiphp_glue_exit(); + + remove_dock_station(); } module_init(acpiphp_init); diff --git a/drivers/pci/hotplug/acpiphp_dock.c b/drivers/pci/hotplug/acpiphp_dock.c new file mode 100644 index 00000000000..4f1aaf12831 --- /dev/null +++ b/drivers/pci/hotplug/acpiphp_dock.c @@ -0,0 +1,438 @@ +/* + * ACPI PCI HotPlug dock functions to ACPI CA subsystem + * + * Copyright (C) 2006 Kristen Carlson Accardi (kristen.c.accardi@intel.com) + * Copyright (C) 2006 Intel Corporation + * + * All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or (at + * your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or + * NON INFRINGEMENT. See the GNU General Public License for more + * details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + * + * Send feedback to <kristen.c.accardi@intel.com> + * + */ +#include <linux/init.h> +#include <linux/module.h> + +#include <linux/kernel.h> +#include <linux/pci.h> +#include <linux/smp_lock.h> +#include <linux/mutex.h> + +#include "../pci.h" +#include "pci_hotplug.h" +#include "acpiphp.h" + +static struct acpiphp_dock_station *ds; +#define MY_NAME "acpiphp_dock" + + +int is_dependent_device(acpi_handle handle) +{ + return (get_dependent_device(handle) ? 
1 : 0); +} + + +static acpi_status +find_dependent_device(acpi_handle handle, u32 lvl, void *context, void **rv) +{ + int *count = (int *)context; + + if (is_dependent_device(handle)) { + (*count)++; + return AE_CTRL_TERMINATE; + } else { + return AE_OK; + } +} + + + + +void add_dependent_device(struct dependent_device *new_dd) +{ + list_add_tail(&new_dd->device_list, &ds->dependent_devices); +} + + +void add_pci_dependent_device(struct dependent_device *new_dd) +{ + list_add_tail(&new_dd->pci_list, &ds->pci_dependent_devices); +} + + + +struct dependent_device * get_dependent_device(acpi_handle handle) +{ + struct dependent_device *dd; + + if (!ds) + return NULL; + + list_for_each_entry(dd, &ds->dependent_devices, device_list) { + if (handle == dd->handle) + return dd; + } + return NULL; +} + + + +struct dependent_device *alloc_dependent_device(acpi_handle handle) +{ + struct dependent_device *dd; + + dd = kzalloc(sizeof(*dd), GFP_KERNEL); + if (dd) { + INIT_LIST_HEAD(&dd->pci_list); + INIT_LIST_HEAD(&dd->device_list); + dd->handle = handle; + } + return dd; +} + + + +static int is_dock(acpi_handle handle) +{ + acpi_status status; + acpi_handle tmp; + + status = acpi_get_handle(handle, "_DCK", &tmp); + if (ACPI_FAILURE(status)) { + return 0; + } + return 1; +} + + + +static int dock_present(void) +{ + unsigned long sta; + acpi_status status; + + if (ds) { + status = acpi_evaluate_integer(ds->handle, "_STA", NULL, &sta); + if (ACPI_SUCCESS(status) && sta) + return 1; + } + return 0; +} + + + +static void eject_dock(void) +{ + struct acpi_object_list arg_list; + union acpi_object arg; + + arg_list.count = 1; + arg_list.pointer = &arg; + arg.type = ACPI_TYPE_INTEGER; + arg.integer.value = 1; + + if (ACPI_FAILURE(acpi_evaluate_object(ds->handle, "_EJ0", + &arg_list, NULL)) || dock_present()) + warn("%s: failed to eject dock!\n", __FUNCTION__); + + return; +} + + + + +static acpi_status handle_dock(int dock) +{ + acpi_status status; + struct acpi_object_list arg_list; + union acpi_object arg; + struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL}; + + dbg("%s: %s\n", __FUNCTION__, dock ? "docking" : "undocking"); + + /* _DCK method has one argument */ + arg_list.count = 1; + arg_list.pointer = &arg; + arg.type = ACPI_TYPE_INTEGER; + arg.integer.value = dock; + status = acpi_evaluate_object(ds->handle, "_DCK", + &arg_list, &buffer); + if (ACPI_FAILURE(status)) + err("%s: failed to execute _DCK\n", __FUNCTION__); + acpi_os_free(buffer.pointer); + + return status; +} + + + +static inline void dock(void) +{ + handle_dock(1); +} + + + +static inline void undock(void) +{ + handle_dock(0); +} + + + +/* + * the _DCK method can do funny things... and sometimes not + * hah-hah funny. + * + * TBD - figure out a way to only call fixups for + * systems that require them. 
+ */ +static void post_dock_fixups(void) +{ + struct pci_bus *bus; + u32 buses; + struct dependent_device *dd; + + list_for_each_entry(dd, &ds->pci_dependent_devices, pci_list) { + bus = dd->func->slot->bridge->pci_bus; + + /* fixup bad _DCK function that rewrites + * secondary bridge on slot + */ + pci_read_config_dword(bus->self, + PCI_PRIMARY_BUS, + &buses); + + if (((buses >> 8) & 0xff) != bus->secondary) { + buses = (buses & 0xff000000) + | ((unsigned int)(bus->primary) << 0) + | ((unsigned int)(bus->secondary) << 8) + | ((unsigned int)(bus->subordinate) << 16); + pci_write_config_dword(bus->self, + PCI_PRIMARY_BUS, + buses); + } + } +} + + + +static void hotplug_pci(u32 type) +{ + struct dependent_device *dd; + + list_for_each_entry(dd, &ds->pci_dependent_devices, pci_list) + handle_hotplug_event_func(dd->handle, type, dd->func); +} + + + +static inline void begin_dock(void) +{ + ds->flags |= DOCK_DOCKING; +} + + +static inline void complete_dock(void) +{ + ds->flags &= ~(DOCK_DOCKING); + ds->last_dock_time = jiffies; +} + + +static int dock_in_progress(void) +{ + if (ds->flags & DOCK_DOCKING || + ds->last_dock_time == jiffies) { + dbg("dock in progress\n"); + return 1; + } + return 0; +} + + + +static void +handle_hotplug_event_dock(acpi_handle handle, u32 type, void *context) +{ + dbg("%s: enter\n", __FUNCTION__); + + switch (type) { + case ACPI_NOTIFY_BUS_CHECK: + dbg("BUS Check\n"); + if (!dock_in_progress() && dock_present()) { + begin_dock(); + dock(); + if (!dock_present()) { + err("Unable to dock!\n"); + break; + } + post_dock_fixups(); + hotplug_pci(type); + complete_dock(); + } + break; + case ACPI_NOTIFY_EJECT_REQUEST: + dbg("EJECT request\n"); + if (!dock_in_progress() && dock_present()) { + hotplug_pci(type); + undock(); + eject_dock(); + if (dock_present()) + err("Unable to undock!\n"); + } + break; + } +} + + + + +static acpi_status +find_dock_ejd(acpi_handle handle, u32 lvl, void *context, void **rv) +{ + acpi_status status; + acpi_handle tmp; + acpi_handle dck_handle = (acpi_handle) context; + char objname[64]; + struct acpi_buffer buffer = { .length = sizeof(objname), + .pointer = objname }; + struct acpi_buffer ejd_buffer = {ACPI_ALLOCATE_BUFFER, NULL}; + union acpi_object *ejd_obj; + + status = acpi_get_handle(handle, "_EJD", &tmp); + if (ACPI_FAILURE(status)) + return AE_OK; + + /* make sure we are dependent on the dock device, + * by executing the _EJD method, then getting a handle + * to the device referenced by that name. 
If that + * device handle is the same handle as the dock station + * handle, then we are a device dependent on the dock station + */ + acpi_get_name(dck_handle, ACPI_FULL_PATHNAME, &buffer); + status = acpi_evaluate_object(handle, "_EJD", NULL, &ejd_buffer); + if (ACPI_FAILURE(status)) { + err("Unable to execute _EJD!\n"); + goto find_ejd_out; + } + ejd_obj = ejd_buffer.pointer; + status = acpi_get_handle(NULL, ejd_obj->string.pointer, &tmp); + if (ACPI_FAILURE(status)) + goto find_ejd_out; + + if (tmp == dck_handle) { + struct dependent_device *dd; + dbg("%s: found device dependent on dock\n", __FUNCTION__); + dd = alloc_dependent_device(handle); + if (!dd) { + err("Can't allocate memory for dependent device!\n"); + goto find_ejd_out; + } + add_dependent_device(dd); + } + +find_ejd_out: + acpi_os_free(ejd_buffer.pointer); + return AE_OK; +} + + + +int detect_dependent_devices(acpi_handle *bridge_handle) +{ + acpi_status status; + int count; + + count = 0; + + status = acpi_walk_namespace(ACPI_TYPE_DEVICE, bridge_handle, + (u32)1, find_dependent_device, + (void *)&count, NULL); + + return count; +} + + + + + +static acpi_status +find_dock(acpi_handle handle, u32 lvl, void *context, void **rv) +{ + int *count = (int *)context; + + if (is_dock(handle)) { + dbg("%s: found dock\n", __FUNCTION__); + ds = kzalloc(sizeof(*ds), GFP_KERNEL); + ds->handle = handle; + INIT_LIST_HEAD(&ds->dependent_devices); + INIT_LIST_HEAD(&ds->pci_dependent_devices); + + /* look for devices dependent on dock station */ + acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT, + ACPI_UINT32_MAX, find_dock_ejd, handle, NULL); + + acpi_install_notify_handler(handle, ACPI_SYSTEM_NOTIFY, + handle_hotplug_event_dock, ds); + (*count)++; + } + + return AE_OK; +} + + + + +int find_dock_station(void) +{ + int num = 0; + + ds = NULL; + + /* start from the root object, because some laptops define + * _DCK methods outside the scope of PCI (IBM x-series laptop) + */ + acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT, + ACPI_UINT32_MAX, find_dock, &num, NULL); + + return num; +} + + + +void remove_dock_station(void) +{ + struct dependent_device *dd, *tmp; + if (ds) { + if (ACPI_FAILURE(acpi_remove_notify_handler(ds->handle, + ACPI_SYSTEM_NOTIFY, handle_hotplug_event_dock))) + err("failed to remove dock notify handler\n"); + + /* free all dependent devices */ + list_for_each_entry_safe(dd, tmp, &ds->dependent_devices, + device_list) + kfree(dd); + + /* no need to touch the pci_dependent_device list, + * cause all memory was freed above + */ + kfree(ds); + } +} + + diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c index 509a5b3ae99..053ee843863 100644 --- a/drivers/pci/hotplug/acpiphp_glue.c +++ b/drivers/pci/hotplug/acpiphp_glue.c @@ -46,7 +46,7 @@ #include <linux/kernel.h> #include <linux/pci.h> #include <linux/smp_lock.h> -#include <asm/semaphore.h> +#include <linux/mutex.h> #include "../pci.h" #include "pci_hotplug.h" @@ -57,7 +57,6 @@ static LIST_HEAD(bridge_list); #define MY_NAME "acpiphp_glue" static void handle_hotplug_event_bridge (acpi_handle, u32, void *); -static void handle_hotplug_event_func (acpi_handle, u32, void *); static void acpiphp_sanitize_bus(struct pci_bus *bus); static void acpiphp_set_hpp_values(acpi_handle handle, struct pci_bus *bus); @@ -125,11 +124,11 @@ register_slot(acpi_handle handle, u32 lvl, void *context, void **rv) struct acpiphp_bridge *bridge = (struct acpiphp_bridge *)context; struct acpiphp_slot *slot; struct acpiphp_func *newfunc; + struct 
dependent_device *dd; acpi_handle tmp; acpi_status status = AE_OK; unsigned long adr, sun; - int device, function; - static int num_slots = 0; /* XXX if we support I/O node hotplug... */ + int device, function, retval; status = acpi_evaluate_integer(handle, "_ADR", NULL, &adr); @@ -138,21 +137,21 @@ register_slot(acpi_handle handle, u32 lvl, void *context, void **rv) status = acpi_get_handle(handle, "_EJ0", &tmp); - if (ACPI_FAILURE(status)) + if (ACPI_FAILURE(status) && !(is_dependent_device(handle))) return AE_OK; device = (adr >> 16) & 0xffff; function = adr & 0xffff; - newfunc = kmalloc(sizeof(struct acpiphp_func), GFP_KERNEL); + newfunc = kzalloc(sizeof(struct acpiphp_func), GFP_KERNEL); if (!newfunc) return AE_NO_MEMORY; - memset(newfunc, 0, sizeof(struct acpiphp_func)); INIT_LIST_HEAD(&newfunc->sibling); newfunc->handle = handle; newfunc->function = function; - newfunc->flags = FUNC_HAS_EJ0; + if (ACPI_SUCCESS(status)) + newfunc->flags = FUNC_HAS_EJ0; if (ACPI_SUCCESS(acpi_get_handle(handle, "_STA", &tmp))) newfunc->flags |= FUNC_HAS_STA; @@ -163,6 +162,19 @@ register_slot(acpi_handle handle, u32 lvl, void *context, void **rv) if (ACPI_SUCCESS(acpi_get_handle(handle, "_PS3", &tmp))) newfunc->flags |= FUNC_HAS_PS3; + if (ACPI_SUCCESS(acpi_get_handle(handle, "_DCK", &tmp))) { + newfunc->flags |= FUNC_HAS_DCK; + /* add to devices dependent on dock station, + * because this may actually be the dock bridge + */ + dd = alloc_dependent_device(handle); + if (!dd) + err("Can't allocate memory for " + "new dependent device!\n"); + else + add_dependent_device(dd); + } + status = acpi_evaluate_integer(handle, "_SUN", NULL, &sun); if (ACPI_FAILURE(status)) sun = -1; @@ -176,19 +188,17 @@ register_slot(acpi_handle handle, u32 lvl, void *context, void **rv) } if (!slot) { - slot = kmalloc(sizeof(struct acpiphp_slot), GFP_KERNEL); + slot = kzalloc(sizeof(struct acpiphp_slot), GFP_KERNEL); if (!slot) { kfree(newfunc); return AE_NO_MEMORY; } - memset(slot, 0, sizeof(struct acpiphp_slot)); slot->bridge = bridge; - slot->id = num_slots++; slot->device = device; slot->sun = sun; INIT_LIST_HEAD(&slot->funcs); - init_MUTEX(&slot->crit_sect); + mutex_init(&slot->crit_sect); slot->next = bridge->slots; bridge->slots = slot; @@ -198,6 +208,11 @@ register_slot(acpi_handle handle, u32 lvl, void *context, void **rv) dbg("found ACPI PCI Hotplug slot %d at PCI %04x:%02x:%02x\n", slot->sun, pci_domain_nr(bridge->pci_bus), bridge->pci_bus->number, slot->device); + retval = acpiphp_register_hotplug_slot(slot); + if (retval) { + warn("acpiphp_register_hotplug_slot failed(err code = 0x%x)\n", retval); + goto err_exit; + } } newfunc->slot = slot; @@ -210,16 +225,41 @@ register_slot(acpi_handle handle, u32 lvl, void *context, void **rv) slot->flags |= (SLOT_ENABLED | SLOT_POWEREDON); } + /* if this is a device dependent on a dock station, + * associate the acpiphp_func to the dependent_device + * struct. + */ + if ((dd = get_dependent_device(handle))) { + newfunc->flags |= FUNC_IS_DD; + /* + * we don't want any devices which is dependent + * on the dock to have it's _EJ0 method executed. + * because we need to run _DCK first. 
+ */ + newfunc->flags &= ~FUNC_HAS_EJ0; + dd->func = newfunc; + add_pci_dependent_device(dd); + } + /* install notify handler */ - status = acpi_install_notify_handler(handle, + if (!(newfunc->flags & FUNC_HAS_DCK)) { + status = acpi_install_notify_handler(handle, ACPI_SYSTEM_NOTIFY, handle_hotplug_event_func, newfunc); - if (ACPI_FAILURE(status)) { - err("failed to register interrupt notify handler\n"); - return status; - } + if (ACPI_FAILURE(status)) + err("failed to register interrupt notify handler\n"); + } else + status = AE_OK; + + return status; + + err_exit: + bridge->nr_slots--; + bridge->slots = slot->next; + kfree(slot); + kfree(newfunc); return AE_OK; } @@ -245,57 +285,19 @@ static int detect_ejectable_slots(acpi_handle *bridge_handle) static void decode_hpp(struct acpiphp_bridge *bridge) { acpi_status status; - struct acpi_buffer buffer = { .length = ACPI_ALLOCATE_BUFFER, - .pointer = NULL}; - union acpi_object *package; - int i; - - /* default numbers */ - bridge->hpp.cache_line_size = 0x10; - bridge->hpp.latency_timer = 0x40; - bridge->hpp.enable_SERR = 0; - bridge->hpp.enable_PERR = 0; - - status = acpi_evaluate_object(bridge->handle, "_HPP", NULL, &buffer); + status = acpi_get_hp_params_from_firmware(bridge->pci_dev, &bridge->hpp); if (ACPI_FAILURE(status)) { - dbg("_HPP evaluation failed\n"); - return; + /* use default numbers */ + bridge->hpp.cache_line_size = 0x10; + bridge->hpp.latency_timer = 0x40; + bridge->hpp.enable_serr = 0; + bridge->hpp.enable_perr = 0; } - - package = (union acpi_object *) buffer.pointer; - - if (!package || package->type != ACPI_TYPE_PACKAGE || - package->package.count != 4 || !package->package.elements) { - err("invalid _HPP object; ignoring\n"); - goto err_exit; - } - - for (i = 0; i < 4; i++) { - if (package->package.elements[i].type != ACPI_TYPE_INTEGER) { - err("invalid _HPP parameter type; ignoring\n"); - goto err_exit; - } - } - - bridge->hpp.cache_line_size = package->package.elements[0].integer.value; - bridge->hpp.latency_timer = package->package.elements[1].integer.value; - bridge->hpp.enable_SERR = package->package.elements[2].integer.value; - bridge->hpp.enable_PERR = package->package.elements[3].integer.value; - - dbg("_HPP parameter = (%02x, %02x, %02x, %02x)\n", - bridge->hpp.cache_line_size, - bridge->hpp.latency_timer, - bridge->hpp.enable_SERR, - bridge->hpp.enable_PERR); - - bridge->flags |= BRIDGE_HAS_HPP; - - err_exit: - kfree(buffer.pointer); } + /* initialize miscellaneous stuff for both root and PCI-to-PCI bridge */ static void init_bridge_misc(struct acpiphp_bridge *bridge) { @@ -304,9 +306,16 @@ static void init_bridge_misc(struct acpiphp_bridge *bridge) /* decode ACPI 2.0 _HPP (hot plug parameters) */ decode_hpp(bridge); + /* must be added to the list prior to calling register_slot */ + list_add(&bridge->list, &bridge_list); + /* register all slot objects under this bridge */ status = acpi_walk_namespace(ACPI_TYPE_DEVICE, bridge->handle, (u32)1, register_slot, bridge, NULL); + if (ACPI_FAILURE(status)) { + list_del(&bridge->list); + return; + } /* install notify handler */ if (bridge->type != BRIDGE_TYPE_HOST) { @@ -319,8 +328,6 @@ static void init_bridge_misc(struct acpiphp_bridge *bridge) err("failed to register interrupt notify handler\n"); } } - - list_add(&bridge->list, &bridge_list); } @@ -329,12 +336,10 @@ static void add_host_bridge(acpi_handle *handle, struct pci_bus *pci_bus) { struct acpiphp_bridge *bridge; - bridge = kmalloc(sizeof(struct acpiphp_bridge), GFP_KERNEL); + bridge = kzalloc(sizeof(struct 
acpiphp_bridge), GFP_KERNEL); if (bridge == NULL) return; - memset(bridge, 0, sizeof(struct acpiphp_bridge)); - bridge->type = BRIDGE_TYPE_HOST; bridge->handle = handle; @@ -351,14 +356,12 @@ static void add_p2p_bridge(acpi_handle *handle, struct pci_dev *pci_dev) { struct acpiphp_bridge *bridge; - bridge = kmalloc(sizeof(struct acpiphp_bridge), GFP_KERNEL); + bridge = kzalloc(sizeof(struct acpiphp_bridge), GFP_KERNEL); if (bridge == NULL) { err("out of memory\n"); return; } - memset(bridge, 0, sizeof(struct acpiphp_bridge)); - bridge->type = BRIDGE_TYPE_P2P; bridge->handle = handle; @@ -410,11 +413,18 @@ find_p2p_bridge(acpi_handle handle, u32 lvl, void *context, void **rv) goto out; /* check if this bridge has ejectable slots */ - if (detect_ejectable_slots(handle) > 0) { + if ((detect_ejectable_slots(handle) > 0) || + (detect_dependent_devices(handle) > 0)) { dbg("found PCI-to-PCI bridge at PCI %s\n", pci_name(dev)); add_p2p_bridge(handle, dev); } + /* search P2P bridges under this p2p bridge */ + status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, (u32)1, + find_p2p_bridge, dev->subordinate, NULL); + if (ACPI_FAILURE(status)) + warn("find_p2p_bridge faied (error code = 0x%x)\n", status); + out: pci_dev_put(dev); return AE_OK; @@ -512,15 +522,19 @@ static void cleanup_bridge(struct acpiphp_bridge *bridge) list_for_each_safe (list, tmp, &slot->funcs) { struct acpiphp_func *func; func = list_entry(list, struct acpiphp_func, sibling); - status = acpi_remove_notify_handler(func->handle, + if (!(func->flags & FUNC_HAS_DCK)) { + status = acpi_remove_notify_handler(func->handle, ACPI_SYSTEM_NOTIFY, handle_hotplug_event_func); - if (ACPI_FAILURE(status)) - err("failed to remove notify handler\n"); + if (ACPI_FAILURE(status)) + err("failed to remove notify handler\n"); + } pci_dev_put(func->pci_dev); list_del(list); kfree(func); } + acpiphp_unregister_hotplug_slot(slot); + list_del(&slot->funcs); kfree(slot); slot = next; } @@ -551,7 +565,8 @@ static void remove_bridge(acpi_handle handle) } else { /* clean-up p2p bridges under this host bridge */ acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, - (u32)1, cleanup_p2p_bridge, NULL, NULL); + ACPI_UINT32_MAX, cleanup_p2p_bridge, + NULL, NULL); } } @@ -751,6 +766,113 @@ static int power_off_slot(struct acpiphp_slot *slot) } + +/** + * acpiphp_max_busnr - return the highest reserved bus number under + * the given bus. + * @bus: bus to start search with + * + */ +static unsigned char acpiphp_max_busnr(struct pci_bus *bus) +{ + struct list_head *tmp; + unsigned char max, n; + + /* + * pci_bus_max_busnr will return the highest + * reserved busnr for all these children. + * that is equivalent to the bus->subordinate + * value. We don't want to use the parent's + * bus->subordinate value because it could have + * padding in it. + */ + max = bus->secondary; + + list_for_each(tmp, &bus->children) { + n = pci_bus_max_busnr(pci_bus_b(tmp)); + if (n > max) + max = n; + } + return max; +} + + + +/** + * get_func - get a pointer to acpiphp_func given a slot, device + * @slot: slot to search + * @dev: pci_dev struct to match. + * + * This function will increase the reference count of pci_dev, + * so callers should call pci_dev_put when complete. 
+ * + */ +static struct acpiphp_func * +get_func(struct acpiphp_slot *slot, struct pci_dev *dev) +{ + struct acpiphp_func *func = NULL; + struct pci_bus *bus = slot->bridge->pci_bus; + struct pci_dev *pdev; + + list_for_each_entry(func, &slot->funcs, sibling) { + pdev = pci_get_slot(bus, PCI_DEVFN(slot->device, + func->function)); + if (pdev) { + if (pdev == dev) + break; + pci_dev_put(pdev); + } + } + return func; +} + + +/** + * acpiphp_bus_add - add a new bus to acpi subsystem + * @func: acpiphp_func of the bridge + * + */ +static int acpiphp_bus_add(struct acpiphp_func *func) +{ + acpi_handle phandle; + struct acpi_device *device, *pdevice; + int ret_val; + + acpi_get_parent(func->handle, &phandle); + if (acpi_bus_get_device(phandle, &pdevice)) { + dbg("no parent device, assuming NULL\n"); + pdevice = NULL; + } + if (!acpi_bus_get_device(func->handle, &device)) { + dbg("bus exists... trim\n"); + /* this shouldn't be in here, so remove + * the bus then re-add it... + */ + ret_val = acpi_bus_trim(device, 1); + dbg("acpi_bus_trim return %x\n", ret_val); + } + + ret_val = acpi_bus_add(&device, pdevice, func->handle, + ACPI_BUS_TYPE_DEVICE); + if (ret_val) { + dbg("error adding bus, %x\n", + -ret_val); + goto acpiphp_bus_add_out; + } + /* + * try to start anyway. We could have failed to add + * simply because this bus had previously been added + * on another add. Don't bother with the return value + * we just keep going. + */ + ret_val = acpi_bus_start(device); + +acpiphp_bus_add_out: + return ret_val; +} + + + /** * enable_device - enable, configure a slot * @slot: slot to be enabled @@ -788,7 +910,7 @@ static int enable_device(struct acpiphp_slot *slot) goto err_exit; } - max = bus->secondary; + max = acpiphp_max_busnr(bus); for (pass = 0; pass < 2; pass++) { list_for_each_entry(dev, &bus->devices, bus_list) { if (PCI_SLOT(dev->devfn) != slot->device) @@ -796,8 +918,15 @@ static int enable_device(struct acpiphp_slot *slot) if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE || dev->hdr_type == PCI_HEADER_TYPE_CARDBUS) { max = pci_scan_bridge(bus, dev, max, pass); - if (pass && dev->subordinate) + if (pass && dev->subordinate) { pci_bus_size_bridges(dev->subordinate); + func = get_func(slot, dev); + if (func) { + acpiphp_bus_add(func); + /* side effect of get_func */ + pci_dev_put(dev); + } + } } } } @@ -806,8 +935,8 @@ static int enable_device(struct acpiphp_slot *slot) acpiphp_sanitize_bus(bus); pci_enable_bridges(bus); pci_bus_add_devices(bus); - acpiphp_set_hpp_values(DEVICE_ACPI_HANDLE(&bus->self->dev), bus); - acpiphp_configure_ioapics(DEVICE_ACPI_HANDLE(&bus->self->dev)); + acpiphp_set_hpp_values(slot->bridge->handle, bus); + acpiphp_configure_ioapics(slot->bridge->handle); /* associate pci_dev to our representation */ list_for_each (l, &slot->funcs) { @@ -987,11 +1116,11 @@ static void program_hpp(struct pci_dev *dev, struct acpiphp_bridge *bridge) pci_write_config_byte(dev, PCI_LATENCY_TIMER, bridge->hpp.latency_timer); pci_read_config_word(dev, PCI_COMMAND, &pci_cmd); - if (bridge->hpp.enable_SERR) + if (bridge->hpp.enable_serr) pci_cmd |= PCI_COMMAND_SERR; else pci_cmd &= ~PCI_COMMAND_SERR; - if (bridge->hpp.enable_PERR) + if (bridge->hpp.enable_perr) pci_cmd |= PCI_COMMAND_PARITY; else pci_cmd &= ~PCI_COMMAND_PARITY; @@ -1002,11 +1131,11 @@ static void program_hpp(struct pci_dev *dev, struct acpiphp_bridge *bridge) pci_write_config_byte(dev, PCI_SEC_LATENCY_TIMER, bridge->hpp.latency_timer); pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &pci_bctl); - if (bridge->hpp.enable_SERR) + if 
(bridge->hpp.enable_serr) pci_bctl |= PCI_BRIDGE_CTL_SERR; else pci_bctl &= ~PCI_BRIDGE_CTL_SERR; - if (bridge->hpp.enable_PERR) + if (bridge->hpp.enable_perr) pci_bctl |= PCI_BRIDGE_CTL_PARITY; else pci_bctl &= ~PCI_BRIDGE_CTL_PARITY; @@ -1026,6 +1155,7 @@ static void acpiphp_set_hpp_values(acpi_handle handle, struct pci_bus *bus) memset(&bridge, 0, sizeof(bridge)); bridge.handle = handle; + bridge.pci_dev = bus->self; decode_hpp(&bridge); list_for_each_entry(dev, &bus->devices, bus_list) program_hpp(dev, &bridge); @@ -1200,7 +1330,7 @@ static void handle_hotplug_event_bridge(acpi_handle handle, u32 type, void *cont * handles ACPI event notification on slots * */ -static void handle_hotplug_event_func(acpi_handle handle, u32 type, void *context) +void handle_hotplug_event_func(acpi_handle handle, u32 type, void *context) { struct acpiphp_func *func; char objname[64]; @@ -1242,41 +1372,13 @@ static void handle_hotplug_event_func(acpi_handle handle, u32 type, void *contex } } -static int is_root_bridge(acpi_handle handle) -{ - acpi_status status; - struct acpi_device_info *info; - struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL}; - int i; - - status = acpi_get_object_info(handle, &buffer); - if (ACPI_SUCCESS(status)) { - info = buffer.pointer; - if ((info->valid & ACPI_VALID_HID) && - !strcmp(PCI_ROOT_HID_STRING, - info->hardware_id.value)) { - acpi_os_free(buffer.pointer); - return 1; - } - if (info->valid & ACPI_VALID_CID) { - for (i=0; i < info->compatibility_id.count; i++) { - if (!strcmp(PCI_ROOT_HID_STRING, - info->compatibility_id.id[i].value)) { - acpi_os_free(buffer.pointer); - return 1; - } - } - } - } - return 0; -} static acpi_status find_root_bridges(acpi_handle handle, u32 lvl, void *context, void **rv) { int *count = (int *)context; - if (is_root_bridge(handle)) { + if (acpi_root_bridge(handle)) { acpi_install_notify_handler(handle, ACPI_SYSTEM_NOTIFY, handle_hotplug_event_bridge, NULL); (*count)++; @@ -1373,26 +1475,6 @@ static int acpiphp_for_each_slot(acpiphp_callback fn, void *data) } #endif -/* search matching slot from id */ -struct acpiphp_slot *get_slot_from_id(int id) -{ - struct list_head *node; - struct acpiphp_bridge *bridge; - struct acpiphp_slot *slot; - - list_for_each (node, &bridge_list) { - bridge = (struct acpiphp_bridge *)node; - for (slot = bridge->slots; slot; slot = slot->next) - if (slot->id == id) - return slot; - } - - /* should never happen! 
*/ - err("%s: no object for id %d\n", __FUNCTION__, id); - WARN_ON(1); - return NULL; -} - /** * acpiphp_enable_slot - power on slot @@ -1401,7 +1483,7 @@ int acpiphp_enable_slot(struct acpiphp_slot *slot) { int retval; - down(&slot->crit_sect); + mutex_lock(&slot->crit_sect); /* wake up all functions */ retval = power_on_slot(slot); @@ -1413,7 +1495,7 @@ int acpiphp_enable_slot(struct acpiphp_slot *slot) retval = enable_device(slot); err_exit: - up(&slot->crit_sect); + mutex_unlock(&slot->crit_sect); return retval; } @@ -1424,7 +1506,7 @@ int acpiphp_disable_slot(struct acpiphp_slot *slot) { int retval = 0; - down(&slot->crit_sect); + mutex_lock(&slot->crit_sect); /* unconfigure all functions */ retval = disable_device(slot); @@ -1437,7 +1519,7 @@ int acpiphp_disable_slot(struct acpiphp_slot *slot) goto err_exit; err_exit: - up(&slot->crit_sect); + mutex_unlock(&slot->crit_sect); return retval; } diff --git a/drivers/pci/hotplug/cpci_hotplug_core.c b/drivers/pci/hotplug/cpci_hotplug_core.c index 30af105271a..037ce4c9168 100644 --- a/drivers/pci/hotplug/cpci_hotplug_core.c +++ b/drivers/pci/hotplug/cpci_hotplug_core.c @@ -248,22 +248,19 @@ cpci_hp_register_bus(struct pci_bus *bus, u8 first, u8 last) * with the pci_hotplug subsystem. */ for (i = first; i <= last; ++i) { - slot = kmalloc(sizeof (struct slot), GFP_KERNEL); + slot = kzalloc(sizeof (struct slot), GFP_KERNEL); if (!slot) goto error; - memset(slot, 0, sizeof (struct slot)); hotplug_slot = - kmalloc(sizeof (struct hotplug_slot), GFP_KERNEL); + kzalloc(sizeof (struct hotplug_slot), GFP_KERNEL); if (!hotplug_slot) goto error_slot; - memset(hotplug_slot, 0, sizeof (struct hotplug_slot)); slot->hotplug_slot = hotplug_slot; - info = kmalloc(sizeof (struct hotplug_slot_info), GFP_KERNEL); + info = kzalloc(sizeof (struct hotplug_slot_info), GFP_KERNEL); if (!info) goto error_hpslot; - memset(info, 0, sizeof (struct hotplug_slot_info)); hotplug_slot->info = info; name = kmalloc(SLOT_NAME_SIZE, GFP_KERNEL); diff --git a/drivers/pci/hotplug/cpqphp.h b/drivers/pci/hotplug/cpqphp.h index cb88404c89f..c74e9e37e76 100644 --- a/drivers/pci/hotplug/cpqphp.h +++ b/drivers/pci/hotplug/cpqphp.h @@ -32,6 +32,7 @@ #include <linux/interrupt.h> #include <asm/io.h> /* for read? and write? 
functions */ #include <linux/delay.h> /* for delays */ +#include <linux/mutex.h> #define MY_NAME "cpqphp" @@ -286,7 +287,7 @@ struct event_info { struct controller { struct controller *next; u32 ctrl_int_comp; - struct semaphore crit_sect; /* critical section semaphore */ + struct mutex crit_sect; /* critical section mutex */ void __iomem *hpc_reg; /* cookie for our pci controller location */ struct pci_resource *mem_head; struct pci_resource *p_mem_head; diff --git a/drivers/pci/hotplug/cpqphp_core.c b/drivers/pci/hotplug/cpqphp_core.c index b3659ffccac..9bc1deb8df5 100644 --- a/drivers/pci/hotplug/cpqphp_core.c +++ b/drivers/pci/hotplug/cpqphp_core.c @@ -347,26 +347,22 @@ static int ctrl_slot_setup(struct controller *ctrl, slot_number = ctrl->first_slot; while (number_of_slots) { - slot = kmalloc(sizeof(*slot), GFP_KERNEL); + slot = kzalloc(sizeof(*slot), GFP_KERNEL); if (!slot) goto error; - memset(slot, 0, sizeof(struct slot)); - slot->hotplug_slot = kmalloc(sizeof(*(slot->hotplug_slot)), + slot->hotplug_slot = kzalloc(sizeof(*(slot->hotplug_slot)), GFP_KERNEL); if (!slot->hotplug_slot) goto error_slot; hotplug_slot = slot->hotplug_slot; - memset(hotplug_slot, 0, sizeof(struct hotplug_slot)); hotplug_slot->info = - kmalloc(sizeof(*(hotplug_slot->info)), + kzalloc(sizeof(*(hotplug_slot->info)), GFP_KERNEL); if (!hotplug_slot->info) goto error_hpslot; hotplug_slot_info = hotplug_slot->info; - memset(hotplug_slot_info, 0, - sizeof(struct hotplug_slot_info)); hotplug_slot->name = kmalloc(SLOT_NAME_SIZE, GFP_KERNEL); if (!hotplug_slot->name) @@ -599,7 +595,7 @@ cpqhp_set_attention_status(struct controller *ctrl, struct pci_func *func, hp_slot = func->device - ctrl->slot_device_offset; // Wait for exclusive access to hardware - down(&ctrl->crit_sect); + mutex_lock(&ctrl->crit_sect); if (status == 1) { amber_LED_on (ctrl, hp_slot); @@ -607,7 +603,7 @@ cpqhp_set_attention_status(struct controller *ctrl, struct pci_func *func, amber_LED_off (ctrl, hp_slot); } else { // Done with exclusive hardware access - up(&ctrl->crit_sect); + mutex_unlock(&ctrl->crit_sect); return(1); } @@ -617,7 +613,7 @@ cpqhp_set_attention_status(struct controller *ctrl, struct pci_func *func, wait_for_ctrl_irq (ctrl); // Done with exclusive hardware access - up(&ctrl->crit_sect); + mutex_unlock(&ctrl->crit_sect); return(0); } @@ -854,13 +850,12 @@ static int cpqhpc_probe(struct pci_dev *pdev, const struct pci_device_id *ent) goto err_disable_device; } - ctrl = (struct controller *) kmalloc(sizeof(struct controller), GFP_KERNEL); + ctrl = kzalloc(sizeof(struct controller), GFP_KERNEL); if (!ctrl) { err("%s : out of memory\n", __FUNCTION__); rc = -ENOMEM; goto err_disable_device; } - memset(ctrl, 0, sizeof(struct controller)); rc = pci_read_config_word(pdev, PCI_SUBSYSTEM_ID, &subsystem_deviceid); if (rc) { @@ -1084,7 +1079,7 @@ static int cpqhpc_probe(struct pci_dev *pdev, const struct pci_device_id *ent) dbg("bus device function rev: %d %d %d %d\n", ctrl->bus, PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn), ctrl->rev); - init_MUTEX(&ctrl->crit_sect); + mutex_init(&ctrl->crit_sect); init_waitqueue_head(&ctrl->queue); /* initialize our threads if they haven't already been started up */ @@ -1223,7 +1218,7 @@ static int cpqhpc_probe(struct pci_dev *pdev, const struct pci_device_id *ent) // turn off empty slots here unless command line option "ON" set // Wait for exclusive access to hardware - down(&ctrl->crit_sect); + mutex_lock(&ctrl->crit_sect); num_of_slots = readb(ctrl->hpc_reg + SLOT_MASK) & 0x0F; @@ -1270,12 +1265,12 @@ 
static int cpqhpc_probe(struct pci_dev *pdev, const struct pci_device_id *ent) rc = init_SERR(ctrl); if (rc) { err("init_SERR failed\n"); - up(&ctrl->crit_sect); + mutex_unlock(&ctrl->crit_sect); goto err_free_irq; } // Done with exclusive hardware access - up(&ctrl->crit_sect); + mutex_unlock(&ctrl->crit_sect); cpqhp_create_debugfs_files(ctrl); diff --git a/drivers/pci/hotplug/cpqphp_ctrl.c b/drivers/pci/hotplug/cpqphp_ctrl.c index 771ed34b181..55d2dc7e39c 100644 --- a/drivers/pci/hotplug/cpqphp_ctrl.c +++ b/drivers/pci/hotplug/cpqphp_ctrl.c @@ -1282,9 +1282,7 @@ static u32 board_replaced(struct pci_func *func, struct controller *ctrl) u8 hp_slot; u8 temp_byte; u8 adapter_speed; - u32 index; u32 rc = 0; - u32 src = 8; hp_slot = func->device - ctrl->slot_device_offset; @@ -1299,7 +1297,7 @@ static u32 board_replaced(struct pci_func *func, struct controller *ctrl) **********************************/ rc = CARD_FUNCTIONING; } else { - down(&ctrl->crit_sect); + mutex_lock(&ctrl->crit_sect); /* turn on board without attaching to the bus */ enable_slot_power (ctrl, hp_slot); @@ -1333,12 +1331,12 @@ static u32 board_replaced(struct pci_func *func, struct controller *ctrl) /* Wait for SOBS to be unset */ wait_for_ctrl_irq (ctrl); - up(&ctrl->crit_sect); + mutex_unlock(&ctrl->crit_sect); if (rc) return rc; - down(&ctrl->crit_sect); + mutex_lock(&ctrl->crit_sect); slot_enable (ctrl, hp_slot); green_LED_blink (ctrl, hp_slot); @@ -1350,7 +1348,7 @@ static u32 board_replaced(struct pci_func *func, struct controller *ctrl) /* Wait for SOBS to be unset */ wait_for_ctrl_irq (ctrl); - up(&ctrl->crit_sect); + mutex_unlock(&ctrl->crit_sect); /* Wait for ~1 second because of hot plug spec */ long_delay(1*HZ); @@ -1368,76 +1366,30 @@ static u32 board_replaced(struct pci_func *func, struct controller *ctrl) rc = cpqhp_configure_board(ctrl, func); - if (rc || src) { - /* If configuration fails, turn it off - * Get slot won't work for devices behind - * bridges, but in this case it will always be - * called for the "base" bus/dev/func of an - * adapter. */ + /* If configuration fails, turn it off + * Get slot won't work for devices behind + * bridges, but in this case it will always be + * called for the "base" bus/dev/func of an + * adapter. */ - down(&ctrl->crit_sect); + mutex_lock(&ctrl->crit_sect); - amber_LED_on (ctrl, hp_slot); - green_LED_off (ctrl, hp_slot); - slot_disable (ctrl, hp_slot); - - set_SOGO(ctrl); - - /* Wait for SOBS to be unset */ - wait_for_ctrl_irq (ctrl); - - up(&ctrl->crit_sect); - - if (rc) - return rc; - else - return 1; - } - - func->status = 0; - func->switch_save = 0x10; - - index = 1; - while (((func = cpqhp_slot_find(func->bus, func->device, index)) != NULL) && !rc) { - rc |= cpqhp_configure_board(ctrl, func); - index++; - } - - if (rc) { - /* If configuration fails, turn it off - * Get slot won't work for devices behind - * bridges, but in this case it will always be - * called for the "base" bus/dev/func of an - * adapter. 
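Both the cpqphp changes here and the ibmphp changes further down apply the same conversion: a struct semaphore that was only ever used as a sleeping lock around exclusive hardware access becomes a struct mutex, with init_MUTEX()/down()/up() replaced by mutex_init()/mutex_lock()/mutex_unlock(). A condensed before/after sketch; struct example_ctrl is a made-up stand-in for the drivers' struct controller.

#include <linux/mutex.h>

struct example_ctrl {
	struct mutex crit_sect;		/* was: struct semaphore crit_sect */
};

static void example_ctrl_init(struct example_ctrl *ctrl)
{
	mutex_init(&ctrl->crit_sect);	/* was: init_MUTEX(&ctrl->crit_sect) */
}

static void example_exclusive_op(struct example_ctrl *ctrl)
{
	mutex_lock(&ctrl->crit_sect);	/* was: down(&ctrl->crit_sect) */
	/* ... exclusive hardware access: LED control, set_SOGO(), wait_for_ctrl_irq() ... */
	mutex_unlock(&ctrl->crit_sect);	/* was: up(&ctrl->crit_sect) */
}

Besides making the intent explicit, a real mutex lets the kernel's lock debugging catch misuse that a bare semaphore would silently allow.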
*/ - - down(&ctrl->crit_sect); - - amber_LED_on (ctrl, hp_slot); - green_LED_off (ctrl, hp_slot); - slot_disable (ctrl, hp_slot); - - set_SOGO(ctrl); - - /* Wait for SOBS to be unset */ - wait_for_ctrl_irq (ctrl); - - up(&ctrl->crit_sect); - - return rc; - } - /* Done configuring so turn LED on full time */ - - down(&ctrl->crit_sect); - - green_LED_on (ctrl, hp_slot); + amber_LED_on (ctrl, hp_slot); + green_LED_off (ctrl, hp_slot); + slot_disable (ctrl, hp_slot); set_SOGO(ctrl); /* Wait for SOBS to be unset */ wait_for_ctrl_irq (ctrl); - up(&ctrl->crit_sect); - rc = 0; + mutex_unlock(&ctrl->crit_sect); + + if (rc) + return rc; + else + return 1; + } else { /* Something is wrong @@ -1445,7 +1397,7 @@ static u32 board_replaced(struct pci_func *func, struct controller *ctrl) * in this case it will always be called for the "base" * bus/dev/func of an adapter. */ - down(&ctrl->crit_sect); + mutex_lock(&ctrl->crit_sect); amber_LED_on (ctrl, hp_slot); green_LED_off (ctrl, hp_slot); @@ -1456,7 +1408,7 @@ static u32 board_replaced(struct pci_func *func, struct controller *ctrl) /* Wait for SOBS to be unset */ wait_for_ctrl_irq (ctrl); - up(&ctrl->crit_sect); + mutex_unlock(&ctrl->crit_sect); } } @@ -1488,7 +1440,7 @@ static u32 board_added(struct pci_func *func, struct controller *ctrl) dbg("%s: func->device, slot_offset, hp_slot = %d, %d ,%d\n", __FUNCTION__, func->device, ctrl->slot_device_offset, hp_slot); - down(&ctrl->crit_sect); + mutex_lock(&ctrl->crit_sect); /* turn on board without attaching to the bus */ enable_slot_power(ctrl, hp_slot); @@ -1522,7 +1474,7 @@ static u32 board_added(struct pci_func *func, struct controller *ctrl) /* Wait for SOBS to be unset */ wait_for_ctrl_irq(ctrl); - up(&ctrl->crit_sect); + mutex_unlock(&ctrl->crit_sect); if (rc) return rc; @@ -1532,7 +1484,7 @@ static u32 board_added(struct pci_func *func, struct controller *ctrl) /* turn on board and blink green LED */ dbg("%s: before down\n", __FUNCTION__); - down(&ctrl->crit_sect); + mutex_lock(&ctrl->crit_sect); dbg("%s: after down\n", __FUNCTION__); dbg("%s: before slot_enable\n", __FUNCTION__); @@ -1553,7 +1505,7 @@ static u32 board_added(struct pci_func *func, struct controller *ctrl) dbg("%s: after wait_for_ctrl_irq\n", __FUNCTION__); dbg("%s: before up\n", __FUNCTION__); - up(&ctrl->crit_sect); + mutex_unlock(&ctrl->crit_sect); dbg("%s: after up\n", __FUNCTION__); /* Wait for ~1 second because of hot plug spec */ @@ -1607,7 +1559,7 @@ static u32 board_added(struct pci_func *func, struct controller *ctrl) cpqhp_resource_sort_and_combine(&(ctrl->bus_head)); if (rc) { - down(&ctrl->crit_sect); + mutex_lock(&ctrl->crit_sect); amber_LED_on (ctrl, hp_slot); green_LED_off (ctrl, hp_slot); @@ -1618,7 +1570,7 @@ static u32 board_added(struct pci_func *func, struct controller *ctrl) /* Wait for SOBS to be unset */ wait_for_ctrl_irq (ctrl); - up(&ctrl->crit_sect); + mutex_unlock(&ctrl->crit_sect); return rc; } else { cpqhp_save_slot_config(ctrl, func); @@ -1640,7 +1592,7 @@ static u32 board_added(struct pci_func *func, struct controller *ctrl) } } while (new_slot); - down(&ctrl->crit_sect); + mutex_lock(&ctrl->crit_sect); green_LED_on (ctrl, hp_slot); @@ -1649,9 +1601,9 @@ static u32 board_added(struct pci_func *func, struct controller *ctrl) /* Wait for SOBS to be unset */ wait_for_ctrl_irq (ctrl); - up(&ctrl->crit_sect); + mutex_unlock(&ctrl->crit_sect); } else { - down(&ctrl->crit_sect); + mutex_lock(&ctrl->crit_sect); amber_LED_on (ctrl, hp_slot); green_LED_off (ctrl, hp_slot); @@ -1662,7 +1614,7 @@ static u32 
board_added(struct pci_func *func, struct controller *ctrl) /* Wait for SOBS to be unset */ wait_for_ctrl_irq (ctrl); - up(&ctrl->crit_sect); + mutex_unlock(&ctrl->crit_sect); return rc; } @@ -1721,7 +1673,7 @@ static u32 remove_board(struct pci_func * func, u32 replace_flag, struct control func->status = 0x01; func->configured = 0; - down(&ctrl->crit_sect); + mutex_lock(&ctrl->crit_sect); green_LED_off (ctrl, hp_slot); slot_disable (ctrl, hp_slot); @@ -1736,7 +1688,7 @@ static u32 remove_board(struct pci_func * func, u32 replace_flag, struct control /* Wait for SOBS to be unset */ wait_for_ctrl_irq (ctrl); - up(&ctrl->crit_sect); + mutex_unlock(&ctrl->crit_sect); if (!replace_flag && ctrl->add_support) { while (func) { @@ -1899,7 +1851,7 @@ static void interrupt_event_handler(struct controller *ctrl) dbg("button cancel\n"); del_timer(&p_slot->task_event); - down(&ctrl->crit_sect); + mutex_lock(&ctrl->crit_sect); if (p_slot->state == BLINKINGOFF_STATE) { /* slot is on */ @@ -1922,7 +1874,7 @@ static void interrupt_event_handler(struct controller *ctrl) /* Wait for SOBS to be unset */ wait_for_ctrl_irq (ctrl); - up(&ctrl->crit_sect); + mutex_unlock(&ctrl->crit_sect); } /*** button Released (No action on press...) */ else if (ctrl->event_queue[loop].event_type == INT_BUTTON_RELEASE) { @@ -1937,7 +1889,7 @@ static void interrupt_event_handler(struct controller *ctrl) p_slot->state = BLINKINGON_STATE; info(msg_button_on, p_slot->number); } - down(&ctrl->crit_sect); + mutex_lock(&ctrl->crit_sect); dbg("blink green LED and turn off amber\n"); @@ -1949,7 +1901,7 @@ static void interrupt_event_handler(struct controller *ctrl) /* Wait for SOBS to be unset */ wait_for_ctrl_irq (ctrl); - up(&ctrl->crit_sect); + mutex_unlock(&ctrl->crit_sect); init_timer(&p_slot->task_event); p_slot->hp_slot = hp_slot; p_slot->ctrl = ctrl; diff --git a/drivers/pci/hotplug/fakephp.c b/drivers/pci/hotplug/fakephp.c index 060d74775d7..71b80c23e8c 100644 --- a/drivers/pci/hotplug/fakephp.c +++ b/drivers/pci/hotplug/fakephp.c @@ -95,15 +95,13 @@ static int add_slot(struct pci_dev *dev) struct hotplug_slot *slot; int retval = -ENOMEM; - slot = kmalloc(sizeof(struct hotplug_slot), GFP_KERNEL); + slot = kzalloc(sizeof(struct hotplug_slot), GFP_KERNEL); if (!slot) goto error; - memset(slot, 0, sizeof(*slot)); - slot->info = kmalloc(sizeof(struct hotplug_slot_info), GFP_KERNEL); + slot->info = kzalloc(sizeof(struct hotplug_slot_info), GFP_KERNEL); if (!slot->info) goto error_slot; - memset(slot->info, 0, sizeof(struct hotplug_slot_info)); slot->info->power_status = 1; slot->info->max_bus_speed = PCI_SPEED_UNKNOWN; @@ -227,11 +225,10 @@ static void pci_rescan_bus(const struct pci_bus *bus) { unsigned int devfn; struct pci_dev *dev; - dev = kmalloc(sizeof(struct pci_dev), GFP_KERNEL); + dev = kzalloc(sizeof(struct pci_dev), GFP_KERNEL); if (!dev) return; - memset(dev, 0, sizeof(dev)); dev->bus = (struct pci_bus*)bus; dev->sysdata = bus->sysdata; for (devfn = 0; devfn < 0x100; devfn += 8) { diff --git a/drivers/pci/hotplug/ibmphp.h b/drivers/pci/hotplug/ibmphp.h index c22e0284d7b..dba6d8ca9bd 100644 --- a/drivers/pci/hotplug/ibmphp.h +++ b/drivers/pci/hotplug/ibmphp.h @@ -406,8 +406,6 @@ extern void ibmphp_hpc_stop_poll_thread (void); //---------------------------------------------------------------------------- // HPC return codes //---------------------------------------------------------------------------- -#define FALSE 0x00 -#define TRUE 0x01 #define HPC_ERROR 0xFF 
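Alongside dropping the private TRUE/FALSE macros above, the series converts the drivers' kmalloc()+memset() pairs to kzalloc()/kcalloc(). The fakephp pci_rescan_bus() hunk earlier in this group shows why the two-step form is fragile: its removed memset(dev, 0, sizeof(dev)) zeroed only the size of a pointer, not the structure, a mistake the single-call form cannot make. A small illustrative sketch; struct foo and the helper names are placeholders.

#include <linux/slab.h>

struct foo {				/* placeholder structure */
	int id;
	char name[16];
};

static struct foo *alloc_foo(void)
{
	/*
	 * Old idiom, two steps and easy to get wrong:
	 *	p = kmalloc(sizeof(struct foo), GFP_KERNEL);
	 *	if (!p)
	 *		return NULL;
	 *	memset(p, 0, sizeof(p));	<-- zeroes sizeof(a pointer)!
	 */
	return kzalloc(sizeof(struct foo), GFP_KERNEL);	/* allocate and zero in one call */
}

static struct foo *alloc_foo_array(size_t n)
{
	/* for arrays, kcalloc() additionally guards the n * size multiplication */
	return kcalloc(n, sizeof(struct foo), GFP_KERNEL);
}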
//----------------------------------------------------------------------------- diff --git a/drivers/pci/hotplug/ibmphp_core.c b/drivers/pci/hotplug/ibmphp_core.c index dc59da675c0..e13d5b87241 100644 --- a/drivers/pci/hotplug/ibmphp_core.c +++ b/drivers/pci/hotplug/ibmphp_core.c @@ -1141,7 +1141,7 @@ static int enable_slot(struct hotplug_slot *hs) goto error_power; } - slot_cur->func = kmalloc(sizeof(struct pci_func), GFP_KERNEL); + slot_cur->func = kzalloc(sizeof(struct pci_func), GFP_KERNEL); if (!slot_cur->func) { /* We cannot do update_slot_info here, since no memory for * kmalloc n.e.ways, and update_slot_info allocates some */ @@ -1149,7 +1149,6 @@ static int enable_slot(struct hotplug_slot *hs) rc = -ENOMEM; goto error_power; } - memset(slot_cur->func, 0, sizeof(struct pci_func)); slot_cur->func->busno = slot_cur->bus; slot_cur->func->device = slot_cur->device; for (i = 0; i < 4; i++) @@ -1240,9 +1239,9 @@ int ibmphp_do_disable_slot(struct slot *slot_cur) } flag = slot_cur->flag; - slot_cur->flag = TRUE; + slot_cur->flag = 1; - if (flag == TRUE) { + if (flag == 1) { rc = validate(slot_cur, DISABLE); /* checking if powered off already & valid slot # */ if (rc) @@ -1252,13 +1251,12 @@ int ibmphp_do_disable_slot(struct slot *slot_cur) if (slot_cur->func == NULL) { /* We need this for fncs's that were there on bootup */ - slot_cur->func = kmalloc(sizeof(struct pci_func), GFP_KERNEL); + slot_cur->func = kzalloc(sizeof(struct pci_func), GFP_KERNEL); if (!slot_cur->func) { err("out of system memory\n"); rc = -ENOMEM; goto error; } - memset(slot_cur->func, 0, sizeof(struct pci_func)); slot_cur->func->busno = slot_cur->bus; slot_cur->func->device = slot_cur->device; } diff --git a/drivers/pci/hotplug/ibmphp_ebda.c b/drivers/pci/hotplug/ibmphp_ebda.c index aea1187c73a..05e4f5a1927 100644 --- a/drivers/pci/hotplug/ibmphp_ebda.c +++ b/drivers/pci/hotplug/ibmphp_ebda.c @@ -72,13 +72,7 @@ static int ebda_rio_table (void); static struct ebda_hpc_list * __init alloc_ebda_hpc_list (void) { - struct ebda_hpc_list *list; - - list = kmalloc (sizeof (struct ebda_hpc_list), GFP_KERNEL); - if (!list) - return NULL; - memset (list, 0, sizeof (*list)); - return list; + return kzalloc(sizeof(struct ebda_hpc_list), GFP_KERNEL); } static struct controller *alloc_ebda_hpc (u32 slot_count, u32 bus_count) @@ -87,21 +81,18 @@ static struct controller *alloc_ebda_hpc (u32 slot_count, u32 bus_count) struct ebda_hpc_slot *slots; struct ebda_hpc_bus *buses; - controller = kmalloc (sizeof (struct controller), GFP_KERNEL); + controller = kzalloc(sizeof(struct controller), GFP_KERNEL); if (!controller) goto error; - memset (controller, 0, sizeof (*controller)); - slots = kmalloc (sizeof (struct ebda_hpc_slot) * slot_count, GFP_KERNEL); + slots = kcalloc(slot_count, sizeof(struct ebda_hpc_slot), GFP_KERNEL); if (!slots) goto error_contr; - memset (slots, 0, sizeof (*slots) * slot_count); controller->slots = slots; - buses = kmalloc (sizeof (struct ebda_hpc_bus) * bus_count, GFP_KERNEL); + buses = kcalloc(bus_count, sizeof(struct ebda_hpc_bus), GFP_KERNEL); if (!buses) goto error_slots; - memset (buses, 0, sizeof (*buses) * bus_count); controller->buses = buses; return controller; @@ -122,24 +113,12 @@ static void free_ebda_hpc (struct controller *controller) static struct ebda_rsrc_list * __init alloc_ebda_rsrc_list (void) { - struct ebda_rsrc_list *list; - - list = kmalloc (sizeof (struct ebda_rsrc_list), GFP_KERNEL); - if (!list) - return NULL; - memset (list, 0, sizeof (*list)); - return list; + return 
kzalloc(sizeof(struct ebda_rsrc_list), GFP_KERNEL); } static struct ebda_pci_rsrc *alloc_ebda_pci_rsrc (void) { - struct ebda_pci_rsrc *resource; - - resource = kmalloc (sizeof (struct ebda_pci_rsrc), GFP_KERNEL); - if (!resource) - return NULL; - memset (resource, 0, sizeof (*resource)); - return resource; + return kzalloc(sizeof(struct ebda_pci_rsrc), GFP_KERNEL); } static void __init print_bus_info (void) @@ -390,10 +369,9 @@ int __init ibmphp_access_ebda (void) debug ("now enter io table ---\n"); debug ("rio blk id: %x\n", blk_id); - rio_table_ptr = kmalloc (sizeof (struct rio_table_hdr), GFP_KERNEL); + rio_table_ptr = kzalloc(sizeof(struct rio_table_hdr), GFP_KERNEL); if (!rio_table_ptr) return -ENOMEM; - memset (rio_table_ptr, 0, sizeof (struct rio_table_hdr) ); rio_table_ptr->ver_num = readb (io_mem + offset); rio_table_ptr->scal_count = readb (io_mem + offset + 1); rio_table_ptr->riodev_count = readb (io_mem + offset + 2); @@ -445,10 +423,9 @@ static int __init ebda_rio_table (void) // we do concern about rio details for (i = 0; i < rio_table_ptr->riodev_count; i++) { - rio_detail_ptr = kmalloc (sizeof (struct rio_detail), GFP_KERNEL); + rio_detail_ptr = kzalloc(sizeof(struct rio_detail), GFP_KERNEL); if (!rio_detail_ptr) return -ENOMEM; - memset (rio_detail_ptr, 0, sizeof (struct rio_detail)); rio_detail_ptr->rio_node_id = readb (io_mem + offset); rio_detail_ptr->bbar = readl (io_mem + offset + 1); rio_detail_ptr->rio_type = readb (io_mem + offset + 5); @@ -503,10 +480,9 @@ static int __init combine_wpg_for_chassis (void) rio_detail_ptr = list_entry (list_head_ptr, struct rio_detail, rio_detail_list); opt_rio_ptr = search_opt_vg (rio_detail_ptr->chassis_num); if (!opt_rio_ptr) { - opt_rio_ptr = (struct opt_rio *) kmalloc (sizeof (struct opt_rio), GFP_KERNEL); + opt_rio_ptr = kzalloc(sizeof(struct opt_rio), GFP_KERNEL); if (!opt_rio_ptr) return -ENOMEM; - memset (opt_rio_ptr, 0, sizeof (struct opt_rio)); opt_rio_ptr->rio_type = rio_detail_ptr->rio_type; opt_rio_ptr->chassis_num = rio_detail_ptr->chassis_num; opt_rio_ptr->first_slot_num = rio_detail_ptr->first_slot_num; @@ -546,10 +522,9 @@ static int combine_wpg_for_expansion (void) rio_detail_ptr = list_entry (list_head_ptr, struct rio_detail, rio_detail_list); opt_rio_lo_ptr = search_opt_lo (rio_detail_ptr->chassis_num); if (!opt_rio_lo_ptr) { - opt_rio_lo_ptr = (struct opt_rio_lo *) kmalloc (sizeof (struct opt_rio_lo), GFP_KERNEL); + opt_rio_lo_ptr = kzalloc(sizeof(struct opt_rio_lo), GFP_KERNEL); if (!opt_rio_lo_ptr) return -ENOMEM; - memset (opt_rio_lo_ptr, 0, sizeof (struct opt_rio_lo)); opt_rio_lo_ptr->rio_type = rio_detail_ptr->rio_type; opt_rio_lo_ptr->chassis_num = rio_detail_ptr->chassis_num; opt_rio_lo_ptr->first_slot_num = rio_detail_ptr->first_slot_num; @@ -842,12 +817,11 @@ static int __init ebda_rsrc_controller (void) bus_info_ptr2 = ibmphp_find_same_bus_num (slot_ptr->slot_bus_num); if (!bus_info_ptr2) { - bus_info_ptr1 = (struct bus_info *) kmalloc (sizeof (struct bus_info), GFP_KERNEL); + bus_info_ptr1 = kzalloc(sizeof(struct bus_info), GFP_KERNEL); if (!bus_info_ptr1) { rc = -ENOMEM; goto error_no_hp_slot; } - memset (bus_info_ptr1, 0, sizeof (struct bus_info)); bus_info_ptr1->slot_min = slot_ptr->slot_num; bus_info_ptr1->slot_max = slot_ptr->slot_num; bus_info_ptr1->slot_count += 1; @@ -946,19 +920,17 @@ static int __init ebda_rsrc_controller (void) // register slots with hpc core as well as create linked list of ibm slot for (index = 0; index < hpc_ptr->slot_count; index++) { - hp_slot_ptr = 
kmalloc(sizeof(*hp_slot_ptr), GFP_KERNEL); + hp_slot_ptr = kzalloc(sizeof(*hp_slot_ptr), GFP_KERNEL); if (!hp_slot_ptr) { rc = -ENOMEM; goto error_no_hp_slot; } - memset(hp_slot_ptr, 0, sizeof(*hp_slot_ptr)); - hp_slot_ptr->info = kmalloc (sizeof(struct hotplug_slot_info), GFP_KERNEL); + hp_slot_ptr->info = kzalloc(sizeof(struct hotplug_slot_info), GFP_KERNEL); if (!hp_slot_ptr->info) { rc = -ENOMEM; goto error_no_hp_info; } - memset(hp_slot_ptr->info, 0, sizeof(struct hotplug_slot_info)); hp_slot_ptr->name = kmalloc(30, GFP_KERNEL); if (!hp_slot_ptr->name) { @@ -966,14 +938,13 @@ static int __init ebda_rsrc_controller (void) goto error_no_hp_name; } - tmp_slot = kmalloc(sizeof(*tmp_slot), GFP_KERNEL); + tmp_slot = kzalloc(sizeof(*tmp_slot), GFP_KERNEL); if (!tmp_slot) { rc = -ENOMEM; goto error_no_slot; } - memset(tmp_slot, 0, sizeof(*tmp_slot)); - tmp_slot->flag = TRUE; + tmp_slot->flag = 1; tmp_slot->capabilities = hpc_ptr->slots[index].slot_cap; if ((hpc_ptr->slots[index].slot_cap & EBDA_SLOT_133_MAX) == EBDA_SLOT_133_MAX) diff --git a/drivers/pci/hotplug/ibmphp_hpc.c b/drivers/pci/hotplug/ibmphp_hpc.c index 1a3eb8d3d4c..c3ac98a0a6a 100644 --- a/drivers/pci/hotplug/ibmphp_hpc.c +++ b/drivers/pci/hotplug/ibmphp_hpc.c @@ -34,9 +34,11 @@ #include <linux/pci.h> #include <linux/smp_lock.h> #include <linux/init.h> +#include <linux/mutex.h> + #include "ibmphp.h" -static int to_debug = FALSE; +static int to_debug = 0; #define debug_polling(fmt, arg...) do { if (to_debug) debug (fmt, arg); } while (0) //---------------------------------------------------------------------------- @@ -93,15 +95,15 @@ static int to_debug = FALSE; //---------------------------------------------------------------------------- // macro utilities //---------------------------------------------------------------------------- -// if bits 20,22,25,26,27,29,30 are OFF return TRUE -#define HPC_I2CSTATUS_CHECK(s) ((u8)((s & 0x00000A76) ? FALSE : TRUE)) +// if bits 20,22,25,26,27,29,30 are OFF return 1 +#define HPC_I2CSTATUS_CHECK(s) ((u8)((s & 0x00000A76) ? 
0 : 1)) //---------------------------------------------------------------------------- // global variables //---------------------------------------------------------------------------- static int ibmphp_shutdown; static int tid_poll; -static struct semaphore sem_hpcaccess; // lock access to HPC +static struct mutex sem_hpcaccess; // lock access to HPC static struct semaphore semOperations; // lock all operations and // access to data structures static struct semaphore sem_exit; // make sure polling thread goes away @@ -131,11 +133,11 @@ void __init ibmphp_hpc_initvars (void) { debug ("%s - Entry\n", __FUNCTION__); - init_MUTEX (&sem_hpcaccess); + mutex_init(&sem_hpcaccess); init_MUTEX (&semOperations); init_MUTEX_LOCKED (&sem_exit); - to_debug = FALSE; - ibmphp_shutdown = FALSE; + to_debug = 0; + ibmphp_shutdown = 0; tid_poll = 0; debug ("%s - Exit\n", __FUNCTION__); @@ -737,21 +739,21 @@ int ibmphp_hpc_writeslot (struct slot * pslot, u8 cmd) // check controller is still not working on the command //-------------------------------------------------------------------- timeout = CMD_COMPLETE_TOUT_SEC; - done = FALSE; + done = 0; while (!done) { rc = hpc_wait_ctlr_notworking (HPC_CTLR_WORKING_TOUT, ctlr_ptr, wpg_bbar, &status); if (!rc) { if (NEEDTOCHECK_CMDSTATUS (cmd)) { if (CTLR_FINISHED (status) == HPC_CTLR_FINISHED_YES) - done = TRUE; + done = 1; } else - done = TRUE; + done = 1; } if (!done) { msleep(1000); if (timeout < 1) { - done = TRUE; + done = 1; err ("%s - Error command complete timeout\n", __FUNCTION__); rc = -EFAULT; } else @@ -778,7 +780,7 @@ int ibmphp_hpc_writeslot (struct slot * pslot, u8 cmd) *---------------------------------------------------------------------*/ static void get_hpc_access (void) { - down (&sem_hpcaccess); + mutex_lock(&sem_hpcaccess); } /*---------------------------------------------------------------------- @@ -786,7 +788,7 @@ static void get_hpc_access (void) *---------------------------------------------------------------------*/ void free_hpc_access (void) { - up (&sem_hpcaccess); + mutex_unlock(&sem_hpcaccess); } /*---------------------------------------------------------------------- @@ -797,7 +799,7 @@ void free_hpc_access (void) void ibmphp_lock_operations (void) { down (&semOperations); - to_debug = TRUE; + to_debug = 1; } /*---------------------------------------------------------------------- @@ -807,7 +809,7 @@ void ibmphp_unlock_operations (void) { debug ("%s - Entry\n", __FUNCTION__); up (&semOperations); - to_debug = FALSE; + to_debug = 0; debug ("%s - Exit\n", __FUNCTION__); } @@ -935,40 +937,40 @@ static int process_changeinstatus (struct slot *pslot, struct slot *poldslot) { u8 status; int rc = 0; - u8 disable = FALSE; - u8 update = FALSE; + u8 disable = 0; + u8 update = 0; debug ("process_changeinstatus - Entry pslot[%p], poldslot[%p]\n", pslot, poldslot); // bit 0 - HPC_SLOT_POWER if ((pslot->status & 0x01) != (poldslot->status & 0x01)) - update = TRUE; + update = 1; // bit 1 - HPC_SLOT_CONNECT // ignore // bit 2 - HPC_SLOT_ATTN if ((pslot->status & 0x04) != (poldslot->status & 0x04)) - update = TRUE; + update = 1; // bit 3 - HPC_SLOT_PRSNT2 // bit 4 - HPC_SLOT_PRSNT1 if (((pslot->status & 0x08) != (poldslot->status & 0x08)) || ((pslot->status & 0x10) != (poldslot->status & 0x10))) - update = TRUE; + update = 1; // bit 5 - HPC_SLOT_PWRGD if ((pslot->status & 0x20) != (poldslot->status & 0x20)) // OFF -> ON: ignore, ON -> OFF: disable slot if ((poldslot->status & 0x20) && (SLOT_CONNECT (poldslot->status) == HPC_SLOT_CONNECTED) && 
(SLOT_PRESENT (poldslot->status))) - disable = TRUE; + disable = 1; // bit 6 - HPC_SLOT_BUS_SPEED // ignore // bit 7 - HPC_SLOT_LATCH if ((pslot->status & 0x80) != (poldslot->status & 0x80)) { - update = TRUE; + update = 1; // OPEN -> CLOSE if (pslot->status & 0x80) { if (SLOT_PWRGD (pslot->status)) { @@ -977,7 +979,7 @@ static int process_changeinstatus (struct slot *pslot, struct slot *poldslot) msleep(1000); rc = ibmphp_hpc_readslot (pslot, READ_SLOTSTATUS, &status); if (SLOT_PWRGD (status)) - update = TRUE; + update = 1; else // overwrite power in pslot to OFF pslot->status &= ~HPC_SLOT_POWER; } @@ -985,17 +987,17 @@ static int process_changeinstatus (struct slot *pslot, struct slot *poldslot) // CLOSE -> OPEN else if ((SLOT_PWRGD (poldslot->status) == HPC_SLOT_PWRGD_GOOD) && (SLOT_CONNECT (poldslot->status) == HPC_SLOT_CONNECTED) && (SLOT_PRESENT (poldslot->status))) { - disable = TRUE; + disable = 1; } // else - ignore } // bit 4 - HPC_SLOT_BLINK_ATTN if ((pslot->ext_status & 0x08) != (poldslot->ext_status & 0x08)) - update = TRUE; + update = 1; if (disable) { debug ("process_changeinstatus - disable slot\n"); - pslot->flag = FALSE; + pslot->flag = 0; rc = ibmphp_do_disable_slot (pslot); } @@ -1100,7 +1102,7 @@ void __exit ibmphp_hpc_stop_poll_thread (void) { debug ("%s - Entry\n", __FUNCTION__); - ibmphp_shutdown = TRUE; + ibmphp_shutdown = 1; debug ("before locking operations \n"); ibmphp_lock_operations (); debug ("after locking operations \n"); @@ -1134,7 +1136,7 @@ static int hpc_wait_ctlr_notworking (int timeout, struct controller *ctlr_ptr, v u8 * pstatus) { int rc = 0; - u8 done = FALSE; + u8 done = 0; debug_polling ("hpc_wait_ctlr_notworking - Entry timeout[%d]\n", timeout); @@ -1142,14 +1144,14 @@ static int hpc_wait_ctlr_notworking (int timeout, struct controller *ctlr_ptr, v *pstatus = ctrl_read (ctlr_ptr, wpg_bbar, WPG_CTLR_INDEX); if (*pstatus == HPC_ERROR) { rc = HPC_ERROR; - done = TRUE; + done = 1; } if (CTLR_WORKING (*pstatus) == HPC_CTLR_WORKING_NO) - done = TRUE; + done = 1; if (!done) { msleep(1000); if (timeout < 1) { - done = TRUE; + done = 1; err ("HPCreadslot - Error ctlr timeout\n"); rc = HPC_ERROR; } else diff --git a/drivers/pci/hotplug/ibmphp_pci.c b/drivers/pci/hotplug/ibmphp_pci.c index 155133fe5c1..d87a9e3eaee 100644 --- a/drivers/pci/hotplug/ibmphp_pci.c +++ b/drivers/pci/hotplug/ibmphp_pci.c @@ -164,12 +164,11 @@ int ibmphp_configure_card (struct pci_func *func, u8 slotno) cleanup_count = 6; goto error; } - newfunc = kmalloc(sizeof(*newfunc), GFP_KERNEL); + newfunc = kzalloc(sizeof(*newfunc), GFP_KERNEL); if (!newfunc) { err ("out of system memory\n"); return -ENOMEM; } - memset (newfunc, 0, sizeof (struct pci_func)); newfunc->busno = cur_func->busno; newfunc->device = device; cur_func->next = newfunc; @@ -200,15 +199,14 @@ int ibmphp_configure_card (struct pci_func *func, u8 slotno) } pci_bus_read_config_byte (ibmphp_pci_bus, devfn, PCI_SECONDARY_BUS, &sec_number); - flag = FALSE; + flag = 0; for (i = 0; i < 32; i++) { if (func->devices[i]) { - newfunc = kmalloc(sizeof(*newfunc), GFP_KERNEL); + newfunc = kzalloc(sizeof(*newfunc), GFP_KERNEL); if (!newfunc) { err ("out of system memory\n"); return -ENOMEM; } - memset (newfunc, 0, sizeof (struct pci_func)); newfunc->busno = sec_number; newfunc->device = (u8) i; for (j = 0; j < 4; j++) @@ -228,16 +226,15 @@ int ibmphp_configure_card (struct pci_func *func, u8 slotno) cleanup_count = 2; goto error; } - flag = TRUE; + flag = 1; } } - newfunc = kmalloc(sizeof(*newfunc), GFP_KERNEL); + newfunc = 
kzalloc(sizeof(*newfunc), GFP_KERNEL); if (!newfunc) { err ("out of system memory\n"); return -ENOMEM; } - memset (newfunc, 0, sizeof (struct pci_func)); newfunc->busno = cur_func->busno; newfunc->device = device; for (j = 0; j < 4; j++) @@ -275,16 +272,15 @@ int ibmphp_configure_card (struct pci_func *func, u8 slotno) cur_func->busno, device, function); pci_bus_read_config_byte (ibmphp_pci_bus, devfn, PCI_SECONDARY_BUS, &sec_number); debug ("after configuring bridge..., sec_number = %x\n", sec_number); - flag = FALSE; + flag = 0; for (i = 0; i < 32; i++) { if (func->devices[i]) { debug ("inside for loop, device is %x\n", i); - newfunc = kmalloc(sizeof(*newfunc), GFP_KERNEL); + newfunc = kzalloc(sizeof(*newfunc), GFP_KERNEL); if (!newfunc) { err (" out of system memory\n"); return -ENOMEM; } - memset (newfunc, 0, sizeof (struct pci_func)); newfunc->busno = sec_number; newfunc->device = (u8) i; for (j = 0; j < 4; j++) @@ -305,7 +301,7 @@ int ibmphp_configure_card (struct pci_func *func, u8 slotno) cleanup_count = 2; goto error; } - flag = TRUE; + flag = 1; } } @@ -405,13 +401,12 @@ static int configure_device (struct pci_func *func) debug ("len[count] in IO %x, count %d\n", len[count], count); - io[count] = kmalloc (sizeof (struct resource_node), GFP_KERNEL); + io[count] = kzalloc(sizeof(struct resource_node), GFP_KERNEL); if (!io[count]) { err ("out of system memory\n"); return -ENOMEM; } - memset (io[count], 0, sizeof (struct resource_node)); io[count]->type = IO; io[count]->busno = func->busno; io[count]->devfunc = PCI_DEVFN(func->device, func->function); @@ -444,29 +439,27 @@ static int configure_device (struct pci_func *func) debug ("len[count] in PFMEM %x, count %d\n", len[count], count); - pfmem[count] = kmalloc (sizeof (struct resource_node), GFP_KERNEL); + pfmem[count] = kzalloc(sizeof(struct resource_node), GFP_KERNEL); if (!pfmem[count]) { err ("out of system memory\n"); return -ENOMEM; } - memset (pfmem[count], 0, sizeof (struct resource_node)); pfmem[count]->type = PFMEM; pfmem[count]->busno = func->busno; pfmem[count]->devfunc = PCI_DEVFN(func->device, func->function); pfmem[count]->len = len[count]; - pfmem[count]->fromMem = FALSE; + pfmem[count]->fromMem = 0; if (ibmphp_check_resource (pfmem[count], 0) == 0) { ibmphp_add_resource (pfmem[count]); func->pfmem[count] = pfmem[count]; } else { - mem_tmp = kmalloc(sizeof(*mem_tmp), GFP_KERNEL); + mem_tmp = kzalloc(sizeof(*mem_tmp), GFP_KERNEL); if (!mem_tmp) { err ("out of system memory\n"); kfree (pfmem[count]); return -ENOMEM; } - memset (mem_tmp, 0, sizeof (struct resource_node)); mem_tmp->type = MEM; mem_tmp->busno = pfmem[count]->busno; mem_tmp->devfunc = pfmem[count]->devfunc; @@ -474,7 +467,7 @@ static int configure_device (struct pci_func *func) debug ("there's no pfmem... 
going into mem.\n"); if (ibmphp_check_resource (mem_tmp, 0) == 0) { ibmphp_add_resource (mem_tmp); - pfmem[count]->fromMem = TRUE; + pfmem[count]->fromMem = 1; pfmem[count]->rangeno = mem_tmp->rangeno; pfmem[count]->start = mem_tmp->start; pfmem[count]->end = mem_tmp->end; @@ -512,12 +505,11 @@ static int configure_device (struct pci_func *func) debug ("len[count] in Mem %x, count %d\n", len[count], count); - mem[count] = kmalloc (sizeof (struct resource_node), GFP_KERNEL); + mem[count] = kzalloc(sizeof(struct resource_node), GFP_KERNEL); if (!mem[count]) { err ("out of system memory\n"); return -ENOMEM; } - memset (mem[count], 0, sizeof (struct resource_node)); mem[count]->type = MEM; mem[count]->busno = func->busno; mem[count]->devfunc = PCI_DEVFN(func->device, @@ -579,11 +571,11 @@ static int configure_bridge (struct pci_func **func_passed, u8 slotno) u16 pfmem_base; u32 bar[2]; u32 len[2]; - u8 flag_io = FALSE; - u8 flag_mem = FALSE; - u8 flag_pfmem = FALSE; - u8 need_io_upper = FALSE; - u8 need_pfmem_upper = FALSE; + u8 flag_io = 0; + u8 flag_mem = 0; + u8 flag_pfmem = 0; + u8 need_io_upper = 0; + u8 need_pfmem_upper = 0; struct res_needed *amount_needed = NULL; struct resource_node *io = NULL; struct resource_node *bus_io[2] = {NULL, NULL}; @@ -677,14 +669,13 @@ static int configure_bridge (struct pci_func **func_passed, u8 slotno) debug ("len[count] in IO = %x\n", len[count]); - bus_io[count] = kmalloc (sizeof (struct resource_node), GFP_KERNEL); + bus_io[count] = kzalloc(sizeof(struct resource_node), GFP_KERNEL); if (!bus_io[count]) { err ("out of system memory\n"); retval = -ENOMEM; goto error; } - memset (bus_io[count], 0, sizeof (struct resource_node)); bus_io[count]->type = IO; bus_io[count]->busno = func->busno; bus_io[count]->devfunc = PCI_DEVFN(func->device, @@ -711,37 +702,35 @@ static int configure_bridge (struct pci_func **func_passed, u8 slotno) debug ("len[count] in PFMEM = %x\n", len[count]); - bus_pfmem[count] = kmalloc (sizeof (struct resource_node), GFP_KERNEL); + bus_pfmem[count] = kzalloc(sizeof(struct resource_node), GFP_KERNEL); if (!bus_pfmem[count]) { err ("out of system memory\n"); retval = -ENOMEM; goto error; } - memset (bus_pfmem[count], 0, sizeof (struct resource_node)); bus_pfmem[count]->type = PFMEM; bus_pfmem[count]->busno = func->busno; bus_pfmem[count]->devfunc = PCI_DEVFN(func->device, func->function); bus_pfmem[count]->len = len[count]; - bus_pfmem[count]->fromMem = FALSE; + bus_pfmem[count]->fromMem = 0; if (ibmphp_check_resource (bus_pfmem[count], 0) == 0) { ibmphp_add_resource (bus_pfmem[count]); func->pfmem[count] = bus_pfmem[count]; } else { - mem_tmp = kmalloc(sizeof(*mem_tmp), GFP_KERNEL); + mem_tmp = kzalloc(sizeof(*mem_tmp), GFP_KERNEL); if (!mem_tmp) { err ("out of system memory\n"); retval = -ENOMEM; goto error; } - memset (mem_tmp, 0, sizeof (struct resource_node)); mem_tmp->type = MEM; mem_tmp->busno = bus_pfmem[count]->busno; mem_tmp->devfunc = bus_pfmem[count]->devfunc; mem_tmp->len = bus_pfmem[count]->len; if (ibmphp_check_resource (mem_tmp, 0) == 0) { ibmphp_add_resource (mem_tmp); - bus_pfmem[count]->fromMem = TRUE; + bus_pfmem[count]->fromMem = 1; bus_pfmem[count]->rangeno = mem_tmp->rangeno; ibmphp_add_pfmem_from_mem (bus_pfmem[count]); func->pfmem[count] = bus_pfmem[count]; @@ -770,13 +759,12 @@ static int configure_bridge (struct pci_func **func_passed, u8 slotno) debug ("len[count] in Memory is %x\n", len[count]); - bus_mem[count] = kmalloc (sizeof (struct resource_node), GFP_KERNEL); + bus_mem[count] = kzalloc(sizeof(struct 
resource_node), GFP_KERNEL); if (!bus_mem[count]) { err ("out of system memory\n"); retval = -ENOMEM; goto error; } - memset (bus_mem[count], 0, sizeof (struct resource_node)); bus_mem[count]->type = MEM; bus_mem[count]->busno = func->busno; bus_mem[count]->devfunc = PCI_DEVFN(func->device, @@ -838,17 +826,16 @@ static int configure_bridge (struct pci_func **func_passed, u8 slotno) if (!amount_needed->io) { debug ("it doesn't want IO?\n"); - flag_io = TRUE; + flag_io = 1; } else { debug ("it wants %x IO behind the bridge\n", amount_needed->io); - io = kmalloc(sizeof(*io), GFP_KERNEL); + io = kzalloc(sizeof(*io), GFP_KERNEL); if (!io) { err ("out of system memory\n"); retval = -ENOMEM; goto error; } - memset (io, 0, sizeof (struct resource_node)); io->type = IO; io->busno = func->busno; io->devfunc = PCI_DEVFN(func->device, func->function); @@ -856,71 +843,68 @@ static int configure_bridge (struct pci_func **func_passed, u8 slotno) if (ibmphp_check_resource (io, 1) == 0) { debug ("were we able to add io\n"); ibmphp_add_resource (io); - flag_io = TRUE; + flag_io = 1; } } if (!amount_needed->mem) { debug ("it doesn't want n.e.memory?\n"); - flag_mem = TRUE; + flag_mem = 1; } else { debug ("it wants %x memory behind the bridge\n", amount_needed->mem); - mem = kmalloc(sizeof(*mem), GFP_KERNEL); + mem = kzalloc(sizeof(*mem), GFP_KERNEL); if (!mem) { err ("out of system memory\n"); retval = -ENOMEM; goto error; } - memset (mem, 0, sizeof (struct resource_node)); mem->type = MEM; mem->busno = func->busno; mem->devfunc = PCI_DEVFN(func->device, func->function); mem->len = amount_needed->mem; if (ibmphp_check_resource (mem, 1) == 0) { ibmphp_add_resource (mem); - flag_mem = TRUE; + flag_mem = 1; debug ("were we able to add mem\n"); } } if (!amount_needed->pfmem) { debug ("it doesn't want n.e.pfmem mem?\n"); - flag_pfmem = TRUE; + flag_pfmem = 1; } else { debug ("it wants %x pfmemory behind the bridge\n", amount_needed->pfmem); - pfmem = kmalloc(sizeof(*pfmem), GFP_KERNEL); + pfmem = kzalloc(sizeof(*pfmem), GFP_KERNEL); if (!pfmem) { err ("out of system memory\n"); retval = -ENOMEM; goto error; } - memset (pfmem, 0, sizeof (struct resource_node)); pfmem->type = PFMEM; pfmem->busno = func->busno; pfmem->devfunc = PCI_DEVFN(func->device, func->function); pfmem->len = amount_needed->pfmem; - pfmem->fromMem = FALSE; + pfmem->fromMem = 0; if (ibmphp_check_resource (pfmem, 1) == 0) { ibmphp_add_resource (pfmem); - flag_pfmem = TRUE; + flag_pfmem = 1; } else { - mem_tmp = kmalloc(sizeof(*mem_tmp), GFP_KERNEL); + mem_tmp = kzalloc(sizeof(*mem_tmp), GFP_KERNEL); if (!mem_tmp) { err ("out of system memory\n"); retval = -ENOMEM; goto error; } - memset (mem_tmp, 0, sizeof (struct resource_node)); mem_tmp->type = MEM; mem_tmp->busno = pfmem->busno; mem_tmp->devfunc = pfmem->devfunc; mem_tmp->len = pfmem->len; if (ibmphp_check_resource (mem_tmp, 1) == 0) { ibmphp_add_resource (mem_tmp); - pfmem->fromMem = TRUE; + pfmem->fromMem = 1; pfmem->rangeno = mem_tmp->rangeno; ibmphp_add_pfmem_from_mem (pfmem); - flag_pfmem = TRUE; + flag_pfmem = 1; } } } @@ -936,13 +920,12 @@ static int configure_bridge (struct pci_func **func_passed, u8 slotno) */ bus = ibmphp_find_res_bus (sec_number); if (!bus) { - bus = kmalloc(sizeof(*bus), GFP_KERNEL); + bus = kzalloc(sizeof(*bus), GFP_KERNEL); if (!bus) { err ("out of system memory\n"); retval = -ENOMEM; goto error; } - memset (bus, 0, sizeof (struct bus_node)); bus->busno = sec_number; debug ("b4 adding new bus\n"); rc = add_new_bus (bus, io, mem, pfmem, func->busno); @@ -967,11 
+950,11 @@ static int configure_bridge (struct pci_func **func_passed, u8 slotno) if ((io_base & PCI_IO_RANGE_TYPE_MASK) == PCI_IO_RANGE_TYPE_32) { debug ("io 32\n"); - need_io_upper = TRUE; + need_io_upper = 1; } if ((pfmem_base & PCI_PREF_RANGE_TYPE_MASK) == PCI_PREF_RANGE_TYPE_64) { debug ("pfmem 64\n"); - need_pfmem_upper = TRUE; + need_pfmem_upper = 1; } if (bus->noIORanges) { @@ -1111,10 +1094,9 @@ static struct res_needed *scan_behind_bridge (struct pci_func * func, u8 busno) }; struct res_needed *amount; - amount = kmalloc(sizeof(*amount), GFP_KERNEL); + amount = kzalloc(sizeof(*amount), GFP_KERNEL); if (amount == NULL) return NULL; - memset (amount, 0, sizeof (struct res_needed)); ibmphp_pci_bus->number = busno; @@ -1137,7 +1119,7 @@ static struct res_needed *scan_behind_bridge (struct pci_func * func, u8 busno) debug ("hdr_type behind the bridge is %x\n", hdr_type); if (hdr_type & PCI_HEADER_TYPE_BRIDGE) { err ("embedded bridges not supported for hot-plugging.\n"); - amount->not_correct = TRUE; + amount->not_correct = 1; return amount; } @@ -1145,12 +1127,12 @@ static struct res_needed *scan_behind_bridge (struct pci_func * func, u8 busno) if (class == PCI_CLASS_NOT_DEFINED_VGA) { err ("The device %x is VGA compatible and as is not supported for hot plugging. " "Please choose another device.\n", device); - amount->not_correct = TRUE; + amount->not_correct = 1; return amount; } else if (class == PCI_CLASS_DISPLAY_VGA) { err ("The device %x is not supported for hot plugging. " "Please choose another device.\n", device); - amount->not_correct = TRUE; + amount->not_correct = 1; return amount; } @@ -1210,9 +1192,9 @@ static struct res_needed *scan_behind_bridge (struct pci_func * func, u8 busno) } /* end for */ if (!howmany) - amount->not_correct = TRUE; + amount->not_correct = 1; else - amount->not_correct = FALSE; + amount->not_correct = 0; if ((amount->io) && (amount->io < IOBRIDGE)) amount->io = IOBRIDGE; if ((amount->mem) && (amount->mem < MEMBRIDGE)) @@ -1672,12 +1654,11 @@ static int add_new_bus (struct bus_node *bus, struct resource_node *io, struct r list_add (&bus->bus_list, &cur_bus->bus_list); } if (io) { - io_range = kmalloc(sizeof(*io_range), GFP_KERNEL); + io_range = kzalloc(sizeof(*io_range), GFP_KERNEL); if (!io_range) { err ("out of system memory\n"); return -ENOMEM; } - memset (io_range, 0, sizeof (struct range_node)); io_range->start = io->start; io_range->end = io->end; io_range->rangeno = 1; @@ -1685,12 +1666,11 @@ static int add_new_bus (struct bus_node *bus, struct resource_node *io, struct r bus->rangeIO = io_range; } if (mem) { - mem_range = kmalloc(sizeof(*mem_range), GFP_KERNEL); + mem_range = kzalloc(sizeof(*mem_range), GFP_KERNEL); if (!mem_range) { err ("out of system memory\n"); return -ENOMEM; } - memset (mem_range, 0, sizeof (struct range_node)); mem_range->start = mem->start; mem_range->end = mem->end; mem_range->rangeno = 1; @@ -1698,12 +1678,11 @@ static int add_new_bus (struct bus_node *bus, struct resource_node *io, struct r bus->rangeMem = mem_range; } if (pfmem) { - pfmem_range = kmalloc(sizeof(*pfmem_range), GFP_KERNEL); + pfmem_range = kzalloc(sizeof(*pfmem_range), GFP_KERNEL); if (!pfmem_range) { err ("out of system memory\n"); return -ENOMEM; } - memset (pfmem_range, 0, sizeof (struct range_node)); pfmem_range->start = pfmem->start; pfmem_range->end = pfmem->end; pfmem_range->rangeno = 1; diff --git a/drivers/pci/hotplug/ibmphp_res.c b/drivers/pci/hotplug/ibmphp_res.c index 9c224c94d69..5636b1ac2a2 100644 --- 
a/drivers/pci/hotplug/ibmphp_res.c +++ b/drivers/pci/hotplug/ibmphp_res.c @@ -55,13 +55,12 @@ static struct bus_node * __init alloc_error_bus (struct ebda_pci_rsrc * curr, u8 return NULL; } - newbus = kmalloc (sizeof (struct bus_node), GFP_KERNEL); + newbus = kzalloc(sizeof(struct bus_node), GFP_KERNEL); if (!newbus) { err ("out of system memory\n"); return NULL; } - memset (newbus, 0, sizeof (struct bus_node)); if (flag) newbus->busno = busno; else @@ -79,12 +78,11 @@ static struct resource_node * __init alloc_resources (struct ebda_pci_rsrc * cur return NULL; } - rs = kmalloc (sizeof (struct resource_node), GFP_KERNEL); + rs = kzalloc(sizeof(struct resource_node), GFP_KERNEL); if (!rs) { err ("out of system memory\n"); return NULL; } - memset (rs, 0, sizeof (struct resource_node)); rs->busno = curr->bus_num; rs->devfunc = curr->dev_fun; rs->start = curr->start_addr; @@ -100,12 +98,11 @@ static int __init alloc_bus_range (struct bus_node **new_bus, struct range_node u8 num_ranges = 0; if (first_bus) { - newbus = kmalloc (sizeof (struct bus_node), GFP_KERNEL); + newbus = kzalloc(sizeof(struct bus_node), GFP_KERNEL); if (!newbus) { err ("out of system memory.\n"); return -ENOMEM; } - memset (newbus, 0, sizeof (struct bus_node)); newbus->busno = curr->bus_num; } else { newbus = *new_bus; @@ -122,14 +119,13 @@ static int __init alloc_bus_range (struct bus_node **new_bus, struct range_node } } - newrange = kmalloc (sizeof (struct range_node), GFP_KERNEL); + newrange = kzalloc(sizeof(struct range_node), GFP_KERNEL); if (!newrange) { if (first_bus) kfree (newbus); err ("out of system memory\n"); return -ENOMEM; } - memset (newrange, 0, sizeof (struct range_node)); newrange->start = curr->start_addr; newrange->end = curr->end_addr; @@ -329,7 +325,7 @@ int __init ibmphp_rsrc_init (void) if (!new_pfmem) return -ENOMEM; new_pfmem->type = PFMEM; - new_pfmem->fromMem = FALSE; + new_pfmem->fromMem = 0; if (ibmphp_add_resource (new_pfmem) < 0) { newbus = alloc_error_bus (curr, 0, 0); if (!newbus) @@ -466,7 +462,7 @@ static int add_range (int type, struct range_node *range, struct bus_node *bus_c static void update_resources (struct bus_node *bus_cur, int type, int rangeno) { struct resource_node *res = NULL; - u8 eol = FALSE; /* end of list indicator */ + u8 eol = 0; /* end of list indicator */ switch (type) { case MEM: @@ -492,7 +488,7 @@ static void update_resources (struct bus_node *bus_cur, int type, int rangeno) else if (res->nextRange) res = res->nextRange; else { - eol = TRUE; + eol = 1; break; } } @@ -983,7 +979,7 @@ int ibmphp_check_resource (struct resource_node *res, u8 bridge) int noranges = 0; u32 tmp_start; /* this is to make sure start address is divisible by the length needed */ u32 tmp_divide; - u8 flag = FALSE; + u8 flag = 0; if (!res) return -EINVAL; @@ -1050,17 +1046,17 @@ int ibmphp_check_resource (struct resource_node *res, u8 bridge) if ((range->start % tmp_divide) == 0) { /* just perfect, starting address is divisible by length */ - flag = TRUE; + flag = 1; len_cur = len_tmp; start_cur = range->start; } else { /* Needs adjusting */ tmp_start = range->start; - flag = FALSE; + flag = 0; while ((len_tmp = res_cur->start - 1 - tmp_start) >= res->len) { if ((tmp_start % tmp_divide) == 0) { - flag = TRUE; + flag = 1; len_cur = len_tmp; start_cur = tmp_start; break; @@ -1089,17 +1085,17 @@ int ibmphp_check_resource (struct resource_node *res, u8 bridge) if (((res_cur->end + 1) % tmp_divide) == 0) { /* just perfect, starting address is divisible by length */ - flag = TRUE; + flag = 1; 
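The flag/tmp_start/tmp_divide loops in ibmphp_check_resource() above all perform the same placement test: within a free gap, find a start address that is divisible by the requested length (the driver's own comment on tmp_divide says as much) and still leaves room for the whole resource. A condensed, illustrative version of that test is below; the helper name, the closed-form round-up, and the use of 0 as the failure value (the driver's addresses are non-zero) are mine, whereas the driver walks candidate addresses with an explicit loop.

#include <linux/types.h>

static u32 first_aligned_start(u32 gap_start, u32 gap_end, u32 len)
{
	u32 start = gap_start;

	if (!len || gap_end < gap_start)
		return 0;
	if (start % len)			/* round up to the next multiple of len */
		start += len - (start % len);
	if (start > gap_end || len - 1 > gap_end - start)
		return 0;			/* once aligned, the gap is too small */
	return start;
}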
len_cur = len_tmp; start_cur = res_cur->end + 1; } else { /* Needs adjusting */ tmp_start = res_cur->end + 1; - flag = FALSE; + flag = 0; while ((len_tmp = range->end - tmp_start) >= res->len) { if ((tmp_start % tmp_divide) == 0) { - flag = TRUE; + flag = 1; len_cur = len_tmp; start_cur = tmp_start; break; @@ -1127,17 +1123,17 @@ int ibmphp_check_resource (struct resource_node *res, u8 bridge) if ((len_tmp < len_cur) || (len_cur == 0)) { if ((range->start % tmp_divide) == 0) { /* just perfect, starting address is divisible by length */ - flag = TRUE; + flag = 1; len_cur = len_tmp; start_cur = range->start; } else { /* Needs adjusting */ tmp_start = range->start; - flag = FALSE; + flag = 0; while ((len_tmp = res_cur->start - 1 - tmp_start) >= res->len) { if ((tmp_start % tmp_divide) == 0) { - flag = TRUE; + flag = 1; len_cur = len_tmp; start_cur = tmp_start; break; @@ -1162,17 +1158,17 @@ int ibmphp_check_resource (struct resource_node *res, u8 bridge) if ((len_tmp < len_cur) || (len_cur == 0)) { if (((res_prev->end + 1) % tmp_divide) == 0) { /* just perfect, starting address's divisible by length */ - flag = TRUE; + flag = 1; len_cur = len_tmp; start_cur = res_prev->end + 1; } else { /* Needs adjusting */ tmp_start = res_prev->end + 1; - flag = FALSE; + flag = 0; while ((len_tmp = res_cur->start - 1 - tmp_start) >= res->len) { if ((tmp_start % tmp_divide) == 0) { - flag = TRUE; + flag = 1; len_cur = len_tmp; start_cur = tmp_start; break; @@ -1221,17 +1217,17 @@ int ibmphp_check_resource (struct resource_node *res, u8 bridge) if ((len_tmp < len_cur) || (len_cur == 0)) { if ((range->start % tmp_divide) == 0) { /* just perfect, starting address's divisible by length */ - flag = TRUE; + flag = 1; len_cur = len_tmp; start_cur = range->start; } else { /* Needs adjusting */ tmp_start = range->start; - flag = FALSE; + flag = 0; while ((len_tmp = range->end - tmp_start) >= res->len) { if ((tmp_start % tmp_divide) == 0) { - flag = TRUE; + flag = 1; len_cur = len_tmp; start_cur = tmp_start; break; @@ -1285,17 +1281,17 @@ int ibmphp_check_resource (struct resource_node *res, u8 bridge) if ((len_tmp < len_cur) || (len_cur == 0)) { if ((range->start % tmp_divide) == 0) { /* just perfect, starting address's divisible by length */ - flag = TRUE; + flag = 1; len_cur = len_tmp; start_cur = range->start; } else { /* Needs adjusting */ tmp_start = range->start; - flag = FALSE; + flag = 0; while ((len_tmp = range->end - tmp_start) >= res->len) { if ((tmp_start % tmp_divide) == 0) { - flag = TRUE; + flag = 1; len_cur = len_tmp; start_cur = tmp_start; break; @@ -1688,7 +1684,7 @@ static int __init once_over (void) bus_cur = list_entry (tmp, struct bus_node, bus_list); if ((!bus_cur->rangePFMem) && (bus_cur->firstPFMem)) { for (pfmem_cur = bus_cur->firstPFMem, pfmem_prev = NULL; pfmem_cur; pfmem_prev = pfmem_cur, pfmem_cur = pfmem_cur->next) { - pfmem_cur->fromMem = TRUE; + pfmem_cur->fromMem = 1; if (pfmem_prev) pfmem_prev->next = pfmem_cur->next; else @@ -1705,12 +1701,11 @@ static int __init once_over (void) bus_cur->firstPFMemFromMem = pfmem_cur; - mem = kmalloc (sizeof (struct resource_node), GFP_KERNEL); + mem = kzalloc(sizeof(struct resource_node), GFP_KERNEL); if (!mem) { err ("out of system memory\n"); return -ENOMEM; } - memset (mem, 0, sizeof (struct resource_node)); mem->type = MEM; mem->busno = pfmem_cur->busno; mem->devfunc = pfmem_cur->devfunc; @@ -1994,12 +1989,11 @@ static int __init update_bridge_ranges (struct bus_node **bus) end_address |= (upper_io_end << 16); if ((start_address) && 
(start_address <= end_address)) { - range = kmalloc (sizeof (struct range_node), GFP_KERNEL); + range = kzalloc(sizeof(struct range_node), GFP_KERNEL); if (!range) { err ("out of system memory\n"); return -ENOMEM; } - memset (range, 0, sizeof (struct range_node)); range->start = start_address; range->end = end_address + 0xfff; @@ -2020,13 +2014,12 @@ static int __init update_bridge_ranges (struct bus_node **bus) fix_resources (bus_sec); if (ibmphp_find_resource (bus_cur, start_address, &io, IO)) { - io = kmalloc (sizeof (struct resource_node), GFP_KERNEL); + io = kzalloc(sizeof(struct resource_node), GFP_KERNEL); if (!io) { kfree (range); err ("out of system memory\n"); return -ENOMEM; } - memset (io, 0, sizeof (struct resource_node)); io->type = IO; io->busno = bus_cur->busno; io->devfunc = ((device << 3) | (function & 0x7)); @@ -2045,12 +2038,11 @@ static int __init update_bridge_ranges (struct bus_node **bus) if ((start_address) && (start_address <= end_address)) { - range = kmalloc (sizeof (struct range_node), GFP_KERNEL); + range = kzalloc(sizeof(struct range_node), GFP_KERNEL); if (!range) { err ("out of system memory\n"); return -ENOMEM; } - memset (range, 0, sizeof (struct range_node)); range->start = start_address; range->end = end_address + 0xfffff; @@ -2072,13 +2064,12 @@ static int __init update_bridge_ranges (struct bus_node **bus) fix_resources (bus_sec); if (ibmphp_find_resource (bus_cur, start_address, &mem, MEM)) { - mem = kmalloc (sizeof (struct resource_node), GFP_KERNEL); + mem = kzalloc(sizeof(struct resource_node), GFP_KERNEL); if (!mem) { kfree (range); err ("out of system memory\n"); return -ENOMEM; } - memset (mem, 0, sizeof (struct resource_node)); mem->type = MEM; mem->busno = bus_cur->busno; mem->devfunc = ((device << 3) | (function & 0x7)); @@ -2101,12 +2092,11 @@ static int __init update_bridge_ranges (struct bus_node **bus) if ((start_address) && (start_address <= end_address)) { - range = kmalloc (sizeof (struct range_node), GFP_KERNEL); + range = kzalloc(sizeof(struct range_node), GFP_KERNEL); if (!range) { err ("out of system memory\n"); return -ENOMEM; } - memset (range, 0, sizeof (struct range_node)); range->start = start_address; range->end = end_address + 0xfffff; @@ -2127,20 +2117,19 @@ static int __init update_bridge_ranges (struct bus_node **bus) fix_resources (bus_sec); if (ibmphp_find_resource (bus_cur, start_address, &pfmem, PFMEM)) { - pfmem = kmalloc (sizeof (struct resource_node), GFP_KERNEL); + pfmem = kzalloc(sizeof(struct resource_node), GFP_KERNEL); if (!pfmem) { kfree (range); err ("out of system memory\n"); return -ENOMEM; } - memset (pfmem, 0, sizeof (struct resource_node)); pfmem->type = PFMEM; pfmem->busno = bus_cur->busno; pfmem->devfunc = ((device << 3) | (function & 0x7)); pfmem->start = start_address; pfmem->end = end_address + 0xfffff; pfmem->len = pfmem->end - pfmem->start + 1; - pfmem->fromMem = FALSE; + pfmem->fromMem = 0; ibmphp_add_resource (pfmem); } diff --git a/drivers/pci/hotplug/pci_hotplug.h b/drivers/pci/hotplug/pci_hotplug.h index 88d44f7fef2..eb0d01d4723 100644 --- a/drivers/pci/hotplug/pci_hotplug.h +++ b/drivers/pci/hotplug/pci_hotplug.h @@ -176,5 +176,21 @@ extern int pci_hp_change_slot_info (struct hotplug_slot *slot, struct hotplug_slot_info *info); extern struct subsystem pci_hotplug_slots_subsys; +struct hotplug_params { + u8 cache_line_size; + u8 latency_timer; + u8 enable_serr; + u8 enable_perr; +}; + +#ifdef CONFIG_ACPI +#include <acpi/acpi.h> +#include <acpi/acpi_bus.h> +#include <acpi/actypes.h> +extern 
acpi_status acpi_run_oshp(acpi_handle handle); +extern acpi_status acpi_get_hp_params_from_firmware(struct pci_dev *dev, + struct hotplug_params *hpp); +int acpi_root_bridge(acpi_handle handle); +#endif #endif diff --git a/drivers/pci/hotplug/pciehp.h b/drivers/pci/hotplug/pciehp.h index 0aac6a61337..92c1f0f1e1a 100644 --- a/drivers/pci/hotplug/pciehp.h +++ b/drivers/pci/hotplug/pciehp.h @@ -34,6 +34,7 @@ #include <linux/delay.h> #include <linux/sched.h> /* signal_pending() */ #include <linux/pcieport_if.h> +#include <linux/mutex.h> #include "pci_hotplug.h" #define MY_NAME "pciehp" @@ -49,12 +50,6 @@ extern int pciehp_force; #define info(format, arg...) printk(KERN_INFO "%s: " format, MY_NAME , ## arg) #define warn(format, arg...) printk(KERN_WARNING "%s: " format, MY_NAME , ## arg) -struct hotplug_params { - u8 cache_line_size; - u8 latency_timer; - u8 enable_serr; - u8 enable_perr; -}; struct slot { struct slot *next; @@ -96,7 +91,7 @@ struct php_ctlr_state_s { #define MAX_EVENTS 10 struct controller { struct controller *next; - struct semaphore crit_sect; /* critical section semaphore */ + struct mutex crit_sect; /* critical section mutex */ struct php_ctlr_state_s *hpc_ctlr_handle; /* HPC controller handle */ int num_slots; /* Number of slots on ctlr */ int slot_num_inc; /* 1 or -1 */ @@ -191,9 +186,6 @@ extern u8 pciehp_handle_power_fault (u8 hp_slot, void *inst_id); /* pci functions */ extern int pciehp_configure_device (struct slot *p_slot); extern int pciehp_unconfigure_device (struct slot *p_slot); -extern int pciehp_get_hp_hw_control_from_firmware(struct pci_dev *dev); -extern void pciehp_get_hp_params_from_firmware(struct pci_dev *dev, - struct hotplug_params *hpp); @@ -285,4 +277,19 @@ struct hpc_ops { int (*check_lnk_status) (struct controller *ctrl); }; + +#ifdef CONFIG_ACPI +#define pciehp_get_hp_hw_control_from_firmware(dev) \ + pciehp_acpi_get_hp_hw_control_from_firmware(dev) +static inline int pciehp_get_hp_params_from_firmware(struct pci_dev *dev, + struct hotplug_params *hpp) +{ + if (ACPI_FAILURE(acpi_get_hp_params_from_firmware(dev, hpp))) + return -ENODEV; + return 0; +} +#else +#define pciehp_get_hp_hw_control_from_firmware(dev) 0 +#define pciehp_get_hp_params_from_firmware(dev, hpp) (-ENODEV) +#endif /* CONFIG_ACPI */ #endif /* _PCIEHP_H */ diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c index 4fb569018a2..601cf9045b2 100644 --- a/drivers/pci/hotplug/pciehp_core.c +++ b/drivers/pci/hotplug/pciehp_core.c @@ -117,27 +117,23 @@ static int init_slots(struct controller *ctrl) slot_number = ctrl->first_slot; while (number_of_slots) { - slot = kmalloc(sizeof(*slot), GFP_KERNEL); + slot = kzalloc(sizeof(*slot), GFP_KERNEL); if (!slot) goto error; - memset(slot, 0, sizeof(struct slot)); slot->hotplug_slot = - kmalloc(sizeof(*(slot->hotplug_slot)), + kzalloc(sizeof(*(slot->hotplug_slot)), GFP_KERNEL); if (!slot->hotplug_slot) goto error_slot; hotplug_slot = slot->hotplug_slot; - memset(hotplug_slot, 0, sizeof(struct hotplug_slot)); hotplug_slot->info = - kmalloc(sizeof(*(hotplug_slot->info)), + kzalloc(sizeof(*(hotplug_slot->info)), GFP_KERNEL); if (!hotplug_slot->info) goto error_hpslot; hotplug_slot_info = hotplug_slot->info; - memset(hotplug_slot_info, 0, - sizeof(struct hotplug_slot_info)); hotplug_slot->name = kmalloc(SLOT_NAME_SIZE, GFP_KERNEL); if (!hotplug_slot->name) goto error_info; @@ -373,12 +369,11 @@ static int pciehp_probe(struct pcie_device *dev, const struct pcie_port_service_ u8 value; struct pci_dev *pdev; - ctrl = 
kmalloc(sizeof(*ctrl), GFP_KERNEL); + ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL); if (!ctrl) { err("%s : out of memory\n", __FUNCTION__); goto err_out_none; } - memset(ctrl, 0, sizeof(struct controller)); pdev = dev->port; ctrl->pci_dev = pdev; @@ -439,7 +434,7 @@ static int pciehp_probe(struct pcie_device *dev, const struct pcie_port_service_ } /* Wait for exclusive access to hardware */ - down(&ctrl->crit_sect); + mutex_lock(&ctrl->crit_sect); t_slot->hpc_ops->get_adapter_status(t_slot, &value); /* Check if slot is occupied */ @@ -447,7 +442,7 @@ static int pciehp_probe(struct pcie_device *dev, const struct pcie_port_service_ rc = t_slot->hpc_ops->power_off_slot(t_slot); /* Power off slot if not occupied*/ if (rc) { /* Done with exclusive hardware access */ - up(&ctrl->crit_sect); + mutex_unlock(&ctrl->crit_sect); goto err_out_free_ctrl_slot; } else /* Wait for the command to complete */ @@ -455,7 +450,7 @@ static int pciehp_probe(struct pcie_device *dev, const struct pcie_port_service_ } /* Done with exclusive hardware access */ - up(&ctrl->crit_sect); + mutex_unlock(&ctrl->crit_sect); return 0; diff --git a/drivers/pci/hotplug/pciehp_ctrl.c b/drivers/pci/hotplug/pciehp_ctrl.c index 83c4b865718..33d19876835 100644 --- a/drivers/pci/hotplug/pciehp_ctrl.c +++ b/drivers/pci/hotplug/pciehp_ctrl.c @@ -229,13 +229,13 @@ u8 pciehp_handle_power_fault(u8 hp_slot, void *inst_id) static void set_slot_off(struct controller *ctrl, struct slot * pslot) { /* Wait for exclusive access to hardware */ - down(&ctrl->crit_sect); + mutex_lock(&ctrl->crit_sect); /* turn off slot, turn on Amber LED, turn off Green LED if supported*/ if (POWER_CTRL(ctrl->ctrlcap)) { if (pslot->hpc_ops->power_off_slot(pslot)) { err("%s: Issue of Slot Power Off command failed\n", __FUNCTION__); - up(&ctrl->crit_sect); + mutex_unlock(&ctrl->crit_sect); return; } wait_for_ctrl_irq (ctrl); @@ -249,14 +249,14 @@ static void set_slot_off(struct controller *ctrl, struct slot * pslot) if (ATTN_LED(ctrl->ctrlcap)) { if (pslot->hpc_ops->set_attention_status(pslot, 1)) { err("%s: Issue of Set Attention Led command failed\n", __FUNCTION__); - up(&ctrl->crit_sect); + mutex_unlock(&ctrl->crit_sect); return; } wait_for_ctrl_irq (ctrl); } /* Done with exclusive hardware access */ - up(&ctrl->crit_sect); + mutex_unlock(&ctrl->crit_sect); } /** @@ -279,13 +279,13 @@ static int board_added(struct slot *p_slot) ctrl->slot_device_offset, hp_slot); /* Wait for exclusive access to hardware */ - down(&ctrl->crit_sect); + mutex_lock(&ctrl->crit_sect); if (POWER_CTRL(ctrl->ctrlcap)) { /* Power on slot */ rc = p_slot->hpc_ops->power_on_slot(p_slot); if (rc) { - up(&ctrl->crit_sect); + mutex_unlock(&ctrl->crit_sect); return -1; } @@ -301,7 +301,7 @@ static int board_added(struct slot *p_slot) } /* Done with exclusive hardware access */ - up(&ctrl->crit_sect); + mutex_unlock(&ctrl->crit_sect); /* Wait for ~1 second */ wait_for_ctrl_irq (ctrl); @@ -335,7 +335,7 @@ static int board_added(struct slot *p_slot) pci_fixup_device(pci_fixup_final, ctrl->pci_dev); if (PWR_LED(ctrl->ctrlcap)) { /* Wait for exclusive access to hardware */ - down(&ctrl->crit_sect); + mutex_lock(&ctrl->crit_sect); p_slot->hpc_ops->green_led_on(p_slot); @@ -343,7 +343,7 @@ static int board_added(struct slot *p_slot) wait_for_ctrl_irq (ctrl); /* Done with exclusive hardware access */ - up(&ctrl->crit_sect); + mutex_unlock(&ctrl->crit_sect); } return 0; @@ -375,14 +375,14 @@ static int remove_board(struct slot *p_slot) dbg("In %s, hp_slot = %d\n", __FUNCTION__, hp_slot); /* Wait for 
exclusive access to hardware */ - down(&ctrl->crit_sect); + mutex_lock(&ctrl->crit_sect); if (POWER_CTRL(ctrl->ctrlcap)) { /* power off slot */ rc = p_slot->hpc_ops->power_off_slot(p_slot); if (rc) { err("%s: Issue of Slot Disable command failed\n", __FUNCTION__); - up(&ctrl->crit_sect); + mutex_unlock(&ctrl->crit_sect); return rc; } /* Wait for the command to complete */ @@ -398,7 +398,7 @@ static int remove_board(struct slot *p_slot) } /* Done with exclusive hardware access */ - up(&ctrl->crit_sect); + mutex_unlock(&ctrl->crit_sect); return 0; } @@ -445,7 +445,7 @@ static void pciehp_pushbutton_thread(unsigned long slot) if (pciehp_enable_slot(p_slot) && PWR_LED(p_slot->ctrl->ctrlcap)) { /* Wait for exclusive access to hardware */ - down(&p_slot->ctrl->crit_sect); + mutex_lock(&p_slot->ctrl->crit_sect); p_slot->hpc_ops->green_led_off(p_slot); @@ -453,7 +453,7 @@ static void pciehp_pushbutton_thread(unsigned long slot) wait_for_ctrl_irq (p_slot->ctrl); /* Done with exclusive hardware access */ - up(&p_slot->ctrl->crit_sect); + mutex_unlock(&p_slot->ctrl->crit_sect); } p_slot->state = STATIC_STATE; } @@ -495,7 +495,7 @@ static void pciehp_surprise_rm_thread(unsigned long slot) if (pciehp_enable_slot(p_slot) && PWR_LED(p_slot->ctrl->ctrlcap)) { /* Wait for exclusive access to hardware */ - down(&p_slot->ctrl->crit_sect); + mutex_lock(&p_slot->ctrl->crit_sect); p_slot->hpc_ops->green_led_off(p_slot); @@ -503,7 +503,7 @@ static void pciehp_surprise_rm_thread(unsigned long slot) wait_for_ctrl_irq (p_slot->ctrl); /* Done with exclusive hardware access */ - up(&p_slot->ctrl->crit_sect); + mutex_unlock(&p_slot->ctrl->crit_sect); } p_slot->state = STATIC_STATE; } @@ -616,7 +616,7 @@ static void interrupt_event_handler(struct controller *ctrl) switch (p_slot->state) { case BLINKINGOFF_STATE: /* Wait for exclusive access to hardware */ - down(&ctrl->crit_sect); + mutex_lock(&ctrl->crit_sect); if (PWR_LED(ctrl->ctrlcap)) { p_slot->hpc_ops->green_led_on(p_slot); @@ -630,11 +630,11 @@ static void interrupt_event_handler(struct controller *ctrl) wait_for_ctrl_irq (ctrl); } /* Done with exclusive hardware access */ - up(&ctrl->crit_sect); + mutex_unlock(&ctrl->crit_sect); break; case BLINKINGON_STATE: /* Wait for exclusive access to hardware */ - down(&ctrl->crit_sect); + mutex_lock(&ctrl->crit_sect); if (PWR_LED(ctrl->ctrlcap)) { p_slot->hpc_ops->green_led_off(p_slot); @@ -647,7 +647,7 @@ static void interrupt_event_handler(struct controller *ctrl) wait_for_ctrl_irq (ctrl); } /* Done with exclusive hardware access */ - up(&ctrl->crit_sect); + mutex_unlock(&ctrl->crit_sect); break; default: @@ -676,7 +676,7 @@ static void interrupt_event_handler(struct controller *ctrl) } /* Wait for exclusive access to hardware */ - down(&ctrl->crit_sect); + mutex_lock(&ctrl->crit_sect); /* blink green LED and turn off amber */ if (PWR_LED(ctrl->ctrlcap)) { @@ -693,7 +693,7 @@ static void interrupt_event_handler(struct controller *ctrl) } /* Done with exclusive hardware access */ - up(&ctrl->crit_sect); + mutex_unlock(&ctrl->crit_sect); init_timer(&p_slot->task_event); p_slot->task_event.expires = jiffies + 5 * HZ; /* 5 second delay */ @@ -708,7 +708,7 @@ static void interrupt_event_handler(struct controller *ctrl) if (POWER_CTRL(ctrl->ctrlcap)) { dbg("power fault\n"); /* Wait for exclusive access to hardware */ - down(&ctrl->crit_sect); + mutex_lock(&ctrl->crit_sect); if (ATTN_LED(ctrl->ctrlcap)) { p_slot->hpc_ops->set_attention_status(p_slot, 1); @@ -721,7 +721,7 @@ static void interrupt_event_handler(struct 
controller *ctrl) } /* Done with exclusive hardware access */ - up(&ctrl->crit_sect); + mutex_unlock(&ctrl->crit_sect); } } /***********SURPRISE REMOVAL********************/ @@ -756,19 +756,19 @@ int pciehp_enable_slot(struct slot *p_slot) int rc; /* Check to see if (latch closed, card present, power off) */ - down(&p_slot->ctrl->crit_sect); + mutex_lock(&p_slot->ctrl->crit_sect); rc = p_slot->hpc_ops->get_adapter_status(p_slot, &getstatus); if (rc || !getstatus) { info("%s: no adapter on slot(%x)\n", __FUNCTION__, p_slot->number); - up(&p_slot->ctrl->crit_sect); + mutex_unlock(&p_slot->ctrl->crit_sect); return 1; } if (MRL_SENS(p_slot->ctrl->ctrlcap)) { rc = p_slot->hpc_ops->get_latch_status(p_slot, &getstatus); if (rc || getstatus) { info("%s: latch open on slot(%x)\n", __FUNCTION__, p_slot->number); - up(&p_slot->ctrl->crit_sect); + mutex_unlock(&p_slot->ctrl->crit_sect); return 1; } } @@ -777,11 +777,11 @@ int pciehp_enable_slot(struct slot *p_slot) rc = p_slot->hpc_ops->get_power_status(p_slot, &getstatus); if (rc || getstatus) { info("%s: already enabled on slot(%x)\n", __FUNCTION__, p_slot->number); - up(&p_slot->ctrl->crit_sect); + mutex_unlock(&p_slot->ctrl->crit_sect); return 1; } } - up(&p_slot->ctrl->crit_sect); + mutex_unlock(&p_slot->ctrl->crit_sect); p_slot->hpc_ops->get_latch_status(p_slot, &getstatus); @@ -806,13 +806,13 @@ int pciehp_disable_slot(struct slot *p_slot) return 1; /* Check to see if (latch closed, card present, power on) */ - down(&p_slot->ctrl->crit_sect); + mutex_lock(&p_slot->ctrl->crit_sect); if (!HP_SUPR_RM(p_slot->ctrl->ctrlcap)) { ret = p_slot->hpc_ops->get_adapter_status(p_slot, &getstatus); if (ret || !getstatus) { info("%s: no adapter on slot(%x)\n", __FUNCTION__, p_slot->number); - up(&p_slot->ctrl->crit_sect); + mutex_unlock(&p_slot->ctrl->crit_sect); return 1; } } @@ -821,7 +821,7 @@ int pciehp_disable_slot(struct slot *p_slot) ret = p_slot->hpc_ops->get_latch_status(p_slot, &getstatus); if (ret || getstatus) { info("%s: latch open on slot(%x)\n", __FUNCTION__, p_slot->number); - up(&p_slot->ctrl->crit_sect); + mutex_unlock(&p_slot->ctrl->crit_sect); return 1; } } @@ -830,12 +830,12 @@ int pciehp_disable_slot(struct slot *p_slot) ret = p_slot->hpc_ops->get_power_status(p_slot, &getstatus); if (ret || !getstatus) { info("%s: already disabled slot(%x)\n", __FUNCTION__, p_slot->number); - up(&p_slot->ctrl->crit_sect); + mutex_unlock(&p_slot->ctrl->crit_sect); return 1; } } - up(&p_slot->ctrl->crit_sect); + mutex_unlock(&p_slot->ctrl->crit_sect); ret = remove_board(p_slot); update_slot_info(p_slot); diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c index 77e530321de..6c14d9e46b2 100644 --- a/drivers/pci/hotplug/pciehp_hpc.c +++ b/drivers/pci/hotplug/pciehp_hpc.c @@ -38,7 +38,10 @@ #include "../pci.h" #include "pciehp.h" - +#include <acpi/acpi.h> +#include <acpi/acpi_bus.h> +#include <acpi/actypes.h> +#include <linux/pci-acpi.h> #ifdef DEBUG #define DBG_K_TRACE_ENTRY ((unsigned int)0x00000001) /* On function entry */ #define DBG_K_TRACE_EXIT ((unsigned int)0x00000002) /* On function exit */ @@ -1236,6 +1239,76 @@ static struct hpc_ops pciehp_hpc_ops = { .check_lnk_status = hpc_check_lnk_status, }; +#ifdef CONFIG_ACPI +int pciehp_acpi_get_hp_hw_control_from_firmware(struct pci_dev *dev) +{ + acpi_status status; + acpi_handle chandle, handle = DEVICE_ACPI_HANDLE(&(dev->dev)); + struct pci_dev *pdev = dev; + struct pci_bus *parent; + struct acpi_buffer string = { ACPI_ALLOCATE_BUFFER, NULL }; + + /* + * Per PCI firmware 
specification, we should run the ACPI _OSC + * method to get control of hotplug hardware before using it. + * If an _OSC is missing, we look for an OSHP to do the same thing. + * To handle different BIOS behavior, we look for _OSC and OSHP + * within the scope of the hotplug controller and its parents, upto + * the host bridge under which this controller exists. + */ + while (!handle) { + /* + * This hotplug controller was not listed in the ACPI name + * space at all. Try to get acpi handle of parent pci bus. + */ + if (!pdev || !pdev->bus->parent) + break; + parent = pdev->bus->parent; + dbg("Could not find %s in acpi namespace, trying parent\n", + pci_name(pdev)); + if (!parent->self) + /* Parent must be a host bridge */ + handle = acpi_get_pci_rootbridge_handle( + pci_domain_nr(parent), + parent->number); + else + handle = DEVICE_ACPI_HANDLE( + &(parent->self->dev)); + pdev = parent->self; + } + + while (handle) { + acpi_get_name(handle, ACPI_FULL_PATHNAME, &string); + dbg("Trying to get hotplug control for %s \n", + (char *)string.pointer); + status = pci_osc_control_set(handle, + OSC_PCI_EXPRESS_NATIVE_HP_CONTROL); + if (status == AE_NOT_FOUND) + status = acpi_run_oshp(handle); + if (ACPI_SUCCESS(status)) { + dbg("Gained control for hotplug HW for pci %s (%s)\n", + pci_name(dev), (char *)string.pointer); + acpi_os_free(string.pointer); + return 0; + } + if (acpi_root_bridge(handle)) + break; + chandle = handle; + status = acpi_get_parent(chandle, &handle); + if (ACPI_FAILURE(status)) + break; + } + + err("Cannot get control of hotplug hardware for pci %s\n", + pci_name(dev)); + + acpi_os_free(string.pointer); + return -1; +} +#endif + + + int pcie_init(struct controller * ctrl, struct pcie_device *dev) { struct php_ctlr_state_s *php_ctlr, *p; @@ -1334,7 +1407,7 @@ int pcie_init(struct controller * ctrl, struct pcie_device *dev) if (pci_enable_device(pdev)) goto abort_free_ctlr; - init_MUTEX(&ctrl->crit_sect); + mutex_init(&ctrl->crit_sect); /* setup wait queue */ init_waitqueue_head(&ctrl->queue); diff --git a/drivers/pci/hotplug/pciehprm_acpi.c b/drivers/pci/hotplug/pciehprm_acpi.c deleted file mode 100644 index 2bdb30f68bf..00000000000 --- a/drivers/pci/hotplug/pciehprm_acpi.c +++ /dev/null @@ -1,257 +0,0 @@ -/* - * PCIEHPRM ACPI: PHP Resource Manager for ACPI platform - * - * Copyright (C) 2003-2004 Intel Corporation - * - * All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or (at - * your option) any later version. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or - * NON INFRINGEMENT. See the GNU General Public License for more - * details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
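The pciehp hunks above also convert the controller's critical-section locking from a semaphore used as a mutex to a real struct mutex: init_MUTEX() becomes mutex_init() in pcie_init(), and every down()/up() pair around exclusive hardware access becomes mutex_lock()/mutex_unlock(). A minimal sketch of that conversion pattern, using a hypothetical controller structure rather than the driver's real one:

#include <linux/mutex.h>

struct my_ctrl {                        /* hypothetical stand-in for struct controller */
	struct mutex crit_sect;         /* was: struct semaphore crit_sect */
};

static void my_ctrl_init(struct my_ctrl *ctrl)
{
	mutex_init(&ctrl->crit_sect);   /* was: init_MUTEX(&ctrl->crit_sect) */
}

static void my_ctrl_touch_hw(struct my_ctrl *ctrl)
{
	mutex_lock(&ctrl->crit_sect);   /* was: down(&ctrl->crit_sect) */
	/* ... exclusive hardware access ... */
	mutex_unlock(&ctrl->crit_sect); /* was: up(&ctrl->crit_sect) */
}

The mutex variant gives the same mutual exclusion but with proper sleeping-lock semantics and debugging support; shpchp.h below receives the same treatment for its crit_sect and new cmd_lock.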
- * - * Send feedback to <kristen.c.accardi@intel.com> - * - */ - -#include <linux/module.h> -#include <linux/kernel.h> -#include <linux/types.h> -#include <linux/pci.h> -#include <linux/acpi.h> -#include <linux/pci-acpi.h> -#include <acpi/acpi_bus.h> -#include <acpi/actypes.h> -#include "pciehp.h" - -#define METHOD_NAME__SUN "_SUN" -#define METHOD_NAME__HPP "_HPP" -#define METHOD_NAME_OSHP "OSHP" - -static u8 * acpi_path_name( acpi_handle handle) -{ - acpi_status status; - static u8 path_name[ACPI_PATHNAME_MAX]; - struct acpi_buffer ret_buf = { ACPI_PATHNAME_MAX, path_name }; - - memset(path_name, 0, sizeof (path_name)); - status = acpi_get_name(handle, ACPI_FULL_PATHNAME, &ret_buf); - - if (ACPI_FAILURE(status)) - return NULL; - else - return path_name; -} - -static acpi_status -acpi_run_hpp(acpi_handle handle, struct hotplug_params *hpp) -{ - acpi_status status; - u8 nui[4]; - struct acpi_buffer ret_buf = { 0, NULL}; - union acpi_object *ext_obj, *package; - u8 *path_name = acpi_path_name(handle); - int i, len = 0; - - /* get _hpp */ - status = acpi_evaluate_object(handle, METHOD_NAME__HPP, NULL, &ret_buf); - switch (status) { - case AE_BUFFER_OVERFLOW: - ret_buf.pointer = kmalloc (ret_buf.length, GFP_KERNEL); - if (!ret_buf.pointer) { - err ("%s:%s alloc for _HPP fail\n", __FUNCTION__, - path_name); - return AE_NO_MEMORY; - } - status = acpi_evaluate_object(handle, METHOD_NAME__HPP, - NULL, &ret_buf); - if (ACPI_SUCCESS(status)) - break; - default: - if (ACPI_FAILURE(status)) { - dbg("%s:%s _HPP fail=0x%x\n", __FUNCTION__, - path_name, status); - return status; - } - } - - ext_obj = (union acpi_object *) ret_buf.pointer; - if (ext_obj->type != ACPI_TYPE_PACKAGE) { - err ("%s:%s _HPP obj not a package\n", __FUNCTION__, - path_name); - status = AE_ERROR; - goto free_and_return; - } - - len = ext_obj->package.count; - package = (union acpi_object *) ret_buf.pointer; - for ( i = 0; (i < len) || (i < 4); i++) { - ext_obj = (union acpi_object *) &package->package.elements[i]; - switch (ext_obj->type) { - case ACPI_TYPE_INTEGER: - nui[i] = (u8)ext_obj->integer.value; - break; - default: - err ("%s:%s _HPP obj type incorrect\n", __FUNCTION__, - path_name); - status = AE_ERROR; - goto free_and_return; - } - } - - hpp->cache_line_size = nui[0]; - hpp->latency_timer = nui[1]; - hpp->enable_serr = nui[2]; - hpp->enable_perr = nui[3]; - - dbg(" _HPP: cache_line_size=0x%x\n", hpp->cache_line_size); - dbg(" _HPP: latency timer =0x%x\n", hpp->latency_timer); - dbg(" _HPP: enable SERR =0x%x\n", hpp->enable_serr); - dbg(" _HPP: enable PERR =0x%x\n", hpp->enable_perr); - -free_and_return: - kfree(ret_buf.pointer); - return status; -} - -static acpi_status acpi_run_oshp(acpi_handle handle) -{ - acpi_status status; - u8 *path_name = acpi_path_name(handle); - - /* run OSHP */ - status = acpi_evaluate_object(handle, METHOD_NAME_OSHP, NULL, NULL); - if (ACPI_FAILURE(status)) { - dbg("%s:%s OSHP fails=0x%x\n", __FUNCTION__, path_name, - status); - } else { - dbg("%s:%s OSHP passes\n", __FUNCTION__, path_name); - } - return status; -} - -static int is_root_bridge(acpi_handle handle) -{ - acpi_status status; - struct acpi_device_info *info; - struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL}; - int i; - - status = acpi_get_object_info(handle, &buffer); - if (ACPI_SUCCESS(status)) { - info = buffer.pointer; - if ((info->valid & ACPI_VALID_HID) && - !strcmp(PCI_ROOT_HID_STRING, - info->hardware_id.value)) { - acpi_os_free(buffer.pointer); - return 1; - } - if (info->valid & ACPI_VALID_CID) { - for (i=0; i < 
info->compatibility_id.count; i++) { - if (!strcmp(PCI_ROOT_HID_STRING, - info->compatibility_id.id[i].value)) { - acpi_os_free(buffer.pointer); - return 1; - } - } - } - } - return 0; -} - -int pciehp_get_hp_hw_control_from_firmware(struct pci_dev *dev) -{ - acpi_status status; - acpi_handle chandle, handle = DEVICE_ACPI_HANDLE(&(dev->dev)); - struct pci_dev *pdev = dev; - struct pci_bus *parent; - u8 *path_name; - - /* - * Per PCI firmware specification, we should run the ACPI _OSC - * method to get control of hotplug hardware before using it. - * If an _OSC is missing, we look for an OSHP to do the same thing. - * To handle different BIOS behavior, we look for _OSC and OSHP - * within the scope of the hotplug controller and its parents, upto - * the host bridge under which this controller exists. - */ - while (!handle) { - /* - * This hotplug controller was not listed in the ACPI name - * space at all. Try to get acpi handle of parent pci bus. - */ - if (!pdev || !pdev->bus->parent) - break; - parent = pdev->bus->parent; - dbg("Could not find %s in acpi namespace, trying parent\n", - pci_name(pdev)); - if (!parent->self) - /* Parent must be a host bridge */ - handle = acpi_get_pci_rootbridge_handle( - pci_domain_nr(parent), - parent->number); - else - handle = DEVICE_ACPI_HANDLE( - &(parent->self->dev)); - pdev = parent->self; - } - - while (handle) { - path_name = acpi_path_name(handle); - dbg("Trying to get hotplug control for %s \n", path_name); - status = pci_osc_control_set(handle, - OSC_PCI_EXPRESS_NATIVE_HP_CONTROL); - if (status == AE_NOT_FOUND) - status = acpi_run_oshp(handle); - if (ACPI_SUCCESS(status)) { - dbg("Gained control for hotplug HW for pci %s (%s)\n", - pci_name(dev), path_name); - return 0; - } - if (is_root_bridge(handle)) - break; - chandle = handle; - status = acpi_get_parent(chandle, &handle); - if (ACPI_FAILURE(status)) - break; - } - - err("Cannot get control of hotplug hardware for pci %s\n", - pci_name(dev)); - return -1; -} - -void pciehp_get_hp_params_from_firmware(struct pci_dev *dev, - struct hotplug_params *hpp) -{ - acpi_status status = AE_NOT_FOUND; - struct pci_dev *pdev = dev; - - /* - * _HPP settings apply to all child buses, until another _HPP is - * encountered. If we don't find an _HPP for the input pci dev, - * look for it in the parent device scope since that would apply to - * this pci dev. If we don't find any _HPP, use hardcoded defaults - */ - while (pdev && (ACPI_FAILURE(status))) { - acpi_handle handle = DEVICE_ACPI_HANDLE(&(pdev->dev)); - if (!handle) - break; - status = acpi_run_hpp(handle, hpp); - if (!(pdev->bus->parent)) - break; - /* Check if a parent object supports _HPP */ - pdev = pdev->bus->parent->self; - } -} - diff --git a/drivers/pci/hotplug/pciehprm_nonacpi.c b/drivers/pci/hotplug/pciehprm_nonacpi.c deleted file mode 100644 index 29180dfe849..00000000000 --- a/drivers/pci/hotplug/pciehprm_nonacpi.c +++ /dev/null @@ -1,47 +0,0 @@ -/* - * PCIEHPRM NONACPI: PHP Resource Manager for Non-ACPI/Legacy platform - * - * Copyright (C) 1995,2001 Compaq Computer Corporation - * Copyright (C) 2001 Greg Kroah-Hartman (greg@kroah.com) - * Copyright (C) 2001 IBM Corp. - * Copyright (C) 2003-2004 Intel Corporation - * - * All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or (at - * your option) any later version. 
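The deleted pciehprm_acpi.c above evaluated the ACPI _HPP method, which returns a package of four integers (cache line size, latency timer, SERR enable, PERR enable); the series moves the receiving struct hotplug_params into pci_hotplug.h so pciehp and shpchp can share it and obtain the values through acpi_get_hp_params_from_firmware(). As a hedged illustration of what those four values are typically used for, here is a hypothetical helper that programs them into a hot-added device; it is a sketch of common usage, not the driver's own routine:

#include <linux/pci.h>
#include "pci_hotplug.h"        /* struct hotplug_params, per this patch */

/* Hypothetical: apply firmware-provided _HPP values to a new device. */
static void apply_hpp(struct pci_dev *dev, struct hotplug_params *hpp)
{
	u16 cmd;

	pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, hpp->cache_line_size);
	pci_write_config_byte(dev, PCI_LATENCY_TIMER, hpp->latency_timer);

	pci_read_config_word(dev, PCI_COMMAND, &cmd);
	if (hpp->enable_serr)
		cmd |= PCI_COMMAND_SERR;       /* enable SERR# reporting */
	if (hpp->enable_perr)
		cmd |= PCI_COMMAND_PARITY;     /* enable parity error response */
	pci_write_config_word(dev, PCI_COMMAND, cmd);
}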
- * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or - * NON INFRINGEMENT. See the GNU General Public License for more - * details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. - * - * Send feedback to <greg@kroah.com>, <kristen.c.accardi@intel.com> - * - */ - -#include <linux/module.h> -#include <linux/kernel.h> -#include <linux/types.h> -#include <linux/sched.h> -#include <linux/pci.h> -#include <linux/slab.h> -#include "pciehp.h" - -void pciehp_get_hp_params_from_firmware(struct pci_dev *dev, - struct hotplug_params *hpp) -{ - return; -} - -int pciehp_get_hp_hw_control_from_firmware(struct pci_dev *dev) -{ - return 0; -} diff --git a/drivers/pci/hotplug/pcihp_skeleton.c b/drivers/pci/hotplug/pcihp_skeleton.c index 3194d51c6ec..0a46f549676 100644 --- a/drivers/pci/hotplug/pcihp_skeleton.c +++ b/drivers/pci/hotplug/pcihp_skeleton.c @@ -37,10 +37,12 @@ #include <linux/init.h> #include "pci_hotplug.h" +#define SLOT_NAME_SIZE 10 struct slot { u8 number; struct hotplug_slot *hotplug_slot; struct list_head slot_list; + char name[SLOT_NAME_SIZE]; }; static LIST_HEAD(slot_list); @@ -233,12 +235,10 @@ static void release_slot(struct hotplug_slot *hotplug_slot) dbg("%s - physical_slot = %s\n", __FUNCTION__, hotplug_slot->name); kfree(slot->hotplug_slot->info); - kfree(slot->hotplug_slot->name); kfree(slot->hotplug_slot); kfree(slot); } -#define SLOT_NAME_SIZE 10 static void make_slot_name(struct slot *slot) { /* @@ -257,7 +257,6 @@ static int __init init_slots(void) struct slot *slot; struct hotplug_slot *hotplug_slot; struct hotplug_slot_info *info; - char *name; int retval = -ENOMEM; int i; @@ -266,31 +265,23 @@ static int __init init_slots(void) * with the pci_hotplug subsystem. */ for (i = 0; i < num_slots; ++i) { - slot = kmalloc(sizeof(struct slot), GFP_KERNEL); + slot = kzalloc(sizeof(*slot), GFP_KERNEL); if (!slot) goto error; - memset(slot, 0, sizeof(struct slot)); - hotplug_slot = kmalloc(sizeof(struct hotplug_slot), - GFP_KERNEL); + hotplug_slot = kzalloc(sizeof(*hotplug_slot), GFP_KERNEL); if (!hotplug_slot) goto error_slot; - memset(hotplug_slot, 0, sizeof (struct hotplug_slot)); slot->hotplug_slot = hotplug_slot; - info = kmalloc(sizeof(struct hotplug_slot_info), GFP_KERNEL); + info = kzalloc(sizeof(*info), GFP_KERNEL); if (!info) goto error_hpslot; - memset(info, 0, sizeof (struct hotplug_slot_info)); hotplug_slot->info = info; - name = kmalloc(SLOT_NAME_SIZE, GFP_KERNEL); - if (!name) - goto error_info; - hotplug_slot->name = name; - slot->number = i; + hotplug_slot->name = slot->name; hotplug_slot->private = slot; hotplug_slot->release = &release_slot; make_slot_name(slot); @@ -300,16 +291,16 @@ static int __init init_slots(void) * Initialize the slot info structure with some known * good values. 
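A recurring cleanup throughout these hunks (ibmphp_res.c, pciehp_core.c, pcihp_skeleton.c, rpaphp_slot.c, shpchp_core.c) is collapsing a kmalloc() followed by memset(..., 0, ...) into a single kzalloc() call. A minimal before/after sketch of the pattern, using a hypothetical structure name:

#include <linux/slab.h>
#include <linux/string.h>

struct foo { int bar; };        /* hypothetical */

static struct foo *alloc_foo_old(void)
{
	struct foo *p = kmalloc(sizeof(*p), GFP_KERNEL);

	if (!p)
		return NULL;
	memset(p, 0, sizeof(*p));   /* explicit zeroing */
	return p;
}

static struct foo *alloc_foo_new(void)
{
	/* kzalloc() allocates and zeroes in one step */
	return kzalloc(sizeof(struct foo), GFP_KERNEL);
}

The behavior is identical; the kzalloc() form is shorter and cannot accidentally drop the zeroing on an error path.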
*/ - info->power_status = get_power_status(slot); - info->attention_status = get_attention_status(slot); - info->latch_status = get_latch_status(slot); - info->adapter_status = get_adapter_status(slot); + get_power_status(hotplug_slot, &info->power_status); + get_attention_status(hotplug_slot, &info->attention_status); + get_latch_status(hotplug_slot, &info->latch_status); + get_adapter_status(hotplug_slot, &info->adapter_status); dbg("registering slot %d\n", i); retval = pci_hp_register(slot->hotplug_slot); if (retval) { err("pci_hp_register failed with error %d\n", retval); - goto error_name; + goto error_info; } /* add slot to our internal list */ @@ -317,8 +308,6 @@ static int __init init_slots(void) } return 0; -error_name: - kfree(name); error_info: kfree(info); error_hpslot: diff --git a/drivers/pci/hotplug/rpaphp_slot.c b/drivers/pci/hotplug/rpaphp_slot.c index 78943e064b5..b771196a654 100644 --- a/drivers/pci/hotplug/rpaphp_slot.c +++ b/drivers/pci/hotplug/rpaphp_slot.c @@ -84,19 +84,16 @@ struct slot *alloc_slot_struct(struct device_node *dn, int drc_index, char *drc_ { struct slot *slot; - slot = kmalloc(sizeof (struct slot), GFP_KERNEL); + slot = kzalloc(sizeof(struct slot), GFP_KERNEL); if (!slot) goto error_nomem; - memset(slot, 0, sizeof (struct slot)); - slot->hotplug_slot = kmalloc(sizeof (struct hotplug_slot), GFP_KERNEL); + slot->hotplug_slot = kzalloc(sizeof(struct hotplug_slot), GFP_KERNEL); if (!slot->hotplug_slot) goto error_slot; - memset(slot->hotplug_slot, 0, sizeof (struct hotplug_slot)); - slot->hotplug_slot->info = kmalloc(sizeof (struct hotplug_slot_info), + slot->hotplug_slot->info = kzalloc(sizeof(struct hotplug_slot_info), GFP_KERNEL); if (!slot->hotplug_slot->info) goto error_hpslot; - memset(slot->hotplug_slot->info, 0, sizeof (struct hotplug_slot_info)); slot->hotplug_slot->name = kmalloc(BUS_ID_SIZE + 1, GFP_KERNEL); if (!slot->hotplug_slot->name) goto error_info; diff --git a/drivers/pci/hotplug/sgi_hotplug.c b/drivers/pci/hotplug/sgi_hotplug.c index a32ae82e592..c402da8e78a 100644 --- a/drivers/pci/hotplug/sgi_hotplug.c +++ b/drivers/pci/hotplug/sgi_hotplug.c @@ -3,7 +3,7 @@ * License. See the file "COPYING" in the main directory of this archive * for more details. * - * Copyright (C) 2005 Silicon Graphics, Inc. All rights reserved. + * Copyright (C) 2005-2006 Silicon Graphics, Inc. All rights reserved. * * This work was based on the 2.4/2.6 kernel development by Dick Reigner. * Work to add BIOS PROM support was completed by Mike Habeck. @@ -230,6 +230,13 @@ static void sn_bus_free_data(struct pci_dev *dev) list_for_each_entry(child, &subordinate_bus->devices, bus_list) sn_bus_free_data(child); } + /* + * Some drivers may use dma accesses during the + * driver remove function. We release the sysdata + * areas after the driver remove functions have + * been called. + */ + sn_bus_store_sysdata(dev); sn_pci_unfixup_slot(dev); } @@ -429,13 +436,6 @@ static int disable_slot(struct hotplug_slot *bss_hotplug_slot) PCI_DEVFN(slot->device_num + 1, PCI_FUNC(func))); if (dev) { - /* - * Some drivers may use dma accesses during the - * driver remove function. We release the sysdata - * areas after the driver remove functions have - * been called. 
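Both pcihp_skeleton.c above and shpchp below also stop allocating the slot name separately: a fixed-size name buffer is embedded in struct slot and hotplug_slot->name points at it, which removes one allocation failure path (the old error_name unwind label) and one kfree() from release_slot(). A small sketch of the resulting layout, using a hypothetical trimmed-down slot structure; the format string mirrors shpchp's make_slot_name():

#include <linux/types.h>
#include <linux/kernel.h>

#define SLOT_NAME_SIZE 10       /* as defined in the patch */

struct skel_slot {              /* hypothetical, trimmed struct slot */
	u8 bus;
	u8 number;
	char name[SLOT_NAME_SIZE];  /* embedded, no separate kmalloc() */
};

static void skel_make_slot_name(struct skel_slot *slot)
{
	/* "%04d_%04d" is the format shpchp uses for bus and slot number */
	snprintf(slot->name, SLOT_NAME_SIZE, "%04d_%04d",
		 slot->bus, slot->number);
}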
- */ - sn_bus_store_sysdata(dev); sn_bus_free_data(dev); pci_remove_bus_device(dev); pci_dev_put(dev); diff --git a/drivers/pci/hotplug/shpchp.h b/drivers/pci/hotplug/shpchp.h index 7d6f521d02e..5c70f43908c 100644 --- a/drivers/pci/hotplug/shpchp.h +++ b/drivers/pci/hotplug/shpchp.h @@ -33,6 +33,7 @@ #include <linux/pci.h> #include <linux/delay.h> #include <linux/sched.h> /* signal_pending(), struct timer_list */ +#include <linux/mutex.h> #include "pci_hotplug.h" @@ -45,6 +46,7 @@ extern int shpchp_poll_mode; extern int shpchp_poll_time; extern int shpchp_debug; +extern struct workqueue_struct *shpchp_wq; /*#define dbg(format, arg...) do { if (shpchp_debug) printk(KERN_DEBUG "%s: " format, MY_NAME , ## arg); } while (0)*/ #define dbg(format, arg...) do { if (shpchp_debug) printk("%s: " format, MY_NAME , ## arg); } while (0) @@ -52,10 +54,8 @@ extern int shpchp_debug; #define info(format, arg...) printk(KERN_INFO "%s: " format, MY_NAME , ## arg) #define warn(format, arg...) printk(KERN_WARNING "%s: " format, MY_NAME , ## arg) -#define SLOT_MAGIC 0x67267321 +#define SLOT_NAME_SIZE 10 struct slot { - u32 magic; - struct slot *next; u8 bus; u8 device; u16 status; @@ -70,26 +70,27 @@ struct slot { struct hpc_ops *hpc_ops; struct hotplug_slot *hotplug_slot; struct list_head slot_list; + char name[SLOT_NAME_SIZE]; + struct work_struct work; /* work for button event */ + struct mutex lock; }; struct event_info { u32 event_type; - u8 hp_slot; + struct slot *p_slot; + struct work_struct work; }; struct controller { - struct controller *next; - struct semaphore crit_sect; /* critical section semaphore */ + struct mutex crit_sect; /* critical section mutex */ + struct mutex cmd_lock; /* command lock */ struct php_ctlr_state_s *hpc_ctlr_handle; /* HPC controller handle */ int num_slots; /* Number of slots on ctlr */ int slot_num_inc; /* 1 or -1 */ struct pci_dev *pci_dev; - struct pci_bus *pci_bus; - struct event_info event_queue[10]; - struct slot *slot; + struct list_head slot_list; struct hpc_ops *hpc_ops; wait_queue_head_t queue; /* sleep & wake process */ - u8 next_event; u8 bus; u8 device; u8 function; @@ -105,12 +106,6 @@ struct controller { volatile int cmd_busy; }; -struct hotplug_params { - u8 cache_line_size; - u8 latency_timer; - u8 enable_serr; - u8 enable_perr; -}; /* Define AMD SHPC ID */ #define PCI_DEVICE_ID_AMD_GOLAM_7450 0x7450 @@ -180,11 +175,8 @@ struct hotplug_params { /* sysfs functions for the hotplug controller info */ extern void shpchp_create_ctrl_files (struct controller *ctrl); -/* controller functions */ -extern int shpchp_event_start_thread(void); -extern void shpchp_event_stop_thread(void); -extern int shpchp_enable_slot(struct slot *slot); -extern int shpchp_disable_slot(struct slot *slot); +extern int shpchp_sysfs_enable_slot(struct slot *slot); +extern int shpchp_sysfs_disable_slot(struct slot *slot); extern u8 shpchp_handle_attention_button(u8 hp_slot, void *inst_id); extern u8 shpchp_handle_switch_change(u8 hp_slot, void *inst_id); @@ -195,16 +187,28 @@ extern u8 shpchp_handle_power_fault(u8 hp_slot, void *inst_id); extern int shpchp_save_config(struct controller *ctrl, int busnumber, int num_ctlr_slots, int first_device_num); extern int shpchp_configure_device(struct slot *p_slot); extern int shpchp_unconfigure_device(struct slot *p_slot); -extern void get_hp_hw_control_from_firmware(struct pci_dev *dev); -extern void get_hp_params_from_firmware(struct pci_dev *dev, - struct hotplug_params *hpp); -extern int shpchprm_get_physical_slot_number(struct controller 
*ctrl, - u32 *sun, u8 busnum, u8 devnum); extern void shpchp_remove_ctrl_files(struct controller *ctrl); +extern void cleanup_slots(struct controller *ctrl); +extern void queue_pushbutton_work(void *data); -/* Global variables */ -extern struct controller *shpchp_ctrl_list; +#ifdef CONFIG_ACPI +static inline int get_hp_params_from_firmware(struct pci_dev *dev, + struct hotplug_params *hpp) +{ + if (ACPI_FAILURE(acpi_get_hp_params_from_firmware(dev, hpp))) + return -ENODEV; + return 0; +} +#define get_hp_hw_control_from_firmware(pdev) \ + do { \ + if (DEVICE_ACPI_HANDLE(&(pdev->dev))) \ + acpi_run_oshp(DEVICE_ACPI_HANDLE(&(pdev->dev))); \ + } while (0) +#else +#define get_hp_params_from_firmware(dev, hpp) (-ENODEV) +#define get_hp_hw_control_from_firmware(dev) do { } while (0) +#endif struct ctrl_reg { volatile u32 base_offset; @@ -286,10 +290,6 @@ static inline int slot_paranoia_check (struct slot *slot, const char *function) dbg("%s - slot == NULL", function); return -1; } - if (slot->magic != SLOT_MAGIC) { - dbg("%s - bad magic number for slot", function); - return -1; - } if (!slot->hotplug_slot) { dbg("%s - slot->hotplug_slot == NULL!", function); return -1; @@ -314,44 +314,19 @@ static inline struct slot *get_slot (struct hotplug_slot *hotplug_slot, const ch static inline struct slot *shpchp_find_slot (struct controller *ctrl, u8 device) { - struct slot *p_slot, *tmp_slot = NULL; + struct slot *slot; if (!ctrl) return NULL; - p_slot = ctrl->slot; - - while (p_slot && (p_slot->device != device)) { - tmp_slot = p_slot; - p_slot = p_slot->next; + list_for_each_entry(slot, &ctrl->slot_list, slot_list) { + if (slot->device == device) + return slot; } - if (p_slot == NULL) { - err("ERROR: shpchp_find_slot device=0x%x\n", device); - p_slot = tmp_slot; - } - - return (p_slot); -} - -static inline int wait_for_ctrl_irq (struct controller *ctrl) -{ - DECLARE_WAITQUEUE(wait, current); - int retval = 0; - - add_wait_queue(&ctrl->queue, &wait); - if (!shpchp_poll_mode) { - /* Sleep for up to 1 second */ - msleep_interruptible(1000); - } else { - /* Sleep for up to 2 seconds */ - msleep_interruptible(2000); - } - remove_wait_queue(&ctrl->queue, &wait); - if (signal_pending(current)) - retval = -EINTR; + err("%s: slot (device=0x%x) not found\n", __FUNCTION__, device); - return retval; + return NULL; } static inline void amd_pogo_errata_save_misc_reg(struct slot *p_slot) @@ -427,13 +402,6 @@ static inline void amd_pogo_errata_restore_misc_reg(struct slot *p_slot) pci_write_config_dword(p_slot->ctrl->pci_dev, PCIX_MISCII_OFFSET, pcix_misc2_temp); } -#define SLOT_NAME_SIZE 10 - -static inline void make_slot_name(char *buffer, int buffer_size, struct slot *slot) -{ - snprintf(buffer, buffer_size, "%04d_%04d", slot->bus, slot->number); -} - enum php_ctlr_type { PCI, ISA, diff --git a/drivers/pci/hotplug/shpchp_core.c b/drivers/pci/hotplug/shpchp_core.c index a2b3f0010ce..3be4d492ccc 100644 --- a/drivers/pci/hotplug/shpchp_core.c +++ b/drivers/pci/hotplug/shpchp_core.c @@ -32,13 +32,14 @@ #include <linux/kernel.h> #include <linux/types.h> #include <linux/pci.h> +#include <linux/workqueue.h> #include "shpchp.h" /* Global variables */ int shpchp_debug; int shpchp_poll_mode; int shpchp_poll_time; -struct controller *shpchp_ctrl_list; /* = NULL */ +struct workqueue_struct *shpchp_wq; #define DRIVER_VERSION "0.4" #define DRIVER_AUTHOR "Dan Zink <dan.zink@compaq.com>, Greg Kroah-Hartman <greg@kroah.com>, Dely Sy <dely.l.sy@intel.com>" @@ -57,7 +58,6 @@ MODULE_PARM_DESC(shpchp_poll_time, "Polling mechanism 
frequency, in seconds"); #define SHPC_MODULE_NAME "shpchp" -static int shpc_start_thread (void); static int set_attention_status (struct hotplug_slot *slot, u8 value); static int enable_slot (struct hotplug_slot *slot); static int disable_slot (struct hotplug_slot *slot); @@ -94,107 +94,120 @@ static void release_slot(struct hotplug_slot *hotplug_slot) dbg("%s - physical_slot = %s\n", __FUNCTION__, hotplug_slot->name); kfree(slot->hotplug_slot->info); - kfree(slot->hotplug_slot->name); kfree(slot->hotplug_slot); kfree(slot); } +static void make_slot_name(struct slot *slot) +{ + snprintf(slot->hotplug_slot->name, SLOT_NAME_SIZE, "%04d_%04d", + slot->bus, slot->number); +} + + + + +static int +shpchprm_get_physical_slot_number(struct controller *ctrl, u32 *sun, + u8 busnum, u8 devnum) +{ + int offset = devnum - ctrl->slot_device_offset; + + dbg("%s: ctrl->slot_num_inc %d, offset %d\n", __FUNCTION__, + ctrl->slot_num_inc, offset); + *sun = (u8) (ctrl->first_slot + ctrl->slot_num_inc *offset); + return 0; +} + + + static int init_slots(struct controller *ctrl) { - struct slot *new_slot; - u8 number_of_slots; - u8 slot_device; - u32 slot_number, sun; - int result = -ENOMEM; - - number_of_slots = ctrl->num_slots; - slot_device = ctrl->slot_device_offset; - slot_number = ctrl->first_slot; - - while (number_of_slots) { - new_slot = (struct slot *) kmalloc(sizeof(struct slot), GFP_KERNEL); - if (!new_slot) + struct slot *slot; + struct hotplug_slot *hotplug_slot; + struct hotplug_slot_info *info; + int retval = -ENOMEM; + int i; + u32 sun; + + for (i = 0; i < ctrl->num_slots; i++) { + slot = kzalloc(sizeof(*slot), GFP_KERNEL); + if (!slot) goto error; - memset(new_slot, 0, sizeof(struct slot)); - new_slot->hotplug_slot = kmalloc (sizeof (struct hotplug_slot), GFP_KERNEL); - if (!new_slot->hotplug_slot) + hotplug_slot = kzalloc(sizeof(*hotplug_slot), GFP_KERNEL); + if (!hotplug_slot) goto error_slot; - memset(new_slot->hotplug_slot, 0, sizeof (struct hotplug_slot)); + slot->hotplug_slot = hotplug_slot; - new_slot->hotplug_slot->info = kmalloc (sizeof (struct hotplug_slot_info), GFP_KERNEL); - if (!new_slot->hotplug_slot->info) + info = kzalloc(sizeof(*info), GFP_KERNEL); + if (!info) goto error_hpslot; - memset(new_slot->hotplug_slot->info, 0, sizeof (struct hotplug_slot_info)); - new_slot->hotplug_slot->name = kmalloc (SLOT_NAME_SIZE, GFP_KERNEL); - if (!new_slot->hotplug_slot->name) - goto error_info; + hotplug_slot->info = info; + + hotplug_slot->name = slot->name; - new_slot->magic = SLOT_MAGIC; - new_slot->ctrl = ctrl; - new_slot->bus = ctrl->slot_bus; - new_slot->device = slot_device; - new_slot->hpc_ops = ctrl->hpc_ops; + slot->hp_slot = i; + slot->ctrl = ctrl; + slot->bus = ctrl->slot_bus; + slot->device = ctrl->slot_device_offset + i; + slot->hpc_ops = ctrl->hpc_ops; + mutex_init(&slot->lock); if (shpchprm_get_physical_slot_number(ctrl, &sun, - new_slot->bus, new_slot->device)) - goto error_name; + slot->bus, slot->device)) + goto error_info; - new_slot->number = sun; - new_slot->hp_slot = slot_device - ctrl->slot_device_offset; + slot->number = sun; + INIT_WORK(&slot->work, queue_pushbutton_work, slot); /* register this slot with the hotplug pci core */ - new_slot->hotplug_slot->private = new_slot; - new_slot->hotplug_slot->release = &release_slot; - make_slot_name(new_slot->hotplug_slot->name, SLOT_NAME_SIZE, new_slot); - new_slot->hotplug_slot->ops = &shpchp_hotplug_slot_ops; - - new_slot->hpc_ops->get_power_status(new_slot, &(new_slot->hotplug_slot->info->power_status)); - 
new_slot->hpc_ops->get_attention_status(new_slot, &(new_slot->hotplug_slot->info->attention_status)); - new_slot->hpc_ops->get_latch_status(new_slot, &(new_slot->hotplug_slot->info->latch_status)); - new_slot->hpc_ops->get_adapter_status(new_slot, &(new_slot->hotplug_slot->info->adapter_status)); - - dbg("Registering bus=%x dev=%x hp_slot=%x sun=%x slot_device_offset=%x\n", new_slot->bus, - new_slot->device, new_slot->hp_slot, new_slot->number, ctrl->slot_device_offset); - result = pci_hp_register (new_slot->hotplug_slot); - if (result) { - err ("pci_hp_register failed with error %d\n", result); - goto error_name; + hotplug_slot->private = slot; + hotplug_slot->release = &release_slot; + make_slot_name(slot); + hotplug_slot->ops = &shpchp_hotplug_slot_ops; + + get_power_status(hotplug_slot, &info->power_status); + get_attention_status(hotplug_slot, &info->attention_status); + get_latch_status(hotplug_slot, &info->latch_status); + get_adapter_status(hotplug_slot, &info->adapter_status); + + dbg("Registering bus=%x dev=%x hp_slot=%x sun=%x " + "slot_device_offset=%x\n", slot->bus, slot->device, + slot->hp_slot, slot->number, ctrl->slot_device_offset); + retval = pci_hp_register(slot->hotplug_slot); + if (retval) { + err("pci_hp_register failed with error %d\n", retval); + goto error_info; } - new_slot->next = ctrl->slot; - ctrl->slot = new_slot; - - number_of_slots--; - slot_device++; - slot_number += ctrl->slot_num_inc; + list_add(&slot->slot_list, &ctrl->slot_list); } return 0; - -error_name: - kfree(new_slot->hotplug_slot->name); error_info: - kfree(new_slot->hotplug_slot->info); + kfree(info); error_hpslot: - kfree(new_slot->hotplug_slot); + kfree(hotplug_slot); error_slot: - kfree(new_slot); + kfree(slot); error: - return result; + return retval; } -static void cleanup_slots(struct controller *ctrl) +void cleanup_slots(struct controller *ctrl) { - struct slot *old_slot, *next_slot; - - old_slot = ctrl->slot; - ctrl->slot = NULL; - - while (old_slot) { - next_slot = old_slot->next; - pci_hp_deregister(old_slot->hotplug_slot); - old_slot = next_slot; + struct list_head *tmp; + struct list_head *next; + struct slot *slot; + + list_for_each_safe(tmp, next, &ctrl->slot_list) { + slot = list_entry(tmp, struct slot, slot_list); + list_del(&slot->slot_list); + cancel_delayed_work(&slot->work); + flush_scheduled_work(); + flush_workqueue(shpchp_wq); + pci_hp_deregister(slot->hotplug_slot); } } @@ -207,9 +220,12 @@ static int get_ctlr_slot_config(struct controller *ctrl) int rc; int flags; - rc = shpc_get_ctlr_slot_config(ctrl, &num_ctlr_slots, &first_device_num, &physical_slot_num, &updown, &flags); + rc = shpc_get_ctlr_slot_config(ctrl, &num_ctlr_slots, + &first_device_num, &physical_slot_num, + &updown, &flags); if (rc) { - err("%s: get_ctlr_slot_config fail for b:d (%x:%x)\n", __FUNCTION__, ctrl->bus, ctrl->device); + err("%s: get_ctlr_slot_config fail for b:d (%x:%x)\n", + __FUNCTION__, ctrl->bus, ctrl->device); return -1; } @@ -218,19 +234,19 @@ static int get_ctlr_slot_config(struct controller *ctrl) ctrl->first_slot = physical_slot_num; ctrl->slot_num_inc = updown; /* either -1 or 1 */ - dbg("%s: num_slot(0x%x) 1st_dev(0x%x) psn(0x%x) updown(%d) for b:d (%x:%x)\n", - __FUNCTION__, num_ctlr_slots, first_device_num, physical_slot_num, updown, ctrl->bus, ctrl->device); + dbg("%s: num_slot(0x%x) 1st_dev(0x%x) psn(0x%x) updown(%d) for b:d " + "(%x:%x)\n", __FUNCTION__, num_ctlr_slots, first_device_num, + physical_slot_num, updown, ctrl->bus, ctrl->device); return 0; } - /* * 
set_attention_status - Turns the Amber LED for a slot on, off or blink */ static int set_attention_status (struct hotplug_slot *hotplug_slot, u8 status) { - struct slot *slot = get_slot (hotplug_slot, __FUNCTION__); + struct slot *slot = get_slot(hotplug_slot, __FUNCTION__); dbg("%s - physical_slot = %s\n", __FUNCTION__, hotplug_slot->name); @@ -240,29 +256,27 @@ static int set_attention_status (struct hotplug_slot *hotplug_slot, u8 status) return 0; } - static int enable_slot (struct hotplug_slot *hotplug_slot) { - struct slot *slot = get_slot (hotplug_slot, __FUNCTION__); + struct slot *slot = get_slot(hotplug_slot, __FUNCTION__); dbg("%s - physical_slot = %s\n", __FUNCTION__, hotplug_slot->name); - return shpchp_enable_slot(slot); + return shpchp_sysfs_enable_slot(slot); } - static int disable_slot (struct hotplug_slot *hotplug_slot) { - struct slot *slot = get_slot (hotplug_slot, __FUNCTION__); + struct slot *slot = get_slot(hotplug_slot, __FUNCTION__); dbg("%s - physical_slot = %s\n", __FUNCTION__, hotplug_slot->name); - return shpchp_disable_slot(slot); + return shpchp_sysfs_disable_slot(slot); } static int get_power_status (struct hotplug_slot *hotplug_slot, u8 *value) { - struct slot *slot = get_slot (hotplug_slot, __FUNCTION__); + struct slot *slot = get_slot(hotplug_slot, __FUNCTION__); int retval; dbg("%s - physical_slot = %s\n", __FUNCTION__, hotplug_slot->name); @@ -276,7 +290,7 @@ static int get_power_status (struct hotplug_slot *hotplug_slot, u8 *value) static int get_attention_status (struct hotplug_slot *hotplug_slot, u8 *value) { - struct slot *slot = get_slot (hotplug_slot, __FUNCTION__); + struct slot *slot = get_slot(hotplug_slot, __FUNCTION__); int retval; dbg("%s - physical_slot = %s\n", __FUNCTION__, hotplug_slot->name); @@ -290,7 +304,7 @@ static int get_attention_status (struct hotplug_slot *hotplug_slot, u8 *value) static int get_latch_status (struct hotplug_slot *hotplug_slot, u8 *value) { - struct slot *slot = get_slot (hotplug_slot, __FUNCTION__); + struct slot *slot = get_slot(hotplug_slot, __FUNCTION__); int retval; dbg("%s - physical_slot = %s\n", __FUNCTION__, hotplug_slot->name); @@ -304,7 +318,7 @@ static int get_latch_status (struct hotplug_slot *hotplug_slot, u8 *value) static int get_adapter_status (struct hotplug_slot *hotplug_slot, u8 *value) { - struct slot *slot = get_slot (hotplug_slot, __FUNCTION__); + struct slot *slot = get_slot(hotplug_slot, __FUNCTION__); int retval; dbg("%s - physical_slot = %s\n", __FUNCTION__, hotplug_slot->name); @@ -318,7 +332,7 @@ static int get_adapter_status (struct hotplug_slot *hotplug_slot, u8 *value) static int get_address (struct hotplug_slot *hotplug_slot, u32 *value) { - struct slot *slot = get_slot (hotplug_slot, __FUNCTION__); + struct slot *slot = get_slot(hotplug_slot, __FUNCTION__); struct pci_bus *bus = slot->ctrl->pci_dev->subordinate; dbg("%s - physical_slot = %s\n", __FUNCTION__, hotplug_slot->name); @@ -330,11 +344,11 @@ static int get_address (struct hotplug_slot *hotplug_slot, u32 *value) static int get_max_bus_speed (struct hotplug_slot *hotplug_slot, enum pci_bus_speed *value) { - struct slot *slot = get_slot (hotplug_slot, __FUNCTION__); + struct slot *slot = get_slot(hotplug_slot, __FUNCTION__); int retval; dbg("%s - physical_slot = %s\n", __FUNCTION__, hotplug_slot->name); - + retval = slot->hpc_ops->get_max_bus_speed(slot, value); if (retval < 0) *value = PCI_SPEED_UNKNOWN; @@ -344,11 +358,11 @@ static int get_max_bus_speed (struct hotplug_slot *hotplug_slot, enum pci_bus_sp static int 
get_cur_bus_speed (struct hotplug_slot *hotplug_slot, enum pci_bus_speed *value) { - struct slot *slot = get_slot (hotplug_slot, __FUNCTION__); + struct slot *slot = get_slot(hotplug_slot, __FUNCTION__); int retval; dbg("%s - physical_slot = %s\n", __FUNCTION__, hotplug_slot->name); - + retval = slot->hpc_ops->get_cur_bus_speed(slot, value); if (retval < 0) *value = PCI_SPEED_UNKNOWN; @@ -372,61 +386,54 @@ static int shpc_probe(struct pci_dev *pdev, const struct pci_device_id *ent) int rc; struct controller *ctrl; struct slot *t_slot; - int first_device_num; /* first PCI device number supported by this SHPC */ - int num_ctlr_slots; /* number of slots supported by this SHPC */ + int first_device_num; /* first PCI device number */ + int num_ctlr_slots; /* number of slots implemented */ if (!is_shpc_capable(pdev)) return -ENODEV; - ctrl = (struct controller *) kmalloc(sizeof(struct controller), GFP_KERNEL); + ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL); if (!ctrl) { err("%s : out of memory\n", __FUNCTION__); goto err_out_none; } - memset(ctrl, 0, sizeof(struct controller)); + INIT_LIST_HEAD(&ctrl->slot_list); rc = shpc_init(ctrl, pdev); if (rc) { - dbg("%s: controller initialization failed\n", SHPC_MODULE_NAME); + dbg("%s: controller initialization failed\n", + SHPC_MODULE_NAME); goto err_out_free_ctrl; } pci_set_drvdata(pdev, ctrl); - ctrl->pci_bus = kmalloc (sizeof (*ctrl->pci_bus), GFP_KERNEL); - if (!ctrl->pci_bus) { - err("out of memory\n"); - rc = -ENOMEM; - goto err_out_unmap_mmio_region; - } - - memcpy (ctrl->pci_bus, pdev->bus, sizeof (*ctrl->pci_bus)); ctrl->bus = pdev->bus->number; ctrl->slot_bus = pdev->subordinate->number; - ctrl->device = PCI_SLOT(pdev->devfn); ctrl->function = PCI_FUNC(pdev->devfn); - dbg("ctrl bus=0x%x, device=%x, function=%x, irq=%x\n", ctrl->bus, ctrl->device, ctrl->function, pdev->irq); + + dbg("ctrl bus=0x%x, device=%x, function=%x, irq=%x\n", + ctrl->bus, ctrl->device, ctrl->function, pdev->irq); /* - * Save configuration headers for this and subordinate PCI buses + * Save configuration headers for this and subordinate PCI buses */ - rc = get_ctlr_slot_config(ctrl); if (rc) { err(msg_initialization_err, rc); - goto err_out_free_ctrl_bus; + goto err_out_release_ctlr; } first_device_num = ctrl->slot_device_offset; num_ctlr_slots = ctrl->num_slots; ctrl->add_support = 1; - + /* Setup the slot information structures */ rc = init_slots(ctrl); if (rc) { err(msg_initialization_err, 6); - goto err_out_free_ctrl_slot; + goto err_out_release_ctlr; } /* Now hpc_functions (slot->hpc_ops->functions) are ready */ @@ -437,30 +444,16 @@ static int shpc_probe(struct pci_dev *pdev, const struct pci_device_id *ent) dbg("%s: t_slot->hp_slot %x\n", __FUNCTION__,t_slot->hp_slot); if (rc || ctrl->speed == PCI_SPEED_UNKNOWN) { - err(SHPC_MODULE_NAME ": Can't get current bus speed. Set to 33MHz PCI.\n"); + err(SHPC_MODULE_NAME ": Can't get current bus speed. 
" + "Set to 33MHz PCI.\n"); ctrl->speed = PCI_SPEED_33MHz; } - /* Finish setting up the hot plug ctrl device */ - ctrl->next_event = 0; - - if (!shpchp_ctrl_list) { - shpchp_ctrl_list = ctrl; - ctrl->next = NULL; - } else { - ctrl->next = shpchp_ctrl_list; - shpchp_ctrl_list = ctrl; - } - shpchp_create_ctrl_files(ctrl); return 0; -err_out_free_ctrl_slot: - cleanup_slots(ctrl); -err_out_free_ctrl_bus: - kfree(ctrl->pci_bus); -err_out_unmap_mmio_region: +err_out_release_ctlr: ctrl->hpc_ops->release_ctlr(ctrl); err_out_free_ctrl: kfree(ctrl); @@ -468,74 +461,28 @@ err_out_none: return -ENODEV; } - -static int shpc_start_thread(void) -{ - int retval = 0; - - dbg("Initialize + Start the notification/polling mechanism \n"); - - retval = shpchp_event_start_thread(); - if (retval) { - dbg("shpchp_event_start_thread() failed\n"); - return retval; - } - - return retval; -} - -static void __exit unload_shpchpd(void) +static void shpc_remove(struct pci_dev *dev) { - struct controller *ctrl; - struct controller *tctrl; - - ctrl = shpchp_ctrl_list; - - while (ctrl) { - shpchp_remove_ctrl_files(ctrl); - cleanup_slots(ctrl); - - kfree (ctrl->pci_bus); - ctrl->hpc_ops->release_ctlr(ctrl); - - tctrl = ctrl; - ctrl = ctrl->next; - - kfree(tctrl); - } - - /* Stop the notification mechanism */ - shpchp_event_stop_thread(); + struct controller *ctrl = pci_get_drvdata(dev); + shpchp_remove_ctrl_files(ctrl); + ctrl->hpc_ops->release_ctlr(ctrl); + kfree(ctrl); } - static struct pci_device_id shpcd_pci_tbl[] = { - { - .class = ((PCI_CLASS_BRIDGE_PCI << 8) | 0x00), - .class_mask = ~0, - .vendor = PCI_ANY_ID, - .device = PCI_ANY_ID, - .subvendor = PCI_ANY_ID, - .subdevice = PCI_ANY_ID, - }, - + {PCI_DEVICE_CLASS(((PCI_CLASS_BRIDGE_PCI << 8) | 0x00), ~0)}, { /* end: all zeroes */ } }; - MODULE_DEVICE_TABLE(pci, shpcd_pci_tbl); - - static struct pci_driver shpc_driver = { .name = SHPC_MODULE_NAME, .id_table = shpcd_pci_tbl, .probe = shpc_probe, - /* remove: shpc_remove_one, */ + .remove = shpc_remove, }; - - static int __init shpcd_init(void) { int retval = 0; @@ -544,17 +491,15 @@ static int __init shpcd_init(void) shpchp_poll_mode = 1; #endif - retval = shpc_start_thread(); - if (retval) - goto error_hpc_init; + shpchp_wq = create_singlethread_workqueue("shpchpd"); + if (!shpchp_wq) + return -ENOMEM; retval = pci_register_driver(&shpc_driver); dbg("%s: pci_register_driver = %d\n", __FUNCTION__, retval); info(DRIVER_DESC " version: " DRIVER_VERSION "\n"); - -error_hpc_init: if (retval) { - shpchp_event_stop_thread(); + destroy_workqueue(shpchp_wq); } return retval; } @@ -562,10 +507,8 @@ error_hpc_init: static void __exit shpcd_cleanup(void) { dbg("unload_shpchpd()\n"); - unload_shpchpd(); - pci_unregister_driver(&shpc_driver); - + destroy_workqueue(shpchp_wq); info(DRIVER_DESC " version: " DRIVER_VERSION " unloaded\n"); } diff --git a/drivers/pci/hotplug/shpchp_ctrl.c b/drivers/pci/hotplug/shpchp_ctrl.c index 643252d9bf3..4e6381481c5 100644 --- a/drivers/pci/hotplug/shpchp_ctrl.c +++ b/drivers/pci/hotplug/shpchp_ctrl.c @@ -32,65 +32,50 @@ #include <linux/types.h> #include <linux/smp_lock.h> #include <linux/pci.h> +#include <linux/workqueue.h> #include "../pci.h" #include "shpchp.h" -static void interrupt_event_handler(struct controller *ctrl); +static void interrupt_event_handler(void *data); +static int shpchp_enable_slot(struct slot *p_slot); +static int shpchp_disable_slot(struct slot *p_slot); -static struct semaphore event_semaphore; /* mutex for process loop (up if something to process) */ -static struct 
semaphore event_exit; /* guard ensure thread has exited before calling it quits */ -static int event_finished; -static unsigned long pushbutton_pending; /* = 0 */ +static int queue_interrupt_event(struct slot *p_slot, u32 event_type) +{ + struct event_info *info; + + info = kmalloc(sizeof(*info), GFP_ATOMIC); + if (!info) + return -ENOMEM; + + info->event_type = event_type; + info->p_slot = p_slot; + INIT_WORK(&info->work, interrupt_event_handler, info); + + schedule_work(&info->work); + + return 0; +} u8 shpchp_handle_attention_button(u8 hp_slot, void *inst_id) { struct controller *ctrl = (struct controller *) inst_id; struct slot *p_slot; - u8 rc = 0; - u8 getstatus; - struct event_info *taskInfo; + u32 event_type; /* Attention Button Change */ dbg("shpchp: Attention button interrupt received.\n"); - /* This is the structure that tells the worker thread what to do */ - taskInfo = &(ctrl->event_queue[ctrl->next_event]); p_slot = shpchp_find_slot(ctrl, hp_slot + ctrl->slot_device_offset); - p_slot->hpc_ops->get_adapter_status(p_slot, &(p_slot->presence_save)); - p_slot->hpc_ops->get_latch_status(p_slot, &getstatus); - - ctrl->next_event = (ctrl->next_event + 1) % 10; - taskInfo->hp_slot = hp_slot; - - rc++; /* * Button pressed - See if need to TAKE ACTION!!! */ info("Button pressed on Slot(%d)\n", ctrl->first_slot + hp_slot); - taskInfo->event_type = INT_BUTTON_PRESS; - - if ((p_slot->state == BLINKINGON_STATE) - || (p_slot->state == BLINKINGOFF_STATE)) { - /* Cancel if we are still blinking; this means that we press the - * attention again before the 5 sec. limit expires to cancel hot-add - * or hot-remove - */ - taskInfo->event_type = INT_BUTTON_CANCEL; - info("Button cancel on Slot(%d)\n", ctrl->first_slot + hp_slot); - } else if ((p_slot->state == POWERON_STATE) - || (p_slot->state == POWEROFF_STATE)) { - /* Ignore if the slot is on power-on or power-off state; this - * means that the previous attention button action to hot-add or - * hot-remove is undergoing - */ - taskInfo->event_type = INT_BUTTON_IGNORE; - info("Button ignore on Slot(%d)\n", ctrl->first_slot + hp_slot); - } + event_type = INT_BUTTON_PRESS; - if (rc) - up(&event_semaphore); /* signal event thread that new event is posted */ + queue_interrupt_event(p_slot, event_type); return 0; @@ -100,21 +85,12 @@ u8 shpchp_handle_switch_change(u8 hp_slot, void *inst_id) { struct controller *ctrl = (struct controller *) inst_id; struct slot *p_slot; - u8 rc = 0; u8 getstatus; - struct event_info *taskInfo; + u32 event_type; /* Switch Change */ dbg("shpchp: Switch interrupt received.\n"); - /* This is the structure that tells the worker thread - * what to do - */ - taskInfo = &(ctrl->event_queue[ctrl->next_event]); - ctrl->next_event = (ctrl->next_event + 1) % 10; - taskInfo->hp_slot = hp_slot; - - rc++; p_slot = shpchp_find_slot(ctrl, hp_slot + ctrl->slot_device_offset); p_slot->hpc_ops->get_adapter_status(p_slot, &(p_slot->presence_save)); p_slot->hpc_ops->get_latch_status(p_slot, &getstatus); @@ -126,9 +102,9 @@ u8 shpchp_handle_switch_change(u8 hp_slot, void *inst_id) * Switch opened */ info("Latch open on Slot(%d)\n", ctrl->first_slot + hp_slot); - taskInfo->event_type = INT_SWITCH_OPEN; + event_type = INT_SWITCH_OPEN; if (p_slot->pwr_save && p_slot->presence_save) { - taskInfo->event_type = INT_POWER_FAULT; + event_type = INT_POWER_FAULT; err("Surprise Removal of card\n"); } } else { @@ -136,34 +112,23 @@ u8 shpchp_handle_switch_change(u8 hp_slot, void *inst_id) * Switch closed */ info("Latch close on Slot(%d)\n", 
ctrl->first_slot + hp_slot); - taskInfo->event_type = INT_SWITCH_CLOSE; + event_type = INT_SWITCH_CLOSE; } - if (rc) - up(&event_semaphore); /* signal event thread that new event is posted */ + queue_interrupt_event(p_slot, event_type); - return rc; + return 1; } u8 shpchp_handle_presence_change(u8 hp_slot, void *inst_id) { struct controller *ctrl = (struct controller *) inst_id; struct slot *p_slot; - u8 rc = 0; - /*u8 temp_byte;*/ - struct event_info *taskInfo; + u32 event_type; /* Presence Change */ dbg("shpchp: Presence/Notify input change.\n"); - /* This is the structure that tells the worker thread - * what to do - */ - taskInfo = &(ctrl->event_queue[ctrl->next_event]); - ctrl->next_event = (ctrl->next_event + 1) % 10; - taskInfo->hp_slot = hp_slot; - - rc++; p_slot = shpchp_find_slot(ctrl, hp_slot + ctrl->slot_device_offset); /* @@ -175,39 +140,29 @@ u8 shpchp_handle_presence_change(u8 hp_slot, void *inst_id) * Card Present */ info("Card present on Slot(%d)\n", ctrl->first_slot + hp_slot); - taskInfo->event_type = INT_PRESENCE_ON; + event_type = INT_PRESENCE_ON; } else { /* * Not Present */ info("Card not present on Slot(%d)\n", ctrl->first_slot + hp_slot); - taskInfo->event_type = INT_PRESENCE_OFF; + event_type = INT_PRESENCE_OFF; } - if (rc) - up(&event_semaphore); /* signal event thread that new event is posted */ + queue_interrupt_event(p_slot, event_type); - return rc; + return 1; } u8 shpchp_handle_power_fault(u8 hp_slot, void *inst_id) { struct controller *ctrl = (struct controller *) inst_id; struct slot *p_slot; - u8 rc = 0; - struct event_info *taskInfo; + u32 event_type; /* Power fault */ dbg("shpchp: Power fault interrupt received.\n"); - /* This is the structure that tells the worker thread - * what to do - */ - taskInfo = &(ctrl->event_queue[ctrl->next_event]); - ctrl->next_event = (ctrl->next_event + 1) % 10; - taskInfo->hp_slot = hp_slot; - - rc++; p_slot = shpchp_find_slot(ctrl, hp_slot + ctrl->slot_device_offset); if ( !(p_slot->hpc_ops->query_power_fault(p_slot))) { @@ -216,21 +171,21 @@ u8 shpchp_handle_power_fault(u8 hp_slot, void *inst_id) */ info("Power fault cleared on Slot(%d)\n", ctrl->first_slot + hp_slot); p_slot->status = 0x00; - taskInfo->event_type = INT_POWER_FAULT_CLEAR; + event_type = INT_POWER_FAULT_CLEAR; } else { /* * Power fault */ info("Power fault on Slot(%d)\n", ctrl->first_slot + hp_slot); - taskInfo->event_type = INT_POWER_FAULT; + event_type = INT_POWER_FAULT; /* set power fault status for this board */ p_slot->status = 0xFF; info("power fault bit %x set\n", hp_slot); } - if (rc) - up(&event_semaphore); /* signal event thread that new event is posted */ - return rc; + queue_interrupt_event(p_slot, event_type); + + return 1; } /* The following routines constitute the bulk of the @@ -242,21 +197,11 @@ static int change_bus_speed(struct controller *ctrl, struct slot *p_slot, int rc = 0; dbg("%s: change to speed %d\n", __FUNCTION__, speed); - down(&ctrl->crit_sect); if ((rc = p_slot->hpc_ops->set_bus_speed_mode(p_slot, speed))) { - err("%s: Issue of set bus speed mode command failed\n", __FUNCTION__); - up(&ctrl->crit_sect); + err("%s: Issue of set bus speed mode command failed\n", + __FUNCTION__); return WRONG_BUS_FREQUENCY; } - - if ((rc = p_slot->hpc_ops->check_cmd_status(ctrl))) { - err("%s: Can't set bus speed/mode in the case of adapter & bus mismatch\n", - __FUNCTION__); - err("%s: Error code (%d)\n", __FUNCTION__, rc); - up(&ctrl->crit_sect); - return WRONG_BUS_FREQUENCY; - } - up(&ctrl->crit_sect); return rc; } @@ -265,33 +210,26 @@ 
static int fix_bus_speed(struct controller *ctrl, struct slot *pslot, enum pci_bus_speed msp) { int rc = 0; - - if (flag != 0) { /* Other slots on the same bus are occupied */ - if ( asp < bsp ) { - err("%s: speed of bus %x and adapter %x mismatch\n", __FUNCTION__, bsp, asp); - return WRONG_BUS_FREQUENCY; + + /* + * If other slots on the same bus are occupied, we cannot + * change the bus speed. + */ + if (flag) { + if (asp < bsp) { + err("%s: speed of bus %x and adapter %x mismatch\n", + __FUNCTION__, bsp, asp); + rc = WRONG_BUS_FREQUENCY; } + return rc; + } + + if (asp < msp) { + if (bsp != asp) + rc = change_bus_speed(ctrl, pslot, asp); } else { - /* Other slots on the same bus are empty */ - if (msp == bsp) { - /* if adapter_speed >= bus_speed, do nothing */ - if (asp < bsp) { - /* - * Try to lower bus speed to accommodate the adapter if other slots - * on the same controller are empty - */ - if ((rc = change_bus_speed(ctrl, pslot, asp))) - return rc; - } - } else { - if (asp < msp) { - if ((rc = change_bus_speed(ctrl, pslot, asp))) - return rc; - } else { - if ((rc = change_bus_speed(ctrl, pslot, msp))) - return rc; - } - } + if (bsp != msp) + rc = change_bus_speed(ctrl, pslot, msp); } return rc; } @@ -308,8 +246,7 @@ static int board_added(struct slot *p_slot) u8 hp_slot; u8 slots_not_empty = 0; int rc = 0; - enum pci_bus_speed adapter_speed, bus_speed, max_bus_speed; - u8 pi, mode; + enum pci_bus_speed asp, bsp, msp; struct controller *ctrl = p_slot->ctrl; hp_slot = p_slot->device - ctrl->slot_device_offset; @@ -318,187 +255,68 @@ static int board_added(struct slot *p_slot) __FUNCTION__, p_slot->device, ctrl->slot_device_offset, hp_slot); - /* Wait for exclusive access to hardware */ - down(&ctrl->crit_sect); - /* Power on slot without connecting to bus */ rc = p_slot->hpc_ops->power_on_slot(p_slot); if (rc) { err("%s: Failed to power on slot\n", __FUNCTION__); - /* Done with exclusive hardware access */ - up(&ctrl->crit_sect); return -1; } - rc = p_slot->hpc_ops->check_cmd_status(ctrl); - if (rc) { - err("%s: Failed to power on slot, error code(%d)\n", __FUNCTION__, rc); - /* Done with exclusive hardware access */ - up(&ctrl->crit_sect); - return -1; - } - - if ((ctrl->pci_dev->vendor == 0x8086) && (ctrl->pci_dev->device == 0x0332)) { if (slots_not_empty) return WRONG_BUS_FREQUENCY; if ((rc = p_slot->hpc_ops->set_bus_speed_mode(p_slot, PCI_SPEED_33MHz))) { err("%s: Issue of set bus speed mode command failed\n", __FUNCTION__); - up(&ctrl->crit_sect); return WRONG_BUS_FREQUENCY; } - if ((rc = p_slot->hpc_ops->check_cmd_status(ctrl))) { - err("%s: Can't set bus speed/mode in the case of adapter & bus mismatch\n", - __FUNCTION__); - err("%s: Error code (%d)\n", __FUNCTION__, rc); - up(&ctrl->crit_sect); - return WRONG_BUS_FREQUENCY; - } /* turn on board, blink green LED, turn off Amber LED */ if ((rc = p_slot->hpc_ops->slot_enable(p_slot))) { err("%s: Issue of Slot Enable command failed\n", __FUNCTION__); - up(&ctrl->crit_sect); return rc; } - - if ((rc = p_slot->hpc_ops->check_cmd_status(ctrl))) { - err("%s: Failed to enable slot, error code(%d)\n", __FUNCTION__, rc); - up(&ctrl->crit_sect); - return rc; - } } - rc = p_slot->hpc_ops->get_adapter_speed(p_slot, &adapter_speed); - /* 0 = PCI 33Mhz, 1 = PCI 66 Mhz, 2 = PCI-X 66 PA, 4 = PCI-X 66 ECC, */ - /* 5 = PCI-X 133 PA, 7 = PCI-X 133 ECC, 0xa = PCI-X 133 Mhz 266, */ - /* 0xd = PCI-X 133 Mhz 533 */ - /* This encoding is different from the one used in cur_bus_speed & */ - /* max_bus_speed */ - - if (rc || adapter_speed == 
PCI_SPEED_UNKNOWN) { - err("%s: Can't get adapter speed or bus mode mismatch\n", __FUNCTION__); - /* Done with exclusive hardware access */ - up(&ctrl->crit_sect); + rc = p_slot->hpc_ops->get_adapter_speed(p_slot, &asp); + if (rc) { + err("%s: Can't get adapter speed or bus mode mismatch\n", + __FUNCTION__); return WRONG_BUS_FREQUENCY; } - rc = p_slot->hpc_ops->get_cur_bus_speed(p_slot, &bus_speed); - if (rc || bus_speed == PCI_SPEED_UNKNOWN) { + rc = p_slot->hpc_ops->get_cur_bus_speed(p_slot, &bsp); + if (rc) { err("%s: Can't get bus operation speed\n", __FUNCTION__); - /* Done with exclusive hardware access */ - up(&ctrl->crit_sect); return WRONG_BUS_FREQUENCY; } - rc = p_slot->hpc_ops->get_max_bus_speed(p_slot, &max_bus_speed); - if (rc || max_bus_speed == PCI_SPEED_UNKNOWN) { + rc = p_slot->hpc_ops->get_max_bus_speed(p_slot, &msp); + if (rc) { err("%s: Can't get max bus operation speed\n", __FUNCTION__); - max_bus_speed = bus_speed; - } - - /* Done with exclusive hardware access */ - up(&ctrl->crit_sect); - - if ((rc = p_slot->hpc_ops->get_prog_int(p_slot, &pi))) { - err("%s: Can't get controller programming interface, set it to 1\n", __FUNCTION__); - pi = 1; + msp = bsp; } /* Check if there are other slots or devices on the same bus */ if (!list_empty(&ctrl->pci_dev->subordinate->devices)) slots_not_empty = 1; - dbg("%s: slots_not_empty %d, pi %d\n", __FUNCTION__, - slots_not_empty, pi); - dbg("adapter_speed %d, bus_speed %d, max_bus_speed %d\n", - adapter_speed, bus_speed, max_bus_speed); - - if (pi == 2) { - dbg("%s: In PI = %d\n", __FUNCTION__, pi); - if ((rc = p_slot->hpc_ops->get_mode1_ECC_cap(p_slot, &mode))) { - err("%s: Can't get Mode1_ECC, set mode to 0\n", __FUNCTION__); - mode = 0; - } + dbg("%s: slots_not_empty %d, adapter_speed %d, bus_speed %d, " + "max_bus_speed %d\n", __FUNCTION__, slots_not_empty, asp, + bsp, msp); - switch (adapter_speed) { - case PCI_SPEED_133MHz_PCIX_533: - case PCI_SPEED_133MHz_PCIX_266: - if ((bus_speed != adapter_speed) && - ((rc = fix_bus_speed(ctrl, p_slot, slots_not_empty, adapter_speed, bus_speed, max_bus_speed)))) - return rc; - break; - case PCI_SPEED_133MHz_PCIX_ECC: - case PCI_SPEED_133MHz_PCIX: - if (mode) { /* Bus - Mode 1 ECC */ - if ((bus_speed != 0x7) && - ((rc = fix_bus_speed(ctrl, p_slot, slots_not_empty, adapter_speed, bus_speed, max_bus_speed)))) - return rc; - } else { - if ((bus_speed != 0x4) && - ((rc = fix_bus_speed(ctrl, p_slot, slots_not_empty, adapter_speed, bus_speed, max_bus_speed)))) - return rc; - } - break; - case PCI_SPEED_66MHz_PCIX_ECC: - case PCI_SPEED_66MHz_PCIX: - if (mode) { /* Bus - Mode 1 ECC */ - if ((bus_speed != 0x5) && - ((rc = fix_bus_speed(ctrl, p_slot, slots_not_empty, adapter_speed, bus_speed, max_bus_speed)))) - return rc; - } else { - if ((bus_speed != 0x2) && - ((rc = fix_bus_speed(ctrl, p_slot, slots_not_empty, adapter_speed, bus_speed, max_bus_speed)))) - return rc; - } - break; - case PCI_SPEED_66MHz: - if ((bus_speed != 0x1) && - ((rc = fix_bus_speed(ctrl, p_slot, slots_not_empty, adapter_speed, bus_speed, max_bus_speed)))) - return rc; - break; - case PCI_SPEED_33MHz: - if (bus_speed > 0x0) { - if (slots_not_empty == 0) { - if ((rc = change_bus_speed(ctrl, p_slot, adapter_speed))) - return rc; - } else { - err("%s: speed of bus %x and adapter %x mismatch\n", __FUNCTION__, bus_speed, adapter_speed); - return WRONG_BUS_FREQUENCY; - } - } - break; - default: - err("%s: speed of bus %x and adapter %x mismatch\n", __FUNCTION__, bus_speed, adapter_speed); - return WRONG_BUS_FREQUENCY; - } - } else 
{ - /* If adpater_speed == bus_speed, nothing to do here */ - dbg("%s: In PI = %d\n", __FUNCTION__, pi); - if ((adapter_speed != bus_speed) && - ((rc = fix_bus_speed(ctrl, p_slot, slots_not_empty, adapter_speed, bus_speed, max_bus_speed)))) - return rc; - } + rc = fix_bus_speed(ctrl, p_slot, slots_not_empty, asp, bsp, msp); + if (rc) + return rc; - down(&ctrl->crit_sect); /* turn on board, blink green LED, turn off Amber LED */ if ((rc = p_slot->hpc_ops->slot_enable(p_slot))) { err("%s: Issue of Slot Enable command failed\n", __FUNCTION__); - up(&ctrl->crit_sect); return rc; } - if ((rc = p_slot->hpc_ops->check_cmd_status(ctrl))) { - err("%s: Failed to enable slot, error code(%d)\n", __FUNCTION__, rc); - up(&ctrl->crit_sect); - return rc; - } - - up(&ctrl->crit_sect); - /* Wait for ~1 second */ - wait_for_ctrl_irq (ctrl); + msleep(1000); dbg("%s: slot status = %x\n", __FUNCTION__, p_slot->status); /* Check for a power fault */ @@ -520,40 +338,18 @@ static int board_added(struct slot *p_slot) p_slot->is_a_board = 0x01; p_slot->pwr_save = 1; - /* Wait for exclusive access to hardware */ - down(&ctrl->crit_sect); - p_slot->hpc_ops->green_led_on(p_slot); - /* Done with exclusive hardware access */ - up(&ctrl->crit_sect); - return 0; err_exit: - /* Wait for exclusive access to hardware */ - down(&ctrl->crit_sect); - /* turn off slot, turn on Amber LED, turn off Green LED */ rc = p_slot->hpc_ops->slot_disable(p_slot); if (rc) { err("%s: Issue of Slot Disable command failed\n", __FUNCTION__); - /* Done with exclusive hardware access */ - up(&ctrl->crit_sect); - return rc; - } - - rc = p_slot->hpc_ops->check_cmd_status(ctrl); - if (rc) { - err("%s: Failed to disable slot, error code(%d)\n", __FUNCTION__, rc); - /* Done with exclusive hardware access */ - up(&ctrl->crit_sect); return rc; } - /* Done with exclusive hardware access */ - up(&ctrl->crit_sect); - return(rc); } @@ -580,37 +376,19 @@ static int remove_board(struct slot *p_slot) if (p_slot->is_a_board) p_slot->status = 0x01; - /* Wait for exclusive access to hardware */ - down(&ctrl->crit_sect); - /* turn off slot, turn on Amber LED, turn off Green LED */ rc = p_slot->hpc_ops->slot_disable(p_slot); if (rc) { err("%s: Issue of Slot Disable command failed\n", __FUNCTION__); - /* Done with exclusive hardware access */ - up(&ctrl->crit_sect); return rc; } - - rc = p_slot->hpc_ops->check_cmd_status(ctrl); - if (rc) { - err("%s: Failed to disable slot, error code(%d)\n", __FUNCTION__, rc); - /* Done with exclusive hardware access */ - up(&ctrl->crit_sect); - return rc; - } rc = p_slot->hpc_ops->set_attention_status(p_slot, 0); if (rc) { err("%s: Issue of Set Attention command failed\n", __FUNCTION__); - /* Done with exclusive hardware access */ - up(&ctrl->crit_sect); return rc; } - /* Done with exclusive hardware access */ - up(&ctrl->crit_sect); - p_slot->pwr_save = 0; p_slot->is_a_board = 0; @@ -618,13 +396,10 @@ static int remove_board(struct slot *p_slot) } -static void pushbutton_helper_thread (unsigned long data) -{ - pushbutton_pending = data; - - up(&event_semaphore); -} - +struct pushbutton_work_info { + struct slot *p_slot; + struct work_struct work; +}; /** * shpchp_pushbutton_thread @@ -633,96 +408,63 @@ static void pushbutton_helper_thread (unsigned long data) * Handles all pending events and exits. 
* */ -static void shpchp_pushbutton_thread (unsigned long slot) +static void shpchp_pushbutton_thread(void *data) { - struct slot *p_slot = (struct slot *) slot; - u8 getstatus; - - pushbutton_pending = 0; - - if (!p_slot) { - dbg("%s: Error! slot NULL\n", __FUNCTION__); - return; - } - - p_slot->hpc_ops->get_power_status(p_slot, &getstatus); - if (getstatus) { - p_slot->state = POWEROFF_STATE; + struct pushbutton_work_info *info = data; + struct slot *p_slot = info->p_slot; + mutex_lock(&p_slot->lock); + switch (p_slot->state) { + case POWEROFF_STATE: + mutex_unlock(&p_slot->lock); shpchp_disable_slot(p_slot); + mutex_lock(&p_slot->lock); p_slot->state = STATIC_STATE; - } else { - p_slot->state = POWERON_STATE; - - if (shpchp_enable_slot(p_slot)) { - /* Wait for exclusive access to hardware */ - down(&p_slot->ctrl->crit_sect); - + break; + case POWERON_STATE: + mutex_unlock(&p_slot->lock); + if (shpchp_enable_slot(p_slot)) p_slot->hpc_ops->green_led_off(p_slot); - - /* Done with exclusive hardware access */ - up(&p_slot->ctrl->crit_sect); - } + mutex_lock(&p_slot->lock); p_slot->state = STATIC_STATE; + break; + default: + break; } + mutex_unlock(&p_slot->lock); - return; -} - - -/* this is the main worker thread */ -static int event_thread(void* data) -{ - struct controller *ctrl; - lock_kernel(); - daemonize("shpchpd_event"); - unlock_kernel(); - - while (1) { - dbg("!!!!event_thread sleeping\n"); - down_interruptible (&event_semaphore); - dbg("event_thread woken finished = %d\n", event_finished); - if (event_finished || signal_pending(current)) - break; - /* Do stuff here */ - if (pushbutton_pending) - shpchp_pushbutton_thread(pushbutton_pending); - else - for (ctrl = shpchp_ctrl_list; ctrl; ctrl=ctrl->next) - interrupt_event_handler(ctrl); - } - dbg("event_thread signals exit\n"); - up(&event_exit); - return 0; + kfree(info); } -int shpchp_event_start_thread (void) +void queue_pushbutton_work(void *data) { - int pid; + struct slot *p_slot = data; + struct pushbutton_work_info *info; - /* initialize our semaphores */ - init_MUTEX_LOCKED(&event_exit); - event_finished=0; - - init_MUTEX_LOCKED(&event_semaphore); - pid = kernel_thread(event_thread, NULL, 0); - - if (pid < 0) { - err ("Can't start up our event thread\n"); - return -1; + info = kmalloc(sizeof(*info), GFP_KERNEL); + if (!info) { + err("%s: Cannot allocate memory\n", __FUNCTION__); + return; } - return 0; -} - + info->p_slot = p_slot; + INIT_WORK(&info->work, shpchp_pushbutton_thread, info); -void shpchp_event_stop_thread (void) -{ - event_finished = 1; - up(&event_semaphore); - down(&event_exit); + mutex_lock(&p_slot->lock); + switch (p_slot->state) { + case BLINKINGOFF_STATE: + p_slot->state = POWEROFF_STATE; + break; + case BLINKINGON_STATE: + p_slot->state = POWERON_STATE; + break; + default: + goto out; + } + queue_work(shpchp_wq, &info->work); + out: + mutex_unlock(&p_slot->lock); } - static int update_slot_info (struct slot *slot) { struct hotplug_slot_info *info; @@ -742,149 +484,110 @@ static int update_slot_info (struct slot *slot) return result; } -static void interrupt_event_handler(struct controller *ctrl) +/* + * Note: This function must be called with slot->lock held + */ +static void handle_button_press_event(struct slot *p_slot) { - int loop = 0; - int change = 1; - u8 hp_slot; u8 getstatus; - struct slot *p_slot; - while (change) { - change = 0; - - for (loop = 0; loop < 10; loop++) { - if (ctrl->event_queue[loop].event_type != 0) { - dbg("%s:loop %x event_type %x\n", __FUNCTION__, loop, - 
ctrl->event_queue[loop].event_type); - hp_slot = ctrl->event_queue[loop].hp_slot; - - p_slot = shpchp_find_slot(ctrl, hp_slot + ctrl->slot_device_offset); - - if (ctrl->event_queue[loop].event_type == INT_BUTTON_CANCEL) { - dbg("%s: button cancel\n", __FUNCTION__); - del_timer(&p_slot->task_event); - - switch (p_slot->state) { - case BLINKINGOFF_STATE: - /* Wait for exclusive access to hardware */ - down(&ctrl->crit_sect); - - p_slot->hpc_ops->green_led_on(p_slot); - - p_slot->hpc_ops->set_attention_status(p_slot, 0); - - /* Done with exclusive hardware access */ - up(&ctrl->crit_sect); - break; - case BLINKINGON_STATE: - /* Wait for exclusive access to hardware */ - down(&ctrl->crit_sect); - - p_slot->hpc_ops->green_led_off(p_slot); - - p_slot->hpc_ops->set_attention_status(p_slot, 0); - - /* Done with exclusive hardware access */ - up(&ctrl->crit_sect); - - break; - default: - warn("Not a valid state\n"); - return; - } - info(msg_button_cancel, p_slot->number); - p_slot->state = STATIC_STATE; - } else if (ctrl->event_queue[loop].event_type == INT_BUTTON_PRESS) { - /* Button Pressed (No action on 1st press...) */ - dbg("%s: Button pressed\n", __FUNCTION__); - - p_slot->hpc_ops->get_power_status(p_slot, &getstatus); - if (getstatus) { - /* slot is on */ - dbg("%s: slot is on\n", __FUNCTION__); - p_slot->state = BLINKINGOFF_STATE; - info(msg_button_off, p_slot->number); - } else { - /* slot is off */ - dbg("%s: slot is off\n", __FUNCTION__); - p_slot->state = BLINKINGON_STATE; - info(msg_button_on, p_slot->number); - } - - /* Wait for exclusive access to hardware */ - down(&ctrl->crit_sect); - - /* blink green LED and turn off amber */ - p_slot->hpc_ops->green_led_blink(p_slot); - - p_slot->hpc_ops->set_attention_status(p_slot, 0); - - /* Done with exclusive hardware access */ - up(&ctrl->crit_sect); - - init_timer(&p_slot->task_event); - p_slot->task_event.expires = jiffies + 5 * HZ; /* 5 second delay */ - p_slot->task_event.function = (void (*)(unsigned long)) pushbutton_helper_thread; - p_slot->task_event.data = (unsigned long) p_slot; - - dbg("%s: add_timer p_slot = %p\n", __FUNCTION__,(void *) p_slot); - add_timer(&p_slot->task_event); - } else if (ctrl->event_queue[loop].event_type == INT_POWER_FAULT) { - /***********POWER FAULT********************/ - dbg("%s: power fault\n", __FUNCTION__); - /* Wait for exclusive access to hardware */ - down(&ctrl->crit_sect); - - p_slot->hpc_ops->set_attention_status(p_slot, 1); - - p_slot->hpc_ops->green_led_off(p_slot); - - /* Done with exclusive hardware access */ - up(&ctrl->crit_sect); - } else { - /* refresh notification */ - if (p_slot) - update_slot_info(p_slot); - } - - ctrl->event_queue[loop].event_type = 0; - - change = 1; - } - } /* End of FOR loop */ + switch (p_slot->state) { + case STATIC_STATE: + p_slot->hpc_ops->get_power_status(p_slot, &getstatus); + if (getstatus) { + p_slot->state = BLINKINGOFF_STATE; + info(msg_button_off, p_slot->number); + } else { + p_slot->state = BLINKINGON_STATE; + info(msg_button_on, p_slot->number); + } + /* blink green LED and turn off amber */ + p_slot->hpc_ops->green_led_blink(p_slot); + p_slot->hpc_ops->set_attention_status(p_slot, 0); + + schedule_delayed_work(&p_slot->work, 5*HZ); + break; + case BLINKINGOFF_STATE: + case BLINKINGON_STATE: + /* + * Cancel if we are still blinking; this means that we + * press the attention again before the 5 sec. 
limit + * expires to cancel hot-add or hot-remove + */ + info("Button cancel on Slot(%s)\n", p_slot->name); + dbg("%s: button cancel\n", __FUNCTION__); + cancel_delayed_work(&p_slot->work); + if (p_slot->state == BLINKINGOFF_STATE) + p_slot->hpc_ops->green_led_on(p_slot); + else + p_slot->hpc_ops->green_led_off(p_slot); + p_slot->hpc_ops->set_attention_status(p_slot, 0); + info(msg_button_cancel, p_slot->number); + p_slot->state = STATIC_STATE; + break; + case POWEROFF_STATE: + case POWERON_STATE: + /* + * Ignore if the slot is on power-on or power-off state; + * this means that the previous attention button action + * to hot-add or hot-remove is undergoing + */ + info("Button ignore on Slot(%s)\n", p_slot->name); + update_slot_info(p_slot); + break; + default: + warn("Not a valid state\n"); + break; } +} + +static void interrupt_event_handler(void *data) +{ + struct event_info *info = data; + struct slot *p_slot = info->p_slot; + + mutex_lock(&p_slot->lock); + switch (info->event_type) { + case INT_BUTTON_PRESS: + handle_button_press_event(p_slot); + break; + case INT_POWER_FAULT: + dbg("%s: power fault\n", __FUNCTION__); + p_slot->hpc_ops->set_attention_status(p_slot, 1); + p_slot->hpc_ops->green_led_off(p_slot); + break; + default: + update_slot_info(p_slot); + break; + } + mutex_unlock(&p_slot->lock); - return; + kfree(info); } -int shpchp_enable_slot (struct slot *p_slot) +static int shpchp_enable_slot (struct slot *p_slot) { u8 getstatus = 0; - int rc; + int rc, retval = -ENODEV; /* Check to see if (latch closed, card present, power off) */ - down(&p_slot->ctrl->crit_sect); + mutex_lock(&p_slot->ctrl->crit_sect); rc = p_slot->hpc_ops->get_adapter_status(p_slot, &getstatus); if (rc || !getstatus) { info("%s: no adapter on slot(%x)\n", __FUNCTION__, p_slot->number); - up(&p_slot->ctrl->crit_sect); - return -ENODEV; + goto out; } rc = p_slot->hpc_ops->get_latch_status(p_slot, &getstatus); if (rc || getstatus) { info("%s: latch open on slot(%x)\n", __FUNCTION__, p_slot->number); - up(&p_slot->ctrl->crit_sect); - return -ENODEV; + goto out; } rc = p_slot->hpc_ops->get_power_status(p_slot, &getstatus); if (rc || getstatus) { info("%s: already enabled on slot(%x)\n", __FUNCTION__, p_slot->number); - up(&p_slot->ctrl->crit_sect); - return -ENODEV; + goto out; } - up(&p_slot->ctrl->crit_sect); p_slot->is_a_board = 1; @@ -899,56 +602,119 @@ int shpchp_enable_slot (struct slot *p_slot) && p_slot->ctrl->num_slots == 1) { /* handle amd pogo errata; this must be done before enable */ amd_pogo_errata_save_misc_reg(p_slot); - rc = board_added(p_slot); + retval = board_added(p_slot); /* handle amd pogo errata; this must be done after enable */ amd_pogo_errata_restore_misc_reg(p_slot); } else - rc = board_added(p_slot); + retval = board_added(p_slot); - if (rc) { + if (retval) { p_slot->hpc_ops->get_adapter_status(p_slot, &(p_slot->presence_save)); p_slot->hpc_ops->get_latch_status(p_slot, &getstatus); } update_slot_info(p_slot); - return rc; + out: + mutex_unlock(&p_slot->ctrl->crit_sect); + return retval; } -int shpchp_disable_slot (struct slot *p_slot) +static int shpchp_disable_slot (struct slot *p_slot) { u8 getstatus = 0; - int ret = 0; + int rc, retval = -ENODEV; if (!p_slot->ctrl) return -ENODEV; /* Check to see if (latch closed, card present, power on) */ - down(&p_slot->ctrl->crit_sect); + mutex_lock(&p_slot->ctrl->crit_sect); - ret = p_slot->hpc_ops->get_adapter_status(p_slot, &getstatus); - if (ret || !getstatus) { + rc = p_slot->hpc_ops->get_adapter_status(p_slot, &getstatus); + if (rc || 
!getstatus) { info("%s: no adapter on slot(%x)\n", __FUNCTION__, p_slot->number); - up(&p_slot->ctrl->crit_sect); - return -ENODEV; + goto out; } - ret = p_slot->hpc_ops->get_latch_status(p_slot, &getstatus); - if (ret || getstatus) { + rc = p_slot->hpc_ops->get_latch_status(p_slot, &getstatus); + if (rc || getstatus) { info("%s: latch open on slot(%x)\n", __FUNCTION__, p_slot->number); - up(&p_slot->ctrl->crit_sect); - return -ENODEV; + goto out; } - ret = p_slot->hpc_ops->get_power_status(p_slot, &getstatus); - if (ret || !getstatus) { + rc = p_slot->hpc_ops->get_power_status(p_slot, &getstatus); + if (rc || !getstatus) { info("%s: already disabled slot(%x)\n", __FUNCTION__, p_slot->number); - up(&p_slot->ctrl->crit_sect); - return -ENODEV; + goto out; } - up(&p_slot->ctrl->crit_sect); - ret = remove_board(p_slot); + retval = remove_board(p_slot); update_slot_info(p_slot); - return ret; + out: + mutex_unlock(&p_slot->ctrl->crit_sect); + return retval; } +int shpchp_sysfs_enable_slot(struct slot *p_slot) +{ + int retval = -ENODEV; + + mutex_lock(&p_slot->lock); + switch (p_slot->state) { + case BLINKINGON_STATE: + cancel_delayed_work(&p_slot->work); + case STATIC_STATE: + p_slot->state = POWERON_STATE; + mutex_unlock(&p_slot->lock); + retval = shpchp_enable_slot(p_slot); + mutex_lock(&p_slot->lock); + p_slot->state = STATIC_STATE; + break; + case POWERON_STATE: + info("Slot %s is already in powering on state\n", + p_slot->name); + break; + case BLINKINGOFF_STATE: + case POWEROFF_STATE: + info("Already enabled on slot %s\n", p_slot->name); + break; + default: + err("Not a valid state on slot %s\n", p_slot->name); + break; + } + mutex_unlock(&p_slot->lock); + + return retval; +} + +int shpchp_sysfs_disable_slot(struct slot *p_slot) +{ + int retval = -ENODEV; + + mutex_lock(&p_slot->lock); + switch (p_slot->state) { + case BLINKINGOFF_STATE: + cancel_delayed_work(&p_slot->work); + case STATIC_STATE: + p_slot->state = POWEROFF_STATE; + mutex_unlock(&p_slot->lock); + retval = shpchp_disable_slot(p_slot); + mutex_lock(&p_slot->lock); + p_slot->state = STATIC_STATE; + break; + case POWEROFF_STATE: + info("Slot %s is already in powering off state\n", + p_slot->name); + break; + case BLINKINGON_STATE: + case POWERON_STATE: + info("Already disabled on slot %s\n", p_slot->name); + break; + default: + err("Not a valid state on slot %s\n", p_slot->name); + break; + } + mutex_unlock(&p_slot->lock); + + return retval; +} diff --git a/drivers/pci/hotplug/shpchp_hpc.c b/drivers/pci/hotplug/shpchp_hpc.c index b4226ff3a85..66123cf4dea 100644 --- a/drivers/pci/hotplug/shpchp_hpc.c +++ b/drivers/pci/hotplug/shpchp_hpc.c @@ -82,31 +82,6 @@ #define SLOT_100MHZ_PCIX_533 0x0f000000 #define SLOT_133MHZ_PCIX_533 0xf0000000 - -/* Secondary Bus Configuration Register */ -/* For PI = 1, Bits 0 to 2 have been encoded as follows to show current bus speed/mode */ -#define PCI_33MHZ 0x0 -#define PCI_66MHZ 0x1 -#define PCIX_66MHZ 0x2 -#define PCIX_100MHZ 0x3 -#define PCIX_133MHZ 0x4 - -/* For PI = 2, Bits 0 to 3 have been encoded as follows to show current bus speed/mode */ -#define PCI_33MHZ 0x0 -#define PCI_66MHZ 0x1 -#define PCIX_66MHZ 0x2 -#define PCIX_100MHZ 0x3 -#define PCIX_133MHZ 0x4 -#define PCIX_66MHZ_ECC 0x5 -#define PCIX_100MHZ_ECC 0x6 -#define PCIX_133MHZ_ECC 0x7 -#define PCIX_66MHZ_266 0x9 -#define PCIX_100MHZ_266 0xa -#define PCIX_133MHZ_266 0xb -#define PCIX_66MHZ_533 0x11 -#define PCIX_100MHZ_533 0x12 -#define PCIX_133MHZ_533 0x13 - /* Slot Configuration */ #define SLOT_NUM 0x0000001F #define FIRST_DEV_NUM 
0x00001F00 @@ -231,6 +206,7 @@ static spinlock_t list_lock; static irqreturn_t shpc_isr(int IRQ, void *dev_id, struct pt_regs *regs); static void start_int_poll_timer(struct php_ctlr_state_s *php_ctlr, int seconds); +static int hpc_check_cmd_status(struct controller *ctrl); /* This is the interrupt polling timeout function. */ static void int_poll_timeout(unsigned long lphp_ctlr) @@ -303,10 +279,13 @@ static int shpc_write_cmd(struct slot *slot, u8 t_slot, u8 cmd) int i; DBG_ENTER_ROUTINE - + + mutex_lock(&slot->ctrl->cmd_lock); + if (!php_ctlr) { err("%s: Invalid HPC controller handle!\n", __FUNCTION__); - return -1; + retval = -EINVAL; + goto out; } for (i = 0; i < 10; i++) { @@ -323,7 +302,8 @@ static int shpc_write_cmd(struct slot *slot, u8 t_slot, u8 cmd) if (cmd_status & 0x1) { /* After 1 sec and and the controller is still busy */ err("%s : Controller is still busy after 1 sec.\n", __FUNCTION__); - return -1; + retval = -EBUSY; + goto out; } ++t_slot; @@ -340,6 +320,17 @@ static int shpc_write_cmd(struct slot *slot, u8 t_slot, u8 cmd) * Wait for command completion. */ retval = shpc_wait_cmd(slot->ctrl); + if (retval) + goto out; + + cmd_status = hpc_check_cmd_status(slot->ctrl); + if (cmd_status) { + err("%s: Failed to issued command 0x%x (error code = %d)\n", + __FUNCTION__, cmd, cmd_status); + retval = -EIO; + } + out: + mutex_unlock(&slot->ctrl->cmd_lock); DBG_LEAVE_ROUTINE return retval; @@ -532,81 +523,41 @@ static int hpc_get_prog_int(struct slot *slot, u8 *prog_int) static int hpc_get_adapter_speed(struct slot *slot, enum pci_bus_speed *value) { - struct php_ctlr_state_s *php_ctlr = slot->ctrl->hpc_ctlr_handle; - u32 slot_reg; - u16 slot_status, sec_bus_status; - u8 m66_cap, pcix_cap, pi; int retval = 0; + struct php_ctlr_state_s *php_ctlr = slot->ctrl->hpc_ctlr_handle; + u32 slot_reg = readl(php_ctlr->creg + SLOT1 + 4 * slot->hp_slot); + u8 pcix_cap = (slot_reg >> 12) & 7; + u8 m66_cap = (slot_reg >> 9) & 1; DBG_ENTER_ROUTINE - if (!slot->ctrl->hpc_ctlr_handle) { - err("%s: Invalid HPC controller handle!\n", __FUNCTION__); - return -1; - } - - if (slot->hp_slot >= php_ctlr->num_slots) { - err("%s: Invalid HPC slot number!\n", __FUNCTION__); - return -1; - } - - pi = readb(php_ctlr->creg + PROG_INTERFACE); - slot_reg = readl(php_ctlr->creg + SLOT1 + 4*(slot->hp_slot)); - dbg("%s: pi = %d, slot_reg = %x\n", __FUNCTION__, pi, slot_reg); - slot_status = (u16) slot_reg; - dbg("%s: slot_status = %x\n", __FUNCTION__, slot_status); - sec_bus_status = readw(php_ctlr->creg + SEC_BUS_CONFIG); - - pcix_cap = (u8) ((slot_status & 0x3000) >> 12); - dbg("%s: pcix_cap = %x\n", __FUNCTION__, pcix_cap); - m66_cap = (u8) ((slot_status & 0x0200) >> 9); - dbg("%s: m66_cap = %x\n", __FUNCTION__, m66_cap); - + dbg("%s: slot_reg = %x, pcix_cap = %x, m66_cap = %x\n", + __FUNCTION__, slot_reg, pcix_cap, m66_cap); - if (pi == 2) { - switch (pcix_cap) { - case 0: - *value = m66_cap ? PCI_SPEED_66MHz : PCI_SPEED_33MHz; - break; - case 1: - *value = PCI_SPEED_66MHz_PCIX; - break; - case 3: - *value = PCI_SPEED_133MHz_PCIX; - break; - case 4: - *value = PCI_SPEED_133MHz_PCIX_266; - break; - case 5: - *value = PCI_SPEED_133MHz_PCIX_533; - break; - case 2: /* Reserved */ - default: - *value = PCI_SPEED_UNKNOWN; - retval = -ENODEV; - break; - } - } else { - switch (pcix_cap) { - case 0: - *value = m66_cap ? 
PCI_SPEED_66MHz : PCI_SPEED_33MHz; - break; - case 1: - *value = PCI_SPEED_66MHz_PCIX; - break; - case 3: - *value = PCI_SPEED_133MHz_PCIX; - break; - case 2: /* Reserved */ - default: - *value = PCI_SPEED_UNKNOWN; - retval = -ENODEV; - break; - } + switch (pcix_cap) { + case 0x0: + *value = m66_cap ? PCI_SPEED_66MHz : PCI_SPEED_33MHz; + break; + case 0x1: + *value = PCI_SPEED_66MHz_PCIX; + break; + case 0x3: + *value = PCI_SPEED_133MHz_PCIX; + break; + case 0x4: + *value = PCI_SPEED_133MHz_PCIX_266; + break; + case 0x5: + *value = PCI_SPEED_133MHz_PCIX_533; + break; + case 0x2: + default: + *value = PCI_SPEED_UNKNOWN; + retval = -ENODEV; + break; } dbg("Adapter speed = %d\n", *value); - DBG_LEAVE_ROUTINE return retval; } @@ -797,6 +748,7 @@ static void hpc_release_ctlr(struct controller *ctrl) { struct php_ctlr_state_s *php_ctlr = ctrl->hpc_ctlr_handle; struct php_ctlr_state_s *p, *p_prev; + int i; DBG_ENTER_ROUTINE @@ -805,6 +757,14 @@ static void hpc_release_ctlr(struct controller *ctrl) return ; } + /* + * Mask all slot event interrupts + */ + for (i = 0; i < ctrl->num_slots; i++) + writel(0xffff3fff, php_ctlr->creg + SLOT1 + (4 * i)); + + cleanup_slots(ctrl); + if (shpchp_poll_mode) { del_timer(&php_ctlr->int_poll_timer); } else { @@ -814,6 +774,7 @@ static void hpc_release_ctlr(struct controller *ctrl) pci_disable_msi(php_ctlr->pci_dev); } } + if (php_ctlr->pci_dev) { iounmap(php_ctlr->creg); release_mem_region(ctrl->mmio_base, ctrl->mmio_size); @@ -939,98 +900,66 @@ static int hpc_slot_disable(struct slot * slot) static int hpc_set_bus_speed_mode(struct slot * slot, enum pci_bus_speed value) { - u8 slot_cmd; - u8 pi; - int retval = 0; + int retval; struct php_ctlr_state_s *php_ctlr = slot->ctrl->hpc_ctlr_handle; + u8 pi, cmd; DBG_ENTER_ROUTINE - - if (!slot->ctrl->hpc_ctlr_handle) { - err("%s: Invalid HPC controller handle!\n", __FUNCTION__); - return -1; - } pi = readb(php_ctlr->creg + PROG_INTERFACE); - - if (pi == 1) { - switch (value) { - case 0: - slot_cmd = SETA_PCI_33MHZ; - break; - case 1: - slot_cmd = SETA_PCI_66MHZ; - break; - case 2: - slot_cmd = SETA_PCIX_66MHZ; - break; - case 3: - slot_cmd = SETA_PCIX_100MHZ; - break; - case 4: - slot_cmd = SETA_PCIX_133MHZ; - break; - default: - slot_cmd = PCI_SPEED_UNKNOWN; - retval = -ENODEV; - return retval; - } - } else { - switch (value) { - case 0: - slot_cmd = SETB_PCI_33MHZ; - break; - case 1: - slot_cmd = SETB_PCI_66MHZ; - break; - case 2: - slot_cmd = SETB_PCIX_66MHZ_PM; - break; - case 3: - slot_cmd = SETB_PCIX_100MHZ_PM; - break; - case 4: - slot_cmd = SETB_PCIX_133MHZ_PM; - break; - case 5: - slot_cmd = SETB_PCIX_66MHZ_EM; - break; - case 6: - slot_cmd = SETB_PCIX_100MHZ_EM; - break; - case 7: - slot_cmd = SETB_PCIX_133MHZ_EM; - break; - case 8: - slot_cmd = SETB_PCIX_66MHZ_266; - break; - case 0x9: - slot_cmd = SETB_PCIX_100MHZ_266; - break; - case 0xa: - slot_cmd = SETB_PCIX_133MHZ_266; - break; - case 0xb: - slot_cmd = SETB_PCIX_66MHZ_533; - break; - case 0xc: - slot_cmd = SETB_PCIX_100MHZ_533; - break; - case 0xd: - slot_cmd = SETB_PCIX_133MHZ_533; - break; - default: - slot_cmd = PCI_SPEED_UNKNOWN; - retval = -ENODEV; - return retval; - } + if ((pi == 1) && (value > PCI_SPEED_133MHz_PCIX)) + return -EINVAL; + switch (value) { + case PCI_SPEED_33MHz: + cmd = SETA_PCI_33MHZ; + break; + case PCI_SPEED_66MHz: + cmd = SETA_PCI_66MHZ; + break; + case PCI_SPEED_66MHz_PCIX: + cmd = SETA_PCIX_66MHZ; + break; + case PCI_SPEED_100MHz_PCIX: + cmd = SETA_PCIX_100MHZ; + break; + case PCI_SPEED_133MHz_PCIX: + cmd = 
SETA_PCIX_133MHZ; + break; + case PCI_SPEED_66MHz_PCIX_ECC: + cmd = SETB_PCIX_66MHZ_EM; + break; + case PCI_SPEED_100MHz_PCIX_ECC: + cmd = SETB_PCIX_100MHZ_EM; + break; + case PCI_SPEED_133MHz_PCIX_ECC: + cmd = SETB_PCIX_133MHZ_EM; + break; + case PCI_SPEED_66MHz_PCIX_266: + cmd = SETB_PCIX_66MHZ_266; + break; + case PCI_SPEED_100MHz_PCIX_266: + cmd = SETB_PCIX_100MHZ_266; + break; + case PCI_SPEED_133MHz_PCIX_266: + cmd = SETB_PCIX_133MHZ_266; + break; + case PCI_SPEED_66MHz_PCIX_533: + cmd = SETB_PCIX_66MHZ_533; + break; + case PCI_SPEED_100MHz_PCIX_533: + cmd = SETB_PCIX_100MHZ_533; + break; + case PCI_SPEED_133MHz_PCIX_533: + cmd = SETB_PCIX_133MHZ_533; + break; + default: + return -EINVAL; } - retval = shpc_write_cmd(slot, 0, slot_cmd); - if (retval) { + + retval = shpc_write_cmd(slot, 0, cmd); + if (retval) err("%s: Write command failed!\n", __FUNCTION__); - return -1; - } DBG_LEAVE_ROUTINE return retval; @@ -1093,14 +1022,8 @@ static irqreturn_t shpc_isr(int IRQ, void *dev_id, struct pt_regs *regs) wake_up_interruptible(&ctrl->queue); } - if ((intr_loc = (intr_loc >> 1)) == 0) { - /* Unmask Global Interrupt Mask */ - temp_dword = readl(php_ctlr->creg + SERR_INTR_ENABLE); - temp_dword &= 0xfffffffe; - writel(temp_dword, php_ctlr->creg + SERR_INTR_ENABLE); - - return IRQ_NONE; - } + if ((intr_loc = (intr_loc >> 1)) == 0) + goto out; for (hp_slot = 0; hp_slot < ctrl->num_slots; hp_slot++) { /* To find out which slot has interrupt pending */ @@ -1130,6 +1053,7 @@ static irqreturn_t shpc_isr(int IRQ, void *dev_id, struct pt_regs *regs) dbg("%s: intr_loc2 = %x\n",__FUNCTION__, intr_loc2); } } + out: if (!shpchp_poll_mode) { /* Unmask Global Interrupt Mask */ temp_dword = readl(php_ctlr->creg + SERR_INTR_ENABLE); @@ -1142,64 +1066,43 @@ static irqreturn_t shpc_isr(int IRQ, void *dev_id, struct pt_regs *regs) static int hpc_get_max_bus_speed (struct slot *slot, enum pci_bus_speed *value) { + int retval = 0; struct php_ctlr_state_s *php_ctlr = slot->ctrl->hpc_ctlr_handle; enum pci_bus_speed bus_speed = PCI_SPEED_UNKNOWN; - int retval = 0; - u8 pi; - u32 slot_avail1, slot_avail2; + u8 pi = readb(php_ctlr->creg + PROG_INTERFACE); + u32 slot_avail1 = readl(php_ctlr->creg + SLOT_AVAIL1); + u32 slot_avail2 = readl(php_ctlr->creg + SLOT_AVAIL2); DBG_ENTER_ROUTINE - if (!slot->ctrl->hpc_ctlr_handle) { - err("%s: Invalid HPC controller handle!\n", __FUNCTION__); - return -1; - } - - if (slot->hp_slot >= php_ctlr->num_slots) { - err("%s: Invalid HPC slot number!\n", __FUNCTION__); - return -1; - } - - pi = readb(php_ctlr->creg + PROG_INTERFACE); - slot_avail1 = readl(php_ctlr->creg + SLOT_AVAIL1); - slot_avail2 = readl(php_ctlr->creg + SLOT_AVAIL2); - if (pi == 2) { if (slot_avail2 & SLOT_133MHZ_PCIX_533) - bus_speed = PCIX_133MHZ_533; + bus_speed = PCI_SPEED_133MHz_PCIX_533; else if (slot_avail2 & SLOT_100MHZ_PCIX_533) - bus_speed = PCIX_100MHZ_533; + bus_speed = PCI_SPEED_100MHz_PCIX_533; else if (slot_avail2 & SLOT_66MHZ_PCIX_533) - bus_speed = PCIX_66MHZ_533; + bus_speed = PCI_SPEED_66MHz_PCIX_533; else if (slot_avail2 & SLOT_133MHZ_PCIX_266) - bus_speed = PCIX_133MHZ_266; + bus_speed = PCI_SPEED_133MHz_PCIX_266; else if (slot_avail2 & SLOT_100MHZ_PCIX_266) - bus_speed = PCIX_100MHZ_266; + bus_speed = PCI_SPEED_100MHz_PCIX_266; else if (slot_avail2 & SLOT_66MHZ_PCIX_266) - bus_speed = PCIX_66MHZ_266; - else if (slot_avail1 & SLOT_133MHZ_PCIX) - bus_speed = PCIX_133MHZ; - else if (slot_avail1 & SLOT_100MHZ_PCIX) - bus_speed = PCIX_100MHZ; - else if (slot_avail1 & SLOT_66MHZ_PCIX) - bus_speed = 
PCIX_66MHZ; - else if (slot_avail2 & SLOT_66MHZ) - bus_speed = PCI_66MHZ; - else if (slot_avail1 & SLOT_33MHZ) - bus_speed = PCI_33MHZ; - else bus_speed = PCI_SPEED_UNKNOWN; - } else { + bus_speed = PCI_SPEED_66MHz_PCIX_266; + } + + if (bus_speed == PCI_SPEED_UNKNOWN) { if (slot_avail1 & SLOT_133MHZ_PCIX) - bus_speed = PCIX_133MHZ; + bus_speed = PCI_SPEED_133MHz_PCIX; else if (slot_avail1 & SLOT_100MHZ_PCIX) - bus_speed = PCIX_100MHZ; + bus_speed = PCI_SPEED_100MHz_PCIX; else if (slot_avail1 & SLOT_66MHZ_PCIX) - bus_speed = PCIX_66MHZ; + bus_speed = PCI_SPEED_66MHz_PCIX; else if (slot_avail2 & SLOT_66MHZ) - bus_speed = PCI_66MHZ; + bus_speed = PCI_SPEED_66MHz; else if (slot_avail1 & SLOT_33MHZ) - bus_speed = PCI_33MHZ; - else bus_speed = PCI_SPEED_UNKNOWN; + bus_speed = PCI_SPEED_33MHz; + else + retval = -ENODEV; } *value = bus_speed; @@ -1210,111 +1113,69 @@ static int hpc_get_max_bus_speed (struct slot *slot, enum pci_bus_speed *value) static int hpc_get_cur_bus_speed (struct slot *slot, enum pci_bus_speed *value) { + int retval = 0; struct php_ctlr_state_s *php_ctlr = slot->ctrl->hpc_ctlr_handle; enum pci_bus_speed bus_speed = PCI_SPEED_UNKNOWN; - u16 sec_bus_status; - int retval = 0; - u8 pi; + u16 sec_bus_reg = readw(php_ctlr->creg + SEC_BUS_CONFIG); + u8 pi = readb(php_ctlr->creg + PROG_INTERFACE); + u8 speed_mode = (pi == 2) ? (sec_bus_reg & 0xF) : (sec_bus_reg & 0x7); DBG_ENTER_ROUTINE - if (!slot->ctrl->hpc_ctlr_handle) { - err("%s: Invalid HPC controller handle!\n", __FUNCTION__); - return -1; - } - - if (slot->hp_slot >= php_ctlr->num_slots) { - err("%s: Invalid HPC slot number!\n", __FUNCTION__); - return -1; + if ((pi == 1) && (speed_mode > 4)) { + *value = PCI_SPEED_UNKNOWN; + return -ENODEV; } - pi = readb(php_ctlr->creg + PROG_INTERFACE); - sec_bus_status = readw(php_ctlr->creg + SEC_BUS_CONFIG); - - if (pi == 2) { - switch (sec_bus_status & 0x000f) { - case 0: - bus_speed = PCI_SPEED_33MHz; - break; - case 1: - bus_speed = PCI_SPEED_66MHz; - break; - case 2: - bus_speed = PCI_SPEED_66MHz_PCIX; - break; - case 3: - bus_speed = PCI_SPEED_100MHz_PCIX; - break; - case 4: - bus_speed = PCI_SPEED_133MHz_PCIX; - break; - case 5: - bus_speed = PCI_SPEED_66MHz_PCIX_ECC; - break; - case 6: - bus_speed = PCI_SPEED_100MHz_PCIX_ECC; - break; - case 7: - bus_speed = PCI_SPEED_133MHz_PCIX_ECC; - break; - case 8: - bus_speed = PCI_SPEED_66MHz_PCIX_266; - break; - case 9: - bus_speed = PCI_SPEED_100MHz_PCIX_266; - break; - case 0xa: - bus_speed = PCI_SPEED_133MHz_PCIX_266; - break; - case 0xb: - bus_speed = PCI_SPEED_66MHz_PCIX_533; - break; - case 0xc: - bus_speed = PCI_SPEED_100MHz_PCIX_533; - break; - case 0xd: - bus_speed = PCI_SPEED_133MHz_PCIX_533; - break; - case 0xe: - case 0xf: - default: - bus_speed = PCI_SPEED_UNKNOWN; - break; - } - } else { - /* In the case where pi is undefined, default it to 1 */ - switch (sec_bus_status & 0x0007) { - case 0: - bus_speed = PCI_SPEED_33MHz; - break; - case 1: - bus_speed = PCI_SPEED_66MHz; - break; - case 2: - bus_speed = PCI_SPEED_66MHz_PCIX; - break; - case 3: - bus_speed = PCI_SPEED_100MHz_PCIX; - break; - case 4: - bus_speed = PCI_SPEED_133MHz_PCIX; - break; - case 5: - bus_speed = PCI_SPEED_UNKNOWN; /* Reserved */ - break; - case 6: - bus_speed = PCI_SPEED_UNKNOWN; /* Reserved */ - break; - case 7: - bus_speed = PCI_SPEED_UNKNOWN; /* Reserved */ - break; - default: - bus_speed = PCI_SPEED_UNKNOWN; - break; - } + switch (speed_mode) { + case 0x0: + *value = PCI_SPEED_33MHz; + break; + case 0x1: + *value = PCI_SPEED_66MHz; + break; + case 
0x2: + *value = PCI_SPEED_66MHz_PCIX; + break; + case 0x3: + *value = PCI_SPEED_100MHz_PCIX; + break; + case 0x4: + *value = PCI_SPEED_133MHz_PCIX; + break; + case 0x5: + *value = PCI_SPEED_66MHz_PCIX_ECC; + break; + case 0x6: + *value = PCI_SPEED_100MHz_PCIX_ECC; + break; + case 0x7: + *value = PCI_SPEED_133MHz_PCIX_ECC; + break; + case 0x8: + *value = PCI_SPEED_66MHz_PCIX_266; + break; + case 0x9: + *value = PCI_SPEED_100MHz_PCIX_266; + break; + case 0xa: + *value = PCI_SPEED_133MHz_PCIX_266; + break; + case 0xb: + *value = PCI_SPEED_66MHz_PCIX_533; + break; + case 0xc: + *value = PCI_SPEED_100MHz_PCIX_533; + break; + case 0xd: + *value = PCI_SPEED_133MHz_PCIX_533; + break; + default: + *value = PCI_SPEED_UNKNOWN; + retval = -ENODEV; + break; } - *value = bus_speed; dbg("Current bus speed = %d\n", bus_speed); DBG_LEAVE_ROUTINE return retval; @@ -1343,7 +1204,6 @@ static struct hpc_ops shpchp_hpc_ops = { .green_led_blink = hpc_set_green_led_blink, .release_ctlr = hpc_release_ctlr, - .check_cmd_status = hpc_check_cmd_status, }; inline static int shpc_indirect_creg_read(struct controller *ctrl, int index, @@ -1375,15 +1235,13 @@ int shpc_init(struct controller * ctrl, struct pci_dev * pdev) ctrl->pci_dev = pdev; /* pci_dev of the P2P bridge */ spin_lock_init(&list_lock); - php_ctlr = (struct php_ctlr_state_s *) kmalloc(sizeof(struct php_ctlr_state_s), GFP_KERNEL); + php_ctlr = kzalloc(sizeof(*php_ctlr), GFP_KERNEL); if (!php_ctlr) { /* allocate controller state data */ err("%s: HPC controller memory allocation error!\n", __FUNCTION__); goto abort; } - memset(php_ctlr, 0, sizeof(struct php_ctlr_state_s)); - php_ctlr->pci_dev = pdev; /* save pci_dev in context */ if ((pdev->vendor == PCI_VENDOR_ID_AMD) || (pdev->device == @@ -1454,7 +1312,9 @@ int shpc_init(struct controller * ctrl, struct pci_dev * pdev) } dbg("%s: php_ctlr->creg %p\n", __FUNCTION__, php_ctlr->creg); - init_MUTEX(&ctrl->crit_sect); + mutex_init(&ctrl->crit_sect); + mutex_init(&ctrl->cmd_lock); + /* Setup wait queue */ init_waitqueue_head(&ctrl->queue); diff --git a/drivers/pci/hotplug/shpchp_pci.c b/drivers/pci/hotplug/shpchp_pci.c index 19e1a5e1e30..257adc23399 100644 --- a/drivers/pci/hotplug/shpchp_pci.c +++ b/drivers/pci/hotplug/shpchp_pci.c @@ -38,7 +38,7 @@ static void program_fw_provided_values(struct pci_dev *dev) { u16 pci_cmd, pci_bctl; struct pci_dev *cdev; - struct hotplug_params hpp = {0x8, 0x40, 0, 0}; /* defaults */ + struct hotplug_params hpp; /* Program hpp values for this device */ if (!(dev->hdr_type == PCI_HEADER_TYPE_NORMAL || @@ -46,7 +46,13 @@ static void program_fw_provided_values(struct pci_dev *dev) (dev->class >> 8) == PCI_CLASS_BRIDGE_PCI))) return; - get_hp_params_from_firmware(dev, &hpp); + /* use default values if we can't get them from firmware */ + if (get_hp_params_from_firmware(dev, &hpp)) { + hpp.cache_line_size = 8; + hpp.latency_timer = 0x40; + hpp.enable_serr = 0; + hpp.enable_perr = 0; + } pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, hpp.cache_line_size); pci_write_config_byte(dev, PCI_LATENCY_TIMER, hpp.latency_timer); diff --git a/drivers/pci/hotplug/shpchprm_legacy.c b/drivers/pci/hotplug/shpchprm_legacy.c deleted file mode 100644 index ed6c1254bf6..00000000000 --- a/drivers/pci/hotplug/shpchprm_legacy.c +++ /dev/null @@ -1,54 +0,0 @@ -/* - * SHPCHPRM Legacy: PHP Resource Manager for Non-ACPI/Legacy platform - * - * Copyright (C) 1995,2001 Compaq Computer Corporation - * Copyright (C) 2001 Greg Kroah-Hartman (greg@kroah.com) - * Copyright (C) 2001 IBM Corp. 
- * Copyright (C) 2003-2004 Intel Corporation - * - * All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or (at - * your option) any later version. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or - * NON INFRINGEMENT. See the GNU General Public License for more - * details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. - * - * Send feedback to <greg@kroah.com>,<kristen.c.accardi@intel.com> - * - */ - -#include <linux/module.h> -#include <linux/kernel.h> -#include <linux/types.h> -#include <linux/pci.h> -#include "shpchp.h" - -int shpchprm_get_physical_slot_number(struct controller *ctrl, u32 *sun, u8 busnum, u8 devnum) -{ - int offset = devnum - ctrl->slot_device_offset; - - *sun = (u8) (ctrl->first_slot + ctrl->slot_num_inc * offset); - return 0; -} - -void get_hp_params_from_firmware(struct pci_dev *dev, - struct hotplug_params *hpp) -{ - return; -} - -void get_hp_hw_control_from_firmware(struct pci_dev *dev) -{ - return; -} - diff --git a/drivers/pci/hotplug/shpchprm_nonacpi.c b/drivers/pci/hotplug/shpchprm_nonacpi.c deleted file mode 100644 index c6b40998eeb..00000000000 --- a/drivers/pci/hotplug/shpchprm_nonacpi.c +++ /dev/null @@ -1,57 +0,0 @@ -/* - * SHPCHPRM NONACPI: PHP Resource Manager for Non-ACPI/Legacy platform - * - * Copyright (C) 1995,2001 Compaq Computer Corporation - * Copyright (C) 2001 Greg Kroah-Hartman (greg@kroah.com) - * Copyright (C) 2001 IBM Corp. - * Copyright (C) 2003-2004 Intel Corporation - * - * All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or (at - * your option) any later version. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or - * NON INFRINGEMENT. See the GNU General Public License for more - * details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
- * - * Send feedback to <greg@kroah.com>, <kristen.c.accardi@intel.com> - * - */ - -#include <linux/config.h> -#include <linux/module.h> -#include <linux/kernel.h> -#include <linux/types.h> -#include <linux/pci.h> -#include <linux/slab.h> - -#include "shpchp.h" - -int shpchprm_get_physical_slot_number(struct controller *ctrl, u32 *sun, u8 busnum, u8 devnum) -{ - int offset = devnum - ctrl->slot_device_offset; - - dbg("%s: ctrl->slot_num_inc %d, offset %d\n", __FUNCTION__, ctrl->slot_num_inc, offset); - *sun = (u8) (ctrl->first_slot + ctrl->slot_num_inc * offset); - return 0; -} - -void get_hp_params_from_firmware(struct pci_dev *dev, - struct hotplug_params *hpp) -{ - return; -} - -void get_hp_hw_control_from_firmware(struct pci_dev *dev) -{ - return; -} diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c index 48723d6fa60..a77e79c8c82 100644 --- a/drivers/pci/msi.c +++ b/drivers/pci/msi.c @@ -103,9 +103,9 @@ static void set_msi_affinity(unsigned int vector, cpumask_t cpu_mask) switch (entry->msi_attrib.type) { case PCI_CAP_ID_MSI: { - int pos; + int pos = pci_find_capability(entry->dev, PCI_CAP_ID_MSI); - if (!(pos = pci_find_capability(entry->dev, PCI_CAP_ID_MSI))) + if (!pos) return; pci_read_config_dword(entry->dev, msi_lower_address_reg(pos), @@ -347,9 +347,9 @@ static int assign_msi_vector(void) static int get_new_vector(void) { - int vector; + int vector = assign_msi_vector(); - if ((vector = assign_msi_vector()) > 0) + if (vector > 0) set_intr_gate(vector, interrupt[vector]); return vector; @@ -369,7 +369,8 @@ static int msi_init(void) return status; } - if ((status = msi_cache_init()) < 0) { + status = msi_cache_init(); + if (status < 0) { pci_msi_enable = 0; printk(KERN_WARNING "PCI: MSI cache init failed\n"); return status; @@ -523,10 +524,12 @@ static int msi_capability_init(struct pci_dev *dev) pos = pci_find_capability(dev, PCI_CAP_ID_MSI); pci_read_config_word(dev, msi_control_reg(pos), &control); /* MSI Entry Initialization */ - if (!(entry = alloc_msi_entry())) + entry = alloc_msi_entry(); + if (!entry) return -ENOMEM; - if ((vector = get_msi_vector(dev)) < 0) { + vector = get_msi_vector(dev); + if (vector < 0) { kmem_cache_free(msi_cachep, entry); return -EBUSY; } @@ -597,7 +600,8 @@ static int msix_capability_init(struct pci_dev *dev, struct msg_address address; struct msg_data data; int vector, pos, i, j, nr_entries, temp = 0; - u32 phys_addr, table_offset; + unsigned long phys_addr; + u32 table_offset; u16 control; u8 bir; void __iomem *base; @@ -606,11 +610,11 @@ static int msix_capability_init(struct pci_dev *dev, /* Request & Map MSI-X table region */ pci_read_config_word(dev, msi_control_reg(pos), &control); nr_entries = multi_msix_capable(control); - pci_read_config_dword(dev, msix_table_offset_reg(pos), - &table_offset); + + pci_read_config_dword(dev, msix_table_offset_reg(pos), &table_offset); bir = (u8)(table_offset & PCI_MSIX_FLAGS_BIRMASK); - phys_addr = pci_resource_start (dev, bir); - phys_addr += (u32)(table_offset & ~PCI_MSIX_FLAGS_BIRMASK); + table_offset &= ~PCI_MSIX_FLAGS_BIRMASK; + phys_addr = pci_resource_start (dev, bir) + table_offset; base = ioremap_nocache(phys_addr, nr_entries * PCI_MSIX_ENTRY_SIZE); if (base == NULL) return -ENOMEM; @@ -620,7 +624,8 @@ static int msix_capability_init(struct pci_dev *dev, entry = alloc_msi_entry(); if (!entry) break; - if ((vector = get_msi_vector(dev)) < 0) + vector = get_msi_vector(dev); + if (vector < 0) break; j = entries[i].entry; @@ -699,12 +704,17 @@ int pci_enable_msi(struct pci_dev* dev) if 
(dev->no_msi) return status; + if (dev->bus->bus_flags & PCI_BUS_FLAGS_NO_MSI) + return -EINVAL; + temp = dev->irq; - if ((status = msi_init()) < 0) + status = msi_init(); + if (status < 0) return status; - if (!(pos = pci_find_capability(dev, PCI_CAP_ID_MSI))) + pos = pci_find_capability(dev, PCI_CAP_ID_MSI); + if (!pos) return -EINVAL; pci_read_config_word(dev, msi_control_reg(pos), &control); @@ -728,8 +738,8 @@ int pci_enable_msi(struct pci_dev* dev) dev->irq = temp; } /* Check whether driver already requested for MSI-X vectors */ - if ((pos = pci_find_capability(dev, PCI_CAP_ID_MSIX)) > 0 && - !msi_lookup_vector(dev, PCI_CAP_ID_MSIX)) { + pos = pci_find_capability(dev, PCI_CAP_ID_MSIX); + if (pos > 0 && !msi_lookup_vector(dev, PCI_CAP_ID_MSIX)) { printk(KERN_INFO "PCI: %s: Can't enable MSI. " "Device already has MSI-X vectors assigned\n", pci_name(dev)); @@ -755,7 +765,13 @@ void pci_disable_msi(struct pci_dev* dev) u16 control; unsigned long flags; - if (!dev || !(pos = pci_find_capability(dev, PCI_CAP_ID_MSI))) + if (!pci_msi_enable) + return; + if (!dev) + return; + + pos = pci_find_capability(dev, PCI_CAP_ID_MSI); + if (!pos) return; pci_read_config_word(dev, msi_control_reg(pos), &control); @@ -826,8 +842,10 @@ static int msi_free_vector(struct pci_dev* dev, int vector, int reassign) * Detect last MSI-X vector to be released. * Release the MSI-X memory-mapped table. */ +#if 0 int pos, nr_entries; - u32 phys_addr, table_offset; + unsigned long phys_addr; + u32 table_offset; u16 control; u8 bir; @@ -838,9 +856,12 @@ static int msi_free_vector(struct pci_dev* dev, int vector, int reassign) pci_read_config_dword(dev, msix_table_offset_reg(pos), &table_offset); bir = (u8)(table_offset & PCI_MSIX_FLAGS_BIRMASK); - phys_addr = pci_resource_start (dev, bir); - phys_addr += (u32)(table_offset & - ~PCI_MSIX_FLAGS_BIRMASK); + table_offset &= ~PCI_MSIX_FLAGS_BIRMASK; + phys_addr = pci_resource_start(dev, bir) + table_offset; +/* + * FIXME! and what did you want to do with phys_addr? 
+ */ +#endif iounmap(base); } } @@ -924,10 +945,12 @@ int pci_enable_msix(struct pci_dev* dev, struct msix_entry *entries, int nvec) if (!pci_msi_enable || !dev || !entries) return -EINVAL; - if ((status = msi_init()) < 0) + status = msi_init(); + if (status < 0) return status; - if (!(pos = pci_find_capability(dev, PCI_CAP_ID_MSIX))) + pos = pci_find_capability(dev, PCI_CAP_ID_MSIX); + if (!pos) return -EINVAL; pci_read_config_word(dev, msi_control_reg(pos), &control); @@ -1006,7 +1029,13 @@ void pci_disable_msix(struct pci_dev* dev) int pos, temp; u16 control; - if (!dev || !(pos = pci_find_capability(dev, PCI_CAP_ID_MSIX))) + if (!pci_msi_enable) + return; + if (!dev) + return; + + pos = pci_find_capability(dev, PCI_CAP_ID_MSIX); + if (!pos) return; pci_read_config_word(dev, msi_control_reg(pos), &control); @@ -1066,8 +1095,8 @@ void msi_remove_pci_irq_vectors(struct pci_dev* dev) return; temp = dev->irq; /* Save IOAPIC IRQ */ - if ((pos = pci_find_capability(dev, PCI_CAP_ID_MSI)) > 0 && - !msi_lookup_vector(dev, PCI_CAP_ID_MSI)) { + pos = pci_find_capability(dev, PCI_CAP_ID_MSI); + if (pos > 0 && !msi_lookup_vector(dev, PCI_CAP_ID_MSI)) { spin_lock_irqsave(&msi_lock, flags); state = msi_desc[dev->irq]->msi_attrib.state; spin_unlock_irqrestore(&msi_lock, flags); @@ -1080,8 +1109,8 @@ void msi_remove_pci_irq_vectors(struct pci_dev* dev) msi_free_vector(dev, dev->irq, 0); dev->irq = temp; /* Restore IOAPIC IRQ */ } - if ((pos = pci_find_capability(dev, PCI_CAP_ID_MSIX)) > 0 && - !msi_lookup_vector(dev, PCI_CAP_ID_MSIX)) { + pos = pci_find_capability(dev, PCI_CAP_ID_MSIX); + if (pos > 0 && !msi_lookup_vector(dev, PCI_CAP_ID_MSIX)) { int vector, head, tail = 0, warning = 0; void __iomem *base = NULL; @@ -1101,7 +1130,9 @@ void msi_remove_pci_irq_vectors(struct pci_dev* dev) msi_free_vector(dev, vector, 0); if (warning) { /* Force to release the MSI-X memory-mapped table */ - u32 phys_addr, table_offset; +#if 0 + unsigned long phys_addr; + u32 table_offset; u16 control; u8 bir; @@ -1110,9 +1141,12 @@ void msi_remove_pci_irq_vectors(struct pci_dev* dev) pci_read_config_dword(dev, msix_table_offset_reg(pos), &table_offset); bir = (u8)(table_offset & PCI_MSIX_FLAGS_BIRMASK); - phys_addr = pci_resource_start (dev, bir); - phys_addr += (u32)(table_offset & - ~PCI_MSIX_FLAGS_BIRMASK); + table_offset &= ~PCI_MSIX_FLAGS_BIRMASK; + phys_addr = pci_resource_start(dev, bir) + table_offset; +/* + * FIXME! and what did you want to do with phys_addr? 
+ */ +#endif iounmap(base); printk(KERN_WARNING "PCI: %s: msi_remove_pci_irq_vectors() " "called without free_irq() on all MSI-X vectors\n", @@ -1123,6 +1157,11 @@ void msi_remove_pci_irq_vectors(struct pci_dev* dev) } } +void pci_no_msi(void) +{ + pci_msi_enable = 0; +} + EXPORT_SYMBOL(pci_enable_msi); EXPORT_SYMBOL(pci_disable_msi); EXPORT_SYMBOL(pci_enable_msix); diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c index 0aa14c92b57..f22f69ac644 100644 --- a/drivers/pci/pci-driver.c +++ b/drivers/pci/pci-driver.c @@ -53,11 +53,10 @@ store_new_id(struct device_driver *driver, const char *buf, size_t count) if (fields < 0) return -EINVAL; - dynid = kmalloc(sizeof(*dynid), GFP_KERNEL); + dynid = kzalloc(sizeof(*dynid), GFP_KERNEL); if (!dynid) return -ENOMEM; - memset(dynid, 0, sizeof(*dynid)); INIT_LIST_HEAD(&dynid->node); dynid->id.vendor = vendor; dynid->id.device = device; @@ -380,14 +379,6 @@ int __pci_register_driver(struct pci_driver *drv, struct module *owner) /* initialize common driver fields */ drv->driver.name = drv->name; drv->driver.bus = &pci_bus_type; - /* FIXME, once all of the existing PCI drivers have been fixed to set - * the pci shutdown function, this test can go away. */ - if (!drv->driver.shutdown) - drv->driver.shutdown = pci_device_shutdown; - else - printk(KERN_WARNING "Warning: PCI driver %s has a struct " - "device_driver shutdown method, please update!\n", - drv->name); drv->driver.owner = owner; drv->driver.kobj.ktype = &pci_driver_kobj_type; @@ -514,6 +505,7 @@ struct bus_type pci_bus_type = { .probe = pci_device_probe, .remove = pci_device_remove, .suspend = pci_device_suspend, + .shutdown = pci_device_shutdown, .resume = pci_device_resume, .dev_attrs = pci_dev_attrs, }; diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c index 965a5934623..56ac2bc003c 100644 --- a/drivers/pci/pci-sysfs.c +++ b/drivers/pci/pci-sysfs.c @@ -501,9 +501,8 @@ int pci_create_sysfs_dev_files (struct pci_dev *pdev) if (pci_resource_len(pdev, PCI_ROM_RESOURCE)) { struct bin_attribute *rom_attr; - rom_attr = kmalloc(sizeof(*rom_attr), GFP_ATOMIC); + rom_attr = kzalloc(sizeof(*rom_attr), GFP_ATOMIC); if (rom_attr) { - memset(rom_attr, 0x00, sizeof(*rom_attr)); pdev->rom_attr = rom_attr; rom_attr->size = pci_resource_len(pdev, PCI_ROM_RESOURCE); rom_attr->attr.name = "rom"; diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c index d2d18791664..bea1ad1ad5b 100644 --- a/drivers/pci/pci.c +++ b/drivers/pci/pci.c @@ -19,7 +19,6 @@ #include <asm/dma.h> /* isa_dma_bridge_buggy */ #include "pci.h" -#if 0 /** * pci_bus_max_busnr - returns maximum PCI bus number of given bus' children @@ -34,7 +33,7 @@ pci_bus_max_busnr(struct pci_bus* bus) struct list_head *tmp; unsigned char max, n; - max = bus->number; + max = bus->subordinate; list_for_each(tmp, &bus->children) { n = pci_bus_max_busnr(pci_bus_b(tmp)); if(n > max) @@ -42,7 +41,9 @@ pci_bus_max_busnr(struct pci_bus* bus) } return max; } +EXPORT_SYMBOL_GPL(pci_bus_max_busnr); +#if 0 /** * pci_max_busnr - returns maximum PCI bus number * @@ -495,9 +496,8 @@ pci_enable_device_bars(struct pci_dev *dev, int bars) int pci_enable_device(struct pci_dev *dev) { - int err; - - if ((err = pci_enable_device_bars(dev, (1 << PCI_NUM_RESOURCES) - 1))) + int err = pci_enable_device_bars(dev, (1 << PCI_NUM_RESOURCES) - 1); + if (err) return err; pci_fixup_device(pci_fixup_enable, dev); dev->is_enabled = 1; @@ -639,7 +639,7 @@ void pci_release_region(struct pci_dev *pdev, int bar) * Returns 0 on success, or %EBUSY on error. 
A warning * message is also printed on failure. */ -int pci_request_region(struct pci_dev *pdev, int bar, char *res_name) +int pci_request_region(struct pci_dev *pdev, int bar, const char *res_name) { if (pci_resource_len(pdev, bar) == 0) return 0; @@ -697,7 +697,7 @@ void pci_release_regions(struct pci_dev *pdev) * Returns 0 on success, or %EBUSY on error. A warning * message is also printed on failure. */ -int pci_request_regions(struct pci_dev *pdev, char *res_name) +int pci_request_regions(struct pci_dev *pdev, const char *res_name) { int i; @@ -900,8 +900,12 @@ static int __devinit pci_setup(char *str) if (k) *k++ = 0; if (*str && (str = pcibios_setup(str)) && *str) { - /* PCI layer options should be handled here */ - printk(KERN_ERR "PCI: Unknown option `%s'\n", str); + if (!strcmp(str, "nomsi")) { + pci_no_msi(); + } else { + printk(KERN_ERR "PCI: Unknown option `%s'\n", + str); + } } str = k; } diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h index a6dfee2f6d2..8f3fb47ea67 100644 --- a/drivers/pci/pci.h +++ b/drivers/pci/pci.h @@ -50,8 +50,10 @@ extern int pci_msi_quirk; #ifdef CONFIG_PCI_MSI void disable_msi_mode(struct pci_dev *dev, int pos, int type); +void pci_no_msi(void); #else static inline void disable_msi_mode(struct pci_dev *dev, int pos, int type) { } +static inline void pci_no_msi(void) { } #endif extern int pcie_mch_quirk; diff --git a/drivers/pci/pcie/portdrv.h b/drivers/pci/pcie/portdrv.h index a63bd8f7260..1d317d22ee8 100644 --- a/drivers/pci/pcie/portdrv.h +++ b/drivers/pci/pcie/portdrv.h @@ -29,7 +29,6 @@ struct pcie_port_device_ext { int interrupt_mode; /* [0:INTx | 1:MSI | 2:MSI-X] */ - unsigned int saved_msi_config_space[5]; }; extern struct bus_type pcie_port_bus_type; diff --git a/drivers/pci/pcie/portdrv_core.c b/drivers/pci/pcie/portdrv_core.c index e4e5f1e8d81..55c66226786 100644 --- a/drivers/pci/pcie/portdrv_core.c +++ b/drivers/pci/pcie/portdrv_core.c @@ -248,11 +248,10 @@ static struct pcie_device* alloc_pcie_device(struct pci_dev *parent, { struct pcie_device *device; - device = kmalloc(sizeof(struct pcie_device), GFP_KERNEL); + device = kzalloc(sizeof(struct pcie_device), GFP_KERNEL); if (!device) return NULL; - memset(device, 0, sizeof(struct pcie_device)); pcie_device_init(parent, device, port_type, service_type, irq,irq_mode); printk(KERN_DEBUG "Allocate Port Service[%s]\n", device->device.bus_id); return device; diff --git a/drivers/pci/pcie/portdrv_pci.c b/drivers/pci/pcie/portdrv_pci.c index 02260141dc8..50bfc1b2f3b 100644 --- a/drivers/pci/pcie/portdrv_pci.c +++ b/drivers/pci/pcie/portdrv_pci.c @@ -30,75 +30,16 @@ MODULE_LICENSE("GPL"); /* global data */ static const char device_name[] = "pcieport-driver"; -static void pci_save_msi_state(struct pci_dev *dev) +static int pcie_portdrv_save_config(struct pci_dev *dev) { - struct pcie_port_device_ext *p_ext = pci_get_drvdata(dev); - int i = 0, pos; - u16 control; - - if ((pos = pci_find_capability(dev, PCI_CAP_ID_MSI)) <= 0) - return; - - pci_read_config_dword(dev, pos, &p_ext->saved_msi_config_space[i++]); - control = p_ext->saved_msi_config_space[0] >> 16; - pci_read_config_dword(dev, pos + PCI_MSI_ADDRESS_LO, - &p_ext->saved_msi_config_space[i++]); - if (control & PCI_MSI_FLAGS_64BIT) { - pci_read_config_dword(dev, pos + PCI_MSI_ADDRESS_HI, - &p_ext->saved_msi_config_space[i++]); - pci_read_config_dword(dev, pos + PCI_MSI_DATA_64, - &p_ext->saved_msi_config_space[i++]); - } else - pci_read_config_dword(dev, pos + PCI_MSI_DATA_32, - &p_ext->saved_msi_config_space[i++]); - if (control & 
PCI_MSI_FLAGS_MASKBIT) - pci_read_config_dword(dev, pos + PCI_MSI_MASK_BIT, - &p_ext->saved_msi_config_space[i++]); -} - -static void pci_restore_msi_state(struct pci_dev *dev) -{ - struct pcie_port_device_ext *p_ext = pci_get_drvdata(dev); - int i = 0, pos; - u16 control; - - if ((pos = pci_find_capability(dev, PCI_CAP_ID_MSI)) <= 0) - return; - - control = p_ext->saved_msi_config_space[i++] >> 16; - pci_write_config_word(dev, pos + PCI_MSI_FLAGS, control); - pci_write_config_dword(dev, pos + PCI_MSI_ADDRESS_LO, - p_ext->saved_msi_config_space[i++]); - if (control & PCI_MSI_FLAGS_64BIT) { - pci_write_config_dword(dev, pos + PCI_MSI_ADDRESS_HI, - p_ext->saved_msi_config_space[i++]); - pci_write_config_dword(dev, pos + PCI_MSI_DATA_64, - p_ext->saved_msi_config_space[i++]); - } else - pci_write_config_dword(dev, pos + PCI_MSI_DATA_32, - p_ext->saved_msi_config_space[i++]); - if (control & PCI_MSI_FLAGS_MASKBIT) - pci_write_config_dword(dev, pos + PCI_MSI_MASK_BIT, - p_ext->saved_msi_config_space[i++]); -} - -static void pcie_portdrv_save_config(struct pci_dev *dev) -{ - struct pcie_port_device_ext *p_ext = pci_get_drvdata(dev); - - pci_save_state(dev); - if (p_ext->interrupt_mode == PCIE_PORT_MSI_MODE) - pci_save_msi_state(dev); + return pci_save_state(dev); } static int pcie_portdrv_restore_config(struct pci_dev *dev) { - struct pcie_port_device_ext *p_ext = pci_get_drvdata(dev); int retval; pci_restore_state(dev); - if (p_ext->interrupt_mode == PCIE_PORT_MSI_MODE) - pci_restore_msi_state(dev); retval = pci_enable_device(dev); if (retval) return retval; @@ -149,7 +90,8 @@ static int pcie_portdrv_suspend (struct pci_dev *dev, pm_message_t state) { int ret = pcie_port_device_suspend(dev, state); - pcie_portdrv_save_config(dev); + if (!ret) + ret = pcie_portdrv_save_config(dev); return ret; } diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c index adfad4fd6a1..a10ed9dab2c 100644 --- a/drivers/pci/probe.c +++ b/drivers/pci/probe.c @@ -33,10 +33,9 @@ LIST_HEAD(pci_devices); */ static void pci_create_legacy_files(struct pci_bus *b) { - b->legacy_io = kmalloc(sizeof(struct bin_attribute) * 2, + b->legacy_io = kzalloc(sizeof(struct bin_attribute) * 2, GFP_ATOMIC); if (b->legacy_io) { - memset(b->legacy_io, 0, sizeof(struct bin_attribute) * 2); b->legacy_io->attr.name = "legacy_io"; b->legacy_io->size = 0xffff; b->legacy_io->attr.mode = S_IRUSR | S_IWUSR; @@ -320,9 +319,8 @@ static struct pci_bus * __devinit pci_alloc_bus(void) { struct pci_bus *b; - b = kmalloc(sizeof(*b), GFP_KERNEL); + b = kzalloc(sizeof(*b), GFP_KERNEL); if (b) { - memset(b, 0, sizeof(*b)); INIT_LIST_HEAD(&b->node); INIT_LIST_HEAD(&b->children); INIT_LIST_HEAD(&b->devices); @@ -347,6 +345,7 @@ pci_alloc_child_bus(struct pci_bus *parent, struct pci_dev *bridge, int busnr) child->parent = parent; child->ops = parent->ops; child->sysdata = parent->sysdata; + child->bus_flags = parent->bus_flags; child->bridge = get_device(&bridge->dev); child->class_dev.class = &pcibus_class; @@ -456,7 +455,7 @@ int __devinit pci_scan_bridge(struct pci_bus *bus, struct pci_dev * dev, int max * pass and just note the configuration. 
*/ if (pass) - return max; + goto out; busnr = (buses >> 8) & 0xFF; /* @@ -466,12 +465,12 @@ int __devinit pci_scan_bridge(struct pci_bus *bus, struct pci_dev * dev, int max if (pci_find_bus(pci_domain_nr(bus), busnr)) { printk(KERN_INFO "PCI: Bus %04x:%02x already known\n", pci_domain_nr(bus), busnr); - return max; + goto out; } child = pci_add_new_bus(bus, dev, busnr); if (!child) - return max; + goto out; child->primary = buses & 0xFF; child->subordinate = (buses >> 16) & 0xFF; child->bridge_ctl = bctl; @@ -496,7 +495,7 @@ int __devinit pci_scan_bridge(struct pci_bus *bus, struct pci_dev * dev, int max bus ranges. */ pci_write_config_dword(dev, PCI_PRIMARY_BUS, buses & ~0xffffff); - return max; + goto out; } /* Clear errors */ @@ -505,7 +504,7 @@ int __devinit pci_scan_bridge(struct pci_bus *bus, struct pci_dev * dev, int max /* Prevent assigning a bus number that already exists. * This can happen when a bridge is hot-plugged */ if (pci_find_bus(pci_domain_nr(bus), max+1)) - return max; + goto out; child = pci_add_new_bus(bus, dev, ++max); buses = (buses & 0xff000000) | ((unsigned int)(child->primary) << 0) @@ -537,6 +536,11 @@ int __devinit pci_scan_bridge(struct pci_bus *bus, struct pci_dev * dev, int max pci_fixup_parent_subordinate_busnr(child, max); /* Now we can scan all subordinate buses... */ max = pci_scan_child_bus(child); + /* + * now fix it up again since we have found + * the real value of max. + */ + pci_fixup_parent_subordinate_busnr(child, max); } else { /* * For CardBus bridges, we leave 4 bus numbers @@ -576,8 +580,6 @@ int __devinit pci_scan_bridge(struct pci_bus *bus, struct pci_dev * dev, int max pci_write_config_byte(dev, PCI_SUBORDINATE_BUS, max); } - pci_write_config_word(dev, PCI_BRIDGE_CONTROL, bctl); - sprintf(child->name, (is_cardbus ? "PCI CardBus #%02x" : "PCI Bus #%02x"), child->number); while (bus->parent) { @@ -585,17 +587,22 @@ int __devinit pci_scan_bridge(struct pci_bus *bus, struct pci_dev * dev, int max (child->number > bus->subordinate) || (child->number < bus->number) || (child->subordinate < bus->number)) { - printk(KERN_WARNING "PCI: Bus #%02x (-#%02x) may be " + printk(KERN_WARNING "PCI: Bus #%02x (-#%02x) is " "hidden behind%s bridge #%02x (-#%02x)%s\n", child->number, child->subordinate, bus->self->transparent ? " transparent" : " ", bus->number, bus->subordinate, pcibios_assign_all_busses() ? " " : " (try 'pci=assign-busses')"); + printk(KERN_WARNING "Please report the result to " + "linux-kernel to fix this permanently\n"); } bus = bus->parent; } +out: + pci_write_config_word(dev, PCI_BRIDGE_CONTROL, bctl); + return max; } @@ -788,11 +795,10 @@ pci_scan_device(struct pci_bus *bus, int devfn) if (pci_bus_read_config_byte(bus, devfn, PCI_HEADER_TYPE, &hdr_type)) return NULL; - dev = kmalloc(sizeof(struct pci_dev), GFP_KERNEL); + dev = kzalloc(sizeof(struct pci_dev), GFP_KERNEL); if (!dev) return NULL; - memset(dev, 0, sizeof(struct pci_dev)); dev->bus = bus; dev->sysdata = bus->sysdata; dev->dev.parent = bus->bridge; diff --git a/drivers/pci/proc.c b/drivers/pci/proc.c index 92a88576083..54b2ebc9c91 100644 --- a/drivers/pci/proc.c +++ b/drivers/pci/proc.c @@ -458,131 +458,6 @@ int pci_proc_detach_bus(struct pci_bus* bus) return 0; } -#ifdef CONFIG_PCI_LEGACY_PROC - -/* - * Backward compatible /proc/pci interface. - */ - -/* - * Convert some of the configuration space registers of the device at - * address (bus,devfn) into a string (possibly several lines each). - * The configuration string is stored starting at buf[len]. 
If the - * string would exceed the size of the buffer (SIZE), 0 is returned. - */ -static int show_dev_config(struct seq_file *m, void *v) -{ - struct pci_dev *dev = v; - struct pci_dev *first_dev; - struct pci_driver *drv; - u32 class_rev; - unsigned char latency, min_gnt, max_lat; - int reg; - - first_dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, NULL); - if (dev == first_dev) - seq_puts(m, "PCI devices found:\n"); - pci_dev_put(first_dev); - - drv = pci_dev_driver(dev); - - pci_user_read_config_dword(dev, PCI_CLASS_REVISION, &class_rev); - pci_user_read_config_byte (dev, PCI_LATENCY_TIMER, &latency); - pci_user_read_config_byte (dev, PCI_MIN_GNT, &min_gnt); - pci_user_read_config_byte (dev, PCI_MAX_LAT, &max_lat); - seq_printf(m, " Bus %2d, device %3d, function %2d:\n", - dev->bus->number, PCI_SLOT(dev->devfn), PCI_FUNC(dev->devfn)); - seq_printf(m, " Class %04x", class_rev >> 16); - seq_printf(m, ": PCI device %04x:%04x", dev->vendor, dev->device); - seq_printf(m, " (rev %d).\n", class_rev & 0xff); - - if (dev->irq) - seq_printf(m, " IRQ %d.\n", dev->irq); - - if (latency || min_gnt || max_lat) { - seq_printf(m, " Master Capable. "); - if (latency) - seq_printf(m, "Latency=%d. ", latency); - else - seq_puts(m, "No bursts. "); - if (min_gnt) - seq_printf(m, "Min Gnt=%d.", min_gnt); - if (max_lat) - seq_printf(m, "Max Lat=%d.", max_lat); - seq_putc(m, '\n'); - } - - for (reg = 0; reg < 6; reg++) { - struct resource *res = dev->resource + reg; - unsigned long base, end, flags; - - base = res->start; - end = res->end; - flags = res->flags; - if (!end) - continue; - - if (flags & PCI_BASE_ADDRESS_SPACE_IO) { - seq_printf(m, " I/O at 0x%lx [0x%lx].\n", - base, end); - } else { - const char *pref, *type = "unknown"; - - if (flags & PCI_BASE_ADDRESS_MEM_PREFETCH) - pref = "P"; - else - pref = "Non-p"; - switch (flags & PCI_BASE_ADDRESS_MEM_TYPE_MASK) { - case PCI_BASE_ADDRESS_MEM_TYPE_32: - type = "32 bit"; break; - case PCI_BASE_ADDRESS_MEM_TYPE_1M: - type = "20 bit"; break; - case PCI_BASE_ADDRESS_MEM_TYPE_64: - type = "64 bit"; break; - } - seq_printf(m, " %srefetchable %s memory at " - "0x%lx [0x%lx].\n", pref, type, - base, - end); - } - } - return 0; -} - -static struct seq_operations proc_pci_op = { - .start = pci_seq_start, - .next = pci_seq_next, - .stop = pci_seq_stop, - .show = show_dev_config -}; - -static int proc_pci_open(struct inode *inode, struct file *file) -{ - return seq_open(file, &proc_pci_op); -} -static struct file_operations proc_pci_operations = { - .open = proc_pci_open, - .read = seq_read, - .llseek = seq_lseek, - .release = seq_release, -}; - -static void legacy_proc_init(void) -{ - struct proc_dir_entry * entry = create_proc_entry("pci", 0, NULL); - if (entry) - entry->proc_fops = &proc_pci_operations; -} - -#else - -static void legacy_proc_init(void) -{ - -} - -#endif /* CONFIG_PCI_LEGACY_PROC */ - static int proc_bus_pci_dev_open(struct inode *inode, struct file *file) { return seq_open(file, &proc_bus_pci_devices_op); @@ -606,7 +481,6 @@ static int __init pci_proc_init(void) while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) { pci_proc_attach_device(dev); } - legacy_proc_init(); return 0; } diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c index dda6099903c..4970f47be72 100644 --- a/drivers/pci/quirks.c +++ b/drivers/pci/quirks.c @@ -575,8 +575,11 @@ static void __init quirk_amd_8131_ioapic(struct pci_dev *dev) { unsigned char revid, tmp; - pci_msi_quirk = 1; - printk(KERN_WARNING "PCI: MSI quirk detected. 
pci_msi_quirk set.\n"); + if (dev->subordinate) { + printk(KERN_WARNING "PCI: MSI quirk detected. " + "PCI_BUS_FLAGS_NO_MSI set for subordinate bus.\n"); + dev->subordinate->bus_flags |= PCI_BUS_FLAGS_NO_MSI; + } if (nr_ioapics == 0) return; @@ -934,6 +937,12 @@ static void __init asus_hides_smbus_hostbridge(struct pci_dev *dev) case 0x12bd: /* HP D530 */ asus_hides_smbus = 1; } + if (dev->device == PCI_DEVICE_ID_INTEL_82915GM_HB) { + switch (dev->subsystem_device) { + case 0x099c: /* HP Compaq nx6110 */ + asus_hides_smbus = 1; + } + } } else if (unlikely(dev->subsystem_vendor == PCI_VENDOR_ID_TOSHIBA)) { if (dev->device == PCI_DEVICE_ID_INTEL_82855GM_HB) switch(dev->subsystem_device) { @@ -1068,6 +1077,37 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_651, quirk_sis_96x_ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_735, quirk_sis_96x_compatible ); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_503, quirk_sis_503 ); +/* + * On ASUS A8V and A8V Deluxe boards, the onboard AC97 audio controller + * and MC97 modem controller are disabled when a second PCI soundcard is + * present. This patch, tweaking the VT8237 ISA bridge, enables them. + * -- bjd + */ +static void __init asus_hides_ac97_lpc(struct pci_dev *dev) +{ + u8 val; + int asus_hides_ac97 = 0; + + if (likely(dev->subsystem_vendor == PCI_VENDOR_ID_ASUSTEK)) { + if (dev->device == PCI_DEVICE_ID_VIA_8237) + asus_hides_ac97 = 1; + } + + if (!asus_hides_ac97) + return; + + pci_read_config_byte(dev, 0x50, &val); + if (val & 0xc0) { + pci_write_config_byte(dev, 0x50, val & (~0xc0)); + pci_read_config_byte(dev, 0x50, &val); + if (val & 0xc0) + printk(KERN_INFO "PCI: onboard AC97/MC97 devices continue to play 'hide and seek'! 0x%x\n", val); + else + printk(KERN_INFO "PCI: enabled onboard AC97/MC97 devices\n"); + } +} +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237, asus_hides_ac97_lpc ); + DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_961, quirk_sis_96x_smbus ); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_962, quirk_sis_96x_smbus ); @@ -1242,6 +1282,33 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1, quirk_pc DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXHV, quirk_pcie_pxh); +/* + * Fixup the cardbus bridges on the IBM Dock II docking station + */ +static void __devinit quirk_ibm_dock2_cardbus(struct pci_dev *dev) +{ + u32 val; + + /* + * tie the 2 interrupt pins to INTA, and configure the + * multifunction routing register to handle this. 
+ */ + if ((dev->subsystem_vendor == PCI_VENDOR_ID_IBM) && + (dev->subsystem_device == 0x0148)) { + printk(KERN_INFO "PCI: Found IBM Dock II Cardbus Bridge " + "applying quirk\n"); + pci_read_config_dword(dev, 0x8c, &val); + val = ((val & 0xffffff00) | 0x1002); + pci_write_config_dword(dev, 0x8c, val); + pci_read_config_dword(dev, 0x80, &val); + val = ((val & 0x00ffff00) | 0x2864c077); + pci_write_config_dword(dev, 0x80, val); + } +} + +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_1420, + quirk_ibm_dock2_cardbus); + static void __devinit quirk_netmos(struct pci_dev *dev) { unsigned int num_parallel = (dev->subsystem_device & 0xf0) >> 4; diff --git a/drivers/pci/search.c b/drivers/pci/search.c index 05fa91a31c6..ce7dd6e7be6 100644 --- a/drivers/pci/search.c +++ b/drivers/pci/search.c @@ -246,9 +246,9 @@ pci_get_subsys(unsigned int vendor, unsigned int device, } dev = NULL; exit: - pci_dev_put(from); dev = pci_dev_get(dev); spin_unlock(&pci_bus_lock); + pci_dev_put(from); return dev; } @@ -339,9 +339,9 @@ struct pci_dev *pci_get_class(unsigned int class, struct pci_dev *from) } dev = NULL; exit: - pci_dev_put(from); dev = pci_dev_get(dev); spin_unlock(&pci_bus_lock); + pci_dev_put(from); return dev; } diff --git a/drivers/pnp/pnpbios/rsparser.c b/drivers/pnp/pnpbios/rsparser.c index 5e38cd7335f..c89c98a2cca 100644 --- a/drivers/pnp/pnpbios/rsparser.c +++ b/drivers/pnp/pnpbios/rsparser.c @@ -448,11 +448,7 @@ pnpbios_parse_resource_option_data(unsigned char * p, unsigned char * end, struc break; case SMALL_TAG_END: - if (option_independent != option) - printk(KERN_WARNING "PnPBIOS: Missing SMALL_TAG_ENDDEP tag\n"); - p = p + 2; - return (unsigned char *)p; - break; + return p + 2; default: /* an unkown tag */ len_err: diff --git a/drivers/s390/block/dasd_ioctl.c b/drivers/s390/block/dasd_ioctl.c index fafeeae5267..f9930552ab5 100644 --- a/drivers/s390/block/dasd_ioctl.c +++ b/drivers/s390/block/dasd_ioctl.c @@ -151,9 +151,9 @@ dasd_ioctl_enable(struct block_device *bdev, int no, long args) return -ENODEV; dasd_enable_device(device); /* Formatting the dasd device can change the capacity. */ - down(&bdev->bd_sem); + mutex_lock(&bdev->bd_mutex); i_size_write(bdev->bd_inode, (loff_t)get_capacity(device->gdp) << 9); - up(&bdev->bd_sem); + mutex_unlock(&bdev->bd_mutex); return 0; } @@ -184,9 +184,9 @@ dasd_ioctl_disable(struct block_device *bdev, int no, long args) * Set i_size to zero, since read, write, etc. check against this * value. 
*/ - down(&bdev->bd_sem); + mutex_lock(&bdev->bd_mutex); i_size_write(bdev->bd_inode, 0); - up(&bdev->bd_sem); + mutex_unlock(&bdev->bd_mutex); return 0; } diff --git a/drivers/scsi/ide-scsi.c b/drivers/scsi/ide-scsi.c index 0cf0e4c7ac0..39b760a2424 100644 --- a/drivers/scsi/ide-scsi.c +++ b/drivers/scsi/ide-scsi.c @@ -47,6 +47,7 @@ #include <linux/ide.h> #include <linux/scatterlist.h> #include <linux/delay.h> +#include <linux/mutex.h> #include <asm/io.h> #include <asm/bitops.h> @@ -109,7 +110,7 @@ typedef struct ide_scsi_obj { unsigned long log; /* log flags */ } idescsi_scsi_t; -static DECLARE_MUTEX(idescsi_ref_sem); +static DEFINE_MUTEX(idescsi_ref_mutex); #define ide_scsi_g(disk) \ container_of((disk)->private_data, struct ide_scsi_obj, driver) @@ -118,19 +119,19 @@ static struct ide_scsi_obj *ide_scsi_get(struct gendisk *disk) { struct ide_scsi_obj *scsi = NULL; - down(&idescsi_ref_sem); + mutex_lock(&idescsi_ref_mutex); scsi = ide_scsi_g(disk); if (scsi) scsi_host_get(scsi->host); - up(&idescsi_ref_sem); + mutex_unlock(&idescsi_ref_mutex); return scsi; } static void ide_scsi_put(struct ide_scsi_obj *scsi) { - down(&idescsi_ref_sem); + mutex_lock(&idescsi_ref_mutex); scsi_host_put(scsi->host); - up(&idescsi_ref_sem); + mutex_unlock(&idescsi_ref_mutex); } static inline idescsi_scsi_t *scsihost_to_idescsi(struct Scsi_Host *host) diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c index f9c1192dc15..7c80711e18e 100644 --- a/drivers/scsi/sr.c +++ b/drivers/scsi/sr.c @@ -71,7 +71,7 @@ MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_CDROM_MAJOR); #define SR_CAPABILITIES \ (CDC_CLOSE_TRAY|CDC_OPEN_TRAY|CDC_LOCK|CDC_SELECT_SPEED| \ CDC_SELECT_DISC|CDC_MULTI_SESSION|CDC_MCN|CDC_MEDIA_CHANGED| \ - CDC_PLAY_AUDIO|CDC_RESET|CDC_IOCTLS|CDC_DRIVE_STATUS| \ + CDC_PLAY_AUDIO|CDC_RESET|CDC_DRIVE_STATUS| \ CDC_CD_R|CDC_CD_RW|CDC_DVD|CDC_DVD_R|CDC_DVD_RAM|CDC_GENERIC_PACKET| \ CDC_MRW|CDC_MRW_W|CDC_RAM) @@ -118,7 +118,6 @@ static struct cdrom_device_ops sr_dops = { .get_mcn = sr_get_mcn, .reset = sr_reset, .audio_ioctl = sr_audio_ioctl, - .dev_ioctl = sr_dev_ioctl, .capability = SR_CAPABILITIES, .generic_packet = sr_packet, }; @@ -456,17 +455,33 @@ static int sr_block_ioctl(struct inode *inode, struct file *file, unsigned cmd, { struct scsi_cd *cd = scsi_cd(inode->i_bdev->bd_disk); struct scsi_device *sdev = cd->device; + void __user *argp = (void __user *)arg; + int ret; - /* - * Send SCSI addressing ioctls directly to mid level, send other - * ioctls to cdrom/block level. - */ - switch (cmd) { - case SCSI_IOCTL_GET_IDLUN: - case SCSI_IOCTL_GET_BUS_NUMBER: - return scsi_ioctl(sdev, cmd, (void __user *)arg); + /* + * Send SCSI addressing ioctls directly to mid level, send other + * ioctls to cdrom/block level. + */ + switch (cmd) { + case SCSI_IOCTL_GET_IDLUN: + case SCSI_IOCTL_GET_BUS_NUMBER: + return scsi_ioctl(sdev, cmd, argp); } - return cdrom_ioctl(file, &cd->cdi, inode, cmd, arg); + + ret = cdrom_ioctl(file, &cd->cdi, inode, cmd, arg); + if (ret != ENOSYS) + return ret; + + /* + * ENODEV means that we didn't recognise the ioctl, or that we + * cannot execute it in the current device state. 
In either + * case fall through to scsi_ioctl, which will return ENDOEV again + * if it doesn't recognise the ioctl + */ + ret = scsi_nonblockable_ioctl(sdev, cmd, argp, NULL); + if (ret != -ENODEV) + return ret; + return scsi_ioctl(sdev, cmd, argp); } static int sr_block_media_changed(struct gendisk *disk) diff --git a/drivers/scsi/sr.h b/drivers/scsi/sr.h index d2bcd99c272..d65de9621b2 100644 --- a/drivers/scsi/sr.h +++ b/drivers/scsi/sr.h @@ -55,7 +55,6 @@ int sr_get_mcn(struct cdrom_device_info *, struct cdrom_mcn *); int sr_reset(struct cdrom_device_info *); int sr_select_speed(struct cdrom_device_info *cdi, int speed); int sr_audio_ioctl(struct cdrom_device_info *, unsigned int, void *); -int sr_dev_ioctl(struct cdrom_device_info *, unsigned int, unsigned long); int sr_is_xa(Scsi_CD *); diff --git a/drivers/scsi/sr_ioctl.c b/drivers/scsi/sr_ioctl.c index b65462f7648..d1268cb4683 100644 --- a/drivers/scsi/sr_ioctl.c +++ b/drivers/scsi/sr_ioctl.c @@ -562,22 +562,3 @@ int sr_is_xa(Scsi_CD *cd) #endif return is_xa; } - -int sr_dev_ioctl(struct cdrom_device_info *cdi, - unsigned int cmd, unsigned long arg) -{ - Scsi_CD *cd = cdi->handle; - int ret; - - ret = scsi_nonblockable_ioctl(cd->device, cmd, - (void __user *)arg, NULL); - /* - * ENODEV means that we didn't recognise the ioctl, or that we - * cannot execute it in the current device state. In either - * case fall through to scsi_ioctl, which will return ENDOEV again - * if it doesn't recognise the ioctl - */ - if (ret != -ENODEV) - return ret; - return scsi_ioctl(cd->device, cmd, (void __user *)arg); -} diff --git a/drivers/serial/68328serial.c b/drivers/serial/68328serial.c index 7f0f35a05dc..b88a7c1158a 100644 --- a/drivers/serial/68328serial.c +++ b/drivers/serial/68328serial.c @@ -101,8 +101,6 @@ struct tty_driver *serial_driver; #define RS_ISR_PASS_LIMIT 256 -#define _INLINE_ inline - static void change_speed(struct m68k_serial *info); /* @@ -262,7 +260,7 @@ static void batten_down_hatches(void) /* Drop into the debugger */ } -static _INLINE_ void status_handle(struct m68k_serial *info, unsigned short status) +static void status_handle(struct m68k_serial *info, unsigned short status) { #if 0 if(status & DCD) { @@ -289,7 +287,8 @@ static _INLINE_ void status_handle(struct m68k_serial *info, unsigned short stat return; } -static _INLINE_ void receive_chars(struct m68k_serial *info, struct pt_regs *regs, unsigned short rx) +static void receive_chars(struct m68k_serial *info, struct pt_regs *regs, + unsigned short rx) { struct tty_struct *tty = info->tty; m68328_uart *uart = &uart_addr[info->line]; @@ -359,7 +358,7 @@ clear_and_exit: return; } -static _INLINE_ void transmit_chars(struct m68k_serial *info) +static void transmit_chars(struct m68k_serial *info) { m68328_uart *uart = &uart_addr[info->line]; diff --git a/drivers/serial/au1x00_uart.c b/drivers/serial/au1x00_uart.c index 29f94bbb79b..948880ac587 100644 --- a/drivers/serial/au1x00_uart.c +++ b/drivers/serial/au1x00_uart.c @@ -133,13 +133,12 @@ static const struct serial_uart_config uart_config[PORT_MAX_8250+1] = { { "AU1X00_UART",16, UART_CLEAR_FIFO | UART_USE_FIFO }, }; -static _INLINE_ unsigned int serial_in(struct uart_8250_port *up, int offset) +static unsigned int serial_in(struct uart_8250_port *up, int offset) { return au_readl((unsigned long)up->port.membase + offset); } -static _INLINE_ void -serial_out(struct uart_8250_port *up, int offset, int value) +static void serial_out(struct uart_8250_port *up, int offset, int value) { au_writel(value, (unsigned 
long)up->port.membase + offset); } @@ -237,7 +236,7 @@ static void serial8250_enable_ms(struct uart_port *port) serial_out(up, UART_IER, up->ier); } -static _INLINE_ void +static void receive_chars(struct uart_8250_port *up, int *status, struct pt_regs *regs) { struct tty_struct *tty = up->port.info->tty; @@ -312,7 +311,7 @@ receive_chars(struct uart_8250_port *up, int *status, struct pt_regs *regs) spin_lock(&up->port.lock); } -static _INLINE_ void transmit_chars(struct uart_8250_port *up) +static void transmit_chars(struct uart_8250_port *up) { struct circ_buf *xmit = &up->port.info->xmit; int count; @@ -346,7 +345,7 @@ static _INLINE_ void transmit_chars(struct uart_8250_port *up) serial8250_stop_tx(&up->port); } -static _INLINE_ void check_modem_status(struct uart_8250_port *up) +static void check_modem_status(struct uart_8250_port *up) { int status; diff --git a/drivers/serial/crisv10.c b/drivers/serial/crisv10.c index be12623d854..89700141f87 100644 --- a/drivers/serial/crisv10.c +++ b/drivers/serial/crisv10.c @@ -481,8 +481,6 @@ static char *serial_version = "$Revision: 1.25 $"; #include "serial_compat.h" #endif -#define _INLINE_ inline - struct tty_driver *serial_driver; /* serial subtype definitions */ @@ -591,8 +589,6 @@ static void rs_throttle(struct tty_struct * tty); static void rs_wait_until_sent(struct tty_struct *tty, int timeout); static int rs_write(struct tty_struct * tty, int from_user, const unsigned char *buf, int count); -extern _INLINE_ int rs_raw_write(struct tty_struct * tty, int from_user, - const unsigned char *buf, int count); #ifdef CONFIG_ETRAX_RS485 static int e100_write_rs485(struct tty_struct * tty, int from_user, const unsigned char *buf, int count); @@ -1538,8 +1534,7 @@ e100_enable_rxdma_irq(struct e100_serial *info) /* the tx DMA uses only dma_descr interrupt */ -static _INLINE_ void -e100_disable_txdma_irq(struct e100_serial *info) +static void e100_disable_txdma_irq(struct e100_serial *info) { #ifdef SERIAL_DEBUG_INTR printk("txdma_irq(%d): 0\n",info->line); @@ -1548,8 +1543,7 @@ e100_disable_txdma_irq(struct e100_serial *info) *R_IRQ_MASK2_CLR = info->irq; } -static _INLINE_ void -e100_enable_txdma_irq(struct e100_serial *info) +static void e100_enable_txdma_irq(struct e100_serial *info) { #ifdef SERIAL_DEBUG_INTR printk("txdma_irq(%d): 1\n",info->line); @@ -1558,8 +1552,7 @@ e100_enable_txdma_irq(struct e100_serial *info) *R_IRQ_MASK2_SET = info->irq; } -static _INLINE_ void -e100_disable_txdma_channel(struct e100_serial *info) +static void e100_disable_txdma_channel(struct e100_serial *info) { unsigned long flags; @@ -1599,8 +1592,7 @@ e100_disable_txdma_channel(struct e100_serial *info) } -static _INLINE_ void -e100_enable_txdma_channel(struct e100_serial *info) +static void e100_enable_txdma_channel(struct e100_serial *info) { unsigned long flags; @@ -1625,8 +1617,7 @@ e100_enable_txdma_channel(struct e100_serial *info) restore_flags(flags); } -static _INLINE_ void -e100_disable_rxdma_channel(struct e100_serial *info) +static void e100_disable_rxdma_channel(struct e100_serial *info) { unsigned long flags; @@ -1665,8 +1656,7 @@ e100_disable_rxdma_channel(struct e100_serial *info) } -static _INLINE_ void -e100_enable_rxdma_channel(struct e100_serial *info) +static void e100_enable_rxdma_channel(struct e100_serial *info) { unsigned long flags; @@ -1913,9 +1903,7 @@ rs_start(struct tty_struct *tty) * This routine is used by the interrupt handler to schedule * processing in the software interrupt portion of the driver. 
*/ -static _INLINE_ void -rs_sched_event(struct e100_serial *info, - int event) +static void rs_sched_event(struct e100_serial *info, int event) { if (info->event & (1 << event)) return; @@ -2155,8 +2143,9 @@ add_char_and_flag(struct e100_serial *info, unsigned char data, unsigned char fl return 1; } -extern _INLINE_ unsigned int -handle_descr_data(struct e100_serial *info, struct etrax_dma_descr *descr, unsigned int recvl) +static unsigned int handle_descr_data(struct e100_serial *info, + struct etrax_dma_descr *descr, + unsigned int recvl) { struct etrax_recv_buffer *buffer = phys_to_virt(descr->buf) - sizeof *buffer; @@ -2182,8 +2171,7 @@ handle_descr_data(struct e100_serial *info, struct etrax_dma_descr *descr, unsig return recvl; } -static _INLINE_ unsigned int -handle_all_descr_data(struct e100_serial *info) +static unsigned int handle_all_descr_data(struct e100_serial *info) { struct etrax_dma_descr *descr; unsigned int recvl; @@ -2230,8 +2218,7 @@ handle_all_descr_data(struct e100_serial *info) return ret; } -static _INLINE_ void -receive_chars_dma(struct e100_serial *info) +static void receive_chars_dma(struct e100_serial *info) { struct tty_struct *tty; unsigned char rstat; @@ -2292,8 +2279,7 @@ receive_chars_dma(struct e100_serial *info) *info->icmdadr = IO_STATE(R_DMA_CH6_CMD, cmd, restart); } -static _INLINE_ int -start_recv_dma(struct e100_serial *info) +static int start_recv_dma(struct e100_serial *info) { struct etrax_dma_descr *descr = info->rec_descr; struct etrax_recv_buffer *buffer; @@ -2348,11 +2334,6 @@ start_receive(struct e100_serial *info) } -static _INLINE_ void -status_handle(struct e100_serial *info, unsigned short status) -{ -} - /* the bits in the MASK2 register are laid out like this: DMAI_EOP DMAI_DESCR DMAO_EOP DMAO_DESCR where I is the input channel and O is the output channel for the port. @@ -2454,8 +2435,7 @@ rec_interrupt(int irq, void *dev_id, struct pt_regs * regs) return IRQ_RETVAL(handled); } /* rec_interrupt */ -static _INLINE_ int -force_eop_if_needed(struct e100_serial *info) +static int force_eop_if_needed(struct e100_serial *info) { /* We check data_avail bit to determine if data has * arrived since last time @@ -2499,8 +2479,7 @@ force_eop_if_needed(struct e100_serial *info) return 1; } -extern _INLINE_ void -flush_to_flip_buffer(struct e100_serial *info) +static void flush_to_flip_buffer(struct e100_serial *info) { struct tty_struct *tty; struct etrax_recv_buffer *buffer; @@ -2611,8 +2590,7 @@ flush_to_flip_buffer(struct e100_serial *info) tty_flip_buffer_push(tty); } -static _INLINE_ void -check_flush_timeout(struct e100_serial *info) +static void check_flush_timeout(struct e100_serial *info) { /* Flip what we've got (if we can) */ flush_to_flip_buffer(info); @@ -2741,7 +2719,7 @@ TODO: The break will be delayed until an F or V character is received. 
*/ -extern _INLINE_ +static struct e100_serial * handle_ser_rx_interrupt_no_dma(struct e100_serial *info) { unsigned long data_read; @@ -2875,8 +2853,7 @@ more_data: return info; } -extern _INLINE_ -struct e100_serial* handle_ser_rx_interrupt(struct e100_serial *info) +static struct e100_serial* handle_ser_rx_interrupt(struct e100_serial *info) { unsigned char rstat; @@ -2995,7 +2972,7 @@ struct e100_serial* handle_ser_rx_interrupt(struct e100_serial *info) return info; } /* handle_ser_rx_interrupt */ -extern _INLINE_ void handle_ser_tx_interrupt(struct e100_serial *info) +static void handle_ser_tx_interrupt(struct e100_serial *info) { unsigned long flags; @@ -3621,9 +3598,8 @@ rs_flush_chars(struct tty_struct *tty) restore_flags(flags); } -extern _INLINE_ int -rs_raw_write(struct tty_struct * tty, int from_user, - const unsigned char *buf, int count) +static int rs_raw_write(struct tty_struct * tty, int from_user, + const unsigned char *buf, int count) { int c, ret = 0; struct e100_serial *info = (struct e100_serial *)tty->driver_data; @@ -4710,7 +4686,7 @@ rs_open(struct tty_struct *tty, struct file * filp) * /proc fs routines.... */ -extern _INLINE_ int line_info(char *buf, struct e100_serial *info) +static int line_info(char *buf, struct e100_serial *info) { char stat_buf[30]; int ret; diff --git a/drivers/serial/m32r_sio.c b/drivers/serial/m32r_sio.c index 876bc5e027b..e9c10c0a30f 100644 --- a/drivers/serial/m32r_sio.c +++ b/drivers/serial/m32r_sio.c @@ -248,17 +248,17 @@ static void sio_error(int *status) #endif /* CONFIG_SERIAL_M32R_PLDSIO */ -static _INLINE_ unsigned int sio_in(struct uart_sio_port *up, int offset) +static unsigned int sio_in(struct uart_sio_port *up, int offset) { return __sio_in(up->port.iobase + offset); } -static _INLINE_ void sio_out(struct uart_sio_port *up, int offset, int value) +static void sio_out(struct uart_sio_port *up, int offset, int value) { __sio_out(value, up->port.iobase + offset); } -static _INLINE_ unsigned int serial_in(struct uart_sio_port *up, int offset) +static unsigned int serial_in(struct uart_sio_port *up, int offset) { if (!offset) return 0; @@ -266,8 +266,7 @@ static _INLINE_ unsigned int serial_in(struct uart_sio_port *up, int offset) return __sio_in(offset); } -static _INLINE_ void -serial_out(struct uart_sio_port *up, int offset, int value) +static void serial_out(struct uart_sio_port *up, int offset, int value) { if (!offset) return; @@ -326,8 +325,8 @@ static void m32r_sio_enable_ms(struct uart_port *port) serial_out(up, UART_IER, up->ier); } -static _INLINE_ void receive_chars(struct uart_sio_port *up, int *status, - struct pt_regs *regs) +static void receive_chars(struct uart_sio_port *up, int *status, + struct pt_regs *regs) { struct tty_struct *tty = up->port.info->tty; unsigned char ch; @@ -400,7 +399,7 @@ static _INLINE_ void receive_chars(struct uart_sio_port *up, int *status, tty_flip_buffer_push(tty); } -static _INLINE_ void transmit_chars(struct uart_sio_port *up) +static void transmit_chars(struct uart_sio_port *up) { struct circ_buf *xmit = &up->port.info->xmit; int count; diff --git a/drivers/serial/sunsu.c b/drivers/serial/sunsu.c index 7fc3d3b41d1..9fe2283d91e 100644 --- a/drivers/serial/sunsu.c +++ b/drivers/serial/sunsu.c @@ -102,9 +102,7 @@ struct uart_sunsu_port { #endif }; -#define _INLINE_ - -static _INLINE_ unsigned int serial_in(struct uart_sunsu_port *up, int offset) +static unsigned int serial_in(struct uart_sunsu_port *up, int offset) { offset <<= up->port.regshift; @@ -121,8 +119,7 @@ static _INLINE_ 
unsigned int serial_in(struct uart_sunsu_port *up, int offset) } } -static _INLINE_ void -serial_out(struct uart_sunsu_port *up, int offset, int value) +static void serial_out(struct uart_sunsu_port *up, int offset, int value) { #ifndef CONFIG_SPARC64 /* @@ -316,7 +313,7 @@ static void sunsu_enable_ms(struct uart_port *port) spin_unlock_irqrestore(&up->port.lock, flags); } -static _INLINE_ struct tty_struct * +static struct tty_struct * receive_chars(struct uart_sunsu_port *up, unsigned char *status, struct pt_regs *regs) { struct tty_struct *tty = up->port.info->tty; @@ -395,7 +392,7 @@ receive_chars(struct uart_sunsu_port *up, unsigned char *status, struct pt_regs return tty; } -static _INLINE_ void transmit_chars(struct uart_sunsu_port *up) +static void transmit_chars(struct uart_sunsu_port *up) { struct circ_buf *xmit = &up->port.info->xmit; int count; @@ -431,7 +428,7 @@ static _INLINE_ void transmit_chars(struct uart_sunsu_port *up) __stop_tx(up); } -static _INLINE_ void check_modem_status(struct uart_sunsu_port *up) +static void check_modem_status(struct uart_sunsu_port *up) { int status; diff --git a/drivers/tc/zs.c b/drivers/tc/zs.c index 6756d0fab6f..2dffa8e303b 100644 --- a/drivers/tc/zs.c +++ b/drivers/tc/zs.c @@ -186,8 +186,6 @@ static struct tty_driver *serial_driver; #define RS_STROBE_TIME 10 #define RS_ISR_PASS_LIMIT 256 -#define _INLINE_ inline - static void probe_sccs(void); static void change_speed(struct dec_serial *info); static void rs_wait_until_sent(struct tty_struct *tty, int timeout); @@ -344,14 +342,13 @@ static inline void rs_recv_clear(struct dec_zschannel *zsc) * This routine is used by the interrupt handler to schedule * processing in the software interrupt portion of the driver. */ -static _INLINE_ void rs_sched_event(struct dec_serial *info, int event) +static void rs_sched_event(struct dec_serial *info, int event) { info->event |= 1 << event; tasklet_schedule(&info->tlet); } -static _INLINE_ void receive_chars(struct dec_serial *info, - struct pt_regs *regs) +static void receive_chars(struct dec_serial *info, struct pt_regs *regs) { struct tty_struct *tty = info->tty; unsigned char ch, stat, flag; @@ -441,7 +438,7 @@ static void transmit_chars(struct dec_serial *info) rs_sched_event(info, RS_EVENT_WRITE_WAKEUP); } -static _INLINE_ void status_handle(struct dec_serial *info) +static void status_handle(struct dec_serial *info) { unsigned char stat; diff --git a/fs/9p/mux.c b/fs/9p/mux.c index ea1134eb47c..8e8356c1c22 100644 --- a/fs/9p/mux.c +++ b/fs/9p/mux.c @@ -31,6 +31,7 @@ #include <linux/poll.h> #include <linux/kthread.h> #include <linux/idr.h> +#include <linux/mutex.h> #include "debug.h" #include "v9fs.h" @@ -110,7 +111,7 @@ static void v9fs_pollwait(struct file *filp, wait_queue_head_t * wait_address, static u16 v9fs_mux_get_tag(struct v9fs_mux_data *); static void v9fs_mux_put_tag(struct v9fs_mux_data *, u16); -static DECLARE_MUTEX(v9fs_mux_task_lock); +static DEFINE_MUTEX(v9fs_mux_task_lock); static struct workqueue_struct *v9fs_mux_wq; static int v9fs_mux_num; @@ -166,7 +167,7 @@ static int v9fs_mux_poll_start(struct v9fs_mux_data *m) dprintk(DEBUG_MUX, "mux %p muxnum %d procnum %d\n", m, v9fs_mux_num, v9fs_mux_poll_task_num); - up(&v9fs_mux_task_lock); + mutex_lock(&v9fs_mux_task_lock); n = v9fs_mux_calc_poll_procs(v9fs_mux_num + 1); if (n > v9fs_mux_poll_task_num) { @@ -225,7 +226,7 @@ static int v9fs_mux_poll_start(struct v9fs_mux_data *m) } v9fs_mux_num++; - down(&v9fs_mux_task_lock); + mutex_unlock(&v9fs_mux_task_lock); return 0; } @@ -235,7 
+236,7 @@ static void v9fs_mux_poll_stop(struct v9fs_mux_data *m) int i; struct v9fs_mux_poll_task *vpt; - up(&v9fs_mux_task_lock); + mutex_lock(&v9fs_mux_task_lock); vpt = m->poll_task; list_del(&m->mux_list); for(i = 0; i < ARRAY_SIZE(m->poll_waddr); i++) { @@ -252,7 +253,7 @@ static void v9fs_mux_poll_stop(struct v9fs_mux_data *m) v9fs_mux_poll_task_num--; } v9fs_mux_num--; - down(&v9fs_mux_task_lock); + mutex_unlock(&v9fs_mux_task_lock); } /** diff --git a/fs/adfs/file.c b/fs/adfs/file.c index afebbfde696..6af10885f9d 100644 --- a/fs/adfs/file.c +++ b/fs/adfs/file.c @@ -19,11 +19,7 @@ * * adfs regular file handling primitives */ -#include <linux/errno.h> #include <linux/fs.h> -#include <linux/fcntl.h> -#include <linux/time.h> -#include <linux/stat.h> #include <linux/buffer_head.h> /* for file_fsync() */ #include <linux/adfs_fs.h> diff --git a/fs/autofs4/autofs_i.h b/fs/autofs4/autofs_i.h index 385bed09b0d..f54c5b21f87 100644 --- a/fs/autofs4/autofs_i.h +++ b/fs/autofs4/autofs_i.h @@ -13,6 +13,7 @@ /* Internal header file for autofs */ #include <linux/auto_fs4.h> +#include <linux/mutex.h> #include <linux/list.h> /* This is the range of ioctl() numbers we claim as ours */ @@ -102,7 +103,7 @@ struct autofs_sb_info { int reghost_enabled; int needs_reghost; struct super_block *sb; - struct semaphore wq_sem; + struct mutex wq_mutex; spinlock_t fs_lock; struct autofs_wait_queue *queues; /* Wait queue pointer */ }; diff --git a/fs/autofs4/inode.c b/fs/autofs4/inode.c index 2d3082854a2..1ad98d48e55 100644 --- a/fs/autofs4/inode.c +++ b/fs/autofs4/inode.c @@ -269,7 +269,7 @@ int autofs4_fill_super(struct super_block *s, void *data, int silent) sbi->sb = s; sbi->version = 0; sbi->sub_version = 0; - init_MUTEX(&sbi->wq_sem); + mutex_init(&sbi->wq_mutex); spin_lock_init(&sbi->fs_lock); sbi->queues = NULL; s->s_blocksize = 1024; diff --git a/fs/autofs4/waitq.c b/fs/autofs4/waitq.c index 394ff36ef8f..be78e9378c0 100644 --- a/fs/autofs4/waitq.c +++ b/fs/autofs4/waitq.c @@ -178,7 +178,7 @@ int autofs4_wait(struct autofs_sb_info *sbi, struct dentry *dentry, return -ENOENT; } - if (down_interruptible(&sbi->wq_sem)) { + if (mutex_lock_interruptible(&sbi->wq_mutex)) { kfree(name); return -EINTR; } @@ -194,7 +194,7 @@ int autofs4_wait(struct autofs_sb_info *sbi, struct dentry *dentry, /* Can't wait for an expire if there's no mount */ if (notify == NFY_NONE && !d_mountpoint(dentry)) { kfree(name); - up(&sbi->wq_sem); + mutex_unlock(&sbi->wq_mutex); return -ENOENT; } @@ -202,7 +202,7 @@ int autofs4_wait(struct autofs_sb_info *sbi, struct dentry *dentry, wq = kmalloc(sizeof(struct autofs_wait_queue),GFP_KERNEL); if ( !wq ) { kfree(name); - up(&sbi->wq_sem); + mutex_unlock(&sbi->wq_mutex); return -ENOMEM; } @@ -218,10 +218,10 @@ int autofs4_wait(struct autofs_sb_info *sbi, struct dentry *dentry, wq->status = -EINTR; /* Status return if interrupted */ atomic_set(&wq->wait_ctr, 2); atomic_set(&wq->notified, 1); - up(&sbi->wq_sem); + mutex_unlock(&sbi->wq_mutex); } else { atomic_inc(&wq->wait_ctr); - up(&sbi->wq_sem); + mutex_unlock(&sbi->wq_mutex); kfree(name); DPRINTK("existing wait id = 0x%08lx, name = %.*s, nfy=%d", (unsigned long) wq->wait_queue_token, wq->len, wq->name, notify); @@ -282,19 +282,19 @@ int autofs4_wait_release(struct autofs_sb_info *sbi, autofs_wqt_t wait_queue_tok { struct autofs_wait_queue *wq, **wql; - down(&sbi->wq_sem); + mutex_lock(&sbi->wq_mutex); for ( wql = &sbi->queues ; (wq = *wql) != 0 ; wql = &wq->next ) { if ( wq->wait_queue_token == wait_queue_token ) break; } if ( !wq ) { - 
up(&sbi->wq_sem); + mutex_unlock(&sbi->wq_mutex); return -EINVAL; } *wql = wq->next; /* Unlink from chain */ - up(&sbi->wq_sem); + mutex_unlock(&sbi->wq_mutex); kfree(wq->name); wq->name = NULL; /* Do not wait on this queue */ @@ -1243,11 +1243,11 @@ static int __init init_bio(void) scale = 4; /* - * scale number of entries + * Limit number of entries reserved -- mempools are only used when + * the system is completely unable to allocate memory, so we only + * need enough to make progress. */ - bvec_pool_entries = megabytes * 2; - if (bvec_pool_entries > 256) - bvec_pool_entries = 256; + bvec_pool_entries = 1 + scale; fs_bio_set = bioset_create(BIO_POOL_SIZE, bvec_pool_entries, scale); if (!fs_bio_set) diff --git a/fs/block_dev.c b/fs/block_dev.c index 6e50346fb1e..44d05e6e34d 100644 --- a/fs/block_dev.c +++ b/fs/block_dev.c @@ -265,8 +265,8 @@ static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags) SLAB_CTOR_CONSTRUCTOR) { memset(bdev, 0, sizeof(*bdev)); - sema_init(&bdev->bd_sem, 1); - sema_init(&bdev->bd_mount_sem, 1); + mutex_init(&bdev->bd_mutex); + mutex_init(&bdev->bd_mount_mutex); INIT_LIST_HEAD(&bdev->bd_inodes); INIT_LIST_HEAD(&bdev->bd_list); inode_init_once(&ei->vfs_inode); @@ -574,7 +574,7 @@ static int do_open(struct block_device *bdev, struct file *file) } owner = disk->fops->owner; - down(&bdev->bd_sem); + mutex_lock(&bdev->bd_mutex); if (!bdev->bd_openers) { bdev->bd_disk = disk; bdev->bd_contains = bdev; @@ -605,21 +605,21 @@ static int do_open(struct block_device *bdev, struct file *file) if (ret) goto out_first; bdev->bd_contains = whole; - down(&whole->bd_sem); + mutex_lock(&whole->bd_mutex); whole->bd_part_count++; p = disk->part[part - 1]; bdev->bd_inode->i_data.backing_dev_info = whole->bd_inode->i_data.backing_dev_info; if (!(disk->flags & GENHD_FL_UP) || !p || !p->nr_sects) { whole->bd_part_count--; - up(&whole->bd_sem); + mutex_unlock(&whole->bd_mutex); ret = -ENXIO; goto out_first; } kobject_get(&p->kobj); bdev->bd_part = p; bd_set_size(bdev, (loff_t) p->nr_sects << 9); - up(&whole->bd_sem); + mutex_unlock(&whole->bd_mutex); } } else { put_disk(disk); @@ -633,13 +633,13 @@ static int do_open(struct block_device *bdev, struct file *file) if (bdev->bd_invalidated) rescan_partitions(bdev->bd_disk, bdev); } else { - down(&bdev->bd_contains->bd_sem); + mutex_lock(&bdev->bd_contains->bd_mutex); bdev->bd_contains->bd_part_count++; - up(&bdev->bd_contains->bd_sem); + mutex_unlock(&bdev->bd_contains->bd_mutex); } } bdev->bd_openers++; - up(&bdev->bd_sem); + mutex_unlock(&bdev->bd_mutex); unlock_kernel(); return 0; @@ -652,7 +652,7 @@ out_first: put_disk(disk); module_put(owner); out: - up(&bdev->bd_sem); + mutex_unlock(&bdev->bd_mutex); unlock_kernel(); if (ret) bdput(bdev); @@ -714,7 +714,7 @@ int blkdev_put(struct block_device *bdev) struct inode *bd_inode = bdev->bd_inode; struct gendisk *disk = bdev->bd_disk; - down(&bdev->bd_sem); + mutex_lock(&bdev->bd_mutex); lock_kernel(); if (!--bdev->bd_openers) { sync_blockdev(bdev); @@ -724,9 +724,9 @@ int blkdev_put(struct block_device *bdev) if (disk->fops->release) ret = disk->fops->release(bd_inode, NULL); } else { - down(&bdev->bd_contains->bd_sem); + mutex_lock(&bdev->bd_contains->bd_mutex); bdev->bd_contains->bd_part_count--; - up(&bdev->bd_contains->bd_sem); + mutex_unlock(&bdev->bd_contains->bd_mutex); } if (!bdev->bd_openers) { struct module *owner = disk->fops->owner; @@ -746,7 +746,7 @@ int blkdev_put(struct block_device *bdev) bdev->bd_contains = NULL; } unlock_kernel(); - up(&bdev->bd_sem); 
+ mutex_unlock(&bdev->bd_mutex); bdput(bdev); return ret; } diff --git a/fs/buffer.c b/fs/buffer.c index 1d3683d496f..0d6ca7bac6c 100644 --- a/fs/buffer.c +++ b/fs/buffer.c @@ -201,7 +201,7 @@ int fsync_bdev(struct block_device *bdev) * freeze_bdev -- lock a filesystem and force it into a consistent state * @bdev: blockdevice to lock * - * This takes the block device bd_mount_sem to make sure no new mounts + * This takes the block device bd_mount_mutex to make sure no new mounts * happen on bdev until thaw_bdev() is called. * If a superblock is found on this device, we take the s_umount semaphore * on it to make sure nobody unmounts until the snapshot creation is done. @@ -210,7 +210,7 @@ struct super_block *freeze_bdev(struct block_device *bdev) { struct super_block *sb; - down(&bdev->bd_mount_sem); + mutex_lock(&bdev->bd_mount_mutex); sb = get_super(bdev); if (sb && !(sb->s_flags & MS_RDONLY)) { sb->s_frozen = SB_FREEZE_WRITE; @@ -264,7 +264,7 @@ void thaw_bdev(struct block_device *bdev, struct super_block *sb) drop_super(sb); } - up(&bdev->bd_mount_sem); + mutex_unlock(&bdev->bd_mount_mutex); } EXPORT_SYMBOL(thaw_bdev); diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c index fed55e3c53d..632561dd9c5 100644 --- a/fs/cifs/dir.c +++ b/fs/cifs/dir.c @@ -138,9 +138,9 @@ cifs_create(struct inode *inode, struct dentry *direntry, int mode, cifs_sb = CIFS_SB(inode->i_sb); pTcon = cifs_sb->tcon; - down(&direntry->d_sb->s_vfs_rename_sem); + mutex_lock(&direntry->d_sb->s_vfs_rename_mutex); full_path = build_path_from_dentry(direntry); - up(&direntry->d_sb->s_vfs_rename_sem); + mutex_unlock(&direntry->d_sb->s_vfs_rename_mutex); if(full_path == NULL) { FreeXid(xid); return -ENOMEM; @@ -317,9 +317,9 @@ int cifs_mknod(struct inode *inode, struct dentry *direntry, int mode, cifs_sb = CIFS_SB(inode->i_sb); pTcon = cifs_sb->tcon; - down(&direntry->d_sb->s_vfs_rename_sem); + mutex_lock(&direntry->d_sb->s_vfs_rename_mutex); full_path = build_path_from_dentry(direntry); - up(&direntry->d_sb->s_vfs_rename_sem); + mutex_unlock(&direntry->d_sb->s_vfs_rename_mutex); if(full_path == NULL) rc = -ENOMEM; else if (pTcon->ses->capabilities & CAP_UNIX) { diff --git a/fs/cifs/fcntl.c b/fs/cifs/fcntl.c index a7a47bb36bf..ec4dfe9bf5e 100644 --- a/fs/cifs/fcntl.c +++ b/fs/cifs/fcntl.c @@ -86,9 +86,9 @@ int cifs_dir_notify(struct file * file, unsigned long arg) cifs_sb = CIFS_SB(file->f_dentry->d_sb); pTcon = cifs_sb->tcon; - down(&file->f_dentry->d_sb->s_vfs_rename_sem); + mutex_lock(&file->f_dentry->d_sb->s_vfs_rename_mutex); full_path = build_path_from_dentry(file->f_dentry); - up(&file->f_dentry->d_sb->s_vfs_rename_sem); + mutex_unlock(&file->f_dentry->d_sb->s_vfs_rename_mutex); if(full_path == NULL) { rc = -ENOMEM; diff --git a/fs/cifs/file.c b/fs/cifs/file.c index 675bd256829..165d6742638 100644 --- a/fs/cifs/file.c +++ b/fs/cifs/file.c @@ -203,9 +203,9 @@ int cifs_open(struct inode *inode, struct file *file) } } - down(&inode->i_sb->s_vfs_rename_sem); + mutex_lock(&inode->i_sb->s_vfs_rename_mutex); full_path = build_path_from_dentry(file->f_dentry); - up(&inode->i_sb->s_vfs_rename_sem); + mutex_unlock(&inode->i_sb->s_vfs_rename_mutex); if (full_path == NULL) { FreeXid(xid); return -ENOMEM; diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c index 59359911f48..ff93a9f81d1 100644 --- a/fs/cifs/inode.c +++ b/fs/cifs/inode.c @@ -574,9 +574,9 @@ int cifs_unlink(struct inode *inode, struct dentry *direntry) /* Unlink can be called from rename so we can not grab the sem here since we deadlock otherwise */ -/* 
down(&direntry->d_sb->s_vfs_rename_sem);*/ +/* mutex_lock(&direntry->d_sb->s_vfs_rename_mutex);*/ full_path = build_path_from_dentry(direntry); -/* up(&direntry->d_sb->s_vfs_rename_sem);*/ +/* mutex_unlock(&direntry->d_sb->s_vfs_rename_mutex);*/ if (full_path == NULL) { FreeXid(xid); return -ENOMEM; @@ -718,9 +718,9 @@ int cifs_mkdir(struct inode *inode, struct dentry *direntry, int mode) cifs_sb = CIFS_SB(inode->i_sb); pTcon = cifs_sb->tcon; - down(&inode->i_sb->s_vfs_rename_sem); + mutex_lock(&inode->i_sb->s_vfs_rename_mutex); full_path = build_path_from_dentry(direntry); - up(&inode->i_sb->s_vfs_rename_sem); + mutex_unlock(&inode->i_sb->s_vfs_rename_mutex); if (full_path == NULL) { FreeXid(xid); return -ENOMEM; @@ -803,9 +803,9 @@ int cifs_rmdir(struct inode *inode, struct dentry *direntry) cifs_sb = CIFS_SB(inode->i_sb); pTcon = cifs_sb->tcon; - down(&inode->i_sb->s_vfs_rename_sem); + mutex_lock(&inode->i_sb->s_vfs_rename_mutex); full_path = build_path_from_dentry(direntry); - up(&inode->i_sb->s_vfs_rename_sem); + mutex_unlock(&inode->i_sb->s_vfs_rename_mutex); if (full_path == NULL) { FreeXid(xid); return -ENOMEM; @@ -1137,9 +1137,9 @@ int cifs_setattr(struct dentry *direntry, struct iattr *attrs) rc = 0; } - down(&direntry->d_sb->s_vfs_rename_sem); + mutex_lock(&direntry->d_sb->s_vfs_rename_mutex); full_path = build_path_from_dentry(direntry); - up(&direntry->d_sb->s_vfs_rename_sem); + mutex_unlock(&direntry->d_sb->s_vfs_rename_mutex); if (full_path == NULL) { FreeXid(xid); return -ENOMEM; diff --git a/fs/cifs/link.c b/fs/cifs/link.c index 0f99aae3316..8d0da7c87c7 100644 --- a/fs/cifs/link.c +++ b/fs/cifs/link.c @@ -48,10 +48,10 @@ cifs_hardlink(struct dentry *old_file, struct inode *inode, /* No need to check for cross device links since server will do that BB note DFS case in future though (when we may have to check) */ - down(&inode->i_sb->s_vfs_rename_sem); + mutex_lock(&inode->i_sb->s_vfs_rename_mutex); fromName = build_path_from_dentry(old_file); toName = build_path_from_dentry(direntry); - up(&inode->i_sb->s_vfs_rename_sem); + mutex_unlock(&inode->i_sb->s_vfs_rename_mutex); if((fromName == NULL) || (toName == NULL)) { rc = -ENOMEM; goto cifs_hl_exit; @@ -103,9 +103,9 @@ cifs_follow_link(struct dentry *direntry, struct nameidata *nd) xid = GetXid(); - down(&direntry->d_sb->s_vfs_rename_sem); + mutex_lock(&direntry->d_sb->s_vfs_rename_mutex); full_path = build_path_from_dentry(direntry); - up(&direntry->d_sb->s_vfs_rename_sem); + mutex_unlock(&direntry->d_sb->s_vfs_rename_mutex); if (!full_path) goto out_no_free; @@ -164,9 +164,9 @@ cifs_symlink(struct inode *inode, struct dentry *direntry, const char *symname) cifs_sb = CIFS_SB(inode->i_sb); pTcon = cifs_sb->tcon; - down(&inode->i_sb->s_vfs_rename_sem); + mutex_lock(&inode->i_sb->s_vfs_rename_mutex); full_path = build_path_from_dentry(direntry); - up(&inode->i_sb->s_vfs_rename_sem); + mutex_unlock(&inode->i_sb->s_vfs_rename_mutex); if(full_path == NULL) { FreeXid(xid); @@ -232,9 +232,9 @@ cifs_readlink(struct dentry *direntry, char __user *pBuffer, int buflen) /* BB would it be safe against deadlock to grab this sem even though rename itself grabs the sem and calls lookup? 
*/ -/* down(&inode->i_sb->s_vfs_rename_sem);*/ +/* mutex_lock(&inode->i_sb->s_vfs_rename_mutex);*/ full_path = build_path_from_dentry(direntry); -/* up(&inode->i_sb->s_vfs_rename_sem);*/ +/* mutex_unlock(&inode->i_sb->s_vfs_rename_mutex);*/ if(full_path == NULL) { FreeXid(xid); diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c index 288cc048d37..edb3b6eb34b 100644 --- a/fs/cifs/readdir.c +++ b/fs/cifs/readdir.c @@ -404,9 +404,9 @@ static int initiate_cifs_search(const int xid, struct file *file) if(pTcon == NULL) return -EINVAL; - down(&file->f_dentry->d_sb->s_vfs_rename_sem); + mutex_lock(&file->f_dentry->d_sb->s_vfs_rename_mutex); full_path = build_path_from_dentry(file->f_dentry); - up(&file->f_dentry->d_sb->s_vfs_rename_sem); + mutex_unlock(&file->f_dentry->d_sb->s_vfs_rename_mutex); if(full_path == NULL) { return -ENOMEM; diff --git a/fs/cifs/xattr.c b/fs/cifs/xattr.c index 777e3363c2a..3938444d87b 100644 --- a/fs/cifs/xattr.c +++ b/fs/cifs/xattr.c @@ -62,9 +62,9 @@ int cifs_removexattr(struct dentry * direntry, const char * ea_name) cifs_sb = CIFS_SB(sb); pTcon = cifs_sb->tcon; - down(&sb->s_vfs_rename_sem); + mutex_lock(&sb->s_vfs_rename_mutex); full_path = build_path_from_dentry(direntry); - up(&sb->s_vfs_rename_sem); + mutex_unlock(&sb->s_vfs_rename_mutex); if(full_path == NULL) { FreeXid(xid); return -ENOMEM; @@ -116,9 +116,9 @@ int cifs_setxattr(struct dentry * direntry, const char * ea_name, cifs_sb = CIFS_SB(sb); pTcon = cifs_sb->tcon; - down(&sb->s_vfs_rename_sem); + mutex_lock(&sb->s_vfs_rename_mutex); full_path = build_path_from_dentry(direntry); - up(&sb->s_vfs_rename_sem); + mutex_unlock(&sb->s_vfs_rename_mutex); if(full_path == NULL) { FreeXid(xid); return -ENOMEM; @@ -223,9 +223,9 @@ ssize_t cifs_getxattr(struct dentry * direntry, const char * ea_name, cifs_sb = CIFS_SB(sb); pTcon = cifs_sb->tcon; - down(&sb->s_vfs_rename_sem); + mutex_lock(&sb->s_vfs_rename_mutex); full_path = build_path_from_dentry(direntry); - up(&sb->s_vfs_rename_sem); + mutex_unlock(&sb->s_vfs_rename_mutex); if(full_path == NULL) { FreeXid(xid); return -ENOMEM; @@ -341,9 +341,9 @@ ssize_t cifs_listxattr(struct dentry * direntry, char * data, size_t buf_size) cifs_sb = CIFS_SB(sb); pTcon = cifs_sb->tcon; - down(&sb->s_vfs_rename_sem); + mutex_lock(&sb->s_vfs_rename_mutex); full_path = build_path_from_dentry(direntry); - up(&sb->s_vfs_rename_sem); + mutex_unlock(&sb->s_vfs_rename_mutex); if(full_path == NULL) { FreeXid(xid); return -ENOMEM; diff --git a/fs/devpts/inode.c b/fs/devpts/inode.c index bfb8a230bac..14c5620b5ca 100644 --- a/fs/devpts/inode.c +++ b/fs/devpts/inode.c @@ -18,6 +18,7 @@ #include <linux/mount.h> #include <linux/tty.h> #include <linux/devpts_fs.h> +#include <linux/parser.h> #define DEVPTS_SUPER_MAGIC 0x1cd1 @@ -32,39 +33,60 @@ static struct { umode_t mode; } config = {.mode = 0600}; +enum { + Opt_uid, Opt_gid, Opt_mode, + Opt_err +}; + +static match_table_t tokens = { + {Opt_uid, "uid=%u"}, + {Opt_gid, "gid=%u"}, + {Opt_mode, "mode=%o"}, + {Opt_err, NULL} +}; + static int devpts_remount(struct super_block *sb, int *flags, char *data) { - int setuid = 0; - int setgid = 0; - uid_t uid = 0; - gid_t gid = 0; - umode_t mode = 0600; - char *this_char; - - this_char = NULL; - while ((this_char = strsep(&data, ",")) != NULL) { - int n; - char dummy; - if (!*this_char) + char *p; + + config.setuid = 0; + config.setgid = 0; + config.uid = 0; + config.gid = 0; + config.mode = 0600; + + while ((p = strsep(&data, ",")) != NULL) { + substring_t args[MAX_OPT_ARGS]; + int token; + int option; + 
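The devpts_remount() rewrite in progress here (the switch on the parsed token follows just after this note) drops the per-option sscanf() calls in favour of a token table and the match_token()/match_int()/match_octal() helpers from <linux/parser.h>. A rough userspace analogue of table-driven option parsing, with invented option names and plain strtoul() standing in for the kernel helpers:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Userspace sketch of token-table mount-option parsing:
 * each table entry pairs a token id with a "name=" prefix. */
enum { OPT_UID, OPT_GID, OPT_MODE, OPT_ERR };

static const struct { int token; const char *prefix; int base; } tokens[] = {
    { OPT_UID,  "uid=",  10 },
    { OPT_GID,  "gid=",  10 },
    { OPT_MODE, "mode=",  8 },      /* mode is octal, like mode=%o */
};

static int parse_options(char *data)
{
    char *p;
    unsigned long uid = 0, gid = 0, mode = 0600;

    while ((p = strsep(&data, ",")) != NULL) {
        size_t i;
        int token = OPT_ERR;
        unsigned long val = 0;

        if (!*p)
            continue;
        for (i = 0; i < sizeof(tokens) / sizeof(tokens[0]); i++) {
            size_t n = strlen(tokens[i].prefix);
            if (strncmp(p, tokens[i].prefix, n) == 0) {
                token = tokens[i].token;
                val = strtoul(p + n, NULL, tokens[i].base);
                break;
            }
        }
        switch (token) {
        case OPT_UID:  uid = val;           break;
        case OPT_GID:  gid = val;           break;
        case OPT_MODE: mode = val & 07777;  break;
        default:
            fprintf(stderr, "bogus option: %s\n", p);
            return -1;
        }
    }
    printf("uid=%lu gid=%lu mode=%lo\n", uid, gid, mode);
    return 0;
}

int main(void)
{
    char opts[] = "uid=0,gid=5,mode=620";  /* mutable, as strsep requires */
    return parse_options(opts) ? 1 : 0;
}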
+ if (!*p) continue; - if (sscanf(this_char, "uid=%i%c", &n, &dummy) == 1) { - setuid = 1; - uid = n; - } else if (sscanf(this_char, "gid=%i%c", &n, &dummy) == 1) { - setgid = 1; - gid = n; - } else if (sscanf(this_char, "mode=%o%c", &n, &dummy) == 1) - mode = n & ~S_IFMT; - else { - printk("devpts: called with bogus options\n"); + + token = match_token(p, tokens, args); + switch (token) { + case Opt_uid: + if (match_int(&args[0], &option)) + return -EINVAL; + config.uid = option; + config.setuid = 1; + break; + case Opt_gid: + if (match_int(&args[0], &option)) + return -EINVAL; + config.gid = option; + config.setgid = 1; + break; + case Opt_mode: + if (match_octal(&args[0], &option)) + return -EINVAL; + config.mode = option & ~S_IFMT; + break; + default: + printk(KERN_ERR "devpts: called with bogus options\n"); return -EINVAL; } } - config.setuid = setuid; - config.setgid = setgid; - config.uid = uid; - config.gid = gid; - config.mode = mode; return 0; } diff --git a/fs/dquot.c b/fs/dquot.c index 1966c890b48..acf07e581f8 100644 --- a/fs/dquot.c +++ b/fs/dquot.c @@ -103,12 +103,12 @@ * (these locking rules also apply for S_NOQUOTA flag in the inode - note that * for altering the flag i_mutex is also needed). If operation is holding * reference to dquot in other way (e.g. quotactl ops) it must be guarded by - * dqonoff_sem. + * dqonoff_mutex. * This locking assures that: * a) update/access to dquot pointers in inode is serialized * b) everyone is guarded against invalidate_dquots() * - * Each dquot has its dq_lock semaphore. Locked dquots might not be referenced + * Each dquot has its dq_lock mutex. Locked dquots might not be referenced * from inodes (dquot_alloc_space() and such don't check the dq_lock). * Currently dquot is locked only when it is being read to memory (or space for * it is being allocated) on the first dqget() and when it is being released on @@ -118,9 +118,9 @@ * spinlock to internal buffers before writing. 
* * Lock ordering (including related VFS locks) is the following: - * i_mutex > dqonoff_sem > iprune_sem > journal_lock > dqptr_sem > - * > dquot->dq_lock > dqio_sem - * i_mutex on quota files is special (it's below dqio_sem) + * i_mutex > dqonoff_sem > journal_lock > dqptr_sem > dquot->dq_lock > + * dqio_mutex + * i_mutex on quota files is special (it's below dqio_mutex) */ static DEFINE_SPINLOCK(dq_list_lock); @@ -281,8 +281,8 @@ static inline void remove_inuse(struct dquot *dquot) static void wait_on_dquot(struct dquot *dquot) { - down(&dquot->dq_lock); - up(&dquot->dq_lock); + mutex_lock(&dquot->dq_lock); + mutex_unlock(&dquot->dq_lock); } #define mark_dquot_dirty(dquot) ((dquot)->dq_sb->dq_op->mark_dirty(dquot)) @@ -321,8 +321,8 @@ int dquot_acquire(struct dquot *dquot) int ret = 0, ret2 = 0; struct quota_info *dqopt = sb_dqopt(dquot->dq_sb); - down(&dquot->dq_lock); - down(&dqopt->dqio_sem); + mutex_lock(&dquot->dq_lock); + mutex_lock(&dqopt->dqio_mutex); if (!test_bit(DQ_READ_B, &dquot->dq_flags)) ret = dqopt->ops[dquot->dq_type]->read_dqblk(dquot); if (ret < 0) @@ -343,8 +343,8 @@ int dquot_acquire(struct dquot *dquot) } set_bit(DQ_ACTIVE_B, &dquot->dq_flags); out_iolock: - up(&dqopt->dqio_sem); - up(&dquot->dq_lock); + mutex_unlock(&dqopt->dqio_mutex); + mutex_unlock(&dquot->dq_lock); return ret; } @@ -356,7 +356,7 @@ int dquot_commit(struct dquot *dquot) int ret = 0, ret2 = 0; struct quota_info *dqopt = sb_dqopt(dquot->dq_sb); - down(&dqopt->dqio_sem); + mutex_lock(&dqopt->dqio_mutex); spin_lock(&dq_list_lock); if (!clear_dquot_dirty(dquot)) { spin_unlock(&dq_list_lock); @@ -373,7 +373,7 @@ int dquot_commit(struct dquot *dquot) ret = ret2; } out_sem: - up(&dqopt->dqio_sem); + mutex_unlock(&dqopt->dqio_mutex); return ret; } @@ -385,11 +385,11 @@ int dquot_release(struct dquot *dquot) int ret = 0, ret2 = 0; struct quota_info *dqopt = sb_dqopt(dquot->dq_sb); - down(&dquot->dq_lock); + mutex_lock(&dquot->dq_lock); /* Check whether we are not racing with some other dqget() */ if (atomic_read(&dquot->dq_count) > 1) goto out_dqlock; - down(&dqopt->dqio_sem); + mutex_lock(&dqopt->dqio_mutex); if (dqopt->ops[dquot->dq_type]->release_dqblk) { ret = dqopt->ops[dquot->dq_type]->release_dqblk(dquot); /* Write the info */ @@ -399,31 +399,57 @@ int dquot_release(struct dquot *dquot) ret = ret2; } clear_bit(DQ_ACTIVE_B, &dquot->dq_flags); - up(&dqopt->dqio_sem); + mutex_unlock(&dqopt->dqio_mutex); out_dqlock: - up(&dquot->dq_lock); + mutex_unlock(&dquot->dq_lock); return ret; } /* Invalidate all dquots on the list. Note that this function is called after * quota is disabled and pointers from inodes removed so there cannot be new - * quota users. Also because we hold dqonoff_sem there can be no quota users - * for this sb+type at all. */ + * quota users. There can still be some users of quotas due to inodes being + * just deleted or pruned by prune_icache() (those are not attached to any + * list). We have to wait for such users. 
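The comment above introduces the new behaviour of invalidate_dquots(), whose body follows: instead of asserting that no users exist, it takes a temporary reference, sleeps on dq_wait_unused until dqput() sees the count fall to one and wakes it, then restarts the scan. A compressed userspace analogue of that wait-for-last-reference handshake, using a condition variable in place of the wait queue (all names invented):

#include <pthread.h>
#include <stdio.h>

/* The invalidator takes a temporary reference, then sleeps until the
 * last other user drops theirs and signals the condition variable. */
struct obj {
    pthread_mutex_t lock;
    pthread_cond_t  last_put;       /* plays the role of dq_wait_unused */
    int             count;          /* plays the role of dq_count */
};

static void obj_get(struct obj *o)
{
    pthread_mutex_lock(&o->lock);
    o->count++;
    pthread_mutex_unlock(&o->lock);
}

static void obj_put(struct obj *o)
{
    pthread_mutex_lock(&o->lock);
    if (--o->count == 1)
        pthread_cond_signal(&o->last_put);  /* only the invalidator is left */
    pthread_mutex_unlock(&o->lock);
}

/* Waits until our temporary reference is the only one, then drops it. */
static void obj_invalidate(struct obj *o)
{
    obj_get(o);
    pthread_mutex_lock(&o->lock);
    while (o->count > 1)
        pthread_cond_wait(&o->last_put, &o->lock);
    o->count--;                     /* drop the temporary reference */
    pthread_mutex_unlock(&o->lock);
    printf("object is now unused and can be freed\n");
}

static void *user(void *arg)
{
    obj_put(arg);                   /* a user releasing its reference */
    return NULL;
}

int main(void)
{
    struct obj o = { PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, 1 };
    pthread_t t;

    pthread_create(&t, NULL, user, &o);     /* the one outstanding user */
    obj_invalidate(&o);
    pthread_join(t, NULL);
    return 0;
}

As in the kernel version, the waiter rechecks the count before sleeping, so a wake-up that arrives early is not lost.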
+ */ static void invalidate_dquots(struct super_block *sb, int type) { struct dquot *dquot, *tmp; +restart: spin_lock(&dq_list_lock); list_for_each_entry_safe(dquot, tmp, &inuse_list, dq_inuse) { if (dquot->dq_sb != sb) continue; if (dquot->dq_type != type) continue; -#ifdef __DQUOT_PARANOIA - if (atomic_read(&dquot->dq_count)) - BUG(); -#endif - /* Quota now has no users and it has been written on last dqput() */ + /* Wait for dquot users */ + if (atomic_read(&dquot->dq_count)) { + DEFINE_WAIT(wait); + + atomic_inc(&dquot->dq_count); + prepare_to_wait(&dquot->dq_wait_unused, &wait, + TASK_UNINTERRUPTIBLE); + spin_unlock(&dq_list_lock); + /* Once dqput() wakes us up, we know it's time to free + * the dquot. + * IMPORTANT: we rely on the fact that there is always + * at most one process waiting for dquot to free. + * Otherwise dq_count would be > 1 and we would never + * wake up. + */ + if (atomic_read(&dquot->dq_count) > 1) + schedule(); + finish_wait(&dquot->dq_wait_unused, &wait); + dqput(dquot); + /* At this moment dquot() need not exist (it could be + * reclaimed by prune_dqcache(). Hence we must + * restart. */ + goto restart; + } + /* + * Quota now has no users and it has been written on last + * dqput() + */ remove_dquot_hash(dquot); remove_free_dquot(dquot); remove_inuse(dquot); @@ -439,7 +465,7 @@ int vfs_quota_sync(struct super_block *sb, int type) struct quota_info *dqopt = sb_dqopt(sb); int cnt; - down(&dqopt->dqonoff_sem); + mutex_lock(&dqopt->dqonoff_mutex); for (cnt = 0; cnt < MAXQUOTAS; cnt++) { if (type != -1 && cnt != type) continue; @@ -474,7 +500,7 @@ int vfs_quota_sync(struct super_block *sb, int type) spin_lock(&dq_list_lock); dqstats.syncs++; spin_unlock(&dq_list_lock); - up(&dqopt->dqonoff_sem); + mutex_unlock(&dqopt->dqonoff_mutex); return 0; } @@ -515,7 +541,7 @@ static int shrink_dqcache_memory(int nr, gfp_t gfp_mask) /* * Put reference to dquot * NOTE: If you change this function please check whether dqput_blocks() works right... - * MUST be called with either dqptr_sem or dqonoff_sem held + * MUST be called with either dqptr_sem or dqonoff_mutex held */ static void dqput(struct dquot *dquot) { @@ -540,6 +566,10 @@ we_slept: if (atomic_read(&dquot->dq_count) > 1) { /* We have more than one user... nothing to do */ atomic_dec(&dquot->dq_count); + /* Releasing dquot during quotaoff phase? 
*/ + if (!sb_has_quota_enabled(dquot->dq_sb, dquot->dq_type) && + atomic_read(&dquot->dq_count) == 1) + wake_up(&dquot->dq_wait_unused); spin_unlock(&dq_list_lock); return; } @@ -576,11 +606,12 @@ static struct dquot *get_empty_dquot(struct super_block *sb, int type) return NODQUOT; memset((caddr_t)dquot, 0, sizeof(struct dquot)); - sema_init(&dquot->dq_lock, 1); + mutex_init(&dquot->dq_lock); INIT_LIST_HEAD(&dquot->dq_free); INIT_LIST_HEAD(&dquot->dq_inuse); INIT_HLIST_NODE(&dquot->dq_hash); INIT_LIST_HEAD(&dquot->dq_dirty); + init_waitqueue_head(&dquot->dq_wait_unused); dquot->dq_sb = sb; dquot->dq_type = type; atomic_set(&dquot->dq_count, 1); @@ -590,7 +621,7 @@ static struct dquot *get_empty_dquot(struct super_block *sb, int type) /* * Get reference to dquot - * MUST be called with either dqptr_sem or dqonoff_sem held + * MUST be called with either dqptr_sem or dqonoff_mutex held */ static struct dquot *dqget(struct super_block *sb, unsigned int id, int type) { @@ -656,7 +687,7 @@ static int dqinit_needed(struct inode *inode, int type) return 0; } -/* This routine is guarded by dqonoff_sem semaphore */ +/* This routine is guarded by dqonoff_mutex mutex */ static void add_dquot_ref(struct super_block *sb, int type) { struct list_head *p; @@ -732,13 +763,9 @@ static void drop_dquot_ref(struct super_block *sb, int type) { LIST_HEAD(tofree_head); - /* We need to be guarded against prune_icache to reach all the - * inodes - otherwise some can be on the local list of prune_icache */ - down(&iprune_sem); down_write(&sb_dqopt(sb)->dqptr_sem); remove_dquot_ref(sb, type, &tofree_head); up_write(&sb_dqopt(sb)->dqptr_sem); - up(&iprune_sem); put_dquot_list(&tofree_head); } @@ -938,8 +965,8 @@ int dquot_initialize(struct inode *inode, int type) unsigned int id = 0; int cnt, ret = 0; - /* First test before acquiring semaphore - solves deadlocks when we - * re-enter the quota code and are already holding the semaphore */ + /* First test before acquiring mutex - solves deadlocks when we + * re-enter the quota code and are already holding the mutex */ if (IS_NOQUOTA(inode)) return 0; down_write(&sb_dqopt(inode->i_sb)->dqptr_sem); @@ -1002,8 +1029,8 @@ int dquot_alloc_space(struct inode *inode, qsize_t number, int warn) int cnt, ret = NO_QUOTA; char warntype[MAXQUOTAS]; - /* First test before acquiring semaphore - solves deadlocks when we - * re-enter the quota code and are already holding the semaphore */ + /* First test before acquiring mutex - solves deadlocks when we + * re-enter the quota code and are already holding the mutex */ if (IS_NOQUOTA(inode)) { out_add: inode_add_bytes(inode, number); @@ -1051,8 +1078,8 @@ int dquot_alloc_inode(const struct inode *inode, unsigned long number) int cnt, ret = NO_QUOTA; char warntype[MAXQUOTAS]; - /* First test before acquiring semaphore - solves deadlocks when we - * re-enter the quota code and are already holding the semaphore */ + /* First test before acquiring mutex - solves deadlocks when we + * re-enter the quota code and are already holding the mutex */ if (IS_NOQUOTA(inode)) return QUOTA_OK; for (cnt = 0; cnt < MAXQUOTAS; cnt++) @@ -1095,8 +1122,8 @@ int dquot_free_space(struct inode *inode, qsize_t number) { unsigned int cnt; - /* First test before acquiring semaphore - solves deadlocks when we - * re-enter the quota code and are already holding the semaphore */ + /* First test before acquiring mutex - solves deadlocks when we + * re-enter the quota code and are already holding the mutex */ if (IS_NOQUOTA(inode)) { out_sub: inode_sub_bytes(inode, 
number); @@ -1131,8 +1158,8 @@ int dquot_free_inode(const struct inode *inode, unsigned long number) { unsigned int cnt; - /* First test before acquiring semaphore - solves deadlocks when we - * re-enter the quota code and are already holding the semaphore */ + /* First test before acquiring mutex - solves deadlocks when we + * re-enter the quota code and are already holding the mutex */ if (IS_NOQUOTA(inode)) return QUOTA_OK; down_read(&sb_dqopt(inode->i_sb)->dqptr_sem); @@ -1171,8 +1198,8 @@ int dquot_transfer(struct inode *inode, struct iattr *iattr) chgid = (iattr->ia_valid & ATTR_GID) && inode->i_gid != iattr->ia_gid; char warntype[MAXQUOTAS]; - /* First test before acquiring semaphore - solves deadlocks when we - * re-enter the quota code and are already holding the semaphore */ + /* First test before acquiring mutex - solves deadlocks when we + * re-enter the quota code and are already holding the mutex */ if (IS_NOQUOTA(inode)) return QUOTA_OK; /* Clear the arrays */ @@ -1266,9 +1293,9 @@ int dquot_commit_info(struct super_block *sb, int type) int ret; struct quota_info *dqopt = sb_dqopt(sb); - down(&dqopt->dqio_sem); + mutex_lock(&dqopt->dqio_mutex); ret = dqopt->ops[type]->write_file_info(sb, type); - up(&dqopt->dqio_sem); + mutex_unlock(&dqopt->dqio_mutex); return ret; } @@ -1324,7 +1351,7 @@ int vfs_quota_off(struct super_block *sb, int type) struct inode *toputinode[MAXQUOTAS]; /* We need to serialize quota_off() for device */ - down(&dqopt->dqonoff_sem); + mutex_lock(&dqopt->dqonoff_mutex); for (cnt = 0; cnt < MAXQUOTAS; cnt++) { toputinode[cnt] = NULL; if (type != -1 && cnt != type) @@ -1353,7 +1380,7 @@ int vfs_quota_off(struct super_block *sb, int type) dqopt->info[cnt].dqi_bgrace = 0; dqopt->ops[cnt] = NULL; } - up(&dqopt->dqonoff_sem); + mutex_unlock(&dqopt->dqonoff_mutex); /* Sync the superblock so that buffers with quota data are written to * disk (and so userspace sees correct data afterwards). 
*/ if (sb->s_op->sync_fs) @@ -1366,7 +1393,7 @@ int vfs_quota_off(struct super_block *sb, int type) * changes done by userspace on the next quotaon() */ for (cnt = 0; cnt < MAXQUOTAS; cnt++) if (toputinode[cnt]) { - down(&dqopt->dqonoff_sem); + mutex_lock(&dqopt->dqonoff_mutex); /* If quota was reenabled in the meantime, we have * nothing to do */ if (!sb_has_quota_enabled(sb, cnt)) { @@ -1378,7 +1405,7 @@ int vfs_quota_off(struct super_block *sb, int type) mark_inode_dirty(toputinode[cnt]); iput(toputinode[cnt]); } - up(&dqopt->dqonoff_sem); + mutex_unlock(&dqopt->dqonoff_mutex); } if (sb->s_bdev) invalidate_bdev(sb->s_bdev, 0); @@ -1419,7 +1446,7 @@ static int vfs_quota_on_inode(struct inode *inode, int type, int format_id) /* And now flush the block cache so that kernel sees the changes */ invalidate_bdev(sb->s_bdev, 0); mutex_lock(&inode->i_mutex); - down(&dqopt->dqonoff_sem); + mutex_lock(&dqopt->dqonoff_mutex); if (sb_has_quota_enabled(sb, type)) { error = -EBUSY; goto out_lock; @@ -1444,17 +1471,17 @@ static int vfs_quota_on_inode(struct inode *inode, int type, int format_id) dqopt->ops[type] = fmt->qf_ops; dqopt->info[type].dqi_format = fmt; INIT_LIST_HEAD(&dqopt->info[type].dqi_dirty_list); - down(&dqopt->dqio_sem); + mutex_lock(&dqopt->dqio_mutex); if ((error = dqopt->ops[type]->read_file_info(sb, type)) < 0) { - up(&dqopt->dqio_sem); + mutex_unlock(&dqopt->dqio_mutex); goto out_file_init; } - up(&dqopt->dqio_sem); + mutex_unlock(&dqopt->dqio_mutex); mutex_unlock(&inode->i_mutex); set_enable_flags(dqopt, type); add_dquot_ref(sb, type); - up(&dqopt->dqonoff_sem); + mutex_unlock(&dqopt->dqonoff_mutex); return 0; @@ -1462,7 +1489,7 @@ out_file_init: dqopt->files[type] = NULL; iput(inode); out_lock: - up(&dqopt->dqonoff_sem); + mutex_unlock(&dqopt->dqonoff_mutex); if (oldflags != -1) { down_write(&dqopt->dqptr_sem); /* Set the flags back (in the case of accidental quotaon() @@ -1550,14 +1577,14 @@ int vfs_get_dqblk(struct super_block *sb, int type, qid_t id, struct if_dqblk *d { struct dquot *dquot; - down(&sb_dqopt(sb)->dqonoff_sem); + mutex_lock(&sb_dqopt(sb)->dqonoff_mutex); if (!(dquot = dqget(sb, id, type))) { - up(&sb_dqopt(sb)->dqonoff_sem); + mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex); return -ESRCH; } do_get_dqblk(dquot, di); dqput(dquot); - up(&sb_dqopt(sb)->dqonoff_sem); + mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex); return 0; } @@ -1619,14 +1646,14 @@ int vfs_set_dqblk(struct super_block *sb, int type, qid_t id, struct if_dqblk *d { struct dquot *dquot; - down(&sb_dqopt(sb)->dqonoff_sem); + mutex_lock(&sb_dqopt(sb)->dqonoff_mutex); if (!(dquot = dqget(sb, id, type))) { - up(&sb_dqopt(sb)->dqonoff_sem); + mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex); return -ESRCH; } do_set_dqblk(dquot, di); dqput(dquot); - up(&sb_dqopt(sb)->dqonoff_sem); + mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex); return 0; } @@ -1635,9 +1662,9 @@ int vfs_get_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii) { struct mem_dqinfo *mi; - down(&sb_dqopt(sb)->dqonoff_sem); + mutex_lock(&sb_dqopt(sb)->dqonoff_mutex); if (!sb_has_quota_enabled(sb, type)) { - up(&sb_dqopt(sb)->dqonoff_sem); + mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex); return -ESRCH; } mi = sb_dqopt(sb)->info + type; @@ -1647,7 +1674,7 @@ int vfs_get_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii) ii->dqi_flags = mi->dqi_flags & DQF_MASK; ii->dqi_valid = IIF_ALL; spin_unlock(&dq_data_lock); - up(&sb_dqopt(sb)->dqonoff_sem); + mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex); return 0; } @@ -1656,9 +1683,9 @@ int 
vfs_set_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii) { struct mem_dqinfo *mi; - down(&sb_dqopt(sb)->dqonoff_sem); + mutex_lock(&sb_dqopt(sb)->dqonoff_mutex); if (!sb_has_quota_enabled(sb, type)) { - up(&sb_dqopt(sb)->dqonoff_sem); + mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex); return -ESRCH; } mi = sb_dqopt(sb)->info + type; @@ -1673,7 +1700,7 @@ int vfs_set_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii) mark_info_dirty(sb, type); /* Force write to disk */ sb->dq_op->write_info(sb, type); - up(&sb_dqopt(sb)->dqonoff_sem); + mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex); return 0; } diff --git a/fs/eventpoll.c b/fs/eventpoll.c index 4284cd31eba..1c2b16fda13 100644 --- a/fs/eventpoll.c +++ b/fs/eventpoll.c @@ -34,6 +34,7 @@ #include <linux/eventpoll.h> #include <linux/mount.h> #include <linux/bitops.h> +#include <linux/mutex.h> #include <asm/uaccess.h> #include <asm/system.h> #include <asm/io.h> @@ -46,7 +47,7 @@ * LOCKING: * There are three level of locking required by epoll : * - * 1) epsem (semaphore) + * 1) epmutex (mutex) * 2) ep->sem (rw_semaphore) * 3) ep->lock (rw_lock) * @@ -67,9 +68,9 @@ * if a file has been pushed inside an epoll set and it is then * close()d without a previous call toepoll_ctl(EPOLL_CTL_DEL). * It is possible to drop the "ep->sem" and to use the global - * semaphore "epsem" (together with "ep->lock") to have it working, + * semaphore "epmutex" (together with "ep->lock") to have it working, * but having "ep->sem" will make the interface more scalable. - * Events that require holding "epsem" are very rare, while for + * Events that require holding "epmutex" are very rare, while for * normal operations the epoll private "ep->sem" will guarantee * a greater scalability. */ @@ -274,7 +275,7 @@ static struct super_block *eventpollfs_get_sb(struct file_system_type *fs_type, /* * This semaphore is used to serialize ep_free() and eventpoll_release_file(). */ -static struct semaphore epsem; +static struct mutex epmutex; /* Safe wake up implementation */ static struct poll_safewake psw; @@ -451,15 +452,6 @@ static void ep_poll_safewake(struct poll_safewake *psw, wait_queue_head_t *wq) } -/* Used to initialize the epoll bits inside the "struct file" */ -void eventpoll_init_file(struct file *file) -{ - - INIT_LIST_HEAD(&file->f_ep_links); - spin_lock_init(&file->f_ep_lock); -} - - /* * This is called from eventpoll_release() to unlink files from the eventpoll * interface. We need to have this facility to cleanup correctly files that are @@ -477,10 +469,10 @@ void eventpoll_release_file(struct file *file) * cleanup path, and this means that noone is using this file anymore. * The only hit might come from ep_free() but by holding the semaphore * will correctly serialize the operation. We do need to acquire - * "ep->sem" after "epsem" because ep_remove() requires it when called + * "ep->sem" after "epmutex" because ep_remove() requires it when called * from anywhere but ep_free(). */ - down(&epsem); + mutex_lock(&epmutex); while (!list_empty(lsthead)) { epi = list_entry(lsthead->next, struct epitem, fllink); @@ -492,7 +484,7 @@ void eventpoll_release_file(struct file *file) up_write(&ep->sem); } - up(&epsem); + mutex_unlock(&epmutex); } @@ -819,9 +811,9 @@ static void ep_free(struct eventpoll *ep) * We do not need to hold "ep->sem" here because the epoll file * is on the way to be removed and no one has references to it * anymore. The only hit might come from eventpoll_release_file() but - * holding "epsem" is sufficent here. 
+ * holding "epmutex" is sufficent here. */ - down(&epsem); + mutex_lock(&epmutex); /* * Walks through the whole tree by unregistering poll callbacks. @@ -843,7 +835,7 @@ static void ep_free(struct eventpoll *ep) ep_remove(ep, epi); } - up(&epsem); + mutex_unlock(&epmutex); } @@ -1615,7 +1607,7 @@ static int __init eventpoll_init(void) { int error; - init_MUTEX(&epsem); + mutex_init(&epmutex); /* Initialize the structure used to perform safe poll wait head wake ups */ ep_poll_safewake_init(&psw); diff --git a/fs/ext2/namei.c b/fs/ext2/namei.c index ad1432a2a62..4ca82498532 100644 --- a/fs/ext2/namei.c +++ b/fs/ext2/namei.c @@ -36,22 +36,6 @@ #include "acl.h" #include "xip.h" -/* - * Couple of helper functions - make the code slightly cleaner. - */ - -static inline void ext2_inc_count(struct inode *inode) -{ - inode->i_nlink++; - mark_inode_dirty(inode); -} - -static inline void ext2_dec_count(struct inode *inode) -{ - inode->i_nlink--; - mark_inode_dirty(inode); -} - static inline int ext2_add_nondir(struct dentry *dentry, struct inode *inode) { int err = ext2_add_link(dentry, inode); @@ -59,7 +43,7 @@ static inline int ext2_add_nondir(struct dentry *dentry, struct inode *inode) d_instantiate(dentry, inode); return 0; } - ext2_dec_count(inode); + inode_dec_link_count(inode); iput(inode); return err; } @@ -201,7 +185,7 @@ out: return err; out_fail: - ext2_dec_count(inode); + inode_dec_link_count(inode); iput (inode); goto out; } @@ -215,7 +199,7 @@ static int ext2_link (struct dentry * old_dentry, struct inode * dir, return -EMLINK; inode->i_ctime = CURRENT_TIME_SEC; - ext2_inc_count(inode); + inode_inc_link_count(inode); atomic_inc(&inode->i_count); return ext2_add_nondir(dentry, inode); @@ -229,7 +213,7 @@ static int ext2_mkdir(struct inode * dir, struct dentry * dentry, int mode) if (dir->i_nlink >= EXT2_LINK_MAX) goto out; - ext2_inc_count(dir); + inode_inc_link_count(dir); inode = ext2_new_inode (dir, S_IFDIR | mode); err = PTR_ERR(inode); @@ -243,7 +227,7 @@ static int ext2_mkdir(struct inode * dir, struct dentry * dentry, int mode) else inode->i_mapping->a_ops = &ext2_aops; - ext2_inc_count(inode); + inode_inc_link_count(inode); err = ext2_make_empty(inode, dir); if (err) @@ -258,11 +242,11 @@ out: return err; out_fail: - ext2_dec_count(inode); - ext2_dec_count(inode); + inode_dec_link_count(inode); + inode_dec_link_count(inode); iput(inode); out_dir: - ext2_dec_count(dir); + inode_dec_link_count(dir); goto out; } @@ -282,7 +266,7 @@ static int ext2_unlink(struct inode * dir, struct dentry *dentry) goto out; inode->i_ctime = dir->i_ctime; - ext2_dec_count(inode); + inode_dec_link_count(inode); err = 0; out: return err; @@ -297,8 +281,8 @@ static int ext2_rmdir (struct inode * dir, struct dentry *dentry) err = ext2_unlink(dir, dentry); if (!err) { inode->i_size = 0; - ext2_dec_count(inode); - ext2_dec_count(dir); + inode_dec_link_count(inode); + inode_dec_link_count(dir); } } return err; @@ -338,41 +322,41 @@ static int ext2_rename (struct inode * old_dir, struct dentry * old_dentry, new_de = ext2_find_entry (new_dir, new_dentry, &new_page); if (!new_de) goto out_dir; - ext2_inc_count(old_inode); + inode_inc_link_count(old_inode); ext2_set_link(new_dir, new_de, new_page, old_inode); new_inode->i_ctime = CURRENT_TIME_SEC; if (dir_de) new_inode->i_nlink--; - ext2_dec_count(new_inode); + inode_dec_link_count(new_inode); } else { if (dir_de) { err = -EMLINK; if (new_dir->i_nlink >= EXT2_LINK_MAX) goto out_dir; } - ext2_inc_count(old_inode); + inode_inc_link_count(old_inode); err = 
ext2_add_link(new_dentry, old_inode); if (err) { - ext2_dec_count(old_inode); + inode_dec_link_count(old_inode); goto out_dir; } if (dir_de) - ext2_inc_count(new_dir); + inode_inc_link_count(new_dir); } /* * Like most other Unix systems, set the ctime for inodes on a * rename. - * ext2_dec_count() will mark the inode dirty. + * inode_dec_link_count() will mark the inode dirty. */ old_inode->i_ctime = CURRENT_TIME_SEC; ext2_delete_entry (old_de, old_page); - ext2_dec_count(old_inode); + inode_dec_link_count(old_inode); if (dir_de) { ext2_set_link(old_inode, dir_de, dir_page, new_dir); - ext2_dec_count(old_dir); + inode_dec_link_count(old_dir); } return 0; diff --git a/fs/ext3/dir.c b/fs/ext3/dir.c index 832867aef3d..773459164bb 100644 --- a/fs/ext3/dir.c +++ b/fs/ext3/dir.c @@ -95,11 +95,10 @@ static int ext3_readdir(struct file * filp, void * dirent, filldir_t filldir) { int error = 0; - unsigned long offset, blk; - int i, num, stored; - struct buffer_head * bh, * tmp, * bha[16]; - struct ext3_dir_entry_2 * de; - struct super_block * sb; + unsigned long offset; + int i, stored; + struct ext3_dir_entry_2 *de; + struct super_block *sb; int err; struct inode *inode = filp->f_dentry->d_inode; int ret = 0; @@ -124,12 +123,29 @@ static int ext3_readdir(struct file * filp, } #endif stored = 0; - bh = NULL; offset = filp->f_pos & (sb->s_blocksize - 1); while (!error && !stored && filp->f_pos < inode->i_size) { - blk = (filp->f_pos) >> EXT3_BLOCK_SIZE_BITS(sb); - bh = ext3_bread(NULL, inode, blk, 0, &err); + unsigned long blk = filp->f_pos >> EXT3_BLOCK_SIZE_BITS(sb); + struct buffer_head map_bh; + struct buffer_head *bh = NULL; + + map_bh.b_state = 0; + err = ext3_get_block_handle(NULL, inode, blk, &map_bh, 0, 0); + if (!err) { + page_cache_readahead(sb->s_bdev->bd_inode->i_mapping, + &filp->f_ra, + filp, + map_bh.b_blocknr >> + (PAGE_CACHE_SHIFT - inode->i_blkbits), + 1); + bh = ext3_bread(NULL, inode, blk, 0, &err); + } + + /* + * We ignore I/O errors on directories so users have a chance + * of recovering data when there's a bad sector + */ if (!bh) { ext3_error (sb, "ext3_readdir", "directory #%lu contains a hole at offset %lu", @@ -138,26 +154,6 @@ static int ext3_readdir(struct file * filp, continue; } - /* - * Do the readahead - */ - if (!offset) { - for (i = 16 >> (EXT3_BLOCK_SIZE_BITS(sb) - 9), num = 0; - i > 0; i--) { - tmp = ext3_getblk (NULL, inode, ++blk, 0, &err); - if (tmp && !buffer_uptodate(tmp) && - !buffer_locked(tmp)) - bha[num++] = tmp; - else - brelse (tmp); - } - if (num) { - ll_rw_block (READA, num, bha); - for (i = 0; i < num; i++) - brelse (bha[i]); - } - } - revalidate: /* If the dir block has changed since the last call to * readdir(2), then we might be pointing to an invalid diff --git a/fs/ext3/file.c b/fs/ext3/file.c index 98e78345ead..59098ea5671 100644 --- a/fs/ext3/file.c +++ b/fs/ext3/file.c @@ -37,9 +37,9 @@ static int ext3_release_file (struct inode * inode, struct file * filp) if ((filp->f_mode & FMODE_WRITE) && (atomic_read(&inode->i_writecount) == 1)) { - down(&EXT3_I(inode)->truncate_sem); + mutex_lock(&EXT3_I(inode)->truncate_mutex); ext3_discard_reservation(inode); - up(&EXT3_I(inode)->truncate_sem); + mutex_unlock(&EXT3_I(inode)->truncate_mutex); } if (is_dx(inode) && filp->private_data) ext3_htree_free_dir_info(filp->private_data); diff --git a/fs/ext3/inode.c b/fs/ext3/inode.c index 0384e539b88..2c361377e0a 100644 --- a/fs/ext3/inode.c +++ b/fs/ext3/inode.c @@ -671,7 +671,7 @@ err_out: * The BKL may not be held on entry here. Be sure to take it early. 
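The ext3_readdir() hunk above replaces the hand-rolled ll_rw_block() readahead loop with a call into the generic readahead machinery (page_cache_readahead() on the block device's mapping) once the directory block has been mapped. The closest userspace expression of "tell the page cache what you are about to read" is posix_fadvise(); a small sketch, with an arbitrary readable file standing in for the directory blocks:

#define _POSIX_C_SOURCE 200112L     /* for posix_fadvise() */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
    const char *path = "/etc/services";     /* any readable file will do */
    char buf[4096];
    ssize_t n;
    int fd = open(path, O_RDONLY);

    if (fd < 0) {
        perror("open");
        return 1;
    }
    /* Ask for readahead over the whole file; a failure is only a lost hint. */
    if (posix_fadvise(fd, 0, 0, POSIX_FADV_WILLNEED) != 0)
        fprintf(stderr, "posix_fadvise hint not honoured\n");

    while ((n = read(fd, buf, sizeof(buf))) > 0)
        ;                                   /* consume sequentially */
    close(fd);
    return n < 0 ? 1 : 0;
}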
*/ -static int +int ext3_get_block_handle(handle_t *handle, struct inode *inode, sector_t iblock, struct buffer_head *bh_result, int create, int extend_disksize) { @@ -702,7 +702,7 @@ ext3_get_block_handle(handle_t *handle, struct inode *inode, sector_t iblock, if (!create || err == -EIO) goto cleanup; - down(&ei->truncate_sem); + mutex_lock(&ei->truncate_mutex); /* * If the indirect block is missing while we are reading @@ -723,7 +723,7 @@ ext3_get_block_handle(handle_t *handle, struct inode *inode, sector_t iblock, } partial = ext3_get_branch(inode, depth, offsets, chain, &err); if (!partial) { - up(&ei->truncate_sem); + mutex_unlock(&ei->truncate_mutex); if (err) goto cleanup; clear_buffer_new(bh_result); @@ -759,13 +759,13 @@ ext3_get_block_handle(handle_t *handle, struct inode *inode, sector_t iblock, err = ext3_splice_branch(handle, inode, iblock, chain, partial, left); /* - * i_disksize growing is protected by truncate_sem. Don't forget to + * i_disksize growing is protected by truncate_mutex. Don't forget to * protect it if you're about to implement concurrent * ext3_get_block() -bzzz */ if (!err && extend_disksize && inode->i_size > ei->i_disksize) ei->i_disksize = inode->i_size; - up(&ei->truncate_sem); + mutex_unlock(&ei->truncate_mutex); if (err) goto cleanup; @@ -1227,7 +1227,7 @@ static int journal_dirty_data_fn(handle_t *handle, struct buffer_head *bh) * ext3_file_write() -> generic_file_write() -> __alloc_pages() -> ... * * Same applies to ext3_get_block(). We will deadlock on various things like - * lock_journal and i_truncate_sem. + * lock_journal and i_truncate_mutex. * * Setting PF_MEMALLOC here doesn't work - too many internal memory * allocations fail. @@ -2161,7 +2161,7 @@ void ext3_truncate(struct inode * inode) * From here we block out all ext3_get_block() callers who want to * modify the block allocation tree. 
*/ - down(&ei->truncate_sem); + mutex_lock(&ei->truncate_mutex); if (n == 1) { /* direct blocks */ ext3_free_data(handle, inode, NULL, i_data+offsets[0], @@ -2228,7 +2228,7 @@ do_indirects: ext3_discard_reservation(inode); - up(&ei->truncate_sem); + mutex_unlock(&ei->truncate_mutex); inode->i_mtime = inode->i_ctime = CURRENT_TIME_SEC; ext3_mark_inode_dirty(handle, inode); diff --git a/fs/ext3/ioctl.c b/fs/ext3/ioctl.c index 556cd551007..aaf1da17b6d 100644 --- a/fs/ext3/ioctl.c +++ b/fs/ext3/ioctl.c @@ -182,7 +182,7 @@ flags_err: * need to allocate reservation structure for this inode * before set the window size */ - down(&ei->truncate_sem); + mutex_lock(&ei->truncate_mutex); if (!ei->i_block_alloc_info) ext3_init_block_alloc_info(inode); @@ -190,7 +190,7 @@ flags_err: struct ext3_reserve_window_node *rsv = &ei->i_block_alloc_info->rsv_window_node; rsv->rsv_goal_size = rsv_window_size; } - up(&ei->truncate_sem); + mutex_unlock(&ei->truncate_mutex); return 0; } case EXT3_IOC_GROUP_EXTEND: { diff --git a/fs/ext3/super.c b/fs/ext3/super.c index 56bf7658601..efe5b20d7a5 100644 --- a/fs/ext3/super.c +++ b/fs/ext3/super.c @@ -472,7 +472,7 @@ static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags) #ifdef CONFIG_EXT3_FS_XATTR init_rwsem(&ei->xattr_sem); #endif - init_MUTEX(&ei->truncate_sem); + mutex_init(&ei->truncate_mutex); inode_init_once(&ei->vfs_inode); } } @@ -2382,8 +2382,8 @@ static int ext3_statfs (struct super_block * sb, struct kstatfs * buf) * Process 1 Process 2 * ext3_create() quota_sync() * journal_start() write_dquot() - * DQUOT_INIT() down(dqio_sem) - * down(dqio_sem) journal_start() + * DQUOT_INIT() down(dqio_mutex) + * down(dqio_mutex) journal_start() * */ diff --git a/fs/fat/fatent.c b/fs/fat/fatent.c index a1a9e045121..ab171ea8e86 100644 --- a/fs/fat/fatent.c +++ b/fs/fat/fatent.c @@ -267,19 +267,19 @@ static struct fatent_operations fat32_ops = { static inline void lock_fat(struct msdos_sb_info *sbi) { - down(&sbi->fat_lock); + mutex_lock(&sbi->fat_lock); } static inline void unlock_fat(struct msdos_sb_info *sbi) { - up(&sbi->fat_lock); + mutex_unlock(&sbi->fat_lock); } void fat_ent_access_init(struct super_block *sb) { struct msdos_sb_info *sbi = MSDOS_SB(sb); - init_MUTEX(&sbi->fat_lock); + mutex_init(&sbi->fat_lock); switch (sbi->fat_bits) { case 32: diff --git a/fs/fcntl.c b/fs/fcntl.c index dc4a7007f4e..03c789560fb 100644 --- a/fs/fcntl.c +++ b/fs/fcntl.c @@ -73,8 +73,8 @@ repeat: * orig_start..fdt->next_fd */ start = orig_start; - if (start < fdt->next_fd) - start = fdt->next_fd; + if (start < files->next_fd) + start = files->next_fd; newfd = start; if (start < fdt->max_fdset) { @@ -102,9 +102,8 @@ repeat: * we reacquire the fdtable pointer and use it while holding * the lock, no one can free it during that time. 
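The fs/fcntl.c hunks above move the next_fd search hint out of the RCU-replaced fdtable and into files_struct itself, so the hint is neither lost nor stale when the table is reallocated. The underlying allocation idea, scan for the lowest free slot starting from a cached hint and keep the hint current, can be sketched in plain C; the toy structure and function names below are invented:

#include <stdio.h>

#define MAX_FDS 64

/* Toy descriptor table: open_bits[i] != 0 means slot i is in use,
 * next_fd caches where the last successful search ended. */
struct toy_files {
    unsigned char open_bits[MAX_FDS];
    int next_fd;
};

/* Find the lowest free slot >= start, preferring the cached hint. */
static int toy_alloc_fd(struct toy_files *f, int start)
{
    int fd;

    if (start < f->next_fd)
        start = f->next_fd;
    for (fd = start; fd < MAX_FDS; fd++) {
        if (!f->open_bits[fd]) {
            f->open_bits[fd] = 1;
            if (start <= f->next_fd)
                f->next_fd = fd + 1;        /* remember where to resume */
            return fd;
        }
    }
    return -1;                              /* table full */
}

static void toy_free_fd(struct toy_files *f, int fd)
{
    f->open_bits[fd] = 0;
    if (fd < f->next_fd)
        f->next_fd = fd;                    /* lower the hint again */
}

int main(void)
{
    struct toy_files files = { {0}, 0 };
    int a = toy_alloc_fd(&files, 0);        /* 0 */
    int b = toy_alloc_fd(&files, 0);        /* 1 */

    toy_free_fd(&files, a);
    printf("%d %d %d\n", a, b, toy_alloc_fd(&files, 0));    /* 0 1 0 */
    return 0;
}

Lowering the hint again on free mirrors what the kernel's fd-release path does, so the lowest available descriptor keeps being handed out first.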
*/ - fdt = files_fdtable(files); - if (start <= fdt->next_fd) - fdt->next_fd = newfd + 1; + if (start <= files->next_fd) + files->next_fd = newfd + 1; error = newfd; diff --git a/fs/file.c b/fs/file.c index cea7cbea11d..bbc74331473 100644 --- a/fs/file.c +++ b/fs/file.c @@ -125,7 +125,8 @@ static void free_fdtable_rcu(struct rcu_head *rcu) kmem_cache_free(files_cachep, fdt->free_files); return; } - if (fdt->max_fdset <= __FD_SETSIZE && fdt->max_fds <= NR_OPEN_DEFAULT) { + if (fdt->max_fdset <= EMBEDDED_FD_SET_SIZE && + fdt->max_fds <= NR_OPEN_DEFAULT) { /* * The fdtable was embedded */ @@ -155,8 +156,9 @@ static void free_fdtable_rcu(struct rcu_head *rcu) void free_fdtable(struct fdtable *fdt) { - if (fdt->free_files || fdt->max_fdset > __FD_SETSIZE || - fdt->max_fds > NR_OPEN_DEFAULT) + if (fdt->free_files || + fdt->max_fdset > EMBEDDED_FD_SET_SIZE || + fdt->max_fds > NR_OPEN_DEFAULT) call_rcu(&fdt->rcu, free_fdtable_rcu); } @@ -199,7 +201,6 @@ static void copy_fdtable(struct fdtable *nfdt, struct fdtable *fdt) (nfdt->max_fds - fdt->max_fds) * sizeof(struct file *)); } - nfdt->next_fd = fdt->next_fd; } /* @@ -220,11 +221,9 @@ fd_set * alloc_fdset(int num) void free_fdset(fd_set *array, int num) { - int size = num / 8; - - if (num <= __FD_SETSIZE) /* Don't free an embedded fdset */ + if (num <= EMBEDDED_FD_SET_SIZE) /* Don't free an embedded fdset */ return; - else if (size <= PAGE_SIZE) + else if (num <= 8 * PAGE_SIZE) kfree(array); else vfree(array); @@ -237,22 +236,17 @@ static struct fdtable *alloc_fdtable(int nr) fd_set *new_openset = NULL, *new_execset = NULL; struct file **new_fds; - fdt = kmalloc(sizeof(*fdt), GFP_KERNEL); + fdt = kzalloc(sizeof(*fdt), GFP_KERNEL); if (!fdt) goto out; - memset(fdt, 0, sizeof(*fdt)); - nfds = __FD_SETSIZE; + nfds = 8 * L1_CACHE_BYTES; /* Expand to the max in easy steps */ - do { - if (nfds < (PAGE_SIZE * 8)) - nfds = PAGE_SIZE * 8; - else { - nfds = nfds * 2; - if (nfds > NR_OPEN) - nfds = NR_OPEN; - } - } while (nfds <= nr); + while (nfds <= nr) { + nfds = nfds * 2; + if (nfds > NR_OPEN) + nfds = NR_OPEN; + } new_openset = alloc_fdset(nfds); new_execset = alloc_fdset(nfds); diff --git a/fs/file_table.c b/fs/file_table.c index 44fabeaa941..bcea1998b4d 100644 --- a/fs/file_table.c +++ b/fs/file_table.c @@ -88,6 +88,7 @@ int proc_nr_files(ctl_table *table, int write, struct file *filp, */ struct file *get_empty_filp(void) { + struct task_struct *tsk; static int old_max; struct file * f; @@ -112,13 +113,14 @@ struct file *get_empty_filp(void) if (security_file_alloc(f)) goto fail_sec; - eventpoll_init_file(f); + tsk = current; + INIT_LIST_HEAD(&f->f_u.fu_list); atomic_set(&f->f_count, 1); - f->f_uid = current->fsuid; - f->f_gid = current->fsgid; rwlock_init(&f->f_owner.lock); + f->f_uid = tsk->fsuid; + f->f_gid = tsk->fsgid; + eventpoll_init_file(f); /* f->f_version: 0 */ - INIT_LIST_HEAD(&f->f_u.fu_list); return f; over: diff --git a/fs/hpfs/hpfs_fn.h b/fs/hpfs/hpfs_fn.h index 6628c3b352c..4c6473ab3b3 100644 --- a/fs/hpfs/hpfs_fn.h +++ b/fs/hpfs/hpfs_fn.h @@ -9,6 +9,7 @@ //#define DBG //#define DEBUG_LOCKS +#include <linux/mutex.h> #include <linux/pagemap.h> #include <linux/buffer_head.h> #include <linux/hpfs_fs.h> @@ -57,8 +58,8 @@ struct hpfs_inode_info { unsigned i_ea_uid : 1; /* file's uid is stored in ea */ unsigned i_ea_gid : 1; /* file's gid is stored in ea */ unsigned i_dirty : 1; - struct semaphore i_sem; - struct semaphore i_parent; + struct mutex i_mutex; + struct mutex i_parent_mutex; loff_t **i_rddir_off; struct inode vfs_inode; }; diff 
--git a/fs/hpfs/inode.c b/fs/hpfs/inode.c index e3d17e9ea6c..56f2c338c4d 100644 --- a/fs/hpfs/inode.c +++ b/fs/hpfs/inode.c @@ -186,9 +186,9 @@ void hpfs_write_inode(struct inode *i) kfree(hpfs_inode->i_rddir_off); hpfs_inode->i_rddir_off = NULL; } - down(&hpfs_inode->i_parent); + mutex_lock(&hpfs_inode->i_parent_mutex); if (!i->i_nlink) { - up(&hpfs_inode->i_parent); + mutex_unlock(&hpfs_inode->i_parent_mutex); return; } parent = iget_locked(i->i_sb, hpfs_inode->i_parent_dir); @@ -199,14 +199,14 @@ void hpfs_write_inode(struct inode *i) hpfs_read_inode(parent); unlock_new_inode(parent); } - down(&hpfs_inode->i_sem); + mutex_lock(&hpfs_inode->i_mutex); hpfs_write_inode_nolock(i); - up(&hpfs_inode->i_sem); + mutex_unlock(&hpfs_inode->i_mutex); iput(parent); } else { mark_inode_dirty(i); } - up(&hpfs_inode->i_parent); + mutex_unlock(&hpfs_inode->i_parent_mutex); } void hpfs_write_inode_nolock(struct inode *i) diff --git a/fs/hpfs/namei.c b/fs/hpfs/namei.c index 8ff8fc433fc..a03abb12c61 100644 --- a/fs/hpfs/namei.c +++ b/fs/hpfs/namei.c @@ -60,7 +60,7 @@ static int hpfs_mkdir(struct inode *dir, struct dentry *dentry, int mode) if (dee.read_only) result->i_mode &= ~0222; - down(&hpfs_i(dir)->i_sem); + mutex_lock(&hpfs_i(dir)->i_mutex); r = hpfs_add_dirent(dir, (char *)name, len, &dee, 0); if (r == 1) goto bail3; @@ -101,11 +101,11 @@ static int hpfs_mkdir(struct inode *dir, struct dentry *dentry, int mode) hpfs_write_inode_nolock(result); } d_instantiate(dentry, result); - up(&hpfs_i(dir)->i_sem); + mutex_unlock(&hpfs_i(dir)->i_mutex); unlock_kernel(); return 0; bail3: - up(&hpfs_i(dir)->i_sem); + mutex_unlock(&hpfs_i(dir)->i_mutex); iput(result); bail2: hpfs_brelse4(&qbh0); @@ -168,7 +168,7 @@ static int hpfs_create(struct inode *dir, struct dentry *dentry, int mode, struc result->i_data.a_ops = &hpfs_aops; hpfs_i(result)->mmu_private = 0; - down(&hpfs_i(dir)->i_sem); + mutex_lock(&hpfs_i(dir)->i_mutex); r = hpfs_add_dirent(dir, (char *)name, len, &dee, 0); if (r == 1) goto bail2; @@ -193,12 +193,12 @@ static int hpfs_create(struct inode *dir, struct dentry *dentry, int mode, struc hpfs_write_inode_nolock(result); } d_instantiate(dentry, result); - up(&hpfs_i(dir)->i_sem); + mutex_unlock(&hpfs_i(dir)->i_mutex); unlock_kernel(); return 0; bail2: - up(&hpfs_i(dir)->i_sem); + mutex_unlock(&hpfs_i(dir)->i_mutex); iput(result); bail1: brelse(bh); @@ -254,7 +254,7 @@ static int hpfs_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t result->i_blocks = 1; init_special_inode(result, mode, rdev); - down(&hpfs_i(dir)->i_sem); + mutex_lock(&hpfs_i(dir)->i_mutex); r = hpfs_add_dirent(dir, (char *)name, len, &dee, 0); if (r == 1) goto bail2; @@ -271,12 +271,12 @@ static int hpfs_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t hpfs_write_inode_nolock(result); d_instantiate(dentry, result); - up(&hpfs_i(dir)->i_sem); + mutex_unlock(&hpfs_i(dir)->i_mutex); brelse(bh); unlock_kernel(); return 0; bail2: - up(&hpfs_i(dir)->i_sem); + mutex_unlock(&hpfs_i(dir)->i_mutex); iput(result); bail1: brelse(bh); @@ -333,7 +333,7 @@ static int hpfs_symlink(struct inode *dir, struct dentry *dentry, const char *sy result->i_op = &page_symlink_inode_operations; result->i_data.a_ops = &hpfs_symlink_aops; - down(&hpfs_i(dir)->i_sem); + mutex_lock(&hpfs_i(dir)->i_mutex); r = hpfs_add_dirent(dir, (char *)name, len, &dee, 0); if (r == 1) goto bail2; @@ -352,11 +352,11 @@ static int hpfs_symlink(struct inode *dir, struct dentry *dentry, const char *sy hpfs_write_inode_nolock(result); 
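The hpfs changes above and below give each in-memory inode two mutexes, i_parent_mutex and i_mutex, and every path that needs both takes them in that same order, which is what keeps the conversion deadlock-free. A toy pthread sketch of the "two per-object locks, one documented order" rule (names invented):

#include <pthread.h>
#include <stdio.h>

/* i_parent_mutex is always taken before i_mutex, so two paths that
 * need both can never deadlock against each other. */
struct toy_inode {
    pthread_mutex_t parent_mutex;   /* protects the parent link */
    pthread_mutex_t dir_mutex;      /* protects directory contents */
    int             nlink;
};

static void toy_unlink(struct toy_inode *dir)
{
    pthread_mutex_lock(&dir->parent_mutex);     /* order: parent first */
    pthread_mutex_lock(&dir->dir_mutex);
    dir->nlink--;
    pthread_mutex_unlock(&dir->dir_mutex);
    pthread_mutex_unlock(&dir->parent_mutex);
}

static void toy_write_inode(struct toy_inode *dir)
{
    pthread_mutex_lock(&dir->parent_mutex);     /* same order everywhere */
    pthread_mutex_lock(&dir->dir_mutex);
    printf("flushing inode, nlink=%d\n", dir->nlink);
    pthread_mutex_unlock(&dir->dir_mutex);
    pthread_mutex_unlock(&dir->parent_mutex);
}

int main(void)
{
    struct toy_inode dir = {
        PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER, 2
    };

    toy_unlink(&dir);
    toy_write_inode(&dir);
    return 0;
}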
d_instantiate(dentry, result); - up(&hpfs_i(dir)->i_sem); + mutex_unlock(&hpfs_i(dir)->i_mutex); unlock_kernel(); return 0; bail2: - up(&hpfs_i(dir)->i_sem); + mutex_unlock(&hpfs_i(dir)->i_mutex); iput(result); bail1: brelse(bh); @@ -382,8 +382,8 @@ static int hpfs_unlink(struct inode *dir, struct dentry *dentry) lock_kernel(); hpfs_adjust_length((char *)name, &len); again: - down(&hpfs_i(inode)->i_parent); - down(&hpfs_i(dir)->i_sem); + mutex_lock(&hpfs_i(inode)->i_parent_mutex); + mutex_lock(&hpfs_i(dir)->i_mutex); err = -ENOENT; de = map_dirent(dir, hpfs_i(dir)->i_dno, (char *)name, len, &dno, &qbh); if (!de) @@ -410,8 +410,8 @@ again: if (rep++) break; - up(&hpfs_i(dir)->i_sem); - up(&hpfs_i(inode)->i_parent); + mutex_unlock(&hpfs_i(dir)->i_mutex); + mutex_unlock(&hpfs_i(inode)->i_parent_mutex); d_drop(dentry); spin_lock(&dentry->d_lock); if (atomic_read(&dentry->d_count) > 1 || @@ -442,8 +442,8 @@ again: out1: hpfs_brelse4(&qbh); out: - up(&hpfs_i(dir)->i_sem); - up(&hpfs_i(inode)->i_parent); + mutex_unlock(&hpfs_i(dir)->i_mutex); + mutex_unlock(&hpfs_i(inode)->i_parent_mutex); unlock_kernel(); return err; } @@ -463,8 +463,8 @@ static int hpfs_rmdir(struct inode *dir, struct dentry *dentry) hpfs_adjust_length((char *)name, &len); lock_kernel(); - down(&hpfs_i(inode)->i_parent); - down(&hpfs_i(dir)->i_sem); + mutex_lock(&hpfs_i(inode)->i_parent_mutex); + mutex_lock(&hpfs_i(dir)->i_mutex); err = -ENOENT; de = map_dirent(dir, hpfs_i(dir)->i_dno, (char *)name, len, &dno, &qbh); if (!de) @@ -502,8 +502,8 @@ static int hpfs_rmdir(struct inode *dir, struct dentry *dentry) out1: hpfs_brelse4(&qbh); out: - up(&hpfs_i(dir)->i_sem); - up(&hpfs_i(inode)->i_parent); + mutex_unlock(&hpfs_i(dir)->i_mutex); + mutex_unlock(&hpfs_i(inode)->i_parent_mutex); unlock_kernel(); return err; } @@ -565,12 +565,12 @@ static int hpfs_rename(struct inode *old_dir, struct dentry *old_dentry, lock_kernel(); /* order doesn't matter, due to VFS exclusion */ - down(&hpfs_i(i)->i_parent); + mutex_lock(&hpfs_i(i)->i_parent_mutex); if (new_inode) - down(&hpfs_i(new_inode)->i_parent); - down(&hpfs_i(old_dir)->i_sem); + mutex_lock(&hpfs_i(new_inode)->i_parent_mutex); + mutex_lock(&hpfs_i(old_dir)->i_mutex); if (new_dir != old_dir) - down(&hpfs_i(new_dir)->i_sem); + mutex_lock(&hpfs_i(new_dir)->i_mutex); /* Erm? 
Moving over the empty non-busy directory is perfectly legal */ if (new_inode && S_ISDIR(new_inode->i_mode)) { @@ -650,11 +650,11 @@ static int hpfs_rename(struct inode *old_dir, struct dentry *old_dentry, hpfs_decide_conv(i, (char *)new_name, new_len); end1: if (old_dir != new_dir) - up(&hpfs_i(new_dir)->i_sem); - up(&hpfs_i(old_dir)->i_sem); - up(&hpfs_i(i)->i_parent); + mutex_unlock(&hpfs_i(new_dir)->i_mutex); + mutex_unlock(&hpfs_i(old_dir)->i_mutex); + mutex_unlock(&hpfs_i(i)->i_parent_mutex); if (new_inode) - up(&hpfs_i(new_inode)->i_parent); + mutex_unlock(&hpfs_i(new_inode)->i_parent_mutex); unlock_kernel(); return err; } diff --git a/fs/hpfs/super.c b/fs/hpfs/super.c index 63e88d7e2c3..9488a794076 100644 --- a/fs/hpfs/super.c +++ b/fs/hpfs/super.c @@ -181,8 +181,8 @@ static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags) if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) == SLAB_CTOR_CONSTRUCTOR) { - init_MUTEX(&ei->i_sem); - init_MUTEX(&ei->i_parent); + mutex_init(&ei->i_mutex); + mutex_init(&ei->i_parent_mutex); inode_init_once(&ei->vfs_inode); } } diff --git a/fs/inode.c b/fs/inode.c index d0be6159eb7..25967b67903 100644 --- a/fs/inode.c +++ b/fs/inode.c @@ -84,14 +84,14 @@ static struct hlist_head *inode_hashtable; DEFINE_SPINLOCK(inode_lock); /* - * iprune_sem provides exclusion between the kswapd or try_to_free_pages + * iprune_mutex provides exclusion between the kswapd or try_to_free_pages * icache shrinking path, and the umount path. Without this exclusion, * by the time prune_icache calls iput for the inode whose pages it has * been invalidating, or by the time it calls clear_inode & destroy_inode * from its final dispose_list, the struct super_block they refer to * (for inode->i_sb->s_op) may already have been freed and reused. */ -DECLARE_MUTEX(iprune_sem); +DEFINE_MUTEX(iprune_mutex); /* * Statistics gathering.. @@ -206,7 +206,7 @@ void inode_init_once(struct inode *inode) i_size_ordered_init(inode); #ifdef CONFIG_INOTIFY INIT_LIST_HEAD(&inode->inotify_watches); - sema_init(&inode->inotify_sem, 1); + mutex_init(&inode->inotify_mutex); #endif } @@ -319,7 +319,7 @@ static int invalidate_list(struct list_head *head, struct list_head *dispose) /* * We can reschedule here without worrying about the list's * consistency because the per-sb list of inodes must not - * change during umount anymore, and because iprune_sem keeps + * change during umount anymore, and because iprune_mutex keeps * shrink_icache_memory() away. */ cond_resched_lock(&inode_lock); @@ -355,14 +355,14 @@ int invalidate_inodes(struct super_block * sb) int busy; LIST_HEAD(throw_away); - down(&iprune_sem); + mutex_lock(&iprune_mutex); spin_lock(&inode_lock); inotify_unmount_inodes(&sb->s_inodes); busy = invalidate_list(&sb->s_inodes, &throw_away); spin_unlock(&inode_lock); dispose_list(&throw_away); - up(&iprune_sem); + mutex_unlock(&iprune_mutex); return busy; } @@ -377,7 +377,7 @@ int __invalidate_device(struct block_device *bdev) if (sb) { /* * no need to lock the super, get_super holds the - * read semaphore so the filesystem cannot go away + * read mutex so the filesystem cannot go away * under us (->put_super runs with the write lock * hold). 
*/ @@ -423,7 +423,7 @@ static void prune_icache(int nr_to_scan) int nr_scanned; unsigned long reap = 0; - down(&iprune_sem); + mutex_lock(&iprune_mutex); spin_lock(&inode_lock); for (nr_scanned = 0; nr_scanned < nr_to_scan; nr_scanned++) { struct inode *inode; @@ -459,7 +459,7 @@ static void prune_icache(int nr_to_scan) spin_unlock(&inode_lock); dispose_list(&freeable); - up(&iprune_sem); + mutex_unlock(&iprune_mutex); if (current_is_kswapd()) mod_page_state(kswapd_inodesteal, reap); diff --git a/fs/inotify.c b/fs/inotify.c index 3041503bde0..0ee39ef591c 100644 --- a/fs/inotify.c +++ b/fs/inotify.c @@ -54,10 +54,10 @@ int inotify_max_queued_events; * Lock ordering: * * dentry->d_lock (used to keep d_move() away from dentry->d_parent) - * iprune_sem (synchronize shrink_icache_memory()) + * iprune_mutex (synchronize shrink_icache_memory()) * inode_lock (protects the super_block->s_inodes list) - * inode->inotify_sem (protects inode->inotify_watches and watches->i_list) - * inotify_dev->sem (protects inotify_device and watches->d_list) + * inode->inotify_mutex (protects inode->inotify_watches and watches->i_list) + * inotify_dev->mutex (protects inotify_device and watches->d_list) */ /* @@ -79,12 +79,12 @@ int inotify_max_queued_events; /* * struct inotify_device - represents an inotify instance * - * This structure is protected by the semaphore 'sem'. + * This structure is protected by the mutex 'mutex'. */ struct inotify_device { wait_queue_head_t wq; /* wait queue for i/o */ struct idr idr; /* idr mapping wd -> watch */ - struct semaphore sem; /* protects this bad boy */ + struct mutex mutex; /* protects this bad boy */ struct list_head events; /* list of queued events */ struct list_head watches; /* list of watches */ atomic_t count; /* reference count */ @@ -101,7 +101,7 @@ struct inotify_device { * device. In read(), this list is walked and all events that can fit in the * buffer are returned. * - * Protected by dev->sem of the device in which we are queued. + * Protected by dev->mutex of the device in which we are queued. */ struct inotify_kernel_event { struct inotify_event event; /* the user-space event */ @@ -112,8 +112,8 @@ struct inotify_kernel_event { /* * struct inotify_watch - represents a watch request on a specific inode * - * d_list is protected by dev->sem of the associated watch->dev. - * i_list and mask are protected by inode->inotify_sem of the associated inode. + * d_list is protected by dev->mutex of the associated watch->dev. + * i_list and mask are protected by inode->inotify_mutex of the associated inode. * dev, inode, and wd are never written to once the watch is created. */ struct inotify_watch { @@ -261,7 +261,7 @@ static struct inotify_kernel_event * kernel_event(s32 wd, u32 mask, u32 cookie, /* * inotify_dev_get_event - return the next event in the given dev's queue * - * Caller must hold dev->sem. + * Caller must hold dev->mutex. */ static inline struct inotify_kernel_event * inotify_dev_get_event(struct inotify_device *dev) @@ -272,7 +272,7 @@ inotify_dev_get_event(struct inotify_device *dev) /* * inotify_dev_queue_event - add a new event to the given device * - * Caller must hold dev->sem. Can sleep (calls kernel_event()). + * Caller must hold dev->mutex. Can sleep (calls kernel_event()). 
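The struct inotify_device comments above now promise that the event list, the watch list and the counters are all guarded by a single per-device mutex. A userspace sketch of such a mutex-guarded event queue, queueing on one side and draining on the other (structure and function names invented; unlike the kernel helper, which expects dev->mutex to be held by its caller, the sketch takes the lock itself):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct toy_event {
    int wd;
    unsigned mask;
    struct toy_event *next;
};

/* All fields below are guarded by 'mutex', the role dev->mutex plays. */
struct toy_dev {
    pthread_mutex_t mutex;
    struct toy_event *head, *tail;
    int event_count;
};

static void toy_queue_event(struct toy_dev *dev, int wd, unsigned mask)
{
    struct toy_event *ev = malloc(sizeof(*ev));

    if (!ev)
        return;
    ev->wd = wd;
    ev->mask = mask;
    ev->next = NULL;

    pthread_mutex_lock(&dev->mutex);
    if (dev->tail)
        dev->tail->next = ev;
    else
        dev->head = ev;
    dev->tail = ev;
    dev->event_count++;
    pthread_mutex_unlock(&dev->mutex);
}

static int toy_read_event(struct toy_dev *dev, struct toy_event *out)
{
    struct toy_event *ev;

    pthread_mutex_lock(&dev->mutex);
    ev = dev->head;
    if (ev) {
        dev->head = ev->next;
        if (!dev->head)
            dev->tail = NULL;
        dev->event_count--;
    }
    pthread_mutex_unlock(&dev->mutex);
    if (!ev)
        return 0;
    *out = *ev;
    free(ev);
    return 1;
}

int main(void)
{
    struct toy_dev dev = { PTHREAD_MUTEX_INITIALIZER, NULL, NULL, 0 };
    struct toy_event ev;

    toy_queue_event(&dev, 1, 0x100);
    while (toy_read_event(&dev, &ev))
        printf("wd=%d mask=0x%x\n", ev.wd, ev.mask);
    return 0;
}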
*/ static void inotify_dev_queue_event(struct inotify_device *dev, struct inotify_watch *watch, u32 mask, @@ -315,7 +315,7 @@ static void inotify_dev_queue_event(struct inotify_device *dev, /* * remove_kevent - cleans up and ultimately frees the given kevent * - * Caller must hold dev->sem. + * Caller must hold dev->mutex. */ static void remove_kevent(struct inotify_device *dev, struct inotify_kernel_event *kevent) @@ -332,7 +332,7 @@ static void remove_kevent(struct inotify_device *dev, /* * inotify_dev_event_dequeue - destroy an event on the given device * - * Caller must hold dev->sem. + * Caller must hold dev->mutex. */ static void inotify_dev_event_dequeue(struct inotify_device *dev) { @@ -346,7 +346,7 @@ static void inotify_dev_event_dequeue(struct inotify_device *dev) /* * inotify_dev_get_wd - returns the next WD for use by the given dev * - * Callers must hold dev->sem. This function can sleep. + * Callers must hold dev->mutex. This function can sleep. */ static int inotify_dev_get_wd(struct inotify_device *dev, struct inotify_watch *watch) @@ -383,7 +383,7 @@ static int find_inode(const char __user *dirname, struct nameidata *nd, /* * create_watch - creates a watch on the given device. * - * Callers must hold dev->sem. Calls inotify_dev_get_wd() so may sleep. + * Callers must hold dev->mutex. Calls inotify_dev_get_wd() so may sleep. * Both 'dev' and 'inode' (by way of nameidata) need to be pinned. */ static struct inotify_watch *create_watch(struct inotify_device *dev, @@ -434,7 +434,7 @@ static struct inotify_watch *create_watch(struct inotify_device *dev, /* * inotify_find_dev - find the watch associated with the given inode and dev * - * Callers must hold inode->inotify_sem. + * Callers must hold inode->inotify_mutex. */ static struct inotify_watch *inode_find_dev(struct inode *inode, struct inotify_device *dev) @@ -469,7 +469,7 @@ static void remove_watch_no_event(struct inotify_watch *watch, * the IN_IGNORED event to the given device signifying that the inode is no * longer watched. * - * Callers must hold both inode->inotify_sem and dev->sem. We drop a + * Callers must hold both inode->inotify_mutex and dev->mutex. We drop a * reference to the inode before returning. * * The inode is not iput() so as to remain atomic. If the inode needs to be @@ -507,21 +507,21 @@ void inotify_inode_queue_event(struct inode *inode, u32 mask, u32 cookie, if (!inotify_inode_watched(inode)) return; - down(&inode->inotify_sem); + mutex_lock(&inode->inotify_mutex); list_for_each_entry_safe(watch, next, &inode->inotify_watches, i_list) { u32 watch_mask = watch->mask; if (watch_mask & mask) { struct inotify_device *dev = watch->dev; get_inotify_watch(watch); - down(&dev->sem); + mutex_lock(&dev->mutex); inotify_dev_queue_event(dev, watch, mask, cookie, name); if (watch_mask & IN_ONESHOT) remove_watch_no_event(watch, dev); - up(&dev->sem); + mutex_unlock(&dev->mutex); put_inotify_watch(watch); } } - up(&inode->inotify_sem); + mutex_unlock(&inode->inotify_mutex); } EXPORT_SYMBOL_GPL(inotify_inode_queue_event); @@ -569,7 +569,7 @@ EXPORT_SYMBOL_GPL(inotify_get_cookie); * @list: list of inodes being unmounted (sb->s_inodes) * * Called with inode_lock held, protecting the unmounting super block's list - * of inodes, and with iprune_sem held, keeping shrink_icache_memory() at bay. + * of inodes, and with iprune_mutex held, keeping shrink_icache_memory() at bay. * We temporarily drop inode_lock, however, and CAN block. 
*/ void inotify_unmount_inodes(struct list_head *list) @@ -618,7 +618,7 @@ void inotify_unmount_inodes(struct list_head *list) * We can safely drop inode_lock here because we hold * references on both inode and next_i. Also no new inodes * will be added since the umount has begun. Finally, - * iprune_sem keeps shrink_icache_memory() away. + * iprune_mutex keeps shrink_icache_memory() away. */ spin_unlock(&inode_lock); @@ -626,16 +626,16 @@ void inotify_unmount_inodes(struct list_head *list) iput(need_iput_tmp); /* for each watch, send IN_UNMOUNT and then remove it */ - down(&inode->inotify_sem); + mutex_lock(&inode->inotify_mutex); watches = &inode->inotify_watches; list_for_each_entry_safe(watch, next_w, watches, i_list) { struct inotify_device *dev = watch->dev; - down(&dev->sem); + mutex_lock(&dev->mutex); inotify_dev_queue_event(dev, watch, IN_UNMOUNT,0,NULL); remove_watch(watch, dev); - up(&dev->sem); + mutex_unlock(&dev->mutex); } - up(&inode->inotify_sem); + mutex_unlock(&inode->inotify_mutex); iput(inode); spin_lock(&inode_lock); @@ -651,14 +651,14 @@ void inotify_inode_is_dead(struct inode *inode) { struct inotify_watch *watch, *next; - down(&inode->inotify_sem); + mutex_lock(&inode->inotify_mutex); list_for_each_entry_safe(watch, next, &inode->inotify_watches, i_list) { struct inotify_device *dev = watch->dev; - down(&dev->sem); + mutex_lock(&dev->mutex); remove_watch(watch, dev); - up(&dev->sem); + mutex_unlock(&dev->mutex); } - up(&inode->inotify_sem); + mutex_unlock(&inode->inotify_mutex); } EXPORT_SYMBOL_GPL(inotify_inode_is_dead); @@ -670,10 +670,10 @@ static unsigned int inotify_poll(struct file *file, poll_table *wait) int ret = 0; poll_wait(file, &dev->wq, wait); - down(&dev->sem); + mutex_lock(&dev->mutex); if (!list_empty(&dev->events)) ret = POLLIN | POLLRDNORM; - up(&dev->sem); + mutex_unlock(&dev->mutex); return ret; } @@ -695,9 +695,9 @@ static ssize_t inotify_read(struct file *file, char __user *buf, prepare_to_wait(&dev->wq, &wait, TASK_INTERRUPTIBLE); - down(&dev->sem); + mutex_lock(&dev->mutex); events = !list_empty(&dev->events); - up(&dev->sem); + mutex_unlock(&dev->mutex); if (events) { ret = 0; break; @@ -720,7 +720,7 @@ static ssize_t inotify_read(struct file *file, char __user *buf, if (ret) return ret; - down(&dev->sem); + mutex_lock(&dev->mutex); while (1) { struct inotify_kernel_event *kevent; @@ -750,7 +750,7 @@ static ssize_t inotify_read(struct file *file, char __user *buf, remove_kevent(dev, kevent); } - up(&dev->sem); + mutex_unlock(&dev->mutex); return ret; } @@ -763,37 +763,37 @@ static int inotify_release(struct inode *ignored, struct file *file) * Destroy all of the watches on this device. Unfortunately, not very * pretty. We cannot do a simple iteration over the list, because we * do not know the inode until we iterate to the watch. But we need to - * hold inode->inotify_sem before dev->sem. The following works. + * hold inode->inotify_mutex before dev->mutex. The following works. 
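The comment above spells out the problem inotify_release() faces: watches are found under the device lock, but the documented order is inode lock before device lock. The loop that follows solves it by pinning the watch with a reference, dropping the device lock, retaking both locks in order and revalidating. A single-threaded userspace sketch of that dance (all names invented):

#include <pthread.h>
#include <stdio.h>

struct toy_watch {
    int refcount;
    int wd;
};

static pthread_mutex_t inode_mutex = PTHREAD_MUTEX_INITIALIZER;  /* outer lock */
static pthread_mutex_t dev_mutex   = PTHREAD_MUTEX_INITIALIZER;  /* inner lock */
static struct toy_watch the_watch = { 1, 42 };
static struct toy_watch *dev_list = &the_watch;     /* one-entry "watch list" */

static void drop_watch(struct toy_watch *w)
{
    dev_list = NULL;                /* unlink from the device "list" */
    printf("removed watch %d\n", w->wd);
}

int main(void)
{
    for (;;) {
        struct toy_watch *w;

        pthread_mutex_lock(&dev_mutex);         /* peek under the inner lock */
        w = dev_list;
        if (!w) {
            pthread_mutex_unlock(&dev_mutex);
            break;
        }
        w->refcount++;                          /* pin the watch */
        pthread_mutex_unlock(&dev_mutex);       /* drop the inner lock... */

        pthread_mutex_lock(&inode_mutex);       /* ...and relock in order */
        pthread_mutex_lock(&dev_mutex);
        if (dev_list == w)                      /* revalidate: still listed? */
            drop_watch(w);
        pthread_mutex_unlock(&dev_mutex);
        pthread_mutex_unlock(&inode_mutex);

        w->refcount--;                          /* drop our pin */
    }
    return 0;
}

The reference keeps the object alive across the window where no lock is held, and the revalidation step handles the case where someone else removed it in the meantime.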
*/ while (1) { struct inotify_watch *watch; struct list_head *watches; struct inode *inode; - down(&dev->sem); + mutex_lock(&dev->mutex); watches = &dev->watches; if (list_empty(watches)) { - up(&dev->sem); + mutex_unlock(&dev->mutex); break; } watch = list_entry(watches->next, struct inotify_watch, d_list); get_inotify_watch(watch); - up(&dev->sem); + mutex_unlock(&dev->mutex); inode = watch->inode; - down(&inode->inotify_sem); - down(&dev->sem); + mutex_lock(&inode->inotify_mutex); + mutex_lock(&dev->mutex); remove_watch_no_event(watch, dev); - up(&dev->sem); - up(&inode->inotify_sem); + mutex_unlock(&dev->mutex); + mutex_unlock(&inode->inotify_mutex); put_inotify_watch(watch); } /* destroy all of the events on this device */ - down(&dev->sem); + mutex_lock(&dev->mutex); while (!list_empty(&dev->events)) inotify_dev_event_dequeue(dev); - up(&dev->sem); + mutex_unlock(&dev->mutex); /* free this device: the put matching the get in inotify_init() */ put_inotify_dev(dev); @@ -811,26 +811,26 @@ static int inotify_ignore(struct inotify_device *dev, s32 wd) struct inotify_watch *watch; struct inode *inode; - down(&dev->sem); + mutex_lock(&dev->mutex); watch = idr_find(&dev->idr, wd); if (unlikely(!watch)) { - up(&dev->sem); + mutex_unlock(&dev->mutex); return -EINVAL; } get_inotify_watch(watch); inode = watch->inode; - up(&dev->sem); + mutex_unlock(&dev->mutex); - down(&inode->inotify_sem); - down(&dev->sem); + mutex_lock(&inode->inotify_mutex); + mutex_lock(&dev->mutex); /* make sure that we did not race */ watch = idr_find(&dev->idr, wd); if (likely(watch)) remove_watch(watch, dev); - up(&dev->sem); - up(&inode->inotify_sem); + mutex_unlock(&dev->mutex); + mutex_unlock(&inode->inotify_mutex); put_inotify_watch(watch); return 0; @@ -905,7 +905,7 @@ asmlinkage long sys_inotify_init(void) INIT_LIST_HEAD(&dev->events); INIT_LIST_HEAD(&dev->watches); init_waitqueue_head(&dev->wq); - sema_init(&dev->sem, 1); + mutex_init(&dev->mutex); dev->event_count = 0; dev->queue_size = 0; dev->max_events = inotify_max_queued_events; @@ -960,8 +960,8 @@ asmlinkage long sys_inotify_add_watch(int fd, const char __user *path, u32 mask) inode = nd.dentry->d_inode; dev = filp->private_data; - down(&inode->inotify_sem); - down(&dev->sem); + mutex_lock(&inode->inotify_mutex); + mutex_lock(&dev->mutex); if (mask & IN_MASK_ADD) mask_add = 1; @@ -998,8 +998,8 @@ asmlinkage long sys_inotify_add_watch(int fd, const char __user *path, u32 mask) list_add(&watch->i_list, &inode->inotify_watches); ret = watch->wd; out: - up(&dev->sem); - up(&inode->inotify_sem); + mutex_unlock(&dev->mutex); + mutex_unlock(&inode->inotify_mutex); path_release(&nd); fput_and_out: fput_light(filp, fput_needed); diff --git a/fs/jbd/checkpoint.c b/fs/jbd/checkpoint.c index 543ed543d1e..3f5102b069d 100644 --- a/fs/jbd/checkpoint.c +++ b/fs/jbd/checkpoint.c @@ -85,7 +85,7 @@ void __log_wait_for_space(journal_t *journal) if (journal->j_flags & JFS_ABORT) return; spin_unlock(&journal->j_state_lock); - down(&journal->j_checkpoint_sem); + mutex_lock(&journal->j_checkpoint_mutex); /* * Test again, another process may have checkpointed while we @@ -98,7 +98,7 @@ void __log_wait_for_space(journal_t *journal) log_do_checkpoint(journal); spin_lock(&journal->j_state_lock); } - up(&journal->j_checkpoint_sem); + mutex_unlock(&journal->j_checkpoint_mutex); } } diff --git a/fs/jbd/journal.c b/fs/jbd/journal.c index e4b516ac498..95a628d8cac 100644 --- a/fs/jbd/journal.c +++ b/fs/jbd/journal.c @@ -659,8 +659,8 @@ static journal_t * journal_init_common (void) 
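The __log_wait_for_space() hunk above keeps the existing check / lock / re-check structure across the conversion: the free-space test is repeated once j_checkpoint_mutex is held, because another task may already have checkpointed while this one slept on the lock. Reduced to its essentials (space_is_low() is a placeholder for the jbd space calculation):

    while (space_is_low(journal)) {
            spin_unlock(&journal->j_state_lock);
            mutex_lock(&journal->j_checkpoint_mutex);

            /* Re-check under the mutex: someone may have done the work already. */
            spin_lock(&journal->j_state_lock);
            if (space_is_low(journal)) {
                    spin_unlock(&journal->j_state_lock);
                    log_do_checkpoint(journal);     /* may sleep and do I/O */
                    spin_lock(&journal->j_state_lock);
            }
            mutex_unlock(&journal->j_checkpoint_mutex);
    }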
init_waitqueue_head(&journal->j_wait_checkpoint); init_waitqueue_head(&journal->j_wait_commit); init_waitqueue_head(&journal->j_wait_updates); - init_MUTEX(&journal->j_barrier); - init_MUTEX(&journal->j_checkpoint_sem); + mutex_init(&journal->j_barrier); + mutex_init(&journal->j_checkpoint_mutex); spin_lock_init(&journal->j_revoke_lock); spin_lock_init(&journal->j_list_lock); spin_lock_init(&journal->j_state_lock); diff --git a/fs/jbd/transaction.c b/fs/jbd/transaction.c index ca917973c2c..5fc40888f4c 100644 --- a/fs/jbd/transaction.c +++ b/fs/jbd/transaction.c @@ -455,7 +455,7 @@ void journal_lock_updates(journal_t *journal) * to make sure that we serialise special journal-locked operations * too. */ - down(&journal->j_barrier); + mutex_lock(&journal->j_barrier); } /** @@ -470,7 +470,7 @@ void journal_unlock_updates (journal_t *journal) { J_ASSERT(journal->j_barrier_count != 0); - up(&journal->j_barrier); + mutex_unlock(&journal->j_barrier); spin_lock(&journal->j_state_lock); --journal->j_barrier_count; spin_unlock(&journal->j_state_lock); diff --git a/fs/jffs/inode-v23.c b/fs/jffs/inode-v23.c index fc3855a1aef..890d7ff7456 100644 --- a/fs/jffs/inode-v23.c +++ b/fs/jffs/inode-v23.c @@ -42,7 +42,7 @@ #include <linux/quotaops.h> #include <linux/highmem.h> #include <linux/vfs.h> -#include <asm/semaphore.h> +#include <linux/mutex.h> #include <asm/byteorder.h> #include <asm/uaccess.h> @@ -203,7 +203,7 @@ jffs_setattr(struct dentry *dentry, struct iattr *iattr) fmc = c->fmc; D3(printk (KERN_NOTICE "notify_change(): down biglock\n")); - down(&fmc->biglock); + mutex_lock(&fmc->biglock); f = jffs_find_file(c, inode->i_ino); @@ -211,7 +211,7 @@ jffs_setattr(struct dentry *dentry, struct iattr *iattr) printk("jffs_setattr(): Invalid inode number: %lu\n", inode->i_ino); D3(printk (KERN_NOTICE "notify_change(): up biglock\n")); - up(&fmc->biglock); + mutex_unlock(&fmc->biglock); res = -EINVAL; goto out; }); @@ -232,7 +232,7 @@ jffs_setattr(struct dentry *dentry, struct iattr *iattr) if (!(new_node = jffs_alloc_node())) { D(printk("jffs_setattr(): Allocation failed!\n")); D3(printk (KERN_NOTICE "notify_change(): up biglock\n")); - up(&fmc->biglock); + mutex_unlock(&fmc->biglock); res = -ENOMEM; goto out; } @@ -319,7 +319,7 @@ jffs_setattr(struct dentry *dentry, struct iattr *iattr) D(printk("jffs_notify_change(): The write failed!\n")); jffs_free_node(new_node); D3(printk (KERN_NOTICE "n_c(): up biglock\n")); - up(&c->fmc->biglock); + mutex_unlock(&c->fmc->biglock); goto out; } @@ -327,7 +327,7 @@ jffs_setattr(struct dentry *dentry, struct iattr *iattr) mark_inode_dirty(inode); D3(printk (KERN_NOTICE "n_c(): up biglock\n")); - up(&c->fmc->biglock); + mutex_unlock(&c->fmc->biglock); out: unlock_kernel(); return res; @@ -461,7 +461,7 @@ jffs_rename(struct inode *old_dir, struct dentry *old_dentry, goto jffs_rename_end; } D3(printk (KERN_NOTICE "rename(): down biglock\n")); - down(&c->fmc->biglock); + mutex_lock(&c->fmc->biglock); /* Create a node and initialize as much as needed. 
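For the journal_lock_updates()/journal_unlock_updates() pair converted above, j_barrier is held for the whole time the journal is quiesced, which is exactly the usage a mutex models well. A caller-side sketch only; quiesce_for_snapshot() is a hypothetical caller, and journal_flush() is the usual way to push out committed data first:

    static void quiesce_for_snapshot(journal_t *journal)
    {
            journal_lock_updates(journal);          /* takes journal->j_barrier */
            journal_flush(journal);                 /* drain committed transactions */
            /* ... filesystem is quiescent here, e.g. for a device snapshot ... */
            journal_unlock_updates(journal);        /* releases j_barrier */
    }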
*/ result = -ENOMEM; if (!(node = jffs_alloc_node())) { @@ -555,7 +555,7 @@ jffs_rename(struct inode *old_dir, struct dentry *old_dentry, jffs_rename_end: D3(printk (KERN_NOTICE "rename(): up biglock\n")); - up(&c->fmc->biglock); + mutex_unlock(&c->fmc->biglock); unlock_kernel(); return result; } /* jffs_rename() */ @@ -574,14 +574,14 @@ jffs_readdir(struct file *filp, void *dirent, filldir_t filldir) int ddino; lock_kernel(); D3(printk (KERN_NOTICE "readdir(): down biglock\n")); - down(&c->fmc->biglock); + mutex_lock(&c->fmc->biglock); D2(printk("jffs_readdir(): inode: 0x%p, filp: 0x%p\n", inode, filp)); if (filp->f_pos == 0) { D3(printk("jffs_readdir(): \".\" %lu\n", inode->i_ino)); if (filldir(dirent, ".", 1, filp->f_pos, inode->i_ino, DT_DIR) < 0) { D3(printk (KERN_NOTICE "readdir(): up biglock\n")); - up(&c->fmc->biglock); + mutex_unlock(&c->fmc->biglock); unlock_kernel(); return 0; } @@ -598,7 +598,7 @@ jffs_readdir(struct file *filp, void *dirent, filldir_t filldir) D3(printk("jffs_readdir(): \"..\" %u\n", ddino)); if (filldir(dirent, "..", 2, filp->f_pos, ddino, DT_DIR) < 0) { D3(printk (KERN_NOTICE "readdir(): up biglock\n")); - up(&c->fmc->biglock); + mutex_unlock(&c->fmc->biglock); unlock_kernel(); return 0; } @@ -617,7 +617,7 @@ jffs_readdir(struct file *filp, void *dirent, filldir_t filldir) if (filldir(dirent, f->name, f->nsize, filp->f_pos , f->ino, DT_UNKNOWN) < 0) { D3(printk (KERN_NOTICE "readdir(): up biglock\n")); - up(&c->fmc->biglock); + mutex_unlock(&c->fmc->biglock); unlock_kernel(); return 0; } @@ -627,7 +627,7 @@ jffs_readdir(struct file *filp, void *dirent, filldir_t filldir) } while(f && f->deleted); } D3(printk (KERN_NOTICE "readdir(): up biglock\n")); - up(&c->fmc->biglock); + mutex_unlock(&c->fmc->biglock); unlock_kernel(); return filp->f_pos; } /* jffs_readdir() */ @@ -660,7 +660,7 @@ jffs_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd) }); D3(printk (KERN_NOTICE "lookup(): down biglock\n")); - down(&c->fmc->biglock); + mutex_lock(&c->fmc->biglock); r = -ENAMETOOLONG; if (len > JFFS_MAX_NAME_LEN) { @@ -683,31 +683,31 @@ jffs_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd) if ((len == 1) && (name[0] == '.')) { D3(printk (KERN_NOTICE "lookup(): up biglock\n")); - up(&c->fmc->biglock); + mutex_unlock(&c->fmc->biglock); if (!(inode = iget(dir->i_sb, d->ino))) { D(printk("jffs_lookup(): . iget() ==> NULL\n")); goto jffs_lookup_end_no_biglock; } D3(printk (KERN_NOTICE "lookup(): down biglock\n")); - down(&c->fmc->biglock); + mutex_lock(&c->fmc->biglock); } else if ((len == 2) && (name[0] == '.') && (name[1] == '.')) { D3(printk (KERN_NOTICE "lookup(): up biglock\n")); - up(&c->fmc->biglock); + mutex_unlock(&c->fmc->biglock); if (!(inode = iget(dir->i_sb, d->pino))) { D(printk("jffs_lookup(): .. iget() ==> NULL\n")); goto jffs_lookup_end_no_biglock; } D3(printk (KERN_NOTICE "lookup(): down biglock\n")); - down(&c->fmc->biglock); + mutex_lock(&c->fmc->biglock); } else if ((f = jffs_find_child(d, name, len))) { D3(printk (KERN_NOTICE "lookup(): up biglock\n")); - up(&c->fmc->biglock); + mutex_unlock(&c->fmc->biglock); if (!(inode = iget(dir->i_sb, f->ino))) { D(printk("jffs_lookup(): iget() ==> NULL\n")); goto jffs_lookup_end_no_biglock; } D3(printk (KERN_NOTICE "lookup(): down biglock\n")); - down(&c->fmc->biglock); + mutex_lock(&c->fmc->biglock); } else { D3(printk("jffs_lookup(): Couldn't find the file. 
" "f = 0x%p, name = \"%s\", d = 0x%p, d->ino = %u\n", @@ -717,13 +717,13 @@ jffs_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd) d_add(dentry, inode); D3(printk (KERN_NOTICE "lookup(): up biglock\n")); - up(&c->fmc->biglock); + mutex_unlock(&c->fmc->biglock); unlock_kernel(); return NULL; jffs_lookup_end: D3(printk (KERN_NOTICE "lookup(): up biglock\n")); - up(&c->fmc->biglock); + mutex_unlock(&c->fmc->biglock); jffs_lookup_end_no_biglock: unlock_kernel(); @@ -753,7 +753,7 @@ jffs_do_readpage_nolock(struct file *file, struct page *page) ClearPageError(page); D3(printk (KERN_NOTICE "readpage(): down biglock\n")); - down(&c->fmc->biglock); + mutex_lock(&c->fmc->biglock); read_len = 0; result = 0; @@ -782,7 +782,7 @@ jffs_do_readpage_nolock(struct file *file, struct page *page) kunmap(page); D3(printk (KERN_NOTICE "readpage(): up biglock\n")); - up(&c->fmc->biglock); + mutex_unlock(&c->fmc->biglock); if (result) { SetPageError(page); @@ -839,7 +839,7 @@ jffs_mkdir(struct inode *dir, struct dentry *dentry, int mode) c = dir_f->c; D3(printk (KERN_NOTICE "mkdir(): down biglock\n")); - down(&c->fmc->biglock); + mutex_lock(&c->fmc->biglock); dir_mode = S_IFDIR | (mode & (S_IRWXUGO|S_ISVTX) & ~current->fs->umask); @@ -906,7 +906,7 @@ jffs_mkdir(struct inode *dir, struct dentry *dentry, int mode) result = 0; jffs_mkdir_end: D3(printk (KERN_NOTICE "mkdir(): up biglock\n")); - up(&c->fmc->biglock); + mutex_unlock(&c->fmc->biglock); unlock_kernel(); return result; } /* jffs_mkdir() */ @@ -921,10 +921,10 @@ jffs_rmdir(struct inode *dir, struct dentry *dentry) D3(printk("***jffs_rmdir()\n")); D3(printk (KERN_NOTICE "rmdir(): down biglock\n")); lock_kernel(); - down(&c->fmc->biglock); + mutex_lock(&c->fmc->biglock); ret = jffs_remove(dir, dentry, S_IFDIR); D3(printk (KERN_NOTICE "rmdir(): up biglock\n")); - up(&c->fmc->biglock); + mutex_unlock(&c->fmc->biglock); unlock_kernel(); return ret; } @@ -940,10 +940,10 @@ jffs_unlink(struct inode *dir, struct dentry *dentry) lock_kernel(); D3(printk("***jffs_unlink()\n")); D3(printk (KERN_NOTICE "unlink(): down biglock\n")); - down(&c->fmc->biglock); + mutex_lock(&c->fmc->biglock); ret = jffs_remove(dir, dentry, 0); D3(printk (KERN_NOTICE "unlink(): up biglock\n")); - up(&c->fmc->biglock); + mutex_unlock(&c->fmc->biglock); unlock_kernel(); return ret; } @@ -1086,7 +1086,7 @@ jffs_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t rdev) c = dir_f->c; D3(printk (KERN_NOTICE "mknod(): down biglock\n")); - down(&c->fmc->biglock); + mutex_lock(&c->fmc->biglock); /* Create and initialize a new node. 
*/ if (!(node = jffs_alloc_node())) { @@ -1152,7 +1152,7 @@ jffs_mknod_err: jffs_mknod_end: D3(printk (KERN_NOTICE "mknod(): up biglock\n")); - up(&c->fmc->biglock); + mutex_unlock(&c->fmc->biglock); unlock_kernel(); return result; } /* jffs_mknod() */ @@ -1203,7 +1203,7 @@ jffs_symlink(struct inode *dir, struct dentry *dentry, const char *symname) return -ENOMEM; } D3(printk (KERN_NOTICE "symlink(): down biglock\n")); - down(&c->fmc->biglock); + mutex_lock(&c->fmc->biglock); node->data_offset = 0; node->removed_size = 0; @@ -1253,7 +1253,7 @@ jffs_symlink(struct inode *dir, struct dentry *dentry, const char *symname) d_instantiate(dentry, inode); jffs_symlink_end: D3(printk (KERN_NOTICE "symlink(): up biglock\n")); - up(&c->fmc->biglock); + mutex_unlock(&c->fmc->biglock); unlock_kernel(); return err; } /* jffs_symlink() */ @@ -1306,7 +1306,7 @@ jffs_create(struct inode *dir, struct dentry *dentry, int mode, return -ENOMEM; } D3(printk (KERN_NOTICE "create(): down biglock\n")); - down(&c->fmc->biglock); + mutex_lock(&c->fmc->biglock); node->data_offset = 0; node->removed_size = 0; @@ -1359,7 +1359,7 @@ jffs_create(struct inode *dir, struct dentry *dentry, int mode, d_instantiate(dentry, inode); jffs_create_end: D3(printk (KERN_NOTICE "create(): up biglock\n")); - up(&c->fmc->biglock); + mutex_unlock(&c->fmc->biglock); unlock_kernel(); return err; } /* jffs_create() */ @@ -1423,7 +1423,7 @@ jffs_file_write(struct file *filp, const char *buf, size_t count, thiscount = min(c->fmc->max_chunk_size - sizeof(struct jffs_raw_inode), count); D3(printk (KERN_NOTICE "file_write(): down biglock\n")); - down(&c->fmc->biglock); + mutex_lock(&c->fmc->biglock); /* Urgh. POSIX says we can do short writes if we feel like it. * In practice, we can't. Nothing will cope. So we loop until @@ -1511,7 +1511,7 @@ jffs_file_write(struct file *filp, const char *buf, size_t count, } out: D3(printk (KERN_NOTICE "file_write(): up biglock\n")); - up(&c->fmc->biglock); + mutex_unlock(&c->fmc->biglock); /* Fix things in the real inode. 
*/ if (pos > inode->i_size) { @@ -1567,7 +1567,7 @@ jffs_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, return -EIO; } D3(printk (KERN_NOTICE "ioctl(): down biglock\n")); - down(&c->fmc->biglock); + mutex_lock(&c->fmc->biglock); switch (cmd) { case JFFS_PRINT_HASH: @@ -1609,7 +1609,7 @@ jffs_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, ret = -ENOTTY; } D3(printk (KERN_NOTICE "ioctl(): up biglock\n")); - up(&c->fmc->biglock); + mutex_unlock(&c->fmc->biglock); return ret; } /* jffs_ioctl() */ @@ -1685,12 +1685,12 @@ jffs_read_inode(struct inode *inode) } c = (struct jffs_control *)inode->i_sb->s_fs_info; D3(printk (KERN_NOTICE "read_inode(): down biglock\n")); - down(&c->fmc->biglock); + mutex_lock(&c->fmc->biglock); if (!(f = jffs_find_file(c, inode->i_ino))) { D(printk("jffs_read_inode(): No such inode (%lu).\n", inode->i_ino)); D3(printk (KERN_NOTICE "read_inode(): up biglock\n")); - up(&c->fmc->biglock); + mutex_unlock(&c->fmc->biglock); return; } inode->u.generic_ip = (void *)f; @@ -1732,7 +1732,7 @@ jffs_read_inode(struct inode *inode) } D3(printk (KERN_NOTICE "read_inode(): up biglock\n")); - up(&c->fmc->biglock); + mutex_unlock(&c->fmc->biglock); } diff --git a/fs/jffs/intrep.c b/fs/jffs/intrep.c index ce7b54b0b2b..0ef207dfaf6 100644 --- a/fs/jffs/intrep.c +++ b/fs/jffs/intrep.c @@ -62,7 +62,7 @@ #include <linux/fs.h> #include <linux/stat.h> #include <linux/pagemap.h> -#include <asm/semaphore.h> +#include <linux/mutex.h> #include <asm/byteorder.h> #include <linux/smp_lock.h> #include <linux/time.h> @@ -3416,7 +3416,7 @@ jffs_garbage_collect_thread(void *ptr) D1(printk (KERN_NOTICE "jffs_garbage_collect_thread(): collecting.\n")); D3(printk (KERN_NOTICE "g_c_thread(): down biglock\n")); - down(&fmc->biglock); + mutex_lock(&fmc->biglock); D1(printk("***jffs_garbage_collect_thread(): round #%u, " "fmc->dirty_size = %u\n", i++, fmc->dirty_size)); @@ -3447,6 +3447,6 @@ jffs_garbage_collect_thread(void *ptr) gc_end: D3(printk (KERN_NOTICE "g_c_thread(): up biglock\n")); - up(&fmc->biglock); + mutex_unlock(&fmc->biglock); } /* for (;;) */ } /* jffs_garbage_collect_thread() */ diff --git a/fs/jffs/jffs_fm.c b/fs/jffs/jffs_fm.c index 6da13b309bd..7d8ca1aeace 100644 --- a/fs/jffs/jffs_fm.c +++ b/fs/jffs/jffs_fm.c @@ -139,7 +139,7 @@ jffs_build_begin(struct jffs_control *c, int unit) fmc->tail = NULL; fmc->head_extra = NULL; fmc->tail_extra = NULL; - init_MUTEX(&fmc->biglock); + mutex_init(&fmc->biglock); return fmc; } diff --git a/fs/jffs/jffs_fm.h b/fs/jffs/jffs_fm.h index f64151e7412..c794d923df2 100644 --- a/fs/jffs/jffs_fm.h +++ b/fs/jffs/jffs_fm.h @@ -20,10 +20,11 @@ #ifndef __LINUX_JFFS_FM_H__ #define __LINUX_JFFS_FM_H__ +#include <linux/config.h> #include <linux/types.h> #include <linux/jffs.h> #include <linux/mtd/mtd.h> -#include <linux/config.h> +#include <linux/mutex.h> /* The alignment between two nodes in the flash memory. 
*/ #define JFFS_ALIGN_SIZE 4 @@ -97,7 +98,7 @@ struct jffs_fmcontrol struct jffs_fm *tail; struct jffs_fm *head_extra; struct jffs_fm *tail_extra; - struct semaphore biglock; + struct mutex biglock; }; /* Notice the two members head_extra and tail_extra in the jffs_control diff --git a/fs/libfs.c b/fs/libfs.c index 71fd08fa410..4fdeaceb892 100644 --- a/fs/libfs.c +++ b/fs/libfs.c @@ -7,6 +7,8 @@ #include <linux/pagemap.h> #include <linux/mount.h> #include <linux/vfs.h> +#include <linux/mutex.h> + #include <asm/uaccess.h> int simple_getattr(struct vfsmount *mnt, struct dentry *dentry, @@ -530,7 +532,7 @@ struct simple_attr { char set_buf[24]; void *data; const char *fmt; /* format for read operation */ - struct semaphore sem; /* protects access to these buffers */ + struct mutex mutex; /* protects access to these buffers */ }; /* simple_attr_open is called by an actual attribute open file operation @@ -549,7 +551,7 @@ int simple_attr_open(struct inode *inode, struct file *file, attr->set = set; attr->data = inode->u.generic_ip; attr->fmt = fmt; - init_MUTEX(&attr->sem); + mutex_init(&attr->mutex); file->private_data = attr; @@ -575,7 +577,7 @@ ssize_t simple_attr_read(struct file *file, char __user *buf, if (!attr->get) return -EACCES; - down(&attr->sem); + mutex_lock(&attr->mutex); if (*ppos) /* continued read */ size = strlen(attr->get_buf); else /* first read */ @@ -584,7 +586,7 @@ ssize_t simple_attr_read(struct file *file, char __user *buf, (unsigned long long)attr->get(attr->data)); ret = simple_read_from_buffer(buf, len, ppos, attr->get_buf, size); - up(&attr->sem); + mutex_unlock(&attr->mutex); return ret; } @@ -602,7 +604,7 @@ ssize_t simple_attr_write(struct file *file, const char __user *buf, if (!attr->set) return -EACCES; - down(&attr->sem); + mutex_lock(&attr->mutex); ret = -EFAULT; size = min(sizeof(attr->set_buf) - 1, len); if (copy_from_user(attr->set_buf, buf, size)) @@ -613,7 +615,7 @@ ssize_t simple_attr_write(struct file *file, const char __user *buf, val = simple_strtol(attr->set_buf, NULL, 0); attr->set(attr->data, val); out: - up(&attr->sem); + mutex_unlock(&attr->mutex); return ret; } diff --git a/fs/minix/namei.c b/fs/minix/namei.c index b25bca5bdb5..5b6a4540a05 100644 --- a/fs/minix/namei.c +++ b/fs/minix/namei.c @@ -6,18 +6,6 @@ #include "minix.h" -static inline void inc_count(struct inode *inode) -{ - inode->i_nlink++; - mark_inode_dirty(inode); -} - -static inline void dec_count(struct inode *inode) -{ - inode->i_nlink--; - mark_inode_dirty(inode); -} - static int add_nondir(struct dentry *dentry, struct inode *inode) { int err = minix_add_link(dentry, inode); @@ -25,7 +13,7 @@ static int add_nondir(struct dentry *dentry, struct inode *inode) d_instantiate(dentry, inode); return 0; } - dec_count(inode); + inode_dec_link_count(inode); iput(inode); return err; } @@ -125,7 +113,7 @@ out: return err; out_fail: - dec_count(inode); + inode_dec_link_count(inode); iput(inode); goto out; } @@ -139,7 +127,7 @@ static int minix_link(struct dentry * old_dentry, struct inode * dir, return -EMLINK; inode->i_ctime = CURRENT_TIME_SEC; - inc_count(inode); + inode_inc_link_count(inode); atomic_inc(&inode->i_count); return add_nondir(dentry, inode); } @@ -152,7 +140,7 @@ static int minix_mkdir(struct inode * dir, struct dentry *dentry, int mode) if (dir->i_nlink >= minix_sb(dir->i_sb)->s_link_max) goto out; - inc_count(dir); + inode_inc_link_count(dir); inode = minix_new_inode(dir, &err); if (!inode) @@ -163,7 +151,7 @@ static int minix_mkdir(struct inode * dir, struct dentry 
*dentry, int mode) inode->i_mode |= S_ISGID; minix_set_inode(inode, 0); - inc_count(inode); + inode_inc_link_count(inode); err = minix_make_empty(inode, dir); if (err) @@ -178,11 +166,11 @@ out: return err; out_fail: - dec_count(inode); - dec_count(inode); + inode_dec_link_count(inode); + inode_dec_link_count(inode); iput(inode); out_dir: - dec_count(dir); + inode_dec_link_count(dir); goto out; } @@ -202,7 +190,7 @@ static int minix_unlink(struct inode * dir, struct dentry *dentry) goto end_unlink; inode->i_ctime = dir->i_ctime; - dec_count(inode); + inode_dec_link_count(inode); end_unlink: return err; } @@ -215,8 +203,8 @@ static int minix_rmdir(struct inode * dir, struct dentry *dentry) if (minix_empty_dir(inode)) { err = minix_unlink(dir, dentry); if (!err) { - dec_count(dir); - dec_count(inode); + inode_dec_link_count(dir); + inode_dec_link_count(inode); } } return err; @@ -257,34 +245,34 @@ static int minix_rename(struct inode * old_dir, struct dentry *old_dentry, new_de = minix_find_entry(new_dentry, &new_page); if (!new_de) goto out_dir; - inc_count(old_inode); + inode_inc_link_count(old_inode); minix_set_link(new_de, new_page, old_inode); new_inode->i_ctime = CURRENT_TIME_SEC; if (dir_de) new_inode->i_nlink--; - dec_count(new_inode); + inode_dec_link_count(new_inode); } else { if (dir_de) { err = -EMLINK; if (new_dir->i_nlink >= info->s_link_max) goto out_dir; } - inc_count(old_inode); + inode_inc_link_count(old_inode); err = minix_add_link(new_dentry, old_inode); if (err) { - dec_count(old_inode); + inode_dec_link_count(old_inode); goto out_dir; } if (dir_de) - inc_count(new_dir); + inode_inc_link_count(new_dir); } minix_delete_entry(old_de, old_page); - dec_count(old_inode); + inode_dec_link_count(old_inode); if (dir_de) { minix_set_link(dir_de, dir_page, new_dir); - dec_count(old_dir); + inode_dec_link_count(old_dir); } return 0; diff --git a/fs/namei.c b/fs/namei.c index 8dc2b038d5d..c72b940797f 100644 --- a/fs/namei.c +++ b/fs/namei.c @@ -104,7 +104,7 @@ */ /* * [Sep 2001 AV] Single-semaphore locking scheme (kudos to David Holland) - * implemented. Let's see if raised priority of ->s_vfs_rename_sem gives + * implemented. Let's see if raised priority of ->s_vfs_rename_mutex gives * any extra contention... */ @@ -1422,7 +1422,7 @@ struct dentry *lock_rename(struct dentry *p1, struct dentry *p2) return NULL; } - down(&p1->d_inode->i_sb->s_vfs_rename_sem); + mutex_lock(&p1->d_inode->i_sb->s_vfs_rename_mutex); for (p = p1; p->d_parent != p; p = p->d_parent) { if (p->d_parent == p2) { @@ -1450,7 +1450,7 @@ void unlock_rename(struct dentry *p1, struct dentry *p2) mutex_unlock(&p1->d_inode->i_mutex); if (p1 != p2) { mutex_unlock(&p2->d_inode->i_mutex); - up(&p1->d_inode->i_sb->s_vfs_rename_sem); + mutex_unlock(&p1->d_inode->i_sb->s_vfs_rename_mutex); } } @@ -2277,17 +2277,17 @@ asmlinkage long sys_link(const char __user *oldname, const char __user *newname) * a) we can get into loop creation. Check is done in is_subdir(). * b) race potential - two innocent renames can create a loop together. * That's where 4.4 screws up. Current fix: serialization on - * sb->s_vfs_rename_sem. We might be more accurate, but that's another + * sb->s_vfs_rename_mutex. We might be more accurate, but that's another * story. * c) we have to lock _three_ objects - parents and victim (if it exists). * And that - after we got ->i_mutex on parents (until then we don't know * whether the target exists). Solution: try to be smart with locking * order for inodes. 
We rely on the fact that tree topology may change - * only under ->s_vfs_rename_sem _and_ that parent of the object we + * only under ->s_vfs_rename_mutex _and_ that parent of the object we * move will be locked. Thus we can rank directories by the tree * (ancestors first) and rank all non-directories after them. * That works since everybody except rename does "lock parent, lookup, - * lock child" and rename is under ->s_vfs_rename_sem. + * lock child" and rename is under ->s_vfs_rename_mutex. * HOWEVER, it relies on the assumption that any object with ->lookup() * has no more than 1 dentry. If "hybrid" objects will ever appear, * we'd better make sure that there's no link(2) for them. diff --git a/fs/ncpfs/file.c b/fs/ncpfs/file.c index 973b444d691..ebdad8f6398 100644 --- a/fs/ncpfs/file.c +++ b/fs/ncpfs/file.c @@ -46,7 +46,7 @@ int ncp_make_open(struct inode *inode, int right) NCP_FINFO(inode)->volNumber, NCP_FINFO(inode)->dirEntNum); error = -EACCES; - down(&NCP_FINFO(inode)->open_sem); + mutex_lock(&NCP_FINFO(inode)->open_mutex); if (!atomic_read(&NCP_FINFO(inode)->opened)) { struct ncp_entry_info finfo; int result; @@ -93,7 +93,7 @@ int ncp_make_open(struct inode *inode, int right) } out_unlock: - up(&NCP_FINFO(inode)->open_sem); + mutex_unlock(&NCP_FINFO(inode)->open_mutex); out: return error; } diff --git a/fs/ncpfs/inode.c b/fs/ncpfs/inode.c index d277a58bd12..0b521d3d97c 100644 --- a/fs/ncpfs/inode.c +++ b/fs/ncpfs/inode.c @@ -63,7 +63,7 @@ static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags) if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) == SLAB_CTOR_CONSTRUCTOR) { - init_MUTEX(&ei->open_sem); + mutex_init(&ei->open_mutex); inode_init_once(&ei->vfs_inode); } } @@ -520,7 +520,7 @@ static int ncp_fill_super(struct super_block *sb, void *raw_data, int silent) } /* server->lock = 0; */ - init_MUTEX(&server->sem); + mutex_init(&server->mutex); server->packet = NULL; /* server->buffer_size = 0; */ /* server->conn_status = 0; */ @@ -557,7 +557,7 @@ static int ncp_fill_super(struct super_block *sb, void *raw_data, int silent) server->dentry_ttl = 0; /* no caching */ INIT_LIST_HEAD(&server->tx.requests); - init_MUTEX(&server->rcv.creq_sem); + mutex_init(&server->rcv.creq_mutex); server->tx.creq = NULL; server->rcv.creq = NULL; server->data_ready = sock->sk->sk_data_ready; diff --git a/fs/ncpfs/ncplib_kernel.c b/fs/ncpfs/ncplib_kernel.c index c755e1848a4..d9ebf6439f5 100644 --- a/fs/ncpfs/ncplib_kernel.c +++ b/fs/ncpfs/ncplib_kernel.c @@ -291,7 +291,7 @@ ncp_make_closed(struct inode *inode) int err; err = 0; - down(&NCP_FINFO(inode)->open_sem); + mutex_lock(&NCP_FINFO(inode)->open_mutex); if (atomic_read(&NCP_FINFO(inode)->opened) == 1) { atomic_set(&NCP_FINFO(inode)->opened, 0); err = ncp_close_file(NCP_SERVER(inode), NCP_FINFO(inode)->file_handle); @@ -301,7 +301,7 @@ ncp_make_closed(struct inode *inode) NCP_FINFO(inode)->volNumber, NCP_FINFO(inode)->dirEntNum, err); } - up(&NCP_FINFO(inode)->open_sem); + mutex_unlock(&NCP_FINFO(inode)->open_mutex); return err; } diff --git a/fs/ncpfs/sock.c b/fs/ncpfs/sock.c index 6593a5ca88b..8783eb7ec64 100644 --- a/fs/ncpfs/sock.c +++ b/fs/ncpfs/sock.c @@ -171,9 +171,9 @@ static inline void __ncp_abort_request(struct ncp_server *server, struct ncp_req static inline void ncp_abort_request(struct ncp_server *server, struct ncp_request_reply *req, int err) { - down(&server->rcv.creq_sem); + mutex_lock(&server->rcv.creq_mutex); __ncp_abort_request(server, req, err); - up(&server->rcv.creq_sem); + 
mutex_unlock(&server->rcv.creq_mutex); } static inline void __ncptcp_abort(struct ncp_server *server) @@ -303,20 +303,20 @@ static inline void __ncp_start_request(struct ncp_server *server, struct ncp_req static int ncp_add_request(struct ncp_server *server, struct ncp_request_reply *req) { - down(&server->rcv.creq_sem); + mutex_lock(&server->rcv.creq_mutex); if (!ncp_conn_valid(server)) { - up(&server->rcv.creq_sem); + mutex_unlock(&server->rcv.creq_mutex); printk(KERN_ERR "ncpfs: tcp: Server died\n"); return -EIO; } if (server->tx.creq || server->rcv.creq) { req->status = RQ_QUEUED; list_add_tail(&req->req, &server->tx.requests); - up(&server->rcv.creq_sem); + mutex_unlock(&server->rcv.creq_mutex); return 0; } __ncp_start_request(server, req); - up(&server->rcv.creq_sem); + mutex_unlock(&server->rcv.creq_mutex); return 0; } @@ -400,7 +400,7 @@ void ncpdgram_rcv_proc(void *s) info_server(server, 0, server->unexpected_packet.data, result); continue; } - down(&server->rcv.creq_sem); + mutex_lock(&server->rcv.creq_mutex); req = server->rcv.creq; if (req && (req->tx_type == NCP_ALLOC_SLOT_REQUEST || (server->sequence == reply.sequence && server->connection == get_conn_number(&reply)))) { @@ -430,11 +430,11 @@ void ncpdgram_rcv_proc(void *s) server->rcv.creq = NULL; ncp_finish_request(req, result); __ncp_next_request(server); - up(&server->rcv.creq_sem); + mutex_unlock(&server->rcv.creq_mutex); continue; } } - up(&server->rcv.creq_sem); + mutex_unlock(&server->rcv.creq_mutex); } drop:; _recv(sock, &reply, sizeof(reply), MSG_DONTWAIT); @@ -472,9 +472,9 @@ static void __ncpdgram_timeout_proc(struct ncp_server *server) void ncpdgram_timeout_proc(void *s) { struct ncp_server *server = s; - down(&server->rcv.creq_sem); + mutex_lock(&server->rcv.creq_mutex); __ncpdgram_timeout_proc(server); - up(&server->rcv.creq_sem); + mutex_unlock(&server->rcv.creq_mutex); } static inline void ncp_init_req(struct ncp_request_reply* req) @@ -657,18 +657,18 @@ void ncp_tcp_rcv_proc(void *s) { struct ncp_server *server = s; - down(&server->rcv.creq_sem); + mutex_lock(&server->rcv.creq_mutex); __ncptcp_rcv_proc(server); - up(&server->rcv.creq_sem); + mutex_unlock(&server->rcv.creq_mutex); } void ncp_tcp_tx_proc(void *s) { struct ncp_server *server = s; - down(&server->rcv.creq_sem); + mutex_lock(&server->rcv.creq_mutex); __ncptcp_try_send(server); - up(&server->rcv.creq_sem); + mutex_unlock(&server->rcv.creq_mutex); } static int do_ncp_rpc_call(struct ncp_server *server, int size, @@ -833,7 +833,7 @@ int ncp_disconnect(struct ncp_server *server) void ncp_lock_server(struct ncp_server *server) { - down(&server->sem); + mutex_lock(&server->mutex); if (server->lock) printk(KERN_WARNING "ncp_lock_server: was locked!\n"); server->lock = 1; @@ -846,5 +846,5 @@ void ncp_unlock_server(struct ncp_server *server) return; } server->lock = 0; - up(&server->sem); + mutex_unlock(&server->mutex); } diff --git a/fs/open.c b/fs/open.c index 70e0230d8e7..1091dadd6c3 100644 --- a/fs/open.c +++ b/fs/open.c @@ -973,7 +973,7 @@ repeat: fdt = files_fdtable(files); fd = find_next_zero_bit(fdt->open_fds->fds_bits, fdt->max_fdset, - fdt->next_fd); + files->next_fd); /* * N.B. 
For clone tasks sharing a files structure, this test @@ -998,7 +998,7 @@ repeat: FD_SET(fd, fdt->open_fds); FD_CLR(fd, fdt->close_on_exec); - fdt->next_fd = fd + 1; + files->next_fd = fd + 1; #if 1 /* Sanity check */ if (fdt->fd[fd] != NULL) { @@ -1019,8 +1019,8 @@ static void __put_unused_fd(struct files_struct *files, unsigned int fd) { struct fdtable *fdt = files_fdtable(files); __FD_CLR(fd, fdt->open_fds); - if (fd < fdt->next_fd) - fdt->next_fd = fd; + if (fd < files->next_fd) + files->next_fd = fd; } void fastcall put_unused_fd(unsigned int fd) diff --git a/fs/proc/proc_misc.c b/fs/proc/proc_misc.c index 1d24fead51a..826c131994c 100644 --- a/fs/proc/proc_misc.c +++ b/fs/proc/proc_misc.c @@ -312,7 +312,7 @@ static void *devinfo_next(struct seq_file *f, void *v, loff_t *pos) case BLK_HDR: info->state = BLK_LIST; (*pos)++; - break; + /*fallthrough*/ case BLK_LIST: if (get_blkdev_info(info->blkdev,&idummy,&ndummy)) { /* diff --git a/fs/qnx4/file.c b/fs/qnx4/file.c index b471315e24e..c33963fded9 100644 --- a/fs/qnx4/file.c +++ b/fs/qnx4/file.c @@ -12,10 +12,7 @@ * 27-06-1998 by Frank Denis : file overwriting. */ -#include <linux/config.h> -#include <linux/types.h> #include <linux/fs.h> -#include <linux/time.h> #include <linux/qnx4_fs.h> /* diff --git a/fs/quota.c b/fs/quota.c index ba9e0bf32f6..d6a2be826e2 100644 --- a/fs/quota.c +++ b/fs/quota.c @@ -170,10 +170,10 @@ static void quota_sync_sb(struct super_block *sb, int type) /* Now when everything is written we can discard the pagecache so * that userspace sees the changes. We need i_mutex and so we could - * not do it inside dqonoff_sem. Moreover we need to be carefull + * not do it inside dqonoff_mutex. Moreover we need to be carefull * about races with quotaoff() (that is the reason why we have own * reference to inode). */ - down(&sb_dqopt(sb)->dqonoff_sem); + mutex_lock(&sb_dqopt(sb)->dqonoff_mutex); for (cnt = 0; cnt < MAXQUOTAS; cnt++) { discard[cnt] = NULL; if (type != -1 && cnt != type) @@ -182,7 +182,7 @@ static void quota_sync_sb(struct super_block *sb, int type) continue; discard[cnt] = igrab(sb_dqopt(sb)->files[cnt]); } - up(&sb_dqopt(sb)->dqonoff_sem); + mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex); for (cnt = 0; cnt < MAXQUOTAS; cnt++) { if (discard[cnt]) { mutex_lock(&discard[cnt]->i_mutex); diff --git a/fs/quota_v2.c b/fs/quota_v2.c index b4199ec3ece..c519a583e68 100644 --- a/fs/quota_v2.c +++ b/fs/quota_v2.c @@ -394,7 +394,7 @@ static int v2_write_dquot(struct dquot *dquot) ssize_t ret; struct v2_disk_dqblk ddquot, empty; - /* dq_off is guarded by dqio_sem */ + /* dq_off is guarded by dqio_mutex */ if (!dquot->dq_off) if ((ret = dq_insert_tree(dquot)) < 0) { printk(KERN_ERR "VFS: Error %zd occurred while creating quota.\n", ret); diff --git a/fs/ramfs/file-mmu.c b/fs/ramfs/file-mmu.c index 2115383dcc8..6ada2095b9a 100644 --- a/fs/ramfs/file-mmu.c +++ b/fs/ramfs/file-mmu.c @@ -24,18 +24,7 @@ * caches is sufficient. 
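The quota_sync_sb() change above is a good example of why some of these locks keep their old ordering constraints after the conversion: i_mutex may not be taken inside dqonoff_mutex, so the quota inodes are pinned with igrab() while dqonoff_mutex is held and the i_mutex work is done only after it is dropped. Stripped of the per-quota-type loop, the shape is roughly as follows (discard_quota_pagecache() is a made-up name and the pagecache work is left as a placeholder):

    static void discard_quota_pagecache(struct super_block *sb, int type)
    {
            struct inode *inode = NULL;

            mutex_lock(&sb_dqopt(sb)->dqonoff_mutex);
            if (sb_has_quota_enabled(sb, type))
                    inode = igrab(sb_dqopt(sb)->files[type]);
            mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex);

            if (!inode)
                    return;
            mutex_lock(&inode->i_mutex);    /* safe: dqonoff_mutex no longer held */
            /* ... invalidate the quota file's pagecache here ... */
            mutex_unlock(&inode->i_mutex);
            iput(inode);
    }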
*/ -#include <linux/module.h> #include <linux/fs.h> -#include <linux/pagemap.h> -#include <linux/highmem.h> -#include <linux/init.h> -#include <linux/string.h> -#include <linux/smp_lock.h> -#include <linux/backing-dev.h> -#include <linux/ramfs.h> - -#include <asm/uaccess.h> -#include "internal.h" struct address_space_operations ramfs_aops = { .readpage = simple_readpage, diff --git a/fs/seq_file.c b/fs/seq_file.c index 7c40570b71d..555b9ac04c2 100644 --- a/fs/seq_file.c +++ b/fs/seq_file.c @@ -37,7 +37,7 @@ int seq_open(struct file *file, struct seq_operations *op) file->private_data = p; } memset(p, 0, sizeof(*p)); - sema_init(&p->sem, 1); + mutex_init(&p->lock); p->op = op; /* @@ -71,7 +71,7 @@ ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos) void *p; int err = 0; - down(&m->sem); + mutex_lock(&m->lock); /* * seq_file->op->..m_start/m_stop/m_next may do special actions * or optimisations based on the file->f_version, so we want to @@ -164,7 +164,7 @@ Done: else *ppos += copied; file->f_version = m->version; - up(&m->sem); + mutex_unlock(&m->lock); return copied; Enomem: err = -ENOMEM; @@ -237,7 +237,7 @@ loff_t seq_lseek(struct file *file, loff_t offset, int origin) struct seq_file *m = (struct seq_file *)file->private_data; long long retval = -EINVAL; - down(&m->sem); + mutex_lock(&m->lock); m->version = file->f_version; switch (origin) { case 1: @@ -260,7 +260,7 @@ loff_t seq_lseek(struct file *file, loff_t offset, int origin) } } } - up(&m->sem); + mutex_unlock(&m->lock); file->f_version = m->version; return retval; } diff --git a/fs/super.c b/fs/super.c index e20b5580afd..425861cb1ca 100644 --- a/fs/super.c +++ b/fs/super.c @@ -76,9 +76,9 @@ static struct super_block *alloc_super(void) down_write(&s->s_umount); s->s_count = S_BIAS; atomic_set(&s->s_active, 1); - sema_init(&s->s_vfs_rename_sem,1); - sema_init(&s->s_dquot.dqio_sem, 1); - sema_init(&s->s_dquot.dqonoff_sem, 1); + mutex_init(&s->s_vfs_rename_mutex); + mutex_init(&s->s_dquot.dqio_mutex); + mutex_init(&s->s_dquot.dqonoff_mutex); init_rwsem(&s->s_dquot.dqptr_sem); init_waitqueue_head(&s->s_wait_unfrozen); s->s_maxbytes = MAX_NON_LFS; @@ -693,9 +693,9 @@ struct super_block *get_sb_bdev(struct file_system_type *fs_type, * will protect the lockfs code from trying to start a snapshot * while we are mounting */ - down(&bdev->bd_mount_sem); + mutex_lock(&bdev->bd_mount_mutex); s = sget(fs_type, test_bdev_super, set_bdev_super, bdev); - up(&bdev->bd_mount_sem); + mutex_unlock(&bdev->bd_mount_mutex); if (IS_ERR(s)) goto out; diff --git a/fs/sysv/namei.c b/fs/sysv/namei.c index 7f0e4b53085..b8a73f716fb 100644 --- a/fs/sysv/namei.c +++ b/fs/sysv/namei.c @@ -16,18 +16,6 @@ #include <linux/smp_lock.h> #include "sysv.h" -static inline void inc_count(struct inode *inode) -{ - inode->i_nlink++; - mark_inode_dirty(inode); -} - -static inline void dec_count(struct inode *inode) -{ - inode->i_nlink--; - mark_inode_dirty(inode); -} - static int add_nondir(struct dentry *dentry, struct inode *inode) { int err = sysv_add_link(dentry, inode); @@ -35,7 +23,7 @@ static int add_nondir(struct dentry *dentry, struct inode *inode) d_instantiate(dentry, inode); return 0; } - dec_count(inode); + inode_dec_link_count(inode); iput(inode); return err; } @@ -124,7 +112,7 @@ out: return err; out_fail: - dec_count(inode); + inode_dec_link_count(inode); iput(inode); goto out; } @@ -138,7 +126,7 @@ static int sysv_link(struct dentry * old_dentry, struct inode * dir, return -EMLINK; inode->i_ctime = CURRENT_TIME_SEC; - 
inc_count(inode); + inode_inc_link_count(inode); atomic_inc(&inode->i_count); return add_nondir(dentry, inode); @@ -151,7 +139,7 @@ static int sysv_mkdir(struct inode * dir, struct dentry *dentry, int mode) if (dir->i_nlink >= SYSV_SB(dir->i_sb)->s_link_max) goto out; - inc_count(dir); + inode_inc_link_count(dir); inode = sysv_new_inode(dir, S_IFDIR|mode); err = PTR_ERR(inode); @@ -160,7 +148,7 @@ static int sysv_mkdir(struct inode * dir, struct dentry *dentry, int mode) sysv_set_inode(inode, 0); - inc_count(inode); + inode_inc_link_count(inode); err = sysv_make_empty(inode, dir); if (err) @@ -175,11 +163,11 @@ out: return err; out_fail: - dec_count(inode); - dec_count(inode); + inode_dec_link_count(inode); + inode_dec_link_count(inode); iput(inode); out_dir: - dec_count(dir); + inode_dec_link_count(dir); goto out; } @@ -199,7 +187,7 @@ static int sysv_unlink(struct inode * dir, struct dentry * dentry) goto out; inode->i_ctime = dir->i_ctime; - dec_count(inode); + inode_dec_link_count(inode); out: return err; } @@ -213,8 +201,8 @@ static int sysv_rmdir(struct inode * dir, struct dentry * dentry) err = sysv_unlink(dir, dentry); if (!err) { inode->i_size = 0; - dec_count(inode); - dec_count(dir); + inode_dec_link_count(inode); + inode_dec_link_count(dir); } } return err; @@ -258,34 +246,34 @@ static int sysv_rename(struct inode * old_dir, struct dentry * old_dentry, new_de = sysv_find_entry(new_dentry, &new_page); if (!new_de) goto out_dir; - inc_count(old_inode); + inode_inc_link_count(old_inode); sysv_set_link(new_de, new_page, old_inode); new_inode->i_ctime = CURRENT_TIME_SEC; if (dir_de) new_inode->i_nlink--; - dec_count(new_inode); + inode_dec_link_count(new_inode); } else { if (dir_de) { err = -EMLINK; if (new_dir->i_nlink >= SYSV_SB(new_dir->i_sb)->s_link_max) goto out_dir; } - inc_count(old_inode); + inode_inc_link_count(old_inode); err = sysv_add_link(new_dentry, old_inode); if (err) { - dec_count(old_inode); + inode_dec_link_count(old_inode); goto out_dir; } if (dir_de) - inc_count(new_dir); + inode_inc_link_count(new_dir); } sysv_delete_entry(old_de, old_page); - dec_count(old_inode); + inode_dec_link_count(old_inode); if (dir_de) { sysv_set_link(dir_de, dir_page, new_dir); - dec_count(old_dir); + inode_dec_link_count(old_dir); } return 0; diff --git a/fs/udf/balloc.c b/fs/udf/balloc.c index 201049ac8a9..ea521f846d9 100644 --- a/fs/udf/balloc.c +++ b/fs/udf/balloc.c @@ -152,7 +152,7 @@ static void udf_bitmap_free_blocks(struct super_block * sb, int bitmap_nr; unsigned long overflow; - down(&sbi->s_alloc_sem); + mutex_lock(&sbi->s_alloc_mutex); if (bloc.logicalBlockNum < 0 || (bloc.logicalBlockNum + count) > UDF_SB_PARTLEN(sb, bloc.partitionReferenceNum)) { @@ -211,7 +211,7 @@ error_return: sb->s_dirt = 1; if (UDF_SB_LVIDBH(sb)) mark_buffer_dirty(UDF_SB_LVIDBH(sb)); - up(&sbi->s_alloc_sem); + mutex_unlock(&sbi->s_alloc_mutex); return; } @@ -226,7 +226,7 @@ static int udf_bitmap_prealloc_blocks(struct super_block * sb, int nr_groups, bitmap_nr; struct buffer_head *bh; - down(&sbi->s_alloc_sem); + mutex_lock(&sbi->s_alloc_mutex); if (first_block < 0 || first_block >= UDF_SB_PARTLEN(sb, partition)) goto out; @@ -275,7 +275,7 @@ out: mark_buffer_dirty(UDF_SB_LVIDBH(sb)); } sb->s_dirt = 1; - up(&sbi->s_alloc_sem); + mutex_unlock(&sbi->s_alloc_mutex); return alloc_count; } @@ -291,7 +291,7 @@ static int udf_bitmap_new_block(struct super_block * sb, int newblock = 0; *err = -ENOSPC; - down(&sbi->s_alloc_sem); + mutex_lock(&sbi->s_alloc_mutex); repeat: if (goal < 0 || goal >= 
UDF_SB_PARTLEN(sb, partition)) @@ -364,7 +364,7 @@ repeat: } if (i >= (nr_groups*2)) { - up(&sbi->s_alloc_sem); + mutex_unlock(&sbi->s_alloc_mutex); return newblock; } if (bit < sb->s_blocksize << 3) @@ -373,7 +373,7 @@ repeat: bit = udf_find_next_one_bit(bh->b_data, sb->s_blocksize << 3, group_start << 3); if (bit >= sb->s_blocksize << 3) { - up(&sbi->s_alloc_sem); + mutex_unlock(&sbi->s_alloc_mutex); return 0; } @@ -387,7 +387,7 @@ got_block: */ if (inode && DQUOT_ALLOC_BLOCK(inode, 1)) { - up(&sbi->s_alloc_sem); + mutex_unlock(&sbi->s_alloc_mutex); *err = -EDQUOT; return 0; } @@ -410,13 +410,13 @@ got_block: mark_buffer_dirty(UDF_SB_LVIDBH(sb)); } sb->s_dirt = 1; - up(&sbi->s_alloc_sem); + mutex_unlock(&sbi->s_alloc_mutex); *err = 0; return newblock; error_return: *err = -EIO; - up(&sbi->s_alloc_sem); + mutex_unlock(&sbi->s_alloc_mutex); return 0; } @@ -433,7 +433,7 @@ static void udf_table_free_blocks(struct super_block * sb, int8_t etype; int i; - down(&sbi->s_alloc_sem); + mutex_lock(&sbi->s_alloc_mutex); if (bloc.logicalBlockNum < 0 || (bloc.logicalBlockNum + count) > UDF_SB_PARTLEN(sb, bloc.partitionReferenceNum)) { @@ -666,7 +666,7 @@ static void udf_table_free_blocks(struct super_block * sb, error_return: sb->s_dirt = 1; - up(&sbi->s_alloc_sem); + mutex_unlock(&sbi->s_alloc_mutex); return; } @@ -692,7 +692,7 @@ static int udf_table_prealloc_blocks(struct super_block * sb, else return 0; - down(&sbi->s_alloc_sem); + mutex_lock(&sbi->s_alloc_mutex); extoffset = sizeof(struct unallocSpaceEntry); bloc = UDF_I_LOCATION(table); @@ -736,7 +736,7 @@ static int udf_table_prealloc_blocks(struct super_block * sb, mark_buffer_dirty(UDF_SB_LVIDBH(sb)); sb->s_dirt = 1; } - up(&sbi->s_alloc_sem); + mutex_unlock(&sbi->s_alloc_mutex); return alloc_count; } @@ -761,7 +761,7 @@ static int udf_table_new_block(struct super_block * sb, else return newblock; - down(&sbi->s_alloc_sem); + mutex_lock(&sbi->s_alloc_mutex); if (goal < 0 || goal >= UDF_SB_PARTLEN(sb, partition)) goal = 0; @@ -811,7 +811,7 @@ static int udf_table_new_block(struct super_block * sb, if (spread == 0xFFFFFFFF) { udf_release_data(goal_bh); - up(&sbi->s_alloc_sem); + mutex_unlock(&sbi->s_alloc_mutex); return 0; } @@ -827,7 +827,7 @@ static int udf_table_new_block(struct super_block * sb, if (inode && DQUOT_ALLOC_BLOCK(inode, 1)) { udf_release_data(goal_bh); - up(&sbi->s_alloc_sem); + mutex_unlock(&sbi->s_alloc_mutex); *err = -EDQUOT; return 0; } @@ -846,7 +846,7 @@ static int udf_table_new_block(struct super_block * sb, } sb->s_dirt = 1; - up(&sbi->s_alloc_sem); + mutex_unlock(&sbi->s_alloc_mutex); *err = 0; return newblock; } diff --git a/fs/udf/ialloc.c b/fs/udf/ialloc.c index c9b707b470c..3873c672cb4 100644 --- a/fs/udf/ialloc.c +++ b/fs/udf/ialloc.c @@ -42,7 +42,7 @@ void udf_free_inode(struct inode * inode) clear_inode(inode); - down(&sbi->s_alloc_sem); + mutex_lock(&sbi->s_alloc_mutex); if (sbi->s_lvidbh) { if (S_ISDIR(inode->i_mode)) UDF_SB_LVIDIU(sb)->numDirs = @@ -53,7 +53,7 @@ void udf_free_inode(struct inode * inode) mark_buffer_dirty(sbi->s_lvidbh); } - up(&sbi->s_alloc_sem); + mutex_unlock(&sbi->s_alloc_mutex); udf_free_blocks(sb, NULL, UDF_I_LOCATION(inode), 0, 1); } @@ -83,7 +83,7 @@ struct inode * udf_new_inode (struct inode *dir, int mode, int * err) return NULL; } - down(&sbi->s_alloc_sem); + mutex_lock(&sbi->s_alloc_mutex); UDF_I_UNIQUE(inode) = 0; UDF_I_LENEXTENTS(inode) = 0; UDF_I_NEXT_ALLOC_BLOCK(inode) = 0; @@ -148,7 +148,7 @@ struct inode * udf_new_inode (struct inode *dir, int mode, int * err) 
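The minix and sysv hunks above, and the ufs ones further down, all drop their private inc_count()/dec_count() helpers in favour of the generic inode_inc_link_count()/inode_dec_link_count(). Judging by the code being replaced, the generic helpers amount to the following sketch (the real definitions live in the VFS headers and may differ in detail):

    static inline void inode_inc_link_count(struct inode *inode)
    {
            inode->i_nlink++;
            mark_inode_dirty(inode);
    }

    static inline void inode_dec_link_count(struct inode *inode)
    {
            inode->i_nlink--;
            mark_inode_dirty(inode);
    }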
UDF_I_CRTIME(inode) = current_fs_time(inode->i_sb); insert_inode_hash(inode); mark_inode_dirty(inode); - up(&sbi->s_alloc_sem); + mutex_unlock(&sbi->s_alloc_mutex); if (DQUOT_ALLOC_INODE(inode)) { diff --git a/fs/udf/super.c b/fs/udf/super.c index 368d8f81fe5..9303c50c5d5 100644 --- a/fs/udf/super.c +++ b/fs/udf/super.c @@ -1515,7 +1515,7 @@ static int udf_fill_super(struct super_block *sb, void *options, int silent) sb->s_fs_info = sbi; memset(UDF_SB(sb), 0x00, sizeof(struct udf_sb_info)); - init_MUTEX(&sbi->s_alloc_sem); + mutex_init(&sbi->s_alloc_mutex); if (!udf_parse_options((char *)options, &uopt)) goto error_out; diff --git a/fs/ufs/file.c b/fs/ufs/file.c index ed69d7fe1b5..62ad481810e 100644 --- a/fs/ufs/file.c +++ b/fs/ufs/file.c @@ -23,18 +23,8 @@ * ext2 fs regular file handling primitives */ -#include <asm/uaccess.h> -#include <asm/system.h> - -#include <linux/errno.h> #include <linux/fs.h> #include <linux/ufs_fs.h> -#include <linux/fcntl.h> -#include <linux/time.h> -#include <linux/stat.h> -#include <linux/mm.h> -#include <linux/pagemap.h> -#include <linux/smp_lock.h> /* * We have mostly NULL's here: the current defaults are ok for diff --git a/fs/ufs/namei.c b/fs/ufs/namei.c index 2958cde7d3d..8d5f98a01c7 100644 --- a/fs/ufs/namei.c +++ b/fs/ufs/namei.c @@ -43,18 +43,6 @@ #define UFSD(x) #endif -static inline void ufs_inc_count(struct inode *inode) -{ - inode->i_nlink++; - mark_inode_dirty(inode); -} - -static inline void ufs_dec_count(struct inode *inode) -{ - inode->i_nlink--; - mark_inode_dirty(inode); -} - static inline int ufs_add_nondir(struct dentry *dentry, struct inode *inode) { int err = ufs_add_link(dentry, inode); @@ -62,7 +50,7 @@ static inline int ufs_add_nondir(struct dentry *dentry, struct inode *inode) d_instantiate(dentry, inode); return 0; } - ufs_dec_count(inode); + inode_dec_link_count(inode); iput(inode); return err; } @@ -173,7 +161,7 @@ out: return err; out_fail: - ufs_dec_count(inode); + inode_dec_link_count(inode); iput(inode); goto out; } @@ -191,7 +179,7 @@ static int ufs_link (struct dentry * old_dentry, struct inode * dir, } inode->i_ctime = CURRENT_TIME_SEC; - ufs_inc_count(inode); + inode_inc_link_count(inode); atomic_inc(&inode->i_count); error = ufs_add_nondir(dentry, inode); @@ -208,7 +196,7 @@ static int ufs_mkdir(struct inode * dir, struct dentry * dentry, int mode) goto out; lock_kernel(); - ufs_inc_count(dir); + inode_inc_link_count(dir); inode = ufs_new_inode(dir, S_IFDIR|mode); err = PTR_ERR(inode); @@ -218,7 +206,7 @@ static int ufs_mkdir(struct inode * dir, struct dentry * dentry, int mode) inode->i_op = &ufs_dir_inode_operations; inode->i_fop = &ufs_dir_operations; - ufs_inc_count(inode); + inode_inc_link_count(inode); err = ufs_make_empty(inode, dir); if (err) @@ -234,11 +222,11 @@ out: return err; out_fail: - ufs_dec_count(inode); - ufs_dec_count(inode); + inode_dec_link_count(inode); + inode_dec_link_count(inode); iput (inode); out_dir: - ufs_dec_count(dir); + inode_dec_link_count(dir); unlock_kernel(); goto out; } @@ -260,7 +248,7 @@ static int ufs_unlink(struct inode * dir, struct dentry *dentry) goto out; inode->i_ctime = dir->i_ctime; - ufs_dec_count(inode); + inode_dec_link_count(inode); err = 0; out: unlock_kernel(); @@ -277,8 +265,8 @@ static int ufs_rmdir (struct inode * dir, struct dentry *dentry) err = ufs_unlink(dir, dentry); if (!err) { inode->i_size = 0; - ufs_dec_count(inode); - ufs_dec_count(dir); + inode_dec_link_count(inode); + inode_dec_link_count(dir); } } unlock_kernel(); @@ -319,35 +307,35 @@ static int 
ufs_rename (struct inode * old_dir, struct dentry * old_dentry, new_de = ufs_find_entry (new_dentry, &new_bh); if (!new_de) goto out_dir; - ufs_inc_count(old_inode); + inode_inc_link_count(old_inode); ufs_set_link(new_dir, new_de, new_bh, old_inode); new_inode->i_ctime = CURRENT_TIME_SEC; if (dir_de) new_inode->i_nlink--; - ufs_dec_count(new_inode); + inode_dec_link_count(new_inode); } else { if (dir_de) { err = -EMLINK; if (new_dir->i_nlink >= UFS_LINK_MAX) goto out_dir; } - ufs_inc_count(old_inode); + inode_inc_link_count(old_inode); err = ufs_add_link(new_dentry, old_inode); if (err) { - ufs_dec_count(old_inode); + inode_dec_link_count(old_inode); goto out_dir; } if (dir_de) - ufs_inc_count(new_dir); + inode_inc_link_count(new_dir); } ufs_delete_entry (old_dir, old_de, old_bh); - ufs_dec_count(old_inode); + inode_dec_link_count(old_inode); if (dir_de) { ufs_set_link(old_inode, dir_de, dir_bh, new_dir); - ufs_dec_count(old_dir); + inode_dec_link_count(old_dir); } unlock_kernel(); return 0; diff --git a/fs/xfs/Makefile-linux-2.6 b/fs/xfs/Makefile-linux-2.6 index 97bd4743b46..5d73eaa1971 100644 --- a/fs/xfs/Makefile-linux-2.6 +++ b/fs/xfs/Makefile-linux-2.6 @@ -1,33 +1,19 @@ # -# Copyright (c) 2000-2005 Silicon Graphics, Inc. All Rights Reserved. +# Copyright (c) 2000-2005 Silicon Graphics, Inc. +# All Rights Reserved. # -# This program is free software; you can redistribute it and/or modify it -# under the terms of version 2 of the GNU General Public License as +# This program is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public License as # published by the Free Software Foundation. # -# This program is distributed in the hope that it would be useful, but -# WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. +# This program is distributed in the hope that it would be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. # -# Further, this software is distributed without any warranty that it is -# free of the rightful claim of any third person regarding infringement -# or the like. Any license provided herein, whether implied or -# otherwise, applies only to this software file. Patent licenses, if -# any, provided herein do not apply to combinations of this program with -# other software, or any other product whatsoever. -# -# You should have received a copy of the GNU General Public License along -# with this program; if not, write the Free Software Foundation, Inc., 59 -# Temple Place - Suite 330, Boston MA 02111-1307, USA. 
-# -# Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, -# Mountain View, CA 94043, or: -# -# http://www.sgi.com -# -# For further information regarding this notice, see: -# -# http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ +# You should have received a copy of the GNU General Public License +# along with this program; if not, write the Free Software Foundation, +# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA # EXTRA_CFLAGS += -Ifs/xfs -Ifs/xfs/linux-2.6 -funsigned-char @@ -36,7 +22,7 @@ XFS_LINUX := linux-2.6 ifeq ($(CONFIG_XFS_DEBUG),y) EXTRA_CFLAGS += -g -DSTATIC="" -DDEBUG - EXTRA_CFLAGS += -DPAGEBUF_LOCK_TRACKING + EXTRA_CFLAGS += -DXFS_BUF_LOCK_TRACKING endif ifeq ($(CONFIG_XFS_TRACE),y) EXTRA_CFLAGS += -DXFS_ALLOC_TRACE @@ -50,7 +36,7 @@ ifeq ($(CONFIG_XFS_TRACE),y) EXTRA_CFLAGS += -DXFS_ILOCK_TRACE EXTRA_CFLAGS += -DXFS_LOG_TRACE EXTRA_CFLAGS += -DXFS_RW_TRACE - EXTRA_CFLAGS += -DPAGEBUF_TRACE + EXTRA_CFLAGS += -DXFS_BUF_TRACE EXTRA_CFLAGS += -DXFS_VNODE_TRACE endif diff --git a/fs/xfs/linux-2.6/kmem.h b/fs/xfs/linux-2.6/kmem.h index c64a29cdfff..f0268a84e6f 100644 --- a/fs/xfs/linux-2.6/kmem.h +++ b/fs/xfs/linux-2.6/kmem.h @@ -23,17 +23,8 @@ #include <linux/mm.h> /* - * memory management routines + * Process flags handling */ -#define KM_SLEEP 0x0001u -#define KM_NOSLEEP 0x0002u -#define KM_NOFS 0x0004u -#define KM_MAYFAIL 0x0008u - -#define kmem_zone kmem_cache -#define kmem_zone_t struct kmem_cache - -typedef unsigned long xfs_pflags_t; #define PFLAGS_TEST_NOIO() (current->flags & PF_NOIO) #define PFLAGS_TEST_FSTRANS() (current->flags & PF_FSTRANS) @@ -67,74 +58,102 @@ typedef unsigned long xfs_pflags_t; *(NSTATEP) = *(OSTATEP); \ } while (0) -static __inline gfp_t kmem_flags_convert(unsigned int __nocast flags) +/* + * General memory allocation interfaces + */ + +#define KM_SLEEP 0x0001u +#define KM_NOSLEEP 0x0002u +#define KM_NOFS 0x0004u +#define KM_MAYFAIL 0x0008u + +/* + * We use a special process flag to avoid recursive callbacks into + * the filesystem during transactions. We will also issue our own + * warnings, so we explicitly skip any generic ones (silly of us). 
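With the kmem_flags_convert() helper that follows, the XFS KM_* allocation flags map onto gfp_t as summarised in this usage sketch; the zone, sizes and variable names are illustrative only:

    /*
     *   KM_SLEEP                    -> GFP_KERNEL | __GFP_NOWARN
     *   KM_NOSLEEP                  -> GFP_ATOMIC | __GFP_NOWARN
     *   KM_NOFS, or PF_FSTRANS set  -> __GFP_FS cleared as well
     */
    static void km_flags_example(void)
    {
            kmem_zone_t *my_zone = kmem_zone_init(128, "my_zone");
            void *buf  = kmem_zalloc(PAGE_SIZE, KM_SLEEP);    /* may sleep, may recurse into the FS */
            void *meta = kmem_zone_zalloc(my_zone, KM_NOFS);  /* may sleep, never re-enters the FS  */
            void *hdr  = kmem_alloc(64, KM_NOSLEEP);          /* atomic attempt, may return NULL    */

            if (hdr)
                    kmem_free(hdr, 64);
            if (meta)
                    kmem_zone_free(my_zone, meta);
            if (buf)
                    kmem_free(buf, PAGE_SIZE);
            kmem_zone_destroy(my_zone);
    }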
+ */ +static inline gfp_t +kmem_flags_convert(unsigned int __nocast flags) { - gfp_t lflags = __GFP_NOWARN; /* we'll report problems, if need be */ + gfp_t lflags; -#ifdef DEBUG - if (unlikely(flags & ~(KM_SLEEP|KM_NOSLEEP|KM_NOFS|KM_MAYFAIL))) { - printk(KERN_WARNING - "XFS: memory allocation with wrong flags (%x)\n", flags); - BUG(); - } -#endif + BUG_ON(flags & ~(KM_SLEEP|KM_NOSLEEP|KM_NOFS|KM_MAYFAIL)); if (flags & KM_NOSLEEP) { - lflags |= GFP_ATOMIC; + lflags = GFP_ATOMIC | __GFP_NOWARN; } else { - lflags |= GFP_KERNEL; - - /* avoid recusive callbacks to filesystem during transactions */ + lflags = GFP_KERNEL | __GFP_NOWARN; if (PFLAGS_TEST_FSTRANS() || (flags & KM_NOFS)) lflags &= ~__GFP_FS; } - - return lflags; + return lflags; } -static __inline kmem_zone_t * +extern void *kmem_alloc(size_t, unsigned int __nocast); +extern void *kmem_realloc(void *, size_t, size_t, unsigned int __nocast); +extern void *kmem_zalloc(size_t, unsigned int __nocast); +extern void kmem_free(void *, size_t); + +/* + * Zone interfaces + */ + +#define KM_ZONE_HWALIGN SLAB_HWCACHE_ALIGN +#define KM_ZONE_RECLAIM SLAB_RECLAIM_ACCOUNT +#define KM_ZONE_SPREAD 0 + +#define kmem_zone kmem_cache +#define kmem_zone_t struct kmem_cache + +static inline kmem_zone_t * kmem_zone_init(int size, char *zone_name) { return kmem_cache_create(zone_name, size, 0, 0, NULL, NULL); } -static __inline void +static inline kmem_zone_t * +kmem_zone_init_flags(int size, char *zone_name, unsigned long flags, + void (*construct)(void *, kmem_zone_t *, unsigned long)) +{ + return kmem_cache_create(zone_name, size, 0, flags, construct, NULL); +} + +static inline void kmem_zone_free(kmem_zone_t *zone, void *ptr) { kmem_cache_free(zone, ptr); } -static __inline void +static inline void kmem_zone_destroy(kmem_zone_t *zone) { if (zone && kmem_cache_destroy(zone)) BUG(); } -extern void *kmem_zone_zalloc(kmem_zone_t *, unsigned int __nocast); extern void *kmem_zone_alloc(kmem_zone_t *, unsigned int __nocast); +extern void *kmem_zone_zalloc(kmem_zone_t *, unsigned int __nocast); -extern void *kmem_alloc(size_t, unsigned int __nocast); -extern void *kmem_realloc(void *, size_t, size_t, unsigned int __nocast); -extern void *kmem_zalloc(size_t, unsigned int __nocast); -extern void kmem_free(void *, size_t); +/* + * Low memory cache shrinkers + */ typedef struct shrinker *kmem_shaker_t; typedef int (*kmem_shake_func_t)(int, gfp_t); -static __inline kmem_shaker_t +static inline kmem_shaker_t kmem_shake_register(kmem_shake_func_t sfunc) { return set_shrinker(DEFAULT_SEEKS, sfunc); } -static __inline void +static inline void kmem_shake_deregister(kmem_shaker_t shrinker) { remove_shrinker(shrinker); } -static __inline int +static inline int kmem_shake_allow(gfp_t gfp_mask) { return (gfp_mask & __GFP_WAIT); diff --git a/fs/xfs/linux-2.6/xfs_aops.c b/fs/xfs/linux-2.6/xfs_aops.c index 74d8be87f98..97fc056130e 100644 --- a/fs/xfs/linux-2.6/xfs_aops.c +++ b/fs/xfs/linux-2.6/xfs_aops.c @@ -43,7 +43,29 @@ #include <linux/pagevec.h> #include <linux/writeback.h> -STATIC void xfs_count_page_state(struct page *, int *, int *, int *); +STATIC void +xfs_count_page_state( + struct page *page, + int *delalloc, + int *unmapped, + int *unwritten) +{ + struct buffer_head *bh, *head; + + *delalloc = *unmapped = *unwritten = 0; + + bh = head = page_buffers(page); + do { + if (buffer_uptodate(bh) && !buffer_mapped(bh)) + (*unmapped) = 1; + else if (buffer_unwritten(bh) && !buffer_delay(bh)) + clear_buffer_unwritten(bh); + else if (buffer_unwritten(bh)) + (*unwritten) = 1; 
+ else if (buffer_delay(bh)) + (*delalloc) = 1; + } while ((bh = bh->b_this_page) != head); +} #if defined(XFS_RW_TRACE) void @@ -54,7 +76,7 @@ xfs_page_trace( int mask) { xfs_inode_t *ip; - vnode_t *vp = LINVFS_GET_VP(inode); + vnode_t *vp = vn_from_inode(inode); loff_t isize = i_size_read(inode); loff_t offset = page_offset(page); int delalloc = -1, unmapped = -1, unwritten = -1; @@ -81,7 +103,7 @@ xfs_page_trace( (void *)((unsigned long)delalloc), (void *)((unsigned long)unmapped), (void *)((unsigned long)unwritten), - (void *)NULL, + (void *)((unsigned long)current_pid()), (void *)NULL); } #else @@ -192,7 +214,7 @@ xfs_alloc_ioend( ioend->io_uptodate = 1; /* cleared if any I/O fails */ ioend->io_list = NULL; ioend->io_type = type; - ioend->io_vnode = LINVFS_GET_VP(inode); + ioend->io_vnode = vn_from_inode(inode); ioend->io_buffer_head = NULL; ioend->io_buffer_tail = NULL; atomic_inc(&ioend->io_vnode->v_iocount); @@ -217,7 +239,7 @@ xfs_map_blocks( xfs_iomap_t *mapp, int flags) { - vnode_t *vp = LINVFS_GET_VP(inode); + vnode_t *vp = vn_from_inode(inode); int error, nmaps = 1; VOP_BMAP(vp, offset, count, flags, mapp, &nmaps, error); @@ -462,28 +484,37 @@ xfs_add_to_ioend( } STATIC void +xfs_map_buffer( + struct buffer_head *bh, + xfs_iomap_t *mp, + xfs_off_t offset, + uint block_bits) +{ + sector_t bn; + + ASSERT(mp->iomap_bn != IOMAP_DADDR_NULL); + + bn = (mp->iomap_bn >> (block_bits - BBSHIFT)) + + ((offset - mp->iomap_offset) >> block_bits); + + ASSERT(bn || (mp->iomap_flags & IOMAP_REALTIME)); + + bh->b_blocknr = bn; + set_buffer_mapped(bh); +} + +STATIC void xfs_map_at_offset( struct buffer_head *bh, loff_t offset, int block_bits, xfs_iomap_t *iomapp) { - xfs_daddr_t bn; - int sector_shift; - ASSERT(!(iomapp->iomap_flags & IOMAP_HOLE)); ASSERT(!(iomapp->iomap_flags & IOMAP_DELAY)); - ASSERT(iomapp->iomap_bn != IOMAP_DADDR_NULL); - - sector_shift = block_bits - BBSHIFT; - bn = (iomapp->iomap_bn >> sector_shift) + - ((offset - iomapp->iomap_offset) >> block_bits); - - ASSERT(bn || (iomapp->iomap_flags & IOMAP_REALTIME)); - ASSERT((bn << sector_shift) >= iomapp->iomap_bn); lock_buffer(bh); - bh->b_blocknr = bn; + xfs_map_buffer(bh, iomapp, offset, block_bits); bh->b_bdev = iomapp->iomap_target->bt_bdev; set_buffer_mapped(bh); clear_buffer_delay(bh); @@ -616,7 +647,7 @@ xfs_is_delayed_page( acceptable = (type == IOMAP_UNWRITTEN); else if (buffer_delay(bh)) acceptable = (type == IOMAP_DELAY); - else if (buffer_mapped(bh)) + else if (buffer_dirty(bh) && buffer_mapped(bh)) acceptable = (type == 0); else break; @@ -1040,8 +1071,159 @@ error: return err; } +/* + * writepage: Called from one of two places: + * + * 1. we are flushing a delalloc buffer head. + * + * 2. we are writing out a dirty page. Typically the page dirty + * state is cleared before we get here. In this case is it + * conceivable we have no buffer heads. + * + * For delalloc space on the page we need to allocate space and + * flush it. For unmapped buffer heads on the page we should + * allocate space if the page is uptodate. For any other dirty + * buffer heads on the page we should flush them. + * + * If we detect that a transaction would be required to flush + * the page, we have to check the process flags first, if we + * are already in a transaction or disk I/O during allocations + * is off, we need to fail the writepage and redirty the page. 
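A worked example of the conversion the new xfs_map_buffer() helper above performs, with illustrative numbers only: assume 4096-byte filesystem blocks (block_bits = 12) and 512-byte basic blocks (BBSHIFT = 9).

        /*
         * iomap_bn     = 8192     extent start on disk, in 512-byte basic blocks
         * iomap_offset = 1048576  file offset where the extent begins (1 MiB)
         * offset       = 1052672  file offset being mapped (1 MiB + 4 KiB)
         *
         * bn = (8192 >> (12 - 9)) + ((1052672 - 1048576) >> 12)
         *    = 1024 + 1
         *    = 1025               assigned to bh->b_blocknr, in 4 KiB filesystem blocks
         */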
+ */ + +STATIC int +xfs_vm_writepage( + struct page *page, + struct writeback_control *wbc) +{ + int error; + int need_trans; + int delalloc, unmapped, unwritten; + struct inode *inode = page->mapping->host; + + xfs_page_trace(XFS_WRITEPAGE_ENTER, inode, page, 0); + + /* + * We need a transaction if: + * 1. There are delalloc buffers on the page + * 2. The page is uptodate and we have unmapped buffers + * 3. The page is uptodate and we have no buffers + * 4. There are unwritten buffers on the page + */ + + if (!page_has_buffers(page)) { + unmapped = 1; + need_trans = 1; + } else { + xfs_count_page_state(page, &delalloc, &unmapped, &unwritten); + if (!PageUptodate(page)) + unmapped = 0; + need_trans = delalloc + unmapped + unwritten; + } + + /* + * If we need a transaction and the process flags say + * we are already in a transaction, or no IO is allowed + * then mark the page dirty again and leave the page + * as is. + */ + if (PFLAGS_TEST_FSTRANS() && need_trans) + goto out_fail; + + /* + * Delay hooking up buffer heads until we have + * made our go/no-go decision. + */ + if (!page_has_buffers(page)) + create_empty_buffers(page, 1 << inode->i_blkbits, 0); + + /* + * Convert delayed allocate, unwritten or unmapped space + * to real space and flush out to disk. + */ + error = xfs_page_state_convert(inode, page, wbc, 1, unmapped); + if (error == -EAGAIN) + goto out_fail; + if (unlikely(error < 0)) + goto out_unlock; + + return 0; + +out_fail: + redirty_page_for_writepage(wbc, page); + unlock_page(page); + return 0; +out_unlock: + unlock_page(page); + return error; +} + +/* + * Called to move a page into cleanable state - and from there + * to be released. Possibly the page is already clean. We always + * have buffer heads in this call. + * + * Returns 0 if the page is ok to release, 1 otherwise. + * + * Possible scenarios are: + * + * 1. We are being called to release a page which has been written + * to via regular I/O. buffer heads will be dirty and possibly + * delalloc. If no delalloc buffer heads in this case then we + * can just return zero. + * + * 2. We are called to release a page which has been written via + * mmap, all we need to do is ensure there is no delalloc + * state in the buffer heads, if not we can let the caller + * free them and we should come back later via writepage. + */ STATIC int -__linvfs_get_block( +xfs_vm_releasepage( + struct page *page, + gfp_t gfp_mask) +{ + struct inode *inode = page->mapping->host; + int dirty, delalloc, unmapped, unwritten; + struct writeback_control wbc = { + .sync_mode = WB_SYNC_ALL, + .nr_to_write = 1, + }; + + xfs_page_trace(XFS_RELEASEPAGE_ENTER, inode, page, gfp_mask); + + if (!page_has_buffers(page)) + return 0; + + xfs_count_page_state(page, &delalloc, &unmapped, &unwritten); + if (!delalloc && !unwritten) + goto free_buffers; + + if (!(gfp_mask & __GFP_FS)) + return 0; + + /* If we are already inside a transaction or the thread cannot + * do I/O, we cannot release this page. + */ + if (PFLAGS_TEST_FSTRANS()) + return 0; + + /* + * Convert delalloc space to real space, do not flush the + * data out to disk, that will be done by the caller. + * Never need to allocate space here - we will always + * come back to writepage in that case. 
+ */ + dirty = xfs_page_state_convert(inode, page, &wbc, 0, 0); + if (dirty == 0 && !unwritten) + goto free_buffers; + return 0; + +free_buffers: + return try_to_free_buffers(page); +} + +STATIC int +__xfs_get_block( struct inode *inode, sector_t iblock, unsigned long blocks, @@ -1050,7 +1232,7 @@ __linvfs_get_block( int direct, bmapi_flags_t flags) { - vnode_t *vp = LINVFS_GET_VP(inode); + vnode_t *vp = vn_from_inode(inode); xfs_iomap_t iomap; xfs_off_t offset; ssize_t size; @@ -1073,21 +1255,13 @@ __linvfs_get_block( return 0; if (iomap.iomap_bn != IOMAP_DADDR_NULL) { - xfs_daddr_t bn; - xfs_off_t delta; - - /* For unwritten extents do not report a disk address on + /* + * For unwritten extents do not report a disk address on * the read case (treat as if we're reading into a hole). */ if (create || !(iomap.iomap_flags & IOMAP_UNWRITTEN)) { - delta = offset - iomap.iomap_offset; - delta >>= inode->i_blkbits; - - bn = iomap.iomap_bn >> (inode->i_blkbits - BBSHIFT); - bn += delta; - BUG_ON(!bn && !(iomap.iomap_flags & IOMAP_REALTIME)); - bh_result->b_blocknr = bn; - set_buffer_mapped(bh_result); + xfs_map_buffer(bh_result, &iomap, offset, + inode->i_blkbits); } if (create && (iomap.iomap_flags & IOMAP_UNWRITTEN)) { if (direct) @@ -1130,30 +1304,30 @@ __linvfs_get_block( } int -linvfs_get_block( +xfs_get_block( struct inode *inode, sector_t iblock, struct buffer_head *bh_result, int create) { - return __linvfs_get_block(inode, iblock, 0, bh_result, + return __xfs_get_block(inode, iblock, 0, bh_result, create, 0, BMAPI_WRITE); } STATIC int -linvfs_get_blocks_direct( +xfs_get_blocks_direct( struct inode *inode, sector_t iblock, unsigned long max_blocks, struct buffer_head *bh_result, int create) { - return __linvfs_get_block(inode, iblock, max_blocks, bh_result, + return __xfs_get_block(inode, iblock, max_blocks, bh_result, create, 1, BMAPI_WRITE|BMAPI_DIRECT); } STATIC void -linvfs_end_io_direct( +xfs_end_io_direct( struct kiocb *iocb, loff_t offset, ssize_t size, @@ -1191,7 +1365,7 @@ linvfs_end_io_direct( } STATIC ssize_t -linvfs_direct_IO( +xfs_vm_direct_IO( int rw, struct kiocb *iocb, const struct iovec *iov, @@ -1200,7 +1374,7 @@ linvfs_direct_IO( { struct file *file = iocb->ki_filp; struct inode *inode = file->f_mapping->host; - vnode_t *vp = LINVFS_GET_VP(inode); + vnode_t *vp = vn_from_inode(inode); xfs_iomap_t iomap; int maps = 1; int error; @@ -1215,164 +1389,61 @@ linvfs_direct_IO( ret = blockdev_direct_IO_own_locking(rw, iocb, inode, iomap.iomap_target->bt_bdev, iov, offset, nr_segs, - linvfs_get_blocks_direct, - linvfs_end_io_direct); + xfs_get_blocks_direct, + xfs_end_io_direct); if (unlikely(ret <= 0 && iocb->private)) xfs_destroy_ioend(iocb->private); return ret; } +STATIC int +xfs_vm_prepare_write( + struct file *file, + struct page *page, + unsigned int from, + unsigned int to) +{ + return block_prepare_write(page, from, to, xfs_get_block); +} STATIC sector_t -linvfs_bmap( +xfs_vm_bmap( struct address_space *mapping, sector_t block) { struct inode *inode = (struct inode *)mapping->host; - vnode_t *vp = LINVFS_GET_VP(inode); + vnode_t *vp = vn_from_inode(inode); int error; - vn_trace_entry(vp, "linvfs_bmap", (inst_t *)__return_address); + vn_trace_entry(vp, __FUNCTION__, (inst_t *)__return_address); VOP_RWLOCK(vp, VRWLOCK_READ); VOP_FLUSH_PAGES(vp, (xfs_off_t)0, -1, 0, FI_REMAPF, error); VOP_RWUNLOCK(vp, VRWLOCK_READ); - return generic_block_bmap(mapping, block, linvfs_get_block); + return generic_block_bmap(mapping, block, xfs_get_block); } STATIC int -linvfs_readpage( 
+xfs_vm_readpage( struct file *unused, struct page *page) { - return mpage_readpage(page, linvfs_get_block); + return mpage_readpage(page, xfs_get_block); } STATIC int -linvfs_readpages( +xfs_vm_readpages( struct file *unused, struct address_space *mapping, struct list_head *pages, unsigned nr_pages) { - return mpage_readpages(mapping, pages, nr_pages, linvfs_get_block); -} - -STATIC void -xfs_count_page_state( - struct page *page, - int *delalloc, - int *unmapped, - int *unwritten) -{ - struct buffer_head *bh, *head; - - *delalloc = *unmapped = *unwritten = 0; - - bh = head = page_buffers(page); - do { - if (buffer_uptodate(bh) && !buffer_mapped(bh)) - (*unmapped) = 1; - else if (buffer_unwritten(bh) && !buffer_delay(bh)) - clear_buffer_unwritten(bh); - else if (buffer_unwritten(bh)) - (*unwritten) = 1; - else if (buffer_delay(bh)) - (*delalloc) = 1; - } while ((bh = bh->b_this_page) != head); + return mpage_readpages(mapping, pages, nr_pages, xfs_get_block); } - -/* - * writepage: Called from one of two places: - * - * 1. we are flushing a delalloc buffer head. - * - * 2. we are writing out a dirty page. Typically the page dirty - * state is cleared before we get here. In this case is it - * conceivable we have no buffer heads. - * - * For delalloc space on the page we need to allocate space and - * flush it. For unmapped buffer heads on the page we should - * allocate space if the page is uptodate. For any other dirty - * buffer heads on the page we should flush them. - * - * If we detect that a transaction would be required to flush - * the page, we have to check the process flags first, if we - * are already in a transaction or disk I/O during allocations - * is off, we need to fail the writepage and redirty the page. - */ - STATIC int -linvfs_writepage( - struct page *page, - struct writeback_control *wbc) -{ - int error; - int need_trans; - int delalloc, unmapped, unwritten; - struct inode *inode = page->mapping->host; - - xfs_page_trace(XFS_WRITEPAGE_ENTER, inode, page, 0); - - /* - * We need a transaction if: - * 1. There are delalloc buffers on the page - * 2. The page is uptodate and we have unmapped buffers - * 3. The page is uptodate and we have no buffers - * 4. There are unwritten buffers on the page - */ - - if (!page_has_buffers(page)) { - unmapped = 1; - need_trans = 1; - } else { - xfs_count_page_state(page, &delalloc, &unmapped, &unwritten); - if (!PageUptodate(page)) - unmapped = 0; - need_trans = delalloc + unmapped + unwritten; - } - - /* - * If we need a transaction and the process flags say - * we are already in a transaction, or no IO is allowed - * then mark the page dirty again and leave the page - * as is. - */ - if (PFLAGS_TEST_FSTRANS() && need_trans) - goto out_fail; - - /* - * Delay hooking up buffer heads until we have - * made our go/no-go decision. - */ - if (!page_has_buffers(page)) - create_empty_buffers(page, 1 << inode->i_blkbits, 0); - - /* - * Convert delayed allocate, unwritten or unmapped space - * to real space and flush out to disk. 
- */ - error = xfs_page_state_convert(inode, page, wbc, 1, unmapped); - if (error == -EAGAIN) - goto out_fail; - if (unlikely(error < 0)) - goto out_unlock; - - return 0; - -out_fail: - redirty_page_for_writepage(wbc, page); - unlock_page(page); - return 0; -out_unlock: - unlock_page(page); - return error; -} - -STATIC int -linvfs_invalidate_page( +xfs_vm_invalidatepage( struct page *page, unsigned long offset) { @@ -1381,87 +1452,16 @@ linvfs_invalidate_page( return block_invalidatepage(page, offset); } -/* - * Called to move a page into cleanable state - and from there - * to be released. Possibly the page is already clean. We always - * have buffer heads in this call. - * - * Returns 0 if the page is ok to release, 1 otherwise. - * - * Possible scenarios are: - * - * 1. We are being called to release a page which has been written - * to via regular I/O. buffer heads will be dirty and possibly - * delalloc. If no delalloc buffer heads in this case then we - * can just return zero. - * - * 2. We are called to release a page which has been written via - * mmap, all we need to do is ensure there is no delalloc - * state in the buffer heads, if not we can let the caller - * free them and we should come back later via writepage. - */ -STATIC int -linvfs_release_page( - struct page *page, - gfp_t gfp_mask) -{ - struct inode *inode = page->mapping->host; - int dirty, delalloc, unmapped, unwritten; - struct writeback_control wbc = { - .sync_mode = WB_SYNC_ALL, - .nr_to_write = 1, - }; - - xfs_page_trace(XFS_RELEASEPAGE_ENTER, inode, page, gfp_mask); - - xfs_count_page_state(page, &delalloc, &unmapped, &unwritten); - if (!delalloc && !unwritten) - goto free_buffers; - - if (!(gfp_mask & __GFP_FS)) - return 0; - - /* If we are already inside a transaction or the thread cannot - * do I/O, we cannot release this page. - */ - if (PFLAGS_TEST_FSTRANS()) - return 0; - - /* - * Convert delalloc space to real space, do not flush the - * data out to disk, that will be done by the caller. - * Never need to allocate space here - we will always - * come back to writepage in that case. 
- */ - dirty = xfs_page_state_convert(inode, page, &wbc, 0, 0); - if (dirty == 0 && !unwritten) - goto free_buffers; - return 0; - -free_buffers: - return try_to_free_buffers(page); -} - -STATIC int -linvfs_prepare_write( - struct file *file, - struct page *page, - unsigned int from, - unsigned int to) -{ - return block_prepare_write(page, from, to, linvfs_get_block); -} - -struct address_space_operations linvfs_aops = { - .readpage = linvfs_readpage, - .readpages = linvfs_readpages, - .writepage = linvfs_writepage, +struct address_space_operations xfs_address_space_operations = { + .readpage = xfs_vm_readpage, + .readpages = xfs_vm_readpages, + .writepage = xfs_vm_writepage, .sync_page = block_sync_page, - .releasepage = linvfs_release_page, - .invalidatepage = linvfs_invalidate_page, - .prepare_write = linvfs_prepare_write, + .releasepage = xfs_vm_releasepage, + .invalidatepage = xfs_vm_invalidatepage, + .prepare_write = xfs_vm_prepare_write, .commit_write = generic_commit_write, - .bmap = linvfs_bmap, - .direct_IO = linvfs_direct_IO, + .bmap = xfs_vm_bmap, + .direct_IO = xfs_vm_direct_IO, .migratepage = buffer_migrate_page, }; diff --git a/fs/xfs/linux-2.6/xfs_aops.h b/fs/xfs/linux-2.6/xfs_aops.h index 55339dd5a30..795699f121d 100644 --- a/fs/xfs/linux-2.6/xfs_aops.h +++ b/fs/xfs/linux-2.6/xfs_aops.h @@ -40,7 +40,7 @@ typedef struct xfs_ioend { struct work_struct io_work; /* xfsdatad work queue */ } xfs_ioend_t; -extern struct address_space_operations linvfs_aops; -extern int linvfs_get_block(struct inode *, sector_t, struct buffer_head *, int); +extern struct address_space_operations xfs_address_space_operations; +extern int xfs_get_block(struct inode *, sector_t, struct buffer_head *, int); #endif /* __XFS_IOPS_H__ */ diff --git a/fs/xfs/linux-2.6/xfs_buf.c b/fs/xfs/linux-2.6/xfs_buf.c index 8cdfa415165..9fb0312665c 100644 --- a/fs/xfs/linux-2.6/xfs_buf.c +++ b/fs/xfs/linux-2.6/xfs_buf.c @@ -1806,13 +1806,12 @@ xfs_flush_buftarg( int __init xfs_buf_init(void) { - int error = -ENOMEM; - #ifdef XFS_BUF_TRACE xfs_buf_trace_buf = ktrace_alloc(XFS_BUF_TRACE_SIZE, KM_SLEEP); #endif - xfs_buf_zone = kmem_zone_init(sizeof(xfs_buf_t), "xfs_buf"); + xfs_buf_zone = kmem_zone_init_flags(sizeof(xfs_buf_t), "xfs_buf", + KM_ZONE_HWALIGN, NULL); if (!xfs_buf_zone) goto out_free_trace_buf; @@ -1840,7 +1839,7 @@ xfs_buf_init(void) #ifdef XFS_BUF_TRACE ktrace_free(xfs_buf_trace_buf); #endif - return error; + return -ENOMEM; } void diff --git a/fs/xfs/linux-2.6/xfs_export.c b/fs/xfs/linux-2.6/xfs_export.c index 80eb249f2fa..b768ea910bb 100644 --- a/fs/xfs/linux-2.6/xfs_export.c +++ b/fs/xfs/linux-2.6/xfs_export.c @@ -25,6 +25,8 @@ #include "xfs_mount.h" #include "xfs_export.h" +STATIC struct dentry dotdot = { .d_name.name = "..", .d_name.len = 2, }; + /* * XFS encodes and decodes the fileid portion of NFS filehandles * itself instead of letting the generic NFS code do it. 
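For context, a rough sketch of how the generic 2.6 buffered-write path drives the prepare_write/commit_write hooks kept in the xfs_address_space_operations table above; this paraphrases generic_file_buffered_write(), the helper name is invented, and it is illustrative only, not part of the patch.

        /* Hypothetical per-page step, heavily simplified. */
        static int
        example_write_page(struct file *file, struct page *page,
                           unsigned offset, unsigned bytes)
        {
                struct address_space_operations *a_ops = page->mapping->a_ops;
                int status;

                status = a_ops->prepare_write(file, page, offset, offset + bytes);
                if (status)
                        return status;
                /* ... copy 'bytes' of user data into the page here ... */
                return a_ops->commit_write(file, page, offset, offset + bytes);
        }

For XFS this means block_prepare_write() maps the range through xfs_get_block(), generic_commit_write() dirties the copied buffers, and the dirty pages are later pushed to disk via xfs_vm_writepage().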
This @@ -37,7 +39,7 @@ */ STATIC struct dentry * -linvfs_decode_fh( +xfs_fs_decode_fh( struct super_block *sb, __u32 *fh, int fh_len, @@ -78,12 +80,12 @@ linvfs_decode_fh( } fh = (__u32 *)&ifid; - return find_exported_dentry(sb, fh, parent, acceptable, context); + return sb->s_export_op->find_exported_dentry(sb, fh, parent, acceptable, context); } STATIC int -linvfs_encode_fh( +xfs_fs_encode_fh( struct dentry *dentry, __u32 *fh, int *max_len, @@ -95,7 +97,7 @@ linvfs_encode_fh( int len; int is64 = 0; #if XFS_BIG_INUMS - vfs_t *vfs = LINVFS_GET_VFS(inode->i_sb); + vfs_t *vfs = vfs_from_sb(inode->i_sb); if (!(vfs->vfs_flag & VFS_32BITINODES)) { /* filesystem may contain 64bit inode numbers */ @@ -130,21 +132,21 @@ linvfs_encode_fh( } STATIC struct dentry * -linvfs_get_dentry( +xfs_fs_get_dentry( struct super_block *sb, void *data) { vnode_t *vp; struct inode *inode; struct dentry *result; - vfs_t *vfsp = LINVFS_GET_VFS(sb); + vfs_t *vfsp = vfs_from_sb(sb); int error; VFS_VGET(vfsp, &vp, (fid_t *)data, error); if (error || vp == NULL) return ERR_PTR(-ESTALE) ; - inode = LINVFS_GET_IP(vp); + inode = vn_to_inode(vp); result = d_alloc_anon(inode); if (!result) { iput(inode); @@ -154,25 +156,20 @@ linvfs_get_dentry( } STATIC struct dentry * -linvfs_get_parent( +xfs_fs_get_parent( struct dentry *child) { int error; vnode_t *vp, *cvp; struct dentry *parent; - struct dentry dotdot; - - dotdot.d_name.name = ".."; - dotdot.d_name.len = 2; - dotdot.d_inode = NULL; cvp = NULL; - vp = LINVFS_GET_VP(child->d_inode); + vp = vn_from_inode(child->d_inode); VOP_LOOKUP(vp, &dotdot, &cvp, 0, NULL, NULL, error); if (unlikely(error)) return ERR_PTR(-error); - parent = d_alloc_anon(LINVFS_GET_IP(cvp)); + parent = d_alloc_anon(vn_to_inode(cvp)); if (unlikely(!parent)) { VN_RELE(cvp); return ERR_PTR(-ENOMEM); @@ -180,9 +177,9 @@ linvfs_get_parent( return parent; } -struct export_operations linvfs_export_ops = { - .decode_fh = linvfs_decode_fh, - .encode_fh = linvfs_encode_fh, - .get_parent = linvfs_get_parent, - .get_dentry = linvfs_get_dentry, +struct export_operations xfs_export_operations = { + .decode_fh = xfs_fs_decode_fh, + .encode_fh = xfs_fs_encode_fh, + .get_parent = xfs_fs_get_parent, + .get_dentry = xfs_fs_get_dentry, }; diff --git a/fs/xfs/linux-2.6/xfs_file.c b/fs/xfs/linux-2.6/xfs_file.c index ced4404339c..185567a6a56 100644 --- a/fs/xfs/linux-2.6/xfs_file.c +++ b/fs/xfs/linux-2.6/xfs_file.c @@ -43,13 +43,13 @@ #include <linux/dcache.h> #include <linux/smp_lock.h> -static struct vm_operations_struct linvfs_file_vm_ops; +static struct vm_operations_struct xfs_file_vm_ops; #ifdef CONFIG_XFS_DMAPI -static struct vm_operations_struct linvfs_dmapi_file_vm_ops; +static struct vm_operations_struct xfs_dmapi_file_vm_ops; #endif STATIC inline ssize_t -__linvfs_read( +__xfs_file_read( struct kiocb *iocb, char __user *buf, int ioflags, @@ -58,7 +58,7 @@ __linvfs_read( { struct iovec iov = {buf, count}; struct file *file = iocb->ki_filp; - vnode_t *vp = LINVFS_GET_VP(file->f_dentry->d_inode); + vnode_t *vp = vn_from_inode(file->f_dentry->d_inode); ssize_t rval; BUG_ON(iocb->ki_pos != pos); @@ -71,28 +71,28 @@ __linvfs_read( STATIC ssize_t -linvfs_aio_read( +xfs_file_aio_read( struct kiocb *iocb, char __user *buf, size_t count, loff_t pos) { - return __linvfs_read(iocb, buf, IO_ISAIO, count, pos); + return __xfs_file_read(iocb, buf, IO_ISAIO, count, pos); } STATIC ssize_t -linvfs_aio_read_invis( +xfs_file_aio_read_invis( struct kiocb *iocb, char __user *buf, size_t count, loff_t pos) { - return __linvfs_read(iocb, 
buf, IO_ISAIO|IO_INVIS, count, pos); + return __xfs_file_read(iocb, buf, IO_ISAIO|IO_INVIS, count, pos); } STATIC inline ssize_t -__linvfs_write( +__xfs_file_write( struct kiocb *iocb, const char __user *buf, int ioflags, @@ -102,7 +102,7 @@ __linvfs_write( struct iovec iov = {(void __user *)buf, count}; struct file *file = iocb->ki_filp; struct inode *inode = file->f_mapping->host; - vnode_t *vp = LINVFS_GET_VP(inode); + vnode_t *vp = vn_from_inode(inode); ssize_t rval; BUG_ON(iocb->ki_pos != pos); @@ -115,28 +115,28 @@ __linvfs_write( STATIC ssize_t -linvfs_aio_write( +xfs_file_aio_write( struct kiocb *iocb, const char __user *buf, size_t count, loff_t pos) { - return __linvfs_write(iocb, buf, IO_ISAIO, count, pos); + return __xfs_file_write(iocb, buf, IO_ISAIO, count, pos); } STATIC ssize_t -linvfs_aio_write_invis( +xfs_file_aio_write_invis( struct kiocb *iocb, const char __user *buf, size_t count, loff_t pos) { - return __linvfs_write(iocb, buf, IO_ISAIO|IO_INVIS, count, pos); + return __xfs_file_write(iocb, buf, IO_ISAIO|IO_INVIS, count, pos); } STATIC inline ssize_t -__linvfs_readv( +__xfs_file_readv( struct file *file, const struct iovec *iov, int ioflags, @@ -144,8 +144,8 @@ __linvfs_readv( loff_t *ppos) { struct inode *inode = file->f_mapping->host; - vnode_t *vp = LINVFS_GET_VP(inode); - struct kiocb kiocb; + vnode_t *vp = vn_from_inode(inode); + struct kiocb kiocb; ssize_t rval; init_sync_kiocb(&kiocb, file); @@ -160,28 +160,28 @@ __linvfs_readv( } STATIC ssize_t -linvfs_readv( +xfs_file_readv( struct file *file, const struct iovec *iov, unsigned long nr_segs, loff_t *ppos) { - return __linvfs_readv(file, iov, 0, nr_segs, ppos); + return __xfs_file_readv(file, iov, 0, nr_segs, ppos); } STATIC ssize_t -linvfs_readv_invis( +xfs_file_readv_invis( struct file *file, const struct iovec *iov, unsigned long nr_segs, loff_t *ppos) { - return __linvfs_readv(file, iov, IO_INVIS, nr_segs, ppos); + return __xfs_file_readv(file, iov, IO_INVIS, nr_segs, ppos); } STATIC inline ssize_t -__linvfs_writev( +__xfs_file_writev( struct file *file, const struct iovec *iov, int ioflags, @@ -189,8 +189,8 @@ __linvfs_writev( loff_t *ppos) { struct inode *inode = file->f_mapping->host; - vnode_t *vp = LINVFS_GET_VP(inode); - struct kiocb kiocb; + vnode_t *vp = vn_from_inode(inode); + struct kiocb kiocb; ssize_t rval; init_sync_kiocb(&kiocb, file); @@ -206,34 +206,34 @@ __linvfs_writev( STATIC ssize_t -linvfs_writev( +xfs_file_writev( struct file *file, const struct iovec *iov, unsigned long nr_segs, loff_t *ppos) { - return __linvfs_writev(file, iov, 0, nr_segs, ppos); + return __xfs_file_writev(file, iov, 0, nr_segs, ppos); } STATIC ssize_t -linvfs_writev_invis( +xfs_file_writev_invis( struct file *file, const struct iovec *iov, unsigned long nr_segs, loff_t *ppos) { - return __linvfs_writev(file, iov, IO_INVIS, nr_segs, ppos); + return __xfs_file_writev(file, iov, IO_INVIS, nr_segs, ppos); } STATIC ssize_t -linvfs_sendfile( +xfs_file_sendfile( struct file *filp, loff_t *ppos, size_t count, read_actor_t actor, void *target) { - vnode_t *vp = LINVFS_GET_VP(filp->f_dentry->d_inode); + vnode_t *vp = vn_from_inode(filp->f_dentry->d_inode); ssize_t rval; VOP_SENDFILE(vp, filp, ppos, 0, count, actor, target, NULL, rval); @@ -242,11 +242,11 @@ linvfs_sendfile( STATIC int -linvfs_open( +xfs_file_open( struct inode *inode, struct file *filp) { - vnode_t *vp = LINVFS_GET_VP(inode); + vnode_t *vp = vn_from_inode(inode); int error; if (!(filp->f_flags & O_LARGEFILE) && i_size_read(inode) > MAX_NON_LFS) @@ -259,11 
+259,11 @@ linvfs_open( STATIC int -linvfs_release( +xfs_file_release( struct inode *inode, struct file *filp) { - vnode_t *vp = LINVFS_GET_VP(inode); + vnode_t *vp = vn_from_inode(inode); int error = 0; if (vp) @@ -273,13 +273,13 @@ linvfs_release( STATIC int -linvfs_fsync( +xfs_file_fsync( struct file *filp, struct dentry *dentry, int datasync) { struct inode *inode = dentry->d_inode; - vnode_t *vp = LINVFS_GET_VP(inode); + vnode_t *vp = vn_from_inode(inode); int error; int flags = FSYNC_WAIT; @@ -292,7 +292,7 @@ linvfs_fsync( } /* - * linvfs_readdir maps to VOP_READDIR(). + * xfs_file_readdir maps to VOP_READDIR(). * We need to build a uio, cred, ... */ @@ -301,13 +301,13 @@ linvfs_fsync( #ifdef CONFIG_XFS_DMAPI STATIC struct page * -linvfs_filemap_nopage( +xfs_vm_nopage( struct vm_area_struct *area, unsigned long address, int *type) { struct inode *inode = area->vm_file->f_dentry->d_inode; - vnode_t *vp = LINVFS_GET_VP(inode); + vnode_t *vp = vn_from_inode(inode); xfs_mount_t *mp = XFS_VFSTOM(vp->v_vfsp); int error; @@ -324,7 +324,7 @@ linvfs_filemap_nopage( STATIC int -linvfs_readdir( +xfs_file_readdir( struct file *filp, void *dirent, filldir_t filldir) @@ -340,7 +340,7 @@ linvfs_readdir( xfs_off_t start_offset, curr_offset; xfs_dirent_t *dbp = NULL; - vp = LINVFS_GET_VP(filp->f_dentry->d_inode); + vp = vn_from_inode(filp->f_dentry->d_inode); ASSERT(vp); /* Try fairly hard to get memory */ @@ -404,39 +404,40 @@ done: STATIC int -linvfs_file_mmap( +xfs_file_mmap( struct file *filp, struct vm_area_struct *vma) { struct inode *ip = filp->f_dentry->d_inode; - vnode_t *vp = LINVFS_GET_VP(ip); - vattr_t va = { .va_mask = XFS_AT_UPDATIME }; + vnode_t *vp = vn_from_inode(ip); + vattr_t vattr; int error; - vma->vm_ops = &linvfs_file_vm_ops; + vma->vm_ops = &xfs_file_vm_ops; #ifdef CONFIG_XFS_DMAPI if (vp->v_vfsp->vfs_flag & VFS_DMI) { - vma->vm_ops = &linvfs_dmapi_file_vm_ops; + vma->vm_ops = &xfs_dmapi_file_vm_ops; } #endif /* CONFIG_XFS_DMAPI */ - VOP_SETATTR(vp, &va, XFS_AT_UPDATIME, NULL, error); - if (!error) - vn_revalidate(vp); /* update Linux inode flags */ + vattr.va_mask = XFS_AT_UPDATIME; + VOP_SETATTR(vp, &vattr, XFS_AT_UPDATIME, NULL, error); + if (likely(!error)) + __vn_revalidate(vp, &vattr); /* update flags */ return 0; } STATIC long -linvfs_ioctl( +xfs_file_ioctl( struct file *filp, unsigned int cmd, unsigned long arg) { int error; - struct inode *inode = filp->f_dentry->d_inode; - vnode_t *vp = LINVFS_GET_VP(inode); + struct inode *inode = filp->f_dentry->d_inode; + vnode_t *vp = vn_from_inode(inode); VOP_IOCTL(vp, inode, filp, 0, cmd, (void __user *)arg, error); VMODIFY(vp); @@ -451,14 +452,14 @@ linvfs_ioctl( } STATIC long -linvfs_ioctl_invis( +xfs_file_ioctl_invis( struct file *filp, unsigned int cmd, unsigned long arg) { int error; - struct inode *inode = filp->f_dentry->d_inode; - vnode_t *vp = LINVFS_GET_VP(inode); + struct inode *inode = filp->f_dentry->d_inode; + vnode_t *vp = vn_from_inode(inode); ASSERT(vp); VOP_IOCTL(vp, inode, filp, IO_INVIS, cmd, (void __user *)arg, error); @@ -476,11 +477,11 @@ linvfs_ioctl_invis( #ifdef CONFIG_XFS_DMAPI #ifdef HAVE_VMOP_MPROTECT STATIC int -linvfs_mprotect( +xfs_vm_mprotect( struct vm_area_struct *vma, unsigned int newflags) { - vnode_t *vp = LINVFS_GET_VP(vma->vm_file->f_dentry->d_inode); + vnode_t *vp = vn_from_inode(vma->vm_file->f_dentry->d_inode); int error = 0; if (vp->v_vfsp->vfs_flag & VFS_DMI) { @@ -503,10 +504,10 @@ linvfs_mprotect( * it back online. 
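A brief note on the calling convention visible throughout these entry points, shown as a hypothetical minimal example (example_touch_atime is invented; the other names are from the patch): the VOP_* wrappers hand their status back through the final argument rather than as a return value, and XFS keeps errno values positive internally, so the Linux-facing functions return the negated error.

        STATIC int
        example_touch_atime(struct inode *inode)
        {
                vnode_t         *vp = vn_from_inode(inode);
                vattr_t         vattr = { 0 };
                int             error;

                vattr.va_mask = XFS_AT_UPDATIME;
                VOP_SETATTR(vp, &vattr, XFS_AT_UPDATIME, NULL, error); /* macro assigns 'error' */
                if (likely(!error))
                        __vn_revalidate(vp, &vattr);    /* refresh Linux inode from the vattr */
                return -error;          /* positive XFS errno becomes a negative VFS errno */
        }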
*/ STATIC int -linvfs_open_exec( +xfs_file_open_exec( struct inode *inode) { - vnode_t *vp = LINVFS_GET_VP(inode); + vnode_t *vp = vn_from_inode(inode); xfs_mount_t *mp = XFS_VFSTOM(vp->v_vfsp); int error = 0; xfs_inode_t *ip; @@ -527,69 +528,69 @@ open_exec_out: } #endif /* HAVE_FOP_OPEN_EXEC */ -struct file_operations linvfs_file_operations = { +struct file_operations xfs_file_operations = { .llseek = generic_file_llseek, .read = do_sync_read, .write = do_sync_write, - .readv = linvfs_readv, - .writev = linvfs_writev, - .aio_read = linvfs_aio_read, - .aio_write = linvfs_aio_write, - .sendfile = linvfs_sendfile, - .unlocked_ioctl = linvfs_ioctl, + .readv = xfs_file_readv, + .writev = xfs_file_writev, + .aio_read = xfs_file_aio_read, + .aio_write = xfs_file_aio_write, + .sendfile = xfs_file_sendfile, + .unlocked_ioctl = xfs_file_ioctl, #ifdef CONFIG_COMPAT - .compat_ioctl = linvfs_compat_ioctl, + .compat_ioctl = xfs_file_compat_ioctl, #endif - .mmap = linvfs_file_mmap, - .open = linvfs_open, - .release = linvfs_release, - .fsync = linvfs_fsync, + .mmap = xfs_file_mmap, + .open = xfs_file_open, + .release = xfs_file_release, + .fsync = xfs_file_fsync, #ifdef HAVE_FOP_OPEN_EXEC - .open_exec = linvfs_open_exec, + .open_exec = xfs_file_open_exec, #endif }; -struct file_operations linvfs_invis_file_operations = { +struct file_operations xfs_invis_file_operations = { .llseek = generic_file_llseek, .read = do_sync_read, .write = do_sync_write, - .readv = linvfs_readv_invis, - .writev = linvfs_writev_invis, - .aio_read = linvfs_aio_read_invis, - .aio_write = linvfs_aio_write_invis, - .sendfile = linvfs_sendfile, - .unlocked_ioctl = linvfs_ioctl_invis, + .readv = xfs_file_readv_invis, + .writev = xfs_file_writev_invis, + .aio_read = xfs_file_aio_read_invis, + .aio_write = xfs_file_aio_write_invis, + .sendfile = xfs_file_sendfile, + .unlocked_ioctl = xfs_file_ioctl_invis, #ifdef CONFIG_COMPAT - .compat_ioctl = linvfs_compat_invis_ioctl, + .compat_ioctl = xfs_file_compat_invis_ioctl, #endif - .mmap = linvfs_file_mmap, - .open = linvfs_open, - .release = linvfs_release, - .fsync = linvfs_fsync, + .mmap = xfs_file_mmap, + .open = xfs_file_open, + .release = xfs_file_release, + .fsync = xfs_file_fsync, }; -struct file_operations linvfs_dir_operations = { +struct file_operations xfs_dir_file_operations = { .read = generic_read_dir, - .readdir = linvfs_readdir, - .unlocked_ioctl = linvfs_ioctl, + .readdir = xfs_file_readdir, + .unlocked_ioctl = xfs_file_ioctl, #ifdef CONFIG_COMPAT - .compat_ioctl = linvfs_compat_ioctl, + .compat_ioctl = xfs_file_compat_ioctl, #endif - .fsync = linvfs_fsync, + .fsync = xfs_file_fsync, }; -static struct vm_operations_struct linvfs_file_vm_ops = { +static struct vm_operations_struct xfs_file_vm_ops = { .nopage = filemap_nopage, .populate = filemap_populate, }; #ifdef CONFIG_XFS_DMAPI -static struct vm_operations_struct linvfs_dmapi_file_vm_ops = { - .nopage = linvfs_filemap_nopage, +static struct vm_operations_struct xfs_dmapi_file_vm_ops = { + .nopage = xfs_vm_nopage, .populate = filemap_populate, #ifdef HAVE_VMOP_MPROTECT - .mprotect = linvfs_mprotect, + .mprotect = xfs_vm_mprotect, #endif }; #endif /* CONFIG_XFS_DMAPI */ diff --git a/fs/xfs/linux-2.6/xfs_fs_subr.c b/fs/xfs/linux-2.6/xfs_fs_subr.c index 4fa4b1a5187..575f2a790f3 100644 --- a/fs/xfs/linux-2.6/xfs_fs_subr.c +++ b/fs/xfs/linux-2.6/xfs_fs_subr.c @@ -57,7 +57,7 @@ fs_tosspages( int fiopt) { vnode_t *vp = BHV_TO_VNODE(bdp); - struct inode *ip = LINVFS_GET_IP(vp); + struct inode *ip = vn_to_inode(vp); if 
(VN_CACHED(vp)) truncate_inode_pages(ip->i_mapping, first); @@ -76,7 +76,7 @@ fs_flushinval_pages( int fiopt) { vnode_t *vp = BHV_TO_VNODE(bdp); - struct inode *ip = LINVFS_GET_IP(vp); + struct inode *ip = vn_to_inode(vp); if (VN_CACHED(vp)) { filemap_write_and_wait(ip->i_mapping); @@ -98,7 +98,7 @@ fs_flush_pages( int fiopt) { vnode_t *vp = BHV_TO_VNODE(bdp); - struct inode *ip = LINVFS_GET_IP(vp); + struct inode *ip = vn_to_inode(vp); if (VN_CACHED(vp)) { filemap_fdatawrite(ip->i_mapping); diff --git a/fs/xfs/linux-2.6/xfs_ioctl.c b/fs/xfs/linux-2.6/xfs_ioctl.c index 4db47790415..84478491609 100644 --- a/fs/xfs/linux-2.6/xfs_ioctl.c +++ b/fs/xfs/linux-2.6/xfs_ioctl.c @@ -138,7 +138,7 @@ xfs_find_handle( } /* we need the vnode */ - vp = LINVFS_GET_VP(inode); + vp = vn_from_inode(inode); /* now we can grab the fsid */ memcpy(&handle.ha_fsid, vp->v_vfsp->vfs_altfsid, sizeof(xfs_fsid_t)); @@ -256,7 +256,7 @@ xfs_vget_fsop_handlereq( } vpp = XFS_ITOV(ip); - inodep = LINVFS_GET_IP(vpp); + inodep = vn_to_inode(vpp); xfs_iunlock(ip, XFS_ILOCK_SHARED); *vp = vpp; @@ -344,7 +344,7 @@ xfs_open_by_handle( return -XFS_ERROR(-PTR_ERR(filp)); } if (inode->i_mode & S_IFREG) - filp->f_op = &linvfs_invis_file_operations; + filp->f_op = &xfs_invis_file_operations; fd_install(new_fd, filp); return new_fd; @@ -715,7 +715,7 @@ xfs_ioctl( xfs_inode_t *ip; xfs_mount_t *mp; - vp = LINVFS_GET_VP(inode); + vp = vn_from_inode(inode); vn_trace_entry(vp, "xfs_ioctl", (inst_t *)__return_address); @@ -1160,105 +1160,129 @@ xfs_ioc_xattr( void __user *arg) { struct fsxattr fa; - vattr_t va; - int error; + struct vattr *vattr; + int error = 0; int attr_flags; unsigned int flags; + vattr = kmalloc(sizeof(*vattr), GFP_KERNEL); + if (unlikely(!vattr)) + return -ENOMEM; + switch (cmd) { case XFS_IOC_FSGETXATTR: { - va.va_mask = XFS_AT_XFLAGS | XFS_AT_EXTSIZE | \ - XFS_AT_NEXTENTS | XFS_AT_PROJID; - VOP_GETATTR(vp, &va, 0, NULL, error); - if (error) - return -error; + vattr->va_mask = XFS_AT_XFLAGS | XFS_AT_EXTSIZE | \ + XFS_AT_NEXTENTS | XFS_AT_PROJID; + VOP_GETATTR(vp, vattr, 0, NULL, error); + if (unlikely(error)) { + error = -error; + break; + } - fa.fsx_xflags = va.va_xflags; - fa.fsx_extsize = va.va_extsize; - fa.fsx_nextents = va.va_nextents; - fa.fsx_projid = va.va_projid; + fa.fsx_xflags = vattr->va_xflags; + fa.fsx_extsize = vattr->va_extsize; + fa.fsx_nextents = vattr->va_nextents; + fa.fsx_projid = vattr->va_projid; - if (copy_to_user(arg, &fa, sizeof(fa))) - return -XFS_ERROR(EFAULT); - return 0; + if (copy_to_user(arg, &fa, sizeof(fa))) { + error = -EFAULT; + break; + } + break; } case XFS_IOC_FSSETXATTR: { - if (copy_from_user(&fa, arg, sizeof(fa))) - return -XFS_ERROR(EFAULT); + if (copy_from_user(&fa, arg, sizeof(fa))) { + error = -EFAULT; + break; + } attr_flags = 0; if (filp->f_flags & (O_NDELAY|O_NONBLOCK)) attr_flags |= ATTR_NONBLOCK; - va.va_mask = XFS_AT_XFLAGS | XFS_AT_EXTSIZE | XFS_AT_PROJID; - va.va_xflags = fa.fsx_xflags; - va.va_extsize = fa.fsx_extsize; - va.va_projid = fa.fsx_projid; + vattr->va_mask = XFS_AT_XFLAGS | XFS_AT_EXTSIZE | XFS_AT_PROJID; + vattr->va_xflags = fa.fsx_xflags; + vattr->va_extsize = fa.fsx_extsize; + vattr->va_projid = fa.fsx_projid; - VOP_SETATTR(vp, &va, attr_flags, NULL, error); - if (!error) - vn_revalidate(vp); /* update Linux inode flags */ - return -error; + VOP_SETATTR(vp, vattr, attr_flags, NULL, error); + if (likely(!error)) + __vn_revalidate(vp, vattr); /* update flags */ + error = -error; + break; } case XFS_IOC_FSGETXATTRA: { - va.va_mask = XFS_AT_XFLAGS | 
XFS_AT_EXTSIZE | \ - XFS_AT_ANEXTENTS | XFS_AT_PROJID; - VOP_GETATTR(vp, &va, 0, NULL, error); - if (error) - return -error; + vattr->va_mask = XFS_AT_XFLAGS | XFS_AT_EXTSIZE | \ + XFS_AT_ANEXTENTS | XFS_AT_PROJID; + VOP_GETATTR(vp, vattr, 0, NULL, error); + if (unlikely(error)) { + error = -error; + break; + } - fa.fsx_xflags = va.va_xflags; - fa.fsx_extsize = va.va_extsize; - fa.fsx_nextents = va.va_anextents; - fa.fsx_projid = va.va_projid; + fa.fsx_xflags = vattr->va_xflags; + fa.fsx_extsize = vattr->va_extsize; + fa.fsx_nextents = vattr->va_anextents; + fa.fsx_projid = vattr->va_projid; - if (copy_to_user(arg, &fa, sizeof(fa))) - return -XFS_ERROR(EFAULT); - return 0; + if (copy_to_user(arg, &fa, sizeof(fa))) { + error = -EFAULT; + break; + } + break; } case XFS_IOC_GETXFLAGS: { flags = xfs_di2lxflags(ip->i_d.di_flags); if (copy_to_user(arg, &flags, sizeof(flags))) - return -XFS_ERROR(EFAULT); - return 0; + error = -EFAULT; + break; } case XFS_IOC_SETXFLAGS: { - if (copy_from_user(&flags, arg, sizeof(flags))) - return -XFS_ERROR(EFAULT); + if (copy_from_user(&flags, arg, sizeof(flags))) { + error = -EFAULT; + break; + } if (flags & ~(LINUX_XFLAG_IMMUTABLE | LINUX_XFLAG_APPEND | \ LINUX_XFLAG_NOATIME | LINUX_XFLAG_NODUMP | \ - LINUX_XFLAG_SYNC)) - return -XFS_ERROR(EOPNOTSUPP); + LINUX_XFLAG_SYNC)) { + error = -EOPNOTSUPP; + break; + } attr_flags = 0; if (filp->f_flags & (O_NDELAY|O_NONBLOCK)) attr_flags |= ATTR_NONBLOCK; - va.va_mask = XFS_AT_XFLAGS; - va.va_xflags = xfs_merge_ioc_xflags(flags, - xfs_ip2xflags(ip)); + vattr->va_mask = XFS_AT_XFLAGS; + vattr->va_xflags = xfs_merge_ioc_xflags(flags, + xfs_ip2xflags(ip)); - VOP_SETATTR(vp, &va, attr_flags, NULL, error); - if (!error) - vn_revalidate(vp); /* update Linux inode flags */ - return -error; + VOP_SETATTR(vp, vattr, attr_flags, NULL, error); + if (likely(!error)) + __vn_revalidate(vp, vattr); /* update flags */ + error = -error; + break; } case XFS_IOC_GETVERSION: { - flags = LINVFS_GET_IP(vp)->i_generation; + flags = vn_to_inode(vp)->i_generation; if (copy_to_user(arg, &flags, sizeof(flags))) - return -XFS_ERROR(EFAULT); - return 0; + error = -EFAULT; + break; } default: - return -ENOTTY; + error = -ENOTTY; + break; } + + kfree(vattr); + return error; } STATIC int diff --git a/fs/xfs/linux-2.6/xfs_ioctl32.c b/fs/xfs/linux-2.6/xfs_ioctl32.c index a7c9ba1a9f7..b6321abd9a8 100644 --- a/fs/xfs/linux-2.6/xfs_ioctl32.c +++ b/fs/xfs/linux-2.6/xfs_ioctl32.c @@ -107,11 +107,11 @@ xfs_ioctl32_bulkstat( #endif STATIC long -__linvfs_compat_ioctl(int mode, struct file *f, unsigned cmd, unsigned long arg) +xfs_compat_ioctl(int mode, struct file *f, unsigned cmd, unsigned long arg) { int error; struct inode *inode = f->f_dentry->d_inode; - vnode_t *vp = LINVFS_GET_VP(inode); + vnode_t *vp = vn_to_inode(inode); switch (cmd) { case XFS_IOC_DIOINFO: @@ -196,19 +196,19 @@ __linvfs_compat_ioctl(int mode, struct file *f, unsigned cmd, unsigned long arg) } long -linvfs_compat_ioctl( +xfs_file_compat_ioctl( struct file *f, unsigned cmd, unsigned long arg) { - return __linvfs_compat_ioctl(0, f, cmd, arg); + return xfs_compat_ioctl(0, f, cmd, arg); } long -linvfs_compat_invis_ioctl( +xfs_file_compat_invis_ioctl( struct file *f, unsigned cmd, unsigned long arg) { - return __linvfs_compat_ioctl(IO_INVIS, f, cmd, arg); + return xfs_compat_ioctl(IO_INVIS, f, cmd, arg); } diff --git a/fs/xfs/linux-2.6/xfs_ioctl32.h b/fs/xfs/linux-2.6/xfs_ioctl32.h index 011c273bec5..02de6e62ee3 100644 --- a/fs/xfs/linux-2.6/xfs_ioctl32.h +++ 
b/fs/xfs/linux-2.6/xfs_ioctl32.h @@ -18,7 +18,7 @@ #ifndef __XFS_IOCTL32_H__ #define __XFS_IOCTL32_H__ -extern long linvfs_compat_ioctl(struct file *, unsigned, unsigned long); -extern long linvfs_compat_invis_ioctl(struct file *f, unsigned, unsigned long); +extern long xfs_file_compat_ioctl(struct file *, unsigned, unsigned long); +extern long xfs_file_compat_invis_ioctl(struct file *, unsigned, unsigned long); #endif /* __XFS_IOCTL32_H__ */ diff --git a/fs/xfs/linux-2.6/xfs_iops.c b/fs/xfs/linux-2.6/xfs_iops.c index d7f6f2d8ac8..af487437bd7 100644 --- a/fs/xfs/linux-2.6/xfs_iops.c +++ b/fs/xfs/linux-2.6/xfs_iops.c @@ -106,7 +106,7 @@ xfs_ichgtime( xfs_inode_t *ip, int flags) { - struct inode *inode = LINVFS_GET_IP(XFS_ITOV(ip)); + struct inode *inode = vn_to_inode(XFS_ITOV(ip)); timespec_t tv; nanotime(&tv); @@ -198,22 +198,22 @@ xfs_ichgtime_fast( * Pull the link count and size up from the xfs inode to the linux inode */ STATIC void -validate_fields( - struct inode *ip) +xfs_validate_fields( + struct inode *ip, + struct vattr *vattr) { - vnode_t *vp = LINVFS_GET_VP(ip); - vattr_t va; + vnode_t *vp = vn_from_inode(ip); int error; - va.va_mask = XFS_AT_NLINK|XFS_AT_SIZE|XFS_AT_NBLOCKS; - VOP_GETATTR(vp, &va, ATTR_LAZY, NULL, error); - if (likely(!error)) { - ip->i_nlink = va.va_nlink; - ip->i_blocks = va.va_nblocks; + vattr->va_mask = XFS_AT_NLINK|XFS_AT_SIZE|XFS_AT_NBLOCKS; + VOP_GETATTR(vp, vattr, ATTR_LAZY, NULL, error); + if (likely(!error)) { + ip->i_nlink = vattr->va_nlink; + ip->i_blocks = vattr->va_nblocks; - /* we're under i_mutex so i_size can't change under us */ - if (i_size_read(ip) != va.va_size) - i_size_write(ip, va.va_size); + /* we're under i_sem so i_size can't change under us */ + if (i_size_read(ip) != vattr->va_size) + i_size_write(ip, vattr->va_size); } } @@ -224,11 +224,11 @@ validate_fields( * inode, of course, such that log replay can't cause these to be lost). */ STATIC int -linvfs_init_security( +xfs_init_security( struct vnode *vp, struct inode *dir) { - struct inode *ip = LINVFS_GET_IP(vp); + struct inode *ip = vn_to_inode(vp); size_t length; void *value; char *name; @@ -257,46 +257,46 @@ linvfs_init_security( * XXX(hch): nfsd is broken, better fix it instead. */ STATIC inline int -has_fs_struct(struct task_struct *task) +xfs_has_fs_struct(struct task_struct *task) { return (task->fs != init_task.fs); } STATIC inline void -cleanup_inode( +xfs_cleanup_inode( vnode_t *dvp, vnode_t *vp, - struct dentry *dentry, + struct dentry *dentry, int mode) { struct dentry teardown = {}; - int err2; + int error; /* Oh, the horror. - * If we can't add the ACL or we fail in - * linvfs_init_security we must back out. + * If we can't add the ACL or we fail in + * xfs_init_security we must back out. * ENOSPC can hit here, among other things. 
*/ - teardown.d_inode = LINVFS_GET_IP(vp); + teardown.d_inode = vn_to_inode(vp); teardown.d_name = dentry->d_name; if (S_ISDIR(mode)) - VOP_RMDIR(dvp, &teardown, NULL, err2); + VOP_RMDIR(dvp, &teardown, NULL, error); else - VOP_REMOVE(dvp, &teardown, NULL, err2); + VOP_REMOVE(dvp, &teardown, NULL, error); VN_RELE(vp); } STATIC int -linvfs_mknod( +xfs_vn_mknod( struct inode *dir, struct dentry *dentry, int mode, dev_t rdev) { struct inode *ip; - vattr_t va; - vnode_t *vp = NULL, *dvp = LINVFS_GET_VP(dir); + vattr_t vattr = { 0 }; + vnode_t *vp = NULL, *dvp = vn_from_inode(dir); xfs_acl_t *default_acl = NULL; attrexists_t test_default_acl = _ACL_DEFAULT_EXISTS; int error; @@ -305,99 +305,98 @@ linvfs_mknod( * Irix uses Missed'em'V split, but doesn't want to see * the upper 5 bits of (14bit) major. */ - if (!sysv_valid_dev(rdev) || MAJOR(rdev) & ~0x1ff) + if (unlikely(!sysv_valid_dev(rdev) || MAJOR(rdev) & ~0x1ff)) return -EINVAL; - if (test_default_acl && test_default_acl(dvp)) { - if (!_ACL_ALLOC(default_acl)) + if (unlikely(test_default_acl && test_default_acl(dvp))) { + if (!_ACL_ALLOC(default_acl)) { return -ENOMEM; + } if (!_ACL_GET_DEFAULT(dvp, default_acl)) { _ACL_FREE(default_acl); default_acl = NULL; } } - if (IS_POSIXACL(dir) && !default_acl && has_fs_struct(current)) + if (IS_POSIXACL(dir) && !default_acl && xfs_has_fs_struct(current)) mode &= ~current->fs->umask; - memset(&va, 0, sizeof(va)); - va.va_mask = XFS_AT_TYPE|XFS_AT_MODE; - va.va_mode = mode; + vattr.va_mask = XFS_AT_TYPE|XFS_AT_MODE; + vattr.va_mode = mode; switch (mode & S_IFMT) { case S_IFCHR: case S_IFBLK: case S_IFIFO: case S_IFSOCK: - va.va_rdev = sysv_encode_dev(rdev); - va.va_mask |= XFS_AT_RDEV; + vattr.va_rdev = sysv_encode_dev(rdev); + vattr.va_mask |= XFS_AT_RDEV; /*FALLTHROUGH*/ case S_IFREG: - VOP_CREATE(dvp, dentry, &va, &vp, NULL, error); + VOP_CREATE(dvp, dentry, &vattr, &vp, NULL, error); break; case S_IFDIR: - VOP_MKDIR(dvp, dentry, &va, &vp, NULL, error); + VOP_MKDIR(dvp, dentry, &vattr, &vp, NULL, error); break; default: error = EINVAL; break; } - if (!error) - { - error = linvfs_init_security(vp, dir); + if (unlikely(!error)) { + error = xfs_init_security(vp, dir); if (error) - cleanup_inode(dvp, vp, dentry, mode); + xfs_cleanup_inode(dvp, vp, dentry, mode); } - if (default_acl) { + if (unlikely(default_acl)) { if (!error) { - error = _ACL_INHERIT(vp, &va, default_acl); - if (!error) + error = _ACL_INHERIT(vp, &vattr, default_acl); + if (!error) VMODIFY(vp); else - cleanup_inode(dvp, vp, dentry, mode); + xfs_cleanup_inode(dvp, vp, dentry, mode); } _ACL_FREE(default_acl); } - if (!error) { + if (likely(!error)) { ASSERT(vp); - ip = LINVFS_GET_IP(vp); + ip = vn_to_inode(vp); if (S_ISCHR(mode) || S_ISBLK(mode)) ip->i_rdev = rdev; else if (S_ISDIR(mode)) - validate_fields(ip); + xfs_validate_fields(ip, &vattr); d_instantiate(dentry, ip); - validate_fields(dir); + xfs_validate_fields(dir, &vattr); } return -error; } STATIC int -linvfs_create( +xfs_vn_create( struct inode *dir, struct dentry *dentry, int mode, struct nameidata *nd) { - return linvfs_mknod(dir, dentry, mode, 0); + return xfs_vn_mknod(dir, dentry, mode, 0); } STATIC int -linvfs_mkdir( +xfs_vn_mkdir( struct inode *dir, struct dentry *dentry, int mode) { - return linvfs_mknod(dir, dentry, mode|S_IFDIR, 0); + return xfs_vn_mknod(dir, dentry, mode|S_IFDIR, 0); } STATIC struct dentry * -linvfs_lookup( +xfs_vn_lookup( struct inode *dir, struct dentry *dentry, struct nameidata *nd) { - struct vnode *vp = LINVFS_GET_VP(dir), *cvp; + struct vnode 
*vp = vn_from_inode(dir), *cvp; int error; if (dentry->d_name.len >= MAXNAMELEN) @@ -411,11 +410,11 @@ linvfs_lookup( return NULL; } - return d_splice_alias(LINVFS_GET_IP(cvp), dentry); + return d_splice_alias(vn_to_inode(cvp), dentry); } STATIC int -linvfs_link( +xfs_vn_link( struct dentry *old_dentry, struct inode *dir, struct dentry *dentry) @@ -423,99 +422,102 @@ linvfs_link( struct inode *ip; /* inode of guy being linked to */ vnode_t *tdvp; /* target directory for new name/link */ vnode_t *vp; /* vp of name being linked */ + vattr_t vattr; int error; ip = old_dentry->d_inode; /* inode being linked to */ if (S_ISDIR(ip->i_mode)) return -EPERM; - tdvp = LINVFS_GET_VP(dir); - vp = LINVFS_GET_VP(ip); + tdvp = vn_from_inode(dir); + vp = vn_from_inode(ip); VOP_LINK(tdvp, vp, dentry, NULL, error); - if (!error) { + if (likely(!error)) { VMODIFY(tdvp); VN_HOLD(vp); - validate_fields(ip); + xfs_validate_fields(ip, &vattr); d_instantiate(dentry, ip); } return -error; } STATIC int -linvfs_unlink( +xfs_vn_unlink( struct inode *dir, struct dentry *dentry) { struct inode *inode; vnode_t *dvp; /* directory containing name to remove */ + vattr_t vattr; int error; inode = dentry->d_inode; - dvp = LINVFS_GET_VP(dir); + dvp = vn_from_inode(dir); VOP_REMOVE(dvp, dentry, NULL, error); - if (!error) { - validate_fields(dir); /* For size only */ - validate_fields(inode); + if (likely(!error)) { + xfs_validate_fields(dir, &vattr); /* size needs update */ + xfs_validate_fields(inode, &vattr); } - return -error; } STATIC int -linvfs_symlink( +xfs_vn_symlink( struct inode *dir, struct dentry *dentry, const char *symname) { struct inode *ip; - vattr_t va; + vattr_t vattr = { 0 }; vnode_t *dvp; /* directory containing name of symlink */ vnode_t *cvp; /* used to lookup symlink to put in dentry */ int error; - dvp = LINVFS_GET_VP(dir); + dvp = vn_from_inode(dir); cvp = NULL; - memset(&va, 0, sizeof(va)); - va.va_mode = S_IFLNK | + vattr.va_mode = S_IFLNK | (irix_symlink_mode ? 
0777 & ~current->fs->umask : S_IRWXUGO); - va.va_mask = XFS_AT_TYPE|XFS_AT_MODE; + vattr.va_mask = XFS_AT_TYPE|XFS_AT_MODE; error = 0; - VOP_SYMLINK(dvp, dentry, &va, (char *)symname, &cvp, NULL, error); + VOP_SYMLINK(dvp, dentry, &vattr, (char *)symname, &cvp, NULL, error); if (likely(!error && cvp)) { - error = linvfs_init_security(cvp, dir); + error = xfs_init_security(cvp, dir); if (likely(!error)) { - ip = LINVFS_GET_IP(cvp); + ip = vn_to_inode(cvp); d_instantiate(dentry, ip); - validate_fields(dir); - validate_fields(ip); + xfs_validate_fields(dir, &vattr); + xfs_validate_fields(ip, &vattr); + } else { + xfs_cleanup_inode(dvp, cvp, dentry, 0); } } return -error; } STATIC int -linvfs_rmdir( +xfs_vn_rmdir( struct inode *dir, struct dentry *dentry) { struct inode *inode = dentry->d_inode; - vnode_t *dvp = LINVFS_GET_VP(dir); + vnode_t *dvp = vn_from_inode(dir); + vattr_t vattr; int error; VOP_RMDIR(dvp, dentry, NULL, error); - if (!error) { - validate_fields(inode); - validate_fields(dir); + if (likely(!error)) { + xfs_validate_fields(inode, &vattr); + xfs_validate_fields(dir, &vattr); } return -error; } STATIC int -linvfs_rename( +xfs_vn_rename( struct inode *odir, struct dentry *odentry, struct inode *ndir, @@ -524,22 +526,21 @@ linvfs_rename( struct inode *new_inode = ndentry->d_inode; vnode_t *fvp; /* from directory */ vnode_t *tvp; /* target directory */ + vattr_t vattr; int error; - fvp = LINVFS_GET_VP(odir); - tvp = LINVFS_GET_VP(ndir); + fvp = vn_from_inode(odir); + tvp = vn_from_inode(ndir); VOP_RENAME(fvp, odentry, tvp, ndentry, NULL, error); - if (error) - return -error; - - if (new_inode) - validate_fields(new_inode); - - validate_fields(odir); - if (ndir != odir) - validate_fields(ndir); - return 0; + if (likely(!error)) { + if (new_inode) + xfs_validate_fields(new_inode, &vattr); + xfs_validate_fields(odir, &vattr); + if (ndir != odir) + xfs_validate_fields(ndir, &vattr); + } + return -error; } /* @@ -548,7 +549,7 @@ linvfs_rename( * uio is kmalloced for this reason... 
*/ STATIC void * -linvfs_follow_link( +xfs_vn_follow_link( struct dentry *dentry, struct nameidata *nd) { @@ -574,7 +575,7 @@ linvfs_follow_link( return NULL; } - vp = LINVFS_GET_VP(dentry->d_inode); + vp = vn_from_inode(dentry->d_inode); iov.iov_base = link; iov.iov_len = MAXPATHLEN; @@ -599,7 +600,7 @@ linvfs_follow_link( } STATIC void -linvfs_put_link( +xfs_vn_put_link( struct dentry *dentry, struct nameidata *nd, void *p) @@ -612,12 +613,12 @@ linvfs_put_link( #ifdef CONFIG_XFS_POSIX_ACL STATIC int -linvfs_permission( +xfs_vn_permission( struct inode *inode, int mode, struct nameidata *nd) { - vnode_t *vp = LINVFS_GET_VP(inode); + vnode_t *vp = vn_from_inode(inode); int error; mode <<= 6; /* convert from linux to vnode access bits */ @@ -625,17 +626,17 @@ linvfs_permission( return -error; } #else -#define linvfs_permission NULL +#define xfs_vn_permission NULL #endif STATIC int -linvfs_getattr( +xfs_vn_getattr( struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat) { struct inode *inode = dentry->d_inode; - vnode_t *vp = LINVFS_GET_VP(inode); + vnode_t *vp = vn_from_inode(inode); int error = 0; if (unlikely(vp->v_flag & VMODIFIED)) @@ -646,18 +647,17 @@ linvfs_getattr( } STATIC int -linvfs_setattr( +xfs_vn_setattr( struct dentry *dentry, struct iattr *attr) { struct inode *inode = dentry->d_inode; unsigned int ia_valid = attr->ia_valid; - vnode_t *vp = LINVFS_GET_VP(inode); - vattr_t vattr; + vnode_t *vp = vn_from_inode(inode); + vattr_t vattr = { 0 }; int flags = 0; int error; - memset(&vattr, 0, sizeof(vattr_t)); if (ia_valid & ATTR_UID) { vattr.va_mask |= XFS_AT_UID; vattr.va_uid = attr->ia_uid; @@ -699,28 +699,27 @@ linvfs_setattr( #endif VOP_SETATTR(vp, &vattr, flags, NULL, error); - if (error) - return -error; - vn_revalidate(vp); - return error; + if (likely(!error)) + __vn_revalidate(vp, &vattr); + return -error; } STATIC void -linvfs_truncate( +xfs_vn_truncate( struct inode *inode) { - block_truncate_page(inode->i_mapping, inode->i_size, linvfs_get_block); + block_truncate_page(inode->i_mapping, inode->i_size, xfs_get_block); } STATIC int -linvfs_setxattr( +xfs_vn_setxattr( struct dentry *dentry, const char *name, const void *data, size_t size, int flags) { - vnode_t *vp = LINVFS_GET_VP(dentry->d_inode); + vnode_t *vp = vn_from_inode(dentry->d_inode); char *attr = (char *)name; attrnames_t *namesp; int xflags = 0; @@ -744,13 +743,13 @@ linvfs_setxattr( } STATIC ssize_t -linvfs_getxattr( +xfs_vn_getxattr( struct dentry *dentry, const char *name, void *data, size_t size) { - vnode_t *vp = LINVFS_GET_VP(dentry->d_inode); + vnode_t *vp = vn_from_inode(dentry->d_inode); char *attr = (char *)name; attrnames_t *namesp; int xflags = 0; @@ -774,12 +773,12 @@ linvfs_getxattr( } STATIC ssize_t -linvfs_listxattr( +xfs_vn_listxattr( struct dentry *dentry, char *data, size_t size) { - vnode_t *vp = LINVFS_GET_VP(dentry->d_inode); + vnode_t *vp = vn_from_inode(dentry->d_inode); int error, xflags = ATTR_KERNAMELS; ssize_t result; @@ -794,11 +793,11 @@ linvfs_listxattr( } STATIC int -linvfs_removexattr( +xfs_vn_removexattr( struct dentry *dentry, const char *name) { - vnode_t *vp = LINVFS_GET_VP(dentry->d_inode); + vnode_t *vp = vn_from_inode(dentry->d_inode); char *attr = (char *)name; attrnames_t *namesp; int xflags = 0; @@ -816,45 +815,45 @@ linvfs_removexattr( } -struct inode_operations linvfs_file_inode_operations = { - .permission = linvfs_permission, - .truncate = linvfs_truncate, - .getattr = linvfs_getattr, - .setattr = linvfs_setattr, - .setxattr = linvfs_setxattr, - 
.getxattr = linvfs_getxattr, - .listxattr = linvfs_listxattr, - .removexattr = linvfs_removexattr, +struct inode_operations xfs_inode_operations = { + .permission = xfs_vn_permission, + .truncate = xfs_vn_truncate, + .getattr = xfs_vn_getattr, + .setattr = xfs_vn_setattr, + .setxattr = xfs_vn_setxattr, + .getxattr = xfs_vn_getxattr, + .listxattr = xfs_vn_listxattr, + .removexattr = xfs_vn_removexattr, }; -struct inode_operations linvfs_dir_inode_operations = { - .create = linvfs_create, - .lookup = linvfs_lookup, - .link = linvfs_link, - .unlink = linvfs_unlink, - .symlink = linvfs_symlink, - .mkdir = linvfs_mkdir, - .rmdir = linvfs_rmdir, - .mknod = linvfs_mknod, - .rename = linvfs_rename, - .permission = linvfs_permission, - .getattr = linvfs_getattr, - .setattr = linvfs_setattr, - .setxattr = linvfs_setxattr, - .getxattr = linvfs_getxattr, - .listxattr = linvfs_listxattr, - .removexattr = linvfs_removexattr, +struct inode_operations xfs_dir_inode_operations = { + .create = xfs_vn_create, + .lookup = xfs_vn_lookup, + .link = xfs_vn_link, + .unlink = xfs_vn_unlink, + .symlink = xfs_vn_symlink, + .mkdir = xfs_vn_mkdir, + .rmdir = xfs_vn_rmdir, + .mknod = xfs_vn_mknod, + .rename = xfs_vn_rename, + .permission = xfs_vn_permission, + .getattr = xfs_vn_getattr, + .setattr = xfs_vn_setattr, + .setxattr = xfs_vn_setxattr, + .getxattr = xfs_vn_getxattr, + .listxattr = xfs_vn_listxattr, + .removexattr = xfs_vn_removexattr, }; -struct inode_operations linvfs_symlink_inode_operations = { +struct inode_operations xfs_symlink_inode_operations = { .readlink = generic_readlink, - .follow_link = linvfs_follow_link, - .put_link = linvfs_put_link, - .permission = linvfs_permission, - .getattr = linvfs_getattr, - .setattr = linvfs_setattr, - .setxattr = linvfs_setxattr, - .getxattr = linvfs_getxattr, - .listxattr = linvfs_listxattr, - .removexattr = linvfs_removexattr, + .follow_link = xfs_vn_follow_link, + .put_link = xfs_vn_put_link, + .permission = xfs_vn_permission, + .getattr = xfs_vn_getattr, + .setattr = xfs_vn_setattr, + .setxattr = xfs_vn_setxattr, + .getxattr = xfs_vn_getxattr, + .listxattr = xfs_vn_listxattr, + .removexattr = xfs_vn_removexattr, }; diff --git a/fs/xfs/linux-2.6/xfs_iops.h b/fs/xfs/linux-2.6/xfs_iops.h index 6899a6b4a50..a8417d7af5f 100644 --- a/fs/xfs/linux-2.6/xfs_iops.h +++ b/fs/xfs/linux-2.6/xfs_iops.h @@ -18,13 +18,13 @@ #ifndef __XFS_IOPS_H__ #define __XFS_IOPS_H__ -extern struct inode_operations linvfs_file_inode_operations; -extern struct inode_operations linvfs_dir_inode_operations; -extern struct inode_operations linvfs_symlink_inode_operations; +extern struct inode_operations xfs_inode_operations; +extern struct inode_operations xfs_dir_inode_operations; +extern struct inode_operations xfs_symlink_inode_operations; -extern struct file_operations linvfs_file_operations; -extern struct file_operations linvfs_invis_file_operations; -extern struct file_operations linvfs_dir_operations; +extern struct file_operations xfs_file_operations; +extern struct file_operations xfs_dir_file_operations; +extern struct file_operations xfs_invis_file_operations; extern int xfs_ioctl(struct bhv_desc *, struct inode *, struct file *, int, unsigned int, void __user *); diff --git a/fs/xfs/linux-2.6/xfs_linux.h b/fs/xfs/linux-2.6/xfs_linux.h index 67389b74552..1fe09f2d651 100644 --- a/fs/xfs/linux-2.6/xfs_linux.h +++ b/fs/xfs/linux-2.6/xfs_linux.h @@ -73,6 +73,9 @@ #include <linux/list.h> #include <linux/proc_fs.h> #include <linux/sort.h> +#include <linux/cpu.h> +#include <linux/notifier.h> 
+#include <linux/delay.h> #include <asm/page.h> #include <asm/div64.h> @@ -100,6 +103,11 @@ */ #undef HAVE_REFCACHE /* reference cache not needed for NFS in 2.6 */ #define HAVE_SENDFILE /* sendfile(2) exists in 2.6, but not in 2.4 */ +#ifdef CONFIG_SMP +#define HAVE_PERCPU_SB /* per cpu superblock counters are a 2.6 feature */ +#else +#undef HAVE_PERCPU_SB /* per cpu superblock counters are a 2.6 feature */ +#endif /* * State flag for unwritten extent buffers. @@ -226,7 +234,7 @@ BUFFER_FNS(PrivateStart, unwritten); #define xfs_sort(a,n,s,fn) sort(a,n,s,fn,NULL) #define xfs_stack_trace() dump_stack() #define xfs_itruncate_data(ip, off) \ - (-vmtruncate(LINVFS_GET_IP(XFS_ITOV(ip)), (off))) + (-vmtruncate(vn_to_inode(XFS_ITOV(ip)), (off))) #define xfs_statvfs_fsid(statp, mp) \ ({ u64 id = huge_encode_dev((mp)->m_ddev_targp->bt_dev); \ __kernel_fsid_t *fsid = &(statp)->f_fsid; \ diff --git a/fs/xfs/linux-2.6/xfs_lrw.c b/fs/xfs/linux-2.6/xfs_lrw.c index e0ab45fbfeb..0169360475c 100644 --- a/fs/xfs/linux-2.6/xfs_lrw.c +++ b/fs/xfs/linux-2.6/xfs_lrw.c @@ -83,7 +83,7 @@ xfs_rw_enter_trace( (void *)((unsigned long)ioflags), (void *)((unsigned long)((io->io_new_size >> 32) & 0xffffffff)), (void *)((unsigned long)(io->io_new_size & 0xffffffff)), - (void *)NULL, + (void *)((unsigned long)current_pid()), (void *)NULL, (void *)NULL, (void *)NULL, @@ -113,7 +113,7 @@ xfs_inval_cached_trace( (void *)((unsigned long)(first & 0xffffffff)), (void *)((unsigned long)((last >> 32) & 0xffffffff)), (void *)((unsigned long)(last & 0xffffffff)), - (void *)NULL, + (void *)((unsigned long)current_pid()), (void *)NULL, (void *)NULL, (void *)NULL, @@ -249,9 +249,8 @@ xfs_read( if (n < size) size = n; - if (XFS_FORCED_SHUTDOWN(mp)) { + if (XFS_FORCED_SHUTDOWN(mp)) return -EIO; - } if (unlikely(ioflags & IO_ISDIRECT)) mutex_lock(&inode->i_mutex); @@ -267,10 +266,14 @@ xfs_read( dmflags, &locktype); if (ret) { xfs_iunlock(ip, XFS_IOLOCK_SHARED); - goto unlock_isem; + goto unlock_mutex; } } + if (unlikely((ioflags & IO_ISDIRECT) && VN_CACHED(vp))) + VOP_FLUSHINVAL_PAGES(vp, ctooff(offtoct(*offset)), + -1, FI_REMAPF_LOCKED); + xfs_rw_enter_trace(XFS_READ_ENTER, &ip->i_iocore, (void *)iovp, segs, *offset, ioflags); ret = __generic_file_aio_read(iocb, iovp, segs, offset); @@ -281,7 +284,7 @@ xfs_read( xfs_iunlock(ip, XFS_IOLOCK_SHARED); -unlock_isem: +unlock_mutex: if (unlikely(ioflags & IO_ISDIRECT)) mutex_unlock(&inode->i_mutex); return ret; @@ -432,7 +435,7 @@ xfs_zero_eof( xfs_fsize_t isize, /* current inode size */ xfs_fsize_t end_size) /* terminal inode size */ { - struct inode *ip = LINVFS_GET_IP(vp); + struct inode *ip = vn_to_inode(vp); xfs_fileoff_t start_zero_fsb; xfs_fileoff_t end_zero_fsb; xfs_fileoff_t zero_count_fsb; @@ -573,7 +576,7 @@ xfs_write( vrwlock_t locktype; size_t ocount = 0, count; loff_t pos; - int need_isem = 1, need_flush = 0; + int need_i_mutex = 1, need_flush = 0; XFS_STATS_INC(xs_write_calls); @@ -622,14 +625,14 @@ xfs_write( return XFS_ERROR(-EINVAL); if (!VN_CACHED(vp) && pos < i_size_read(inode)) - need_isem = 0; + need_i_mutex = 0; if (VN_CACHED(vp)) need_flush = 1; } relock: - if (need_isem) { + if (need_i_mutex) { iolock = XFS_IOLOCK_EXCL; locktype = VRWLOCK_WRITE; @@ -651,7 +654,7 @@ start: S_ISBLK(inode->i_mode)); if (error) { xfs_iunlock(xip, XFS_ILOCK_EXCL|iolock); - goto out_unlock_isem; + goto out_unlock_mutex; } new_size = pos + count; @@ -663,7 +666,7 @@ start: loff_t savedsize = pos; int dmflags = FILP_DELAY_FLAG(file); - if (need_isem) + if (need_i_mutex) dmflags |= 
DM_FLAGS_IMUX; xfs_iunlock(xip, XFS_ILOCK_EXCL); @@ -672,7 +675,7 @@ start: dmflags, &locktype); if (error) { xfs_iunlock(xip, iolock); - goto out_unlock_isem; + goto out_unlock_mutex; } xfs_ilock(xip, XFS_ILOCK_EXCL); eventsent = 1; @@ -710,7 +713,7 @@ start: isize, pos + count); if (error) { xfs_iunlock(xip, XFS_ILOCK_EXCL|iolock); - goto out_unlock_isem; + goto out_unlock_mutex; } } xfs_iunlock(xip, XFS_ILOCK_EXCL); @@ -731,7 +734,7 @@ start: error = -remove_suid(file->f_dentry); if (unlikely(error)) { xfs_iunlock(xip, iolock); - goto out_unlock_isem; + goto out_unlock_mutex; } } @@ -747,14 +750,14 @@ retry: -1, FI_REMAPF_LOCKED); } - if (need_isem) { + if (need_i_mutex) { /* demote the lock now the cached pages are gone */ XFS_ILOCK_DEMOTE(mp, io, XFS_IOLOCK_EXCL); mutex_unlock(&inode->i_mutex); iolock = XFS_IOLOCK_SHARED; locktype = VRWLOCK_WRITE_DIRECT; - need_isem = 0; + need_i_mutex = 0; } xfs_rw_enter_trace(XFS_DIOWR_ENTER, io, (void *)iovp, segs, @@ -772,7 +775,7 @@ retry: pos += ret; count -= ret; - need_isem = 1; + need_i_mutex = 1; ioflags &= ~IO_ISDIRECT; xfs_iunlock(xip, iolock); goto relock; @@ -794,14 +797,14 @@ retry: !(ioflags & IO_INVIS)) { xfs_rwunlock(bdp, locktype); - if (need_isem) + if (need_i_mutex) mutex_unlock(&inode->i_mutex); error = XFS_SEND_NAMESP(xip->i_mount, DM_EVENT_NOSPACE, vp, DM_RIGHT_NULL, vp, DM_RIGHT_NULL, NULL, NULL, 0, 0, 0); /* Delay flag intentionally unused */ if (error) goto out_nounlocks; - if (need_isem) + if (need_i_mutex) mutex_lock(&inode->i_mutex); xfs_rwlock(bdp, locktype); pos = xip->i_d.di_size; @@ -905,9 +908,9 @@ retry: if (error) goto out_unlock_internal; } - + xfs_rwunlock(bdp, locktype); - if (need_isem) + if (need_i_mutex) mutex_unlock(&inode->i_mutex); error = sync_page_range(inode, mapping, pos, ret); @@ -918,8 +921,8 @@ retry: out_unlock_internal: xfs_rwunlock(bdp, locktype); - out_unlock_isem: - if (need_isem) + out_unlock_mutex: + if (need_i_mutex) mutex_unlock(&inode->i_mutex); out_nounlocks: return -error; diff --git a/fs/xfs/linux-2.6/xfs_stats.c b/fs/xfs/linux-2.6/xfs_stats.c index 8955720a2c6..713e6a7505d 100644 --- a/fs/xfs/linux-2.6/xfs_stats.c +++ b/fs/xfs/linux-2.6/xfs_stats.c @@ -62,18 +62,15 @@ xfs_read_xfsstats( while (j < xstats[i].endpoint) { val = 0; /* sum over all cpus */ - for (c = 0; c < NR_CPUS; c++) { - if (!cpu_possible(c)) continue; + for_each_cpu(c) val += *(((__u32*)&per_cpu(xfsstats, c) + j)); - } len += sprintf(buffer + len, " %u", val); j++; } buffer[len++] = '\n'; } /* extra precision counters */ - for (i = 0; i < NR_CPUS; i++) { - if (!cpu_possible(i)) continue; + for_each_cpu(i) { xs_xstrat_bytes += per_cpu(xfsstats, i).xs_xstrat_bytes; xs_write_bytes += per_cpu(xfsstats, i).xs_write_bytes; xs_read_bytes += per_cpu(xfsstats, i).xs_read_bytes; diff --git a/fs/xfs/linux-2.6/xfs_super.c b/fs/xfs/linux-2.6/xfs_super.c index f22e426d9e4..8355faf8ffd 100644 --- a/fs/xfs/linux-2.6/xfs_super.c +++ b/fs/xfs/linux-2.6/xfs_super.c @@ -59,8 +59,8 @@ #include <linux/writeback.h> #include <linux/kthread.h> -STATIC struct quotactl_ops linvfs_qops; -STATIC struct super_operations linvfs_sops; +STATIC struct quotactl_ops xfs_quotactl_operations; +STATIC struct super_operations xfs_super_operations; STATIC kmem_zone_t *xfs_vnode_zone; STATIC kmem_zone_t *xfs_ioend_zone; mempool_t *xfs_ioend_pool; @@ -76,8 +76,6 @@ xfs_args_allocate( strncpy(args->fsname, sb->s_id, MAXNAMELEN); /* Copy the already-parsed mount(2) flags we're interested in */ - if (sb->s_flags & MS_NOATIME) - args->flags |= XFSMNT_NOATIME; if 
(sb->s_flags & MS_DIRSYNC) args->flags |= XFSMNT_DIRSYNC; if (sb->s_flags & MS_SYNCHRONOUS) @@ -129,21 +127,21 @@ xfs_set_inodeops( { switch (inode->i_mode & S_IFMT) { case S_IFREG: - inode->i_op = &linvfs_file_inode_operations; - inode->i_fop = &linvfs_file_operations; - inode->i_mapping->a_ops = &linvfs_aops; + inode->i_op = &xfs_inode_operations; + inode->i_fop = &xfs_file_operations; + inode->i_mapping->a_ops = &xfs_address_space_operations; break; case S_IFDIR: - inode->i_op = &linvfs_dir_inode_operations; - inode->i_fop = &linvfs_dir_operations; + inode->i_op = &xfs_dir_inode_operations; + inode->i_fop = &xfs_dir_file_operations; break; case S_IFLNK: - inode->i_op = &linvfs_symlink_inode_operations; + inode->i_op = &xfs_symlink_inode_operations; if (inode->i_blocks) - inode->i_mapping->a_ops = &linvfs_aops; + inode->i_mapping->a_ops = &xfs_address_space_operations; break; default: - inode->i_op = &linvfs_file_inode_operations; + inode->i_op = &xfs_inode_operations; init_special_inode(inode, inode->i_mode, inode->i_rdev); break; } @@ -155,7 +153,7 @@ xfs_revalidate_inode( vnode_t *vp, xfs_inode_t *ip) { - struct inode *inode = LINVFS_GET_IP(vp); + struct inode *inode = vn_to_inode(vp); inode->i_mode = ip->i_d.di_mode; inode->i_nlink = ip->i_d.di_nlink; @@ -212,7 +210,7 @@ xfs_initialize_vnode( int unlock) { xfs_inode_t *ip = XFS_BHVTOI(inode_bhv); - struct inode *inode = LINVFS_GET_IP(vp); + struct inode *inode = vn_to_inode(vp); if (!inode_bhv->bd_vobj) { vp->v_vfsp = bhvtovfs(bdp); @@ -230,7 +228,7 @@ xfs_initialize_vnode( if (ip->i_d.di_mode != 0 && unlock && (inode->i_state & I_NEW)) { xfs_revalidate_inode(XFS_BHVTOM(bdp), vp, ip); xfs_set_inodeops(inode); - + ip->i_flags &= ~XFS_INEW; barrier(); @@ -334,43 +332,42 @@ xfs_blkdev_issue_flush( } STATIC struct inode * -linvfs_alloc_inode( +xfs_fs_alloc_inode( struct super_block *sb) { vnode_t *vp; - vp = kmem_cache_alloc(xfs_vnode_zone, kmem_flags_convert(KM_SLEEP)); - if (!vp) + vp = kmem_zone_alloc(xfs_vnode_zone, KM_SLEEP); + if (unlikely(!vp)) return NULL; - return LINVFS_GET_IP(vp); + return vn_to_inode(vp); } STATIC void -linvfs_destroy_inode( +xfs_fs_destroy_inode( struct inode *inode) { - kmem_zone_free(xfs_vnode_zone, LINVFS_GET_VP(inode)); + kmem_zone_free(xfs_vnode_zone, vn_from_inode(inode)); } STATIC void -linvfs_inode_init_once( - void *data, - kmem_cache_t *cachep, +xfs_fs_inode_init_once( + void *vnode, + kmem_zone_t *zonep, unsigned long flags) { - vnode_t *vp = (vnode_t *)data; - if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) == - SLAB_CTOR_CONSTRUCTOR) - inode_init_once(LINVFS_GET_IP(vp)); + SLAB_CTOR_CONSTRUCTOR) + inode_init_once(vn_to_inode((vnode_t *)vnode)); } STATIC int -linvfs_init_zones(void) +xfs_init_zones(void) { - xfs_vnode_zone = kmem_cache_create("xfs_vnode", - sizeof(vnode_t), 0, SLAB_RECLAIM_ACCOUNT, - linvfs_inode_init_once, NULL); + xfs_vnode_zone = kmem_zone_init_flags(sizeof(vnode_t), "xfs_vnode_t", + KM_ZONE_HWALIGN | KM_ZONE_RECLAIM | + KM_ZONE_SPREAD, + xfs_fs_inode_init_once); if (!xfs_vnode_zone) goto out; @@ -379,14 +376,12 @@ linvfs_init_zones(void) goto out_destroy_vnode_zone; xfs_ioend_pool = mempool_create(4 * MAX_BUF_PER_PAGE, - mempool_alloc_slab, mempool_free_slab, - xfs_ioend_zone); + mempool_alloc_slab, mempool_free_slab, + xfs_ioend_zone); if (!xfs_ioend_pool) goto out_free_ioend_zone; - return 0; - out_free_ioend_zone: kmem_zone_destroy(xfs_ioend_zone); out_destroy_vnode_zone: @@ -396,7 +391,7 @@ linvfs_init_zones(void) } STATIC void -linvfs_destroy_zones(void) 
+xfs_destroy_zones(void) { mempool_destroy(xfs_ioend_pool); kmem_zone_destroy(xfs_vnode_zone); @@ -407,14 +402,14 @@ linvfs_destroy_zones(void) * Attempt to flush the inode, this will actually fail * if the inode is pinned, but we dirty the inode again * at the point when it is unpinned after a log write, - * since this is when the inode itself becomes flushable. + * since this is when the inode itself becomes flushable. */ STATIC int -linvfs_write_inode( +xfs_fs_write_inode( struct inode *inode, int sync) { - vnode_t *vp = LINVFS_GET_VP(inode); + vnode_t *vp = vn_from_inode(inode); int error = 0, flags = FLUSH_INODE; if (vp) { @@ -434,13 +429,13 @@ linvfs_write_inode( } STATIC void -linvfs_clear_inode( +xfs_fs_clear_inode( struct inode *inode) { - vnode_t *vp = LINVFS_GET_VP(inode); + vnode_t *vp = vn_from_inode(inode); int error, cache; - vn_trace_entry(vp, "clear_inode", (inst_t *)__return_address); + vn_trace_entry(vp, __FUNCTION__, (inst_t *)__return_address); XFS_STATS_INC(vn_rele); XFS_STATS_INC(vn_remove); @@ -516,7 +511,7 @@ void xfs_flush_inode( xfs_inode_t *ip) { - struct inode *inode = LINVFS_GET_IP(XFS_ITOV(ip)); + struct inode *inode = vn_to_inode(XFS_ITOV(ip)); struct vfs *vfs = XFS_MTOVFS(ip->i_mount); igrab(inode); @@ -541,7 +536,7 @@ void xfs_flush_device( xfs_inode_t *ip) { - struct inode *inode = LINVFS_GET_IP(XFS_ITOV(ip)); + struct inode *inode = vn_to_inode(XFS_ITOV(ip)); struct vfs *vfs = XFS_MTOVFS(ip->i_mount); igrab(inode); @@ -550,7 +545,7 @@ xfs_flush_device( xfs_log_force(ip->i_mount, (xfs_lsn_t)0, XFS_LOG_FORCE|XFS_LOG_SYNC); } -#define SYNCD_FLAGS (SYNC_FSDATA|SYNC_BDFLUSH|SYNC_ATTR) +#define SYNCD_FLAGS (SYNC_FSDATA|SYNC_BDFLUSH|SYNC_ATTR|SYNC_REFCACHE) STATIC void vfs_sync_worker( vfs_t *vfsp, @@ -613,7 +608,7 @@ xfssyncd( } STATIC int -linvfs_start_syncd( +xfs_fs_start_syncd( vfs_t *vfsp) { vfsp->vfs_sync_work.w_syncer = vfs_sync_worker; @@ -625,20 +620,20 @@ linvfs_start_syncd( } STATIC void -linvfs_stop_syncd( +xfs_fs_stop_syncd( vfs_t *vfsp) { kthread_stop(vfsp->vfs_sync_task); } STATIC void -linvfs_put_super( +xfs_fs_put_super( struct super_block *sb) { - vfs_t *vfsp = LINVFS_GET_VFS(sb); + vfs_t *vfsp = vfs_from_sb(sb); int error; - linvfs_stop_syncd(vfsp); + xfs_fs_stop_syncd(vfsp); VFS_SYNC(vfsp, SYNC_ATTR|SYNC_DELWRI, NULL, error); if (!error) VFS_UNMOUNT(vfsp, 0, NULL, error); @@ -652,10 +647,10 @@ linvfs_put_super( } STATIC void -linvfs_write_super( +xfs_fs_write_super( struct super_block *sb) { - vfs_t *vfsp = LINVFS_GET_VFS(sb); + vfs_t *vfsp = vfs_from_sb(sb); int error; if (sb->s_flags & MS_RDONLY) { @@ -668,11 +663,11 @@ linvfs_write_super( } STATIC int -linvfs_sync_super( +xfs_fs_sync_super( struct super_block *sb, int wait) { - vfs_t *vfsp = LINVFS_GET_VFS(sb); + vfs_t *vfsp = vfs_from_sb(sb); int error; int flags = SYNC_FSDATA; @@ -707,11 +702,11 @@ linvfs_sync_super( } STATIC int -linvfs_statfs( +xfs_fs_statfs( struct super_block *sb, struct kstatfs *statp) { - vfs_t *vfsp = LINVFS_GET_VFS(sb); + vfs_t *vfsp = vfs_from_sb(sb); int error; VFS_STATVFS(vfsp, statp, NULL, error); @@ -719,12 +714,12 @@ linvfs_statfs( } STATIC int -linvfs_remount( +xfs_fs_remount( struct super_block *sb, int *flags, char *options) { - vfs_t *vfsp = LINVFS_GET_VFS(sb); + vfs_t *vfsp = vfs_from_sb(sb); struct xfs_mount_args *args = xfs_args_allocate(sb); int error; @@ -736,18 +731,18 @@ linvfs_remount( } STATIC void -linvfs_freeze_fs( +xfs_fs_lockfs( struct super_block *sb) { - VFS_FREEZE(LINVFS_GET_VFS(sb)); + VFS_FREEZE(vfs_from_sb(sb)); } STATIC int 
-linvfs_show_options( +xfs_fs_show_options( struct seq_file *m, struct vfsmount *mnt) { - struct vfs *vfsp = LINVFS_GET_VFS(mnt->mnt_sb); + struct vfs *vfsp = vfs_from_sb(mnt->mnt_sb); int error; VFS_SHOWARGS(vfsp, m, error); @@ -755,11 +750,11 @@ linvfs_show_options( } STATIC int -linvfs_quotasync( +xfs_fs_quotasync( struct super_block *sb, int type) { - struct vfs *vfsp = LINVFS_GET_VFS(sb); + struct vfs *vfsp = vfs_from_sb(sb); int error; VFS_QUOTACTL(vfsp, Q_XQUOTASYNC, 0, (caddr_t)NULL, error); @@ -767,11 +762,11 @@ linvfs_quotasync( } STATIC int -linvfs_getxstate( +xfs_fs_getxstate( struct super_block *sb, struct fs_quota_stat *fqs) { - struct vfs *vfsp = LINVFS_GET_VFS(sb); + struct vfs *vfsp = vfs_from_sb(sb); int error; VFS_QUOTACTL(vfsp, Q_XGETQSTAT, 0, (caddr_t)fqs, error); @@ -779,12 +774,12 @@ linvfs_getxstate( } STATIC int -linvfs_setxstate( +xfs_fs_setxstate( struct super_block *sb, unsigned int flags, int op) { - struct vfs *vfsp = LINVFS_GET_VFS(sb); + struct vfs *vfsp = vfs_from_sb(sb); int error; VFS_QUOTACTL(vfsp, op, 0, (caddr_t)&flags, error); @@ -792,13 +787,13 @@ linvfs_setxstate( } STATIC int -linvfs_getxquota( +xfs_fs_getxquota( struct super_block *sb, int type, qid_t id, struct fs_disk_quota *fdq) { - struct vfs *vfsp = LINVFS_GET_VFS(sb); + struct vfs *vfsp = vfs_from_sb(sb); int error, getmode; getmode = (type == USRQUOTA) ? Q_XGETQUOTA : @@ -808,13 +803,13 @@ linvfs_getxquota( } STATIC int -linvfs_setxquota( +xfs_fs_setxquota( struct super_block *sb, int type, qid_t id, struct fs_disk_quota *fdq) { - struct vfs *vfsp = LINVFS_GET_VFS(sb); + struct vfs *vfsp = vfs_from_sb(sb); int error, setmode; setmode = (type == USRQUOTA) ? Q_XSETQLIM : @@ -824,21 +819,17 @@ linvfs_setxquota( } STATIC int -linvfs_fill_super( +xfs_fs_fill_super( struct super_block *sb, void *data, int silent) { vnode_t *rootvp; - struct vfs *vfsp = vfs_allocate(); + struct vfs *vfsp = vfs_allocate(sb); struct xfs_mount_args *args = xfs_args_allocate(sb); struct kstatfs statvfs; int error, error2; - vfsp->vfs_super = sb; - LINVFS_SET_VFS(sb, vfsp); - if (sb->s_flags & MS_RDONLY) - vfsp->vfs_flag |= VFS_RDONLY; bhv_insert_all_vfsops(vfsp); VFS_PARSEARGS(vfsp, (char *)data, args, 0, error); @@ -849,10 +840,10 @@ linvfs_fill_super( sb_min_blocksize(sb, BBSIZE); #ifdef CONFIG_XFS_EXPORT - sb->s_export_op = &linvfs_export_ops; + sb->s_export_op = &xfs_export_operations; #endif - sb->s_qcop = &linvfs_qops; - sb->s_op = &linvfs_sops; + sb->s_qcop = &xfs_quotactl_operations; + sb->s_op = &xfs_super_operations; VFS_MOUNT(vfsp, args, NULL, error); if (error) { @@ -876,7 +867,7 @@ linvfs_fill_super( if (error) goto fail_unmount; - sb->s_root = d_alloc_root(LINVFS_GET_IP(rootvp)); + sb->s_root = d_alloc_root(vn_to_inode(rootvp)); if (!sb->s_root) { error = ENOMEM; goto fail_vnrele; @@ -885,7 +876,7 @@ linvfs_fill_super( error = EINVAL; goto fail_vnrele; } - if ((error = linvfs_start_syncd(vfsp))) + if ((error = xfs_fs_start_syncd(vfsp))) goto fail_vnrele; vn_trace_exit(rootvp, __FUNCTION__, (inst_t *)__return_address); @@ -910,41 +901,41 @@ fail_vfsop: } STATIC struct super_block * -linvfs_get_sb( +xfs_fs_get_sb( struct file_system_type *fs_type, int flags, const char *dev_name, void *data) { - return get_sb_bdev(fs_type, flags, dev_name, data, linvfs_fill_super); -} - -STATIC struct super_operations linvfs_sops = { - .alloc_inode = linvfs_alloc_inode, - .destroy_inode = linvfs_destroy_inode, - .write_inode = linvfs_write_inode, - .clear_inode = linvfs_clear_inode, - .put_super = linvfs_put_super, - 
.write_super = linvfs_write_super, - .sync_fs = linvfs_sync_super, - .write_super_lockfs = linvfs_freeze_fs, - .statfs = linvfs_statfs, - .remount_fs = linvfs_remount, - .show_options = linvfs_show_options, + return get_sb_bdev(fs_type, flags, dev_name, data, xfs_fs_fill_super); +} + +STATIC struct super_operations xfs_super_operations = { + .alloc_inode = xfs_fs_alloc_inode, + .destroy_inode = xfs_fs_destroy_inode, + .write_inode = xfs_fs_write_inode, + .clear_inode = xfs_fs_clear_inode, + .put_super = xfs_fs_put_super, + .write_super = xfs_fs_write_super, + .sync_fs = xfs_fs_sync_super, + .write_super_lockfs = xfs_fs_lockfs, + .statfs = xfs_fs_statfs, + .remount_fs = xfs_fs_remount, + .show_options = xfs_fs_show_options, }; -STATIC struct quotactl_ops linvfs_qops = { - .quota_sync = linvfs_quotasync, - .get_xstate = linvfs_getxstate, - .set_xstate = linvfs_setxstate, - .get_xquota = linvfs_getxquota, - .set_xquota = linvfs_setxquota, +STATIC struct quotactl_ops xfs_quotactl_operations = { + .quota_sync = xfs_fs_quotasync, + .get_xstate = xfs_fs_getxstate, + .set_xstate = xfs_fs_setxstate, + .get_xquota = xfs_fs_getxquota, + .set_xquota = xfs_fs_setxquota, }; STATIC struct file_system_type xfs_fs_type = { .owner = THIS_MODULE, .name = "xfs", - .get_sb = linvfs_get_sb, + .get_sb = xfs_fs_get_sb, .kill_sb = kill_block_super, .fs_flags = FS_REQUIRES_DEV, }; @@ -965,7 +956,7 @@ init_xfs_fs( void ) ktrace_init(64); - error = linvfs_init_zones(); + error = xfs_init_zones(); if (error < 0) goto undo_zones; @@ -981,14 +972,13 @@ init_xfs_fs( void ) error = register_filesystem(&xfs_fs_type); if (error) goto undo_register; - XFS_DM_INIT(&xfs_fs_type); return 0; undo_register: xfs_buf_terminate(); undo_buffers: - linvfs_destroy_zones(); + xfs_destroy_zones(); undo_zones: return error; @@ -998,11 +988,10 @@ STATIC void __exit exit_xfs_fs( void ) { vfs_exitquota(); - XFS_DM_EXIT(&xfs_fs_type); unregister_filesystem(&xfs_fs_type); xfs_cleanup(); xfs_buf_terminate(); - linvfs_destroy_zones(); + xfs_destroy_zones(); ktrace_uninit(); } diff --git a/fs/xfs/linux-2.6/xfs_super.h b/fs/xfs/linux-2.6/xfs_super.h index df59408dca0..376b96cb513 100644 --- a/fs/xfs/linux-2.6/xfs_super.h +++ b/fs/xfs/linux-2.6/xfs_super.h @@ -98,11 +98,6 @@ extern void xfs_qm_exit(void); XFS_DMAPI_STRING \ XFS_DBG_STRING /* DBG must be last */ -#define LINVFS_GET_VFS(s) \ - (vfs_t *)((s)->s_fs_info) -#define LINVFS_SET_VFS(s, vfsp) \ - ((s)->s_fs_info = vfsp) - struct xfs_inode; struct xfs_mount; struct xfs_buftarg; @@ -120,6 +115,6 @@ extern int xfs_blkdev_get(struct xfs_mount *, const char *, extern void xfs_blkdev_put(struct block_device *); extern void xfs_blkdev_issue_flush(struct xfs_buftarg *); -extern struct export_operations linvfs_export_ops; +extern struct export_operations xfs_export_operations; #endif /* __XFS_SUPER_H__ */ diff --git a/fs/xfs/linux-2.6/xfs_sysctl.c b/fs/xfs/linux-2.6/xfs_sysctl.c index a0256497242..7079cc83721 100644 --- a/fs/xfs/linux-2.6/xfs_sysctl.c +++ b/fs/xfs/linux-2.6/xfs_sysctl.c @@ -38,8 +38,7 @@ xfs_stats_clear_proc_handler( if (!ret && write && *valp) { printk("XFS Clearing xfsstats\n"); - for (c = 0; c < NR_CPUS; c++) { - if (!cpu_possible(c)) continue; + for_each_cpu(c) { preempt_disable(); /* save vn_active, it's a universal truth! 
*/ vn_active = per_cpu(xfsstats, c).vn_active; diff --git a/fs/xfs/linux-2.6/xfs_vfs.c b/fs/xfs/linux-2.6/xfs_vfs.c index c855d62e534..6f7c9f7a862 100644 --- a/fs/xfs/linux-2.6/xfs_vfs.c +++ b/fs/xfs/linux-2.6/xfs_vfs.c @@ -227,7 +227,8 @@ vfs_freeze( } vfs_t * -vfs_allocate( void ) +vfs_allocate( + struct super_block *sb) { struct vfs *vfsp; @@ -236,9 +237,23 @@ vfs_allocate( void ) INIT_LIST_HEAD(&vfsp->vfs_sync_list); spin_lock_init(&vfsp->vfs_sync_lock); init_waitqueue_head(&vfsp->vfs_wait_single_sync_task); + + vfsp->vfs_super = sb; + sb->s_fs_info = vfsp; + + if (sb->s_flags & MS_RDONLY) + vfsp->vfs_flag |= VFS_RDONLY; + return vfsp; } +vfs_t * +vfs_from_sb( + struct super_block *sb) +{ + return (vfs_t *)sb->s_fs_info; +} + void vfs_deallocate( struct vfs *vfsp) @@ -295,7 +310,7 @@ bhv_remove_all_vfsops( bhv_remove_vfsops(vfsp, VFS_POSITION_DM); if (!freebase) return; - mp = XFS_BHVTOM(bhv_lookup(VFS_BHVHEAD(vfsp), &xfs_vfsops)); + mp = XFS_VFSTOM(vfsp); VFS_REMOVEBHV(vfsp, &mp->m_bhv); xfs_mount_free(mp, 0); } diff --git a/fs/xfs/linux-2.6/xfs_vfs.h b/fs/xfs/linux-2.6/xfs_vfs.h index 57caf9eddee..8fed356db05 100644 --- a/fs/xfs/linux-2.6/xfs_vfs.h +++ b/fs/xfs/linux-2.6/xfs_vfs.h @@ -193,7 +193,8 @@ typedef struct bhv_vfsops { #define vfs_bhv_set_custom(b,o) ( (b)->bhv_custom = (void *)(o)) #define vfs_bhv_clr_custom(b) ( (b)->bhv_custom = NULL ) -extern vfs_t *vfs_allocate(void); +extern vfs_t *vfs_allocate(struct super_block *); +extern vfs_t *vfs_from_sb(struct super_block *); extern void vfs_deallocate(vfs_t *); extern void vfs_insertops(vfs_t *, bhv_vfsops_t *); extern void vfs_insertbhv(vfs_t *, bhv_desc_t *, vfsops_t *, void *); diff --git a/fs/xfs/linux-2.6/xfs_vnode.c b/fs/xfs/linux-2.6/xfs_vnode.c index 260dd8415dd..d27c25b27cc 100644 --- a/fs/xfs/linux-2.6/xfs_vnode.c +++ b/fs/xfs/linux-2.6/xfs_vnode.c @@ -58,7 +58,7 @@ struct vnode * vn_initialize( struct inode *inode) { - struct vnode *vp = LINVFS_GET_VP(inode); + struct vnode *vp = vn_from_inode(inode); XFS_STATS_INC(vn_active); XFS_STATS_INC(vn_alloc); @@ -83,7 +83,7 @@ vn_initialize( vp->v_trace = ktrace_alloc(VNODE_TRACE_SIZE, KM_SLEEP); #endif /* XFS_VNODE_TRACE */ - vn_trace_exit(vp, "vn_initialize", (inst_t *)__return_address); + vn_trace_exit(vp, __FUNCTION__, (inst_t *)__return_address); return vp; } @@ -97,7 +97,7 @@ vn_revalidate_core( struct vnode *vp, vattr_t *vap) { - struct inode *inode = LINVFS_GET_IP(vp); + struct inode *inode = vn_to_inode(vp); inode->i_mode = vap->va_mode; inode->i_nlink = vap->va_nlink; @@ -129,24 +129,31 @@ vn_revalidate_core( * Revalidate the Linux inode from the vnode. */ int -vn_revalidate( - struct vnode *vp) +__vn_revalidate( + struct vnode *vp, + struct vattr *vattr) { - vattr_t va; int error; - vn_trace_entry(vp, "vn_revalidate", (inst_t *)__return_address); - ASSERT(vp->v_fbhv != NULL); - - va.va_mask = XFS_AT_STAT|XFS_AT_XFLAGS; - VOP_GETATTR(vp, &va, 0, NULL, error); - if (!error) { - vn_revalidate_core(vp, &va); + vn_trace_entry(vp, __FUNCTION__, (inst_t *)__return_address); + vattr->va_mask = XFS_AT_STAT | XFS_AT_XFLAGS; + VOP_GETATTR(vp, vattr, 0, NULL, error); + if (likely(!error)) { + vn_revalidate_core(vp, vattr); VUNMODIFY(vp); } return -error; } +int +vn_revalidate( + struct vnode *vp) +{ + vattr_t vattr; + + return __vn_revalidate(vp, &vattr); +} + /* * Add a reference to a referenced vnode. 
*/ @@ -159,7 +166,7 @@ vn_hold( XFS_STATS_INC(vn_hold); VN_LOCK(vp); - inode = igrab(LINVFS_GET_IP(vp)); + inode = igrab(vn_to_inode(vp)); ASSERT(inode); VN_UNLOCK(vp, 0); diff --git a/fs/xfs/linux-2.6/xfs_vnode.h b/fs/xfs/linux-2.6/xfs_vnode.h index 0fe2419461d..06f5845e956 100644 --- a/fs/xfs/linux-2.6/xfs_vnode.h +++ b/fs/xfs/linux-2.6/xfs_vnode.h @@ -116,8 +116,14 @@ typedef enum { /* * Vnode to Linux inode mapping. */ -#define LINVFS_GET_VP(inode) ((vnode_t *)list_entry(inode, vnode_t, v_inode)) -#define LINVFS_GET_IP(vp) (&(vp)->v_inode) +static inline struct vnode *vn_from_inode(struct inode *inode) +{ + return (vnode_t *)list_entry(inode, vnode_t, v_inode); +} +static inline struct inode *vn_to_inode(struct vnode *vnode) +{ + return &vnode->v_inode; +} /* * Vnode flags. @@ -490,6 +496,7 @@ typedef struct vnode_map { (vmap).v_ino = (vp)->v_inode.i_ino; } extern int vn_revalidate(struct vnode *); +extern int __vn_revalidate(struct vnode *, vattr_t *); extern void vn_revalidate_core(struct vnode *, vattr_t *); extern void vn_iowait(struct vnode *vp); @@ -497,7 +504,7 @@ extern void vn_iowake(struct vnode *vp); static inline int vn_count(struct vnode *vp) { - return atomic_read(&LINVFS_GET_IP(vp)->i_count); + return atomic_read(&vn_to_inode(vp)->i_count); } /* @@ -511,16 +518,16 @@ extern vnode_t *vn_hold(struct vnode *); vn_trace_hold(vp, __FILE__, __LINE__, (inst_t *)__return_address)) #define VN_RELE(vp) \ (vn_trace_rele(vp, __FILE__, __LINE__, (inst_t *)__return_address), \ - iput(LINVFS_GET_IP(vp))) + iput(vn_to_inode(vp))) #else #define VN_HOLD(vp) ((void)vn_hold(vp)) -#define VN_RELE(vp) (iput(LINVFS_GET_IP(vp))) +#define VN_RELE(vp) (iput(vn_to_inode(vp))) #endif static inline struct vnode *vn_grab(struct vnode *vp) { - struct inode *inode = igrab(LINVFS_GET_IP(vp)); - return inode ? LINVFS_GET_VP(inode) : NULL; + struct inode *inode = igrab(vn_to_inode(vp)); + return inode ? vn_from_inode(inode) : NULL; } /* @@ -528,7 +535,7 @@ static inline struct vnode *vn_grab(struct vnode *vp) */ #define VNAME(dentry) ((char *) (dentry)->d_name.name) #define VNAMELEN(dentry) ((dentry)->d_name.len) -#define VNAME_TO_VNODE(dentry) (LINVFS_GET_VP((dentry)->d_inode)) +#define VNAME_TO_VNODE(dentry) (vn_from_inode((dentry)->d_inode)) /* * Vnode spinlock manipulation. @@ -557,12 +564,12 @@ static __inline__ void vn_flagclr(struct vnode *vp, uint flag) */ static inline void vn_mark_bad(struct vnode *vp) { - make_bad_inode(LINVFS_GET_IP(vp)); + make_bad_inode(vn_to_inode(vp)); } static inline int VN_BAD(struct vnode *vp) { - return is_bad_inode(LINVFS_GET_IP(vp)); + return is_bad_inode(vn_to_inode(vp)); } /* @@ -587,9 +594,9 @@ static inline void vn_atime_to_time_t(struct vnode *vp, time_t *tt) /* * Some useful predicates. 
*/ -#define VN_MAPPED(vp) mapping_mapped(LINVFS_GET_IP(vp)->i_mapping) -#define VN_CACHED(vp) (LINVFS_GET_IP(vp)->i_mapping->nrpages) -#define VN_DIRTY(vp) mapping_tagged(LINVFS_GET_IP(vp)->i_mapping, \ +#define VN_MAPPED(vp) mapping_mapped(vn_to_inode(vp)->i_mapping) +#define VN_CACHED(vp) (vn_to_inode(vp)->i_mapping->nrpages) +#define VN_DIRTY(vp) mapping_tagged(vn_to_inode(vp)->i_mapping, \ PAGECACHE_TAG_DIRTY) #define VMODIFY(vp) VN_FLAGSET(vp, VMODIFIED) #define VUNMODIFY(vp) VN_FLAGCLR(vp, VMODIFIED) diff --git a/fs/xfs/quota/xfs_dquot_item.c b/fs/xfs/quota/xfs_dquot_item.c index 2ec6b441849..e4e5f05b841 100644 --- a/fs/xfs/quota/xfs_dquot_item.c +++ b/fs/xfs/quota/xfs_dquot_item.c @@ -79,9 +79,11 @@ xfs_qm_dquot_logitem_format( logvec->i_addr = (xfs_caddr_t)&logitem->qli_format; logvec->i_len = sizeof(xfs_dq_logformat_t); + XLOG_VEC_SET_TYPE(logvec, XLOG_REG_TYPE_QFORMAT); logvec++; logvec->i_addr = (xfs_caddr_t)&logitem->qli_dquot->q_core; logvec->i_len = sizeof(xfs_disk_dquot_t); + XLOG_VEC_SET_TYPE(logvec, XLOG_REG_TYPE_DQUOT); ASSERT(2 == logitem->qli_item.li_desc->lid_size); logitem->qli_format.qlf_size = 2; diff --git a/fs/xfs/quota/xfs_qm.c b/fs/xfs/quota/xfs_qm.c index 7c0e39dc618..1fb757ef3f4 100644 --- a/fs/xfs/quota/xfs_qm.c +++ b/fs/xfs/quota/xfs_qm.c @@ -1704,9 +1704,9 @@ xfs_qm_get_rtblks( xfs_qcnt_t *O_rtblks) { xfs_filblks_t rtblks; /* total rt blks */ + xfs_extnum_t idx; /* extent record index */ xfs_ifork_t *ifp; /* inode fork pointer */ xfs_extnum_t nextents; /* number of extent entries */ - xfs_bmbt_rec_t *base; /* base of extent array */ xfs_bmbt_rec_t *ep; /* pointer to an extent entry */ int error; @@ -1717,10 +1717,11 @@ xfs_qm_get_rtblks( return error; } rtblks = 0; - nextents = ifp->if_bytes / sizeof(xfs_bmbt_rec_t); - base = &ifp->if_u1.if_extents[0]; - for (ep = base; ep < &base[nextents]; ep++) + nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t); + for (idx = 0; idx < nextents; idx++) { + ep = xfs_iext_get_ext(ifp, idx); rtblks += xfs_bmbt_get_blockcount(ep); + } *O_rtblks = (xfs_qcnt_t)rtblks; return 0; } @@ -2788,9 +2789,7 @@ xfs_qm_freelist_destroy(xfs_frlist_t *ql) xfs_qm_dqdestroy(dqp); dqp = nextdqp; } - /* - * Don't bother about unlocking. 
- */ + mutex_unlock(&ql->qh_lock); mutex_destroy(&ql->qh_lock); ASSERT(ql->qh_nelems == 0); diff --git a/fs/xfs/quota/xfs_qm_bhv.c b/fs/xfs/quota/xfs_qm_bhv.c index 90402a1c398..6838b36d95a 100644 --- a/fs/xfs/quota/xfs_qm_bhv.c +++ b/fs/xfs/quota/xfs_qm_bhv.c @@ -374,7 +374,7 @@ xfs_qm_exit(void) vfs_bhv_clr_custom(&xfs_qmops); xfs_qm_cleanup_procfs(); if (qm_dqzone) - kmem_cache_destroy(qm_dqzone); + kmem_zone_destroy(qm_dqzone); if (qm_dqtrxzone) - kmem_cache_destroy(qm_dqtrxzone); + kmem_zone_destroy(qm_dqtrxzone); } diff --git a/fs/xfs/support/ktrace.c b/fs/xfs/support/ktrace.c index 841aa4c15b8..addf5a7ea06 100644 --- a/fs/xfs/support/ktrace.c +++ b/fs/xfs/support/ktrace.c @@ -39,8 +39,8 @@ ktrace_init(int zentries) void ktrace_uninit(void) { - kmem_cache_destroy(ktrace_hdr_zone); - kmem_cache_destroy(ktrace_ent_zone); + kmem_zone_destroy(ktrace_hdr_zone); + kmem_zone_destroy(ktrace_ent_zone); } /* diff --git a/fs/xfs/support/uuid.c b/fs/xfs/support/uuid.c index a3d565a6773..e157015c70f 100644 --- a/fs/xfs/support/uuid.c +++ b/fs/xfs/support/uuid.c @@ -21,13 +21,6 @@ static mutex_t uuid_monitor; static int uuid_table_size; static uuid_t *uuid_table; -void -uuid_init(void) -{ - mutex_init(&uuid_monitor); -} - - /* IRIX interpretation of an uuid_t */ typedef struct { __be32 uu_timelow; @@ -50,7 +43,7 @@ uuid_getnodeuniq(uuid_t *uuid, int fsid [2]) fsid[0] = (be16_to_cpu(uup->uu_clockseq) << 16) | be16_to_cpu(uup->uu_timemid); - fsid[1] = be16_to_cpu(uup->uu_timelow); + fsid[1] = be32_to_cpu(uup->uu_timelow); } void @@ -139,3 +132,9 @@ uuid_table_remove(uuid_t *uuid) ASSERT(i < uuid_table_size); mutex_unlock(&uuid_monitor); } + +void +uuid_init(void) +{ + mutex_init(&uuid_monitor); +} diff --git a/fs/xfs/xfs_acl.h b/fs/xfs/xfs_acl.h index f9315bc960c..538d0d65b04 100644 --- a/fs/xfs/xfs_acl.h +++ b/fs/xfs/xfs_acl.h @@ -55,8 +55,8 @@ struct xfs_inode; extern struct kmem_zone *xfs_acl_zone; #define xfs_acl_zone_init(zone, name) \ - (zone) = kmem_zone_init(sizeof(xfs_acl_t), name) -#define xfs_acl_zone_destroy(zone) kmem_cache_destroy(zone) + (zone) = kmem_zone_init(sizeof(xfs_acl_t), (name)) +#define xfs_acl_zone_destroy(zone) kmem_zone_destroy(zone) extern int xfs_acl_inherit(struct vnode *, struct vattr *, xfs_acl_t *); extern int xfs_acl_iaccess(struct xfs_inode *, mode_t, cred_t *); diff --git a/fs/xfs/xfs_attr.c b/fs/xfs/xfs_attr.c index e5e91e9c7e8..093fac476bd 100644 --- a/fs/xfs/xfs_attr.c +++ b/fs/xfs/xfs_attr.c @@ -1127,8 +1127,7 @@ xfs_attr_leaf_list(xfs_attr_list_context_t *context) return(error); ASSERT(bp != NULL); leaf = bp->data; - if (unlikely(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) - != XFS_ATTR_LEAF_MAGIC)) { + if (unlikely(be16_to_cpu(leaf->hdr.info.magic) != XFS_ATTR_LEAF_MAGIC)) { XFS_CORRUPTION_ERROR("xfs_attr_leaf_list", XFS_ERRLEVEL_LOW, context->dp->i_mount, leaf); xfs_da_brelse(NULL, bp); @@ -1541,8 +1540,8 @@ xfs_attr_node_removename(xfs_da_args_t *args) XFS_ATTR_FORK); if (error) goto out; - ASSERT(INT_GET(((xfs_attr_leafblock_t *) - bp->data)->hdr.info.magic, ARCH_CONVERT) + ASSERT(be16_to_cpu(((xfs_attr_leafblock_t *) + bp->data)->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC); if ((forkoff = xfs_attr_shortform_allfit(bp, dp))) { @@ -1763,7 +1762,7 @@ xfs_attr_node_list(xfs_attr_list_context_t *context) return(error); if (bp) { node = bp->data; - switch (INT_GET(node->hdr.info.magic, ARCH_CONVERT)) { + switch (be16_to_cpu(node->hdr.info.magic)) { case XFS_DA_NODE_MAGIC: xfs_attr_trace_l_cn("wrong blk", context, node); xfs_da_brelse(NULL, bp); @@ -1771,18 
+1770,14 @@ xfs_attr_node_list(xfs_attr_list_context_t *context) break; case XFS_ATTR_LEAF_MAGIC: leaf = bp->data; - if (cursor->hashval > - INT_GET(leaf->entries[ - INT_GET(leaf->hdr.count, - ARCH_CONVERT)-1].hashval, - ARCH_CONVERT)) { + if (cursor->hashval > be32_to_cpu(leaf->entries[ + be16_to_cpu(leaf->hdr.count)-1].hashval)) { xfs_attr_trace_l_cl("wrong blk", context, leaf); xfs_da_brelse(NULL, bp); bp = NULL; } else if (cursor->hashval <= - INT_GET(leaf->entries[0].hashval, - ARCH_CONVERT)) { + be32_to_cpu(leaf->entries[0].hashval)) { xfs_attr_trace_l_cl("maybe wrong blk", context, leaf); xfs_da_brelse(NULL, bp); @@ -1817,10 +1812,10 @@ xfs_attr_node_list(xfs_attr_list_context_t *context) return(XFS_ERROR(EFSCORRUPTED)); } node = bp->data; - if (INT_GET(node->hdr.info.magic, ARCH_CONVERT) + if (be16_to_cpu(node->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC) break; - if (unlikely(INT_GET(node->hdr.info.magic, ARCH_CONVERT) + if (unlikely(be16_to_cpu(node->hdr.info.magic) != XFS_DA_NODE_MAGIC)) { XFS_CORRUPTION_ERROR("xfs_attr_node_list(3)", XFS_ERRLEVEL_LOW, @@ -1830,19 +1825,17 @@ xfs_attr_node_list(xfs_attr_list_context_t *context) return(XFS_ERROR(EFSCORRUPTED)); } btree = node->btree; - for (i = 0; - i < INT_GET(node->hdr.count, ARCH_CONVERT); + for (i = 0; i < be16_to_cpu(node->hdr.count); btree++, i++) { if (cursor->hashval - <= INT_GET(btree->hashval, - ARCH_CONVERT)) { - cursor->blkno = INT_GET(btree->before, ARCH_CONVERT); + <= be32_to_cpu(btree->hashval)) { + cursor->blkno = be32_to_cpu(btree->before); xfs_attr_trace_l_cb("descending", context, btree); break; } } - if (i == INT_GET(node->hdr.count, ARCH_CONVERT)) { + if (i == be16_to_cpu(node->hdr.count)) { xfs_da_brelse(NULL, bp); return(0); } @@ -1858,7 +1851,7 @@ xfs_attr_node_list(xfs_attr_list_context_t *context) */ for (;;) { leaf = bp->data; - if (unlikely(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) + if (unlikely(be16_to_cpu(leaf->hdr.info.magic) != XFS_ATTR_LEAF_MAGIC)) { XFS_CORRUPTION_ERROR("xfs_attr_node_list(4)", XFS_ERRLEVEL_LOW, @@ -1869,7 +1862,7 @@ xfs_attr_node_list(xfs_attr_list_context_t *context) error = xfs_attr_leaf_list_int(bp, context); if (error || !leaf->hdr.info.forw) break; /* not really an error, buffer full or EOF */ - cursor->blkno = INT_GET(leaf->hdr.info.forw, ARCH_CONVERT); + cursor->blkno = be32_to_cpu(leaf->hdr.info.forw); xfs_da_brelse(NULL, bp); error = xfs_da_read_buf(NULL, context->dp, cursor->blkno, -1, &bp, XFS_ATTR_FORK); @@ -2232,9 +2225,10 @@ xfs_attr_trace_l_cn(char *where, struct xfs_attr_list_context *context, : 0, (__psunsigned_t)context->dupcnt, (__psunsigned_t)context->flags, - (__psunsigned_t)INT_GET(node->hdr.count, ARCH_CONVERT), - (__psunsigned_t)INT_GET(node->btree[0].hashval, ARCH_CONVERT), - (__psunsigned_t)INT_GET(node->btree[INT_GET(node->hdr.count, ARCH_CONVERT)-1].hashval, ARCH_CONVERT)); + (__psunsigned_t)be16_to_cpu(node->hdr.count), + (__psunsigned_t)be32_to_cpu(node->btree[0].hashval), + (__psunsigned_t)be32_to_cpu(node->btree[ + be16_to_cpu(node->hdr.count)-1].hashval)); } /* @@ -2261,8 +2255,8 @@ xfs_attr_trace_l_cb(char *where, struct xfs_attr_list_context *context, : 0, (__psunsigned_t)context->dupcnt, (__psunsigned_t)context->flags, - (__psunsigned_t)INT_GET(btree->hashval, ARCH_CONVERT), - (__psunsigned_t)INT_GET(btree->before, ARCH_CONVERT), + (__psunsigned_t)be32_to_cpu(btree->hashval), + (__psunsigned_t)be32_to_cpu(btree->before), (__psunsigned_t)NULL); } @@ -2290,9 +2284,10 @@ xfs_attr_trace_l_cl(char *where, struct xfs_attr_list_context *context, : 0, 
(__psunsigned_t)context->dupcnt, (__psunsigned_t)context->flags, - (__psunsigned_t)INT_GET(leaf->hdr.count, ARCH_CONVERT), - (__psunsigned_t)INT_GET(leaf->entries[0].hashval, ARCH_CONVERT), - (__psunsigned_t)INT_GET(leaf->entries[INT_GET(leaf->hdr.count, ARCH_CONVERT)-1].hashval, ARCH_CONVERT)); + (__psunsigned_t)be16_to_cpu(leaf->hdr.count), + (__psunsigned_t)be32_to_cpu(leaf->entries[0].hashval), + (__psunsigned_t)be32_to_cpu(leaf->entries[ + be16_to_cpu(leaf->hdr.count)-1].hashval)); } /* @@ -2522,7 +2517,7 @@ attr_user_capable( struct vnode *vp, cred_t *cred) { - struct inode *inode = LINVFS_GET_IP(vp); + struct inode *inode = vn_to_inode(vp); if (IS_IMMUTABLE(inode) || IS_APPEND(inode)) return -EPERM; @@ -2540,7 +2535,7 @@ attr_trusted_capable( struct vnode *vp, cred_t *cred) { - struct inode *inode = LINVFS_GET_IP(vp); + struct inode *inode = vn_to_inode(vp); if (IS_IMMUTABLE(inode) || IS_APPEND(inode)) return -EPERM; diff --git a/fs/xfs/xfs_attr_leaf.c b/fs/xfs/xfs_attr_leaf.c index fe91eac4e2a..717682747bd 100644 --- a/fs/xfs/xfs_attr_leaf.c +++ b/fs/xfs/xfs_attr_leaf.c @@ -194,7 +194,7 @@ xfs_attr_shortform_create(xfs_da_args_t *args) xfs_idata_realloc(dp, sizeof(*hdr), XFS_ATTR_FORK); hdr = (xfs_attr_sf_hdr_t *)ifp->if_u1.if_data; hdr->count = 0; - INT_SET(hdr->totsize, ARCH_CONVERT, sizeof(*hdr)); + hdr->totsize = cpu_to_be16(sizeof(*hdr)); xfs_trans_log_inode(args->trans, dp, XFS_ILOG_CORE | XFS_ILOG_ADATA); } @@ -224,8 +224,7 @@ xfs_attr_shortform_add(xfs_da_args_t *args, int forkoff) ASSERT(ifp->if_flags & XFS_IFINLINE); sf = (xfs_attr_shortform_t *)ifp->if_u1.if_data; sfe = &sf->list[0]; - for (i = 0; i < INT_GET(sf->hdr.count, ARCH_CONVERT); - sfe = XFS_ATTR_SF_NEXTENTRY(sfe), i++) { + for (i = 0; i < sf->hdr.count; sfe = XFS_ATTR_SF_NEXTENTRY(sfe), i++) { #ifdef DEBUG if (sfe->namelen != args->namelen) continue; @@ -248,13 +247,13 @@ xfs_attr_shortform_add(xfs_da_args_t *args, int forkoff) sfe = (xfs_attr_sf_entry_t *)((char *)sf + offset); sfe->namelen = args->namelen; - INT_SET(sfe->valuelen, ARCH_CONVERT, args->valuelen); + sfe->valuelen = args->valuelen; sfe->flags = (args->flags & ATTR_SECURE) ? XFS_ATTR_SECURE : ((args->flags & ATTR_ROOT) ? 
XFS_ATTR_ROOT : 0); memcpy(sfe->nameval, args->name, args->namelen); memcpy(&sfe->nameval[args->namelen], args->value, args->valuelen); - INT_MOD(sf->hdr.count, ARCH_CONVERT, 1); - INT_MOD(sf->hdr.totsize, ARCH_CONVERT, size); + sf->hdr.count++; + be16_add(&sf->hdr.totsize, size); xfs_trans_log_inode(args->trans, dp, XFS_ILOG_CORE | XFS_ILOG_ADATA); xfs_sbversion_add_attr2(mp, args->trans); @@ -277,7 +276,7 @@ xfs_attr_shortform_remove(xfs_da_args_t *args) base = sizeof(xfs_attr_sf_hdr_t); sf = (xfs_attr_shortform_t *)dp->i_afp->if_u1.if_data; sfe = &sf->list[0]; - end = INT_GET(sf->hdr.count, ARCH_CONVERT); + end = sf->hdr.count; for (i = 0; i < end; sfe = XFS_ATTR_SF_NEXTENTRY(sfe), base += size, i++) { size = XFS_ATTR_SF_ENTSIZE(sfe); @@ -300,11 +299,11 @@ xfs_attr_shortform_remove(xfs_da_args_t *args) * Fix up the attribute fork data, covering the hole */ end = base + size; - totsize = INT_GET(sf->hdr.totsize, ARCH_CONVERT); + totsize = be16_to_cpu(sf->hdr.totsize); if (end != totsize) memmove(&((char *)sf)[base], &((char *)sf)[end], totsize - end); - INT_MOD(sf->hdr.count, ARCH_CONVERT, -1); - INT_MOD(sf->hdr.totsize, ARCH_CONVERT, -size); + sf->hdr.count--; + be16_add(&sf->hdr.totsize, -size); /* * Fix up the start offset of the attribute fork @@ -360,7 +359,7 @@ xfs_attr_shortform_lookup(xfs_da_args_t *args) ASSERT(ifp->if_flags & XFS_IFINLINE); sf = (xfs_attr_shortform_t *)ifp->if_u1.if_data; sfe = &sf->list[0]; - for (i = 0; i < INT_GET(sf->hdr.count, ARCH_CONVERT); + for (i = 0; i < sf->hdr.count; sfe = XFS_ATTR_SF_NEXTENTRY(sfe), i++) { if (sfe->namelen != args->namelen) continue; @@ -391,7 +390,7 @@ xfs_attr_shortform_getvalue(xfs_da_args_t *args) ASSERT(args->dp->i_d.di_aformat == XFS_IFINLINE); sf = (xfs_attr_shortform_t *)args->dp->i_afp->if_u1.if_data; sfe = &sf->list[0]; - for (i = 0; i < INT_GET(sf->hdr.count, ARCH_CONVERT); + for (i = 0; i < sf->hdr.count; sfe = XFS_ATTR_SF_NEXTENTRY(sfe), i++) { if (sfe->namelen != args->namelen) continue; @@ -404,14 +403,14 @@ xfs_attr_shortform_getvalue(xfs_da_args_t *args) ((sfe->flags & XFS_ATTR_ROOT) != 0)) continue; if (args->flags & ATTR_KERNOVAL) { - args->valuelen = INT_GET(sfe->valuelen, ARCH_CONVERT); + args->valuelen = sfe->valuelen; return(XFS_ERROR(EEXIST)); } - if (args->valuelen < INT_GET(sfe->valuelen, ARCH_CONVERT)) { - args->valuelen = INT_GET(sfe->valuelen, ARCH_CONVERT); + if (args->valuelen < sfe->valuelen) { + args->valuelen = sfe->valuelen; return(XFS_ERROR(ERANGE)); } - args->valuelen = INT_GET(sfe->valuelen, ARCH_CONVERT); + args->valuelen = sfe->valuelen; memcpy(args->value, &sfe->nameval[args->namelen], args->valuelen); return(XFS_ERROR(EEXIST)); @@ -438,7 +437,7 @@ xfs_attr_shortform_to_leaf(xfs_da_args_t *args) dp = args->dp; ifp = dp->i_afp; sf = (xfs_attr_shortform_t *)ifp->if_u1.if_data; - size = INT_GET(sf->hdr.totsize, ARCH_CONVERT); + size = be16_to_cpu(sf->hdr.totsize); tmpbuffer = kmem_alloc(size, KM_SLEEP); ASSERT(tmpbuffer != NULL); memcpy(tmpbuffer, ifp->if_u1.if_data, size); @@ -481,11 +480,11 @@ xfs_attr_shortform_to_leaf(xfs_da_args_t *args) nargs.oknoent = 1; sfe = &sf->list[0]; - for (i = 0; i < INT_GET(sf->hdr.count, ARCH_CONVERT); i++) { + for (i = 0; i < sf->hdr.count; i++) { nargs.name = (char *)sfe->nameval; nargs.namelen = sfe->namelen; nargs.value = (char *)&sfe->nameval[nargs.namelen]; - nargs.valuelen = INT_GET(sfe->valuelen, ARCH_CONVERT); + nargs.valuelen = sfe->valuelen; nargs.hashval = xfs_da_hashname((char *)sfe->nameval, sfe->namelen); nargs.flags = (sfe->flags & 
XFS_ATTR_SECURE) ? ATTR_SECURE : @@ -514,11 +513,9 @@ xfs_attr_shortform_compare(const void *a, const void *b) sa = (xfs_attr_sf_sort_t *)a; sb = (xfs_attr_sf_sort_t *)b; - if (INT_GET(sa->hash, ARCH_CONVERT) - < INT_GET(sb->hash, ARCH_CONVERT)) { + if (sa->hash < sb->hash) { return(-1); - } else if (INT_GET(sa->hash, ARCH_CONVERT) - > INT_GET(sb->hash, ARCH_CONVERT)) { + } else if (sa->hash > sb->hash) { return(1); } else { return(sa->entno - sb->entno); @@ -560,10 +557,8 @@ xfs_attr_shortform_list(xfs_attr_list_context_t *context) * If the buffer is large enough, do not bother with sorting. * Note the generous fudge factor of 16 overhead bytes per entry. */ - if ((dp->i_afp->if_bytes + INT_GET(sf->hdr.count, ARCH_CONVERT) * 16) - < context->bufsize) { - for (i = 0, sfe = &sf->list[0]; - i < INT_GET(sf->hdr.count, ARCH_CONVERT); i++) { + if ((dp->i_afp->if_bytes + sf->hdr.count * 16) < context->bufsize) { + for (i = 0, sfe = &sf->list[0]; i < sf->hdr.count; i++) { attrnames_t *namesp; if (((context->flags & ATTR_SECURE) != 0) != @@ -584,14 +579,13 @@ xfs_attr_shortform_list(xfs_attr_list_context_t *context) if (context->flags & ATTR_KERNOVAL) { ASSERT(context->flags & ATTR_KERNAMELS); context->count += namesp->attr_namelen + - INT_GET(sfe->namelen, ARCH_CONVERT) + 1; + sfe->namelen + 1; } else { if (xfs_attr_put_listent(context, namesp, (char *)sfe->nameval, (int)sfe->namelen, - (int)INT_GET(sfe->valuelen, - ARCH_CONVERT))) + (int)sfe->valuelen)) break; } sfe = XFS_ATTR_SF_NEXTENTRY(sfe); @@ -603,7 +597,7 @@ xfs_attr_shortform_list(xfs_attr_list_context_t *context) /* * It didn't all fit, so we have to sort everything on hashval. */ - sbsize = INT_GET(sf->hdr.count, ARCH_CONVERT) * sizeof(*sbuf); + sbsize = sf->hdr.count * sizeof(*sbuf); sbp = sbuf = kmem_alloc(sbsize, KM_SLEEP); /* @@ -611,8 +605,7 @@ xfs_attr_shortform_list(xfs_attr_list_context_t *context) * the relevant info from only those that match into a buffer. */ nsbuf = 0; - for (i = 0, sfe = &sf->list[0]; - i < INT_GET(sf->hdr.count, ARCH_CONVERT); i++) { + for (i = 0, sfe = &sf->list[0]; i < sf->hdr.count; i++) { if (unlikely( ((char *)sfe < (char *)sf) || ((char *)sfe >= ((char *)sf + dp->i_afp->if_bytes)))) { @@ -636,8 +629,7 @@ xfs_attr_shortform_list(xfs_attr_list_context_t *context) continue; } sbp->entno = i; - INT_SET(sbp->hash, ARCH_CONVERT, - xfs_da_hashname((char *)sfe->nameval, sfe->namelen)); + sbp->hash = xfs_da_hashname((char *)sfe->nameval, sfe->namelen); sbp->name = (char *)sfe->nameval; sbp->namelen = sfe->namelen; /* These are bytes, and both on-disk, don't endian-flip */ @@ -660,12 +652,12 @@ xfs_attr_shortform_list(xfs_attr_list_context_t *context) cursor->initted = 1; cursor->blkno = 0; for (sbp = sbuf, i = 0; i < nsbuf; i++, sbp++) { - if (INT_GET(sbp->hash, ARCH_CONVERT) == cursor->hashval) { + if (sbp->hash == cursor->hashval) { if (cursor->offset == count) { break; } count++; - } else if (INT_GET(sbp->hash, ARCH_CONVERT) > cursor->hashval) { + } else if (sbp->hash > cursor->hashval) { break; } } @@ -685,8 +677,8 @@ xfs_attr_shortform_list(xfs_attr_list_context_t *context) ((sbp->flags & XFS_ATTR_ROOT) ? 
&attr_trusted : &attr_user); - if (cursor->hashval != INT_GET(sbp->hash, ARCH_CONVERT)) { - cursor->hashval = INT_GET(sbp->hash, ARCH_CONVERT); + if (cursor->hashval != sbp->hash) { + cursor->hashval = sbp->hash; cursor->offset = 0; } if (context->flags & ATTR_KERNOVAL) { @@ -696,7 +688,7 @@ xfs_attr_shortform_list(xfs_attr_list_context_t *context) } else { if (xfs_attr_put_listent(context, namesp, sbp->name, sbp->namelen, - INT_GET(sbp->valuelen, ARCH_CONVERT))) + sbp->valuelen)) break; } cursor->offset++; @@ -720,12 +712,11 @@ xfs_attr_shortform_allfit(xfs_dabuf_t *bp, xfs_inode_t *dp) int bytes, i; leaf = bp->data; - ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) - == XFS_ATTR_LEAF_MAGIC); + ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC); entry = &leaf->entries[0]; bytes = sizeof(struct xfs_attr_sf_hdr); - for (i = 0; i < INT_GET(leaf->hdr.count, ARCH_CONVERT); entry++, i++) { + for (i = 0; i < be16_to_cpu(leaf->hdr.count); entry++, i++) { if (entry->flags & XFS_ATTR_INCOMPLETE) continue; /* don't copy partial entries */ if (!(entry->flags & XFS_ATTR_LOCAL)) @@ -733,11 +724,11 @@ xfs_attr_shortform_allfit(xfs_dabuf_t *bp, xfs_inode_t *dp) name_loc = XFS_ATTR_LEAF_NAME_LOCAL(leaf, i); if (name_loc->namelen >= XFS_ATTR_SF_ENTSIZE_MAX) return(0); - if (INT_GET(name_loc->valuelen, ARCH_CONVERT) >= XFS_ATTR_SF_ENTSIZE_MAX) + if (be16_to_cpu(name_loc->valuelen) >= XFS_ATTR_SF_ENTSIZE_MAX) return(0); bytes += sizeof(struct xfs_attr_sf_entry)-1 + name_loc->namelen - + INT_GET(name_loc->valuelen, ARCH_CONVERT); + + be16_to_cpu(name_loc->valuelen); } if ((dp->i_mount->m_flags & XFS_MOUNT_ATTR2) && (bytes == sizeof(struct xfs_attr_sf_hdr))) @@ -766,8 +757,7 @@ xfs_attr_leaf_to_shortform(xfs_dabuf_t *bp, xfs_da_args_t *args, int forkoff) ASSERT(bp != NULL); memcpy(tmpbuffer, bp->data, XFS_LBSIZE(dp->i_mount)); leaf = (xfs_attr_leafblock_t *)tmpbuffer; - ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) - == XFS_ATTR_LEAF_MAGIC); + ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC); memset(bp->data, 0, XFS_LBSIZE(dp->i_mount)); /* @@ -810,7 +800,7 @@ xfs_attr_leaf_to_shortform(xfs_dabuf_t *bp, xfs_da_args_t *args, int forkoff) nargs.trans = args->trans; nargs.oknoent = 1; entry = &leaf->entries[0]; - for (i = 0; i < INT_GET(leaf->hdr.count, ARCH_CONVERT); entry++, i++) { + for (i = 0; i < be16_to_cpu(leaf->hdr.count); entry++, i++) { if (entry->flags & XFS_ATTR_INCOMPLETE) continue; /* don't copy partial entries */ if (!entry->nameidx) @@ -820,8 +810,8 @@ xfs_attr_leaf_to_shortform(xfs_dabuf_t *bp, xfs_da_args_t *args, int forkoff) nargs.name = (char *)name_loc->nameval; nargs.namelen = name_loc->namelen; nargs.value = (char *)&name_loc->nameval[nargs.namelen]; - nargs.valuelen = INT_GET(name_loc->valuelen, ARCH_CONVERT); - nargs.hashval = INT_GET(entry->hashval, ARCH_CONVERT); + nargs.valuelen = be16_to_cpu(name_loc->valuelen); + nargs.hashval = be32_to_cpu(entry->hashval); nargs.flags = (entry->flags & XFS_ATTR_SECURE) ? ATTR_SECURE : ((entry->flags & XFS_ATTR_ROOT) ? 
ATTR_ROOT : 0); xfs_attr_shortform_add(&nargs, forkoff); @@ -875,13 +865,12 @@ xfs_attr_leaf_to_node(xfs_da_args_t *args) goto out; node = bp1->data; leaf = bp2->data; - ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) - == XFS_ATTR_LEAF_MAGIC); + ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC); /* both on-disk, don't endian-flip twice */ node->btree[0].hashval = - leaf->entries[INT_GET(leaf->hdr.count, ARCH_CONVERT)-1 ].hashval; - INT_SET(node->btree[0].before, ARCH_CONVERT, blkno); - INT_SET(node->hdr.count, ARCH_CONVERT, 1); + leaf->entries[be16_to_cpu(leaf->hdr.count)-1 ].hashval; + node->btree[0].before = cpu_to_be32(blkno); + node->hdr.count = cpu_to_be16(1); xfs_da_log_buf(args->trans, bp1, 0, XFS_LBSIZE(dp->i_mount) - 1); error = 0; out: @@ -920,19 +909,16 @@ xfs_attr_leaf_create(xfs_da_args_t *args, xfs_dablk_t blkno, xfs_dabuf_t **bpp) leaf = bp->data; memset((char *)leaf, 0, XFS_LBSIZE(dp->i_mount)); hdr = &leaf->hdr; - INT_SET(hdr->info.magic, ARCH_CONVERT, XFS_ATTR_LEAF_MAGIC); - INT_SET(hdr->firstused, ARCH_CONVERT, XFS_LBSIZE(dp->i_mount)); + hdr->info.magic = cpu_to_be16(XFS_ATTR_LEAF_MAGIC); + hdr->firstused = cpu_to_be16(XFS_LBSIZE(dp->i_mount)); if (!hdr->firstused) { - INT_SET(hdr->firstused, ARCH_CONVERT, + hdr->firstused = cpu_to_be16( XFS_LBSIZE(dp->i_mount) - XFS_ATTR_LEAF_NAME_ALIGN); } - INT_SET(hdr->freemap[0].base, ARCH_CONVERT, - sizeof(xfs_attr_leaf_hdr_t)); - INT_SET(hdr->freemap[0].size, ARCH_CONVERT, - INT_GET(hdr->firstused, ARCH_CONVERT) - - INT_GET(hdr->freemap[0].base, - ARCH_CONVERT)); + hdr->freemap[0].base = cpu_to_be16(sizeof(xfs_attr_leaf_hdr_t)); + hdr->freemap[0].size = cpu_to_be16(be16_to_cpu(hdr->firstused) - + sizeof(xfs_attr_leaf_hdr_t)); xfs_da_log_buf(args->trans, bp, 0, XFS_LBSIZE(dp->i_mount) - 1); @@ -1004,10 +990,9 @@ xfs_attr_leaf_add(xfs_dabuf_t *bp, xfs_da_args_t *args) int tablesize, entsize, sum, tmp, i; leaf = bp->data; - ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) - == XFS_ATTR_LEAF_MAGIC); + ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC); ASSERT((args->index >= 0) - && (args->index <= INT_GET(leaf->hdr.count, ARCH_CONVERT))); + && (args->index <= be16_to_cpu(leaf->hdr.count))); hdr = &leaf->hdr; entsize = xfs_attr_leaf_newentsize(args->namelen, args->valuelen, args->trans->t_mountp->m_sb.sb_blocksize, NULL); @@ -1016,26 +1001,25 @@ xfs_attr_leaf_add(xfs_dabuf_t *bp, xfs_da_args_t *args) * Search through freemap for first-fit on new name length. 
* (may need to figure in size of entry struct too) */ - tablesize = (INT_GET(hdr->count, ARCH_CONVERT) + 1) + tablesize = (be16_to_cpu(hdr->count) + 1) * sizeof(xfs_attr_leaf_entry_t) + sizeof(xfs_attr_leaf_hdr_t); map = &hdr->freemap[XFS_ATTR_LEAF_MAPSIZE-1]; for (sum = 0, i = XFS_ATTR_LEAF_MAPSIZE-1; i >= 0; map--, i--) { - if (tablesize > INT_GET(hdr->firstused, ARCH_CONVERT)) { - sum += INT_GET(map->size, ARCH_CONVERT); + if (tablesize > be16_to_cpu(hdr->firstused)) { + sum += be16_to_cpu(map->size); continue; } if (!map->size) continue; /* no space in this map */ tmp = entsize; - if (INT_GET(map->base, ARCH_CONVERT) - < INT_GET(hdr->firstused, ARCH_CONVERT)) + if (be16_to_cpu(map->base) < be16_to_cpu(hdr->firstused)) tmp += sizeof(xfs_attr_leaf_entry_t); - if (INT_GET(map->size, ARCH_CONVERT) >= tmp) { + if (be16_to_cpu(map->size) >= tmp) { tmp = xfs_attr_leaf_add_work(bp, args, i); return(tmp); } - sum += INT_GET(map->size, ARCH_CONVERT); + sum += be16_to_cpu(map->size); } /* @@ -1056,7 +1040,7 @@ xfs_attr_leaf_add(xfs_dabuf_t *bp, xfs_da_args_t *args) * After compaction, the block is guaranteed to have only one * free region, in freemap[0]. If it is not big enough, give up. */ - if (INT_GET(hdr->freemap[0].size, ARCH_CONVERT) + if (be16_to_cpu(hdr->freemap[0].size) < (entsize + sizeof(xfs_attr_leaf_entry_t))) return(XFS_ERROR(ENOSPC)); @@ -1079,45 +1063,42 @@ xfs_attr_leaf_add_work(xfs_dabuf_t *bp, xfs_da_args_t *args, int mapindex) int tmp, i; leaf = bp->data; - ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) - == XFS_ATTR_LEAF_MAGIC); + ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC); hdr = &leaf->hdr; ASSERT((mapindex >= 0) && (mapindex < XFS_ATTR_LEAF_MAPSIZE)); - ASSERT((args->index >= 0) - && (args->index <= INT_GET(hdr->count, ARCH_CONVERT))); + ASSERT((args->index >= 0) && (args->index <= be16_to_cpu(hdr->count))); /* * Force open some space in the entry array and fill it in. */ entry = &leaf->entries[args->index]; - if (args->index < INT_GET(hdr->count, ARCH_CONVERT)) { - tmp = INT_GET(hdr->count, ARCH_CONVERT) - args->index; + if (args->index < be16_to_cpu(hdr->count)) { + tmp = be16_to_cpu(hdr->count) - args->index; tmp *= sizeof(xfs_attr_leaf_entry_t); memmove((char *)(entry+1), (char *)entry, tmp); xfs_da_log_buf(args->trans, bp, XFS_DA_LOGRANGE(leaf, entry, tmp + sizeof(*entry))); } - INT_MOD(hdr->count, ARCH_CONVERT, 1); + be16_add(&hdr->count, 1); /* * Allocate space for the new string (at the end of the run). 
*/ map = &hdr->freemap[mapindex]; mp = args->trans->t_mountp; - ASSERT(INT_GET(map->base, ARCH_CONVERT) < XFS_LBSIZE(mp)); - ASSERT((INT_GET(map->base, ARCH_CONVERT) & 0x3) == 0); - ASSERT(INT_GET(map->size, ARCH_CONVERT) >= + ASSERT(be16_to_cpu(map->base) < XFS_LBSIZE(mp)); + ASSERT((be16_to_cpu(map->base) & 0x3) == 0); + ASSERT(be16_to_cpu(map->size) >= xfs_attr_leaf_newentsize(args->namelen, args->valuelen, mp->m_sb.sb_blocksize, NULL)); - ASSERT(INT_GET(map->size, ARCH_CONVERT) < XFS_LBSIZE(mp)); - ASSERT((INT_GET(map->size, ARCH_CONVERT) & 0x3) == 0); - INT_MOD(map->size, ARCH_CONVERT, + ASSERT(be16_to_cpu(map->size) < XFS_LBSIZE(mp)); + ASSERT((be16_to_cpu(map->size) & 0x3) == 0); + be16_add(&map->size, -xfs_attr_leaf_newentsize(args->namelen, args->valuelen, mp->m_sb.sb_blocksize, &tmp)); - INT_SET(entry->nameidx, ARCH_CONVERT, - INT_GET(map->base, ARCH_CONVERT) - + INT_GET(map->size, ARCH_CONVERT)); - INT_SET(entry->hashval, ARCH_CONVERT, args->hashval); + entry->nameidx = cpu_to_be16(be16_to_cpu(map->base) + + be16_to_cpu(map->size)); + entry->hashval = cpu_to_be32(args->hashval); entry->flags = tmp ? XFS_ATTR_LOCAL : 0; entry->flags |= (args->flags & ATTR_SECURE) ? XFS_ATTR_SECURE : ((args->flags & ATTR_ROOT) ? XFS_ATTR_ROOT : 0); @@ -1130,12 +1111,10 @@ xfs_attr_leaf_add_work(xfs_dabuf_t *bp, xfs_da_args_t *args, int mapindex) } xfs_da_log_buf(args->trans, bp, XFS_DA_LOGRANGE(leaf, entry, sizeof(*entry))); - ASSERT((args->index == 0) || (INT_GET(entry->hashval, ARCH_CONVERT) - >= INT_GET((entry-1)->hashval, - ARCH_CONVERT))); - ASSERT((args->index == INT_GET(hdr->count, ARCH_CONVERT)-1) || - (INT_GET(entry->hashval, ARCH_CONVERT) - <= (INT_GET((entry+1)->hashval, ARCH_CONVERT)))); + ASSERT((args->index == 0) || + (be32_to_cpu(entry->hashval) >= be32_to_cpu((entry-1)->hashval))); + ASSERT((args->index == be16_to_cpu(hdr->count)-1) || + (be32_to_cpu(entry->hashval) <= be32_to_cpu((entry+1)->hashval))); /* * Copy the attribute name and value into the new space. 
@@ -1149,10 +1128,10 @@ xfs_attr_leaf_add_work(xfs_dabuf_t *bp, xfs_da_args_t *args, int mapindex) if (entry->flags & XFS_ATTR_LOCAL) { name_loc = XFS_ATTR_LEAF_NAME_LOCAL(leaf, args->index); name_loc->namelen = args->namelen; - INT_SET(name_loc->valuelen, ARCH_CONVERT, args->valuelen); + name_loc->valuelen = cpu_to_be16(args->valuelen); memcpy((char *)name_loc->nameval, args->name, args->namelen); memcpy((char *)&name_loc->nameval[args->namelen], args->value, - INT_GET(name_loc->valuelen, ARCH_CONVERT)); + be16_to_cpu(name_loc->valuelen)); } else { name_rmt = XFS_ATTR_LEAF_NAME_REMOTE(leaf, args->index); name_rmt->namelen = args->namelen; @@ -1171,28 +1150,23 @@ xfs_attr_leaf_add_work(xfs_dabuf_t *bp, xfs_da_args_t *args, int mapindex) /* * Update the control info for this leaf node */ - if (INT_GET(entry->nameidx, ARCH_CONVERT) - < INT_GET(hdr->firstused, ARCH_CONVERT)) { + if (be16_to_cpu(entry->nameidx) < be16_to_cpu(hdr->firstused)) { /* both on-disk, don't endian-flip twice */ hdr->firstused = entry->nameidx; } - ASSERT(INT_GET(hdr->firstused, ARCH_CONVERT) - >= ((INT_GET(hdr->count, ARCH_CONVERT) - * sizeof(*entry))+sizeof(*hdr))); - tmp = (INT_GET(hdr->count, ARCH_CONVERT)-1) - * sizeof(xfs_attr_leaf_entry_t) + ASSERT(be16_to_cpu(hdr->firstused) >= + ((be16_to_cpu(hdr->count) * sizeof(*entry)) + sizeof(*hdr))); + tmp = (be16_to_cpu(hdr->count)-1) * sizeof(xfs_attr_leaf_entry_t) + sizeof(xfs_attr_leaf_hdr_t); map = &hdr->freemap[0]; for (i = 0; i < XFS_ATTR_LEAF_MAPSIZE; map++, i++) { - if (INT_GET(map->base, ARCH_CONVERT) == tmp) { - INT_MOD(map->base, ARCH_CONVERT, - sizeof(xfs_attr_leaf_entry_t)); - INT_MOD(map->size, ARCH_CONVERT, - -sizeof(xfs_attr_leaf_entry_t)); + if (be16_to_cpu(map->base) == tmp) { + be16_add(&map->base, sizeof(xfs_attr_leaf_entry_t)); + be16_add(&map->size, + -((int)sizeof(xfs_attr_leaf_entry_t))); } } - INT_MOD(hdr->usedbytes, ARCH_CONVERT, - xfs_attr_leaf_entsize(leaf, args->index)); + be16_add(&hdr->usedbytes, xfs_attr_leaf_entsize(leaf, args->index)); xfs_da_log_buf(args->trans, bp, XFS_DA_LOGRANGE(leaf, hdr, sizeof(*hdr))); return(0); @@ -1223,28 +1197,25 @@ xfs_attr_leaf_compact(xfs_trans_t *trans, xfs_dabuf_t *bp) hdr_s = &leaf_s->hdr; hdr_d = &leaf_d->hdr; hdr_d->info = hdr_s->info; /* struct copy */ - INT_SET(hdr_d->firstused, ARCH_CONVERT, XFS_LBSIZE(mp)); + hdr_d->firstused = cpu_to_be16(XFS_LBSIZE(mp)); /* handle truncation gracefully */ if (!hdr_d->firstused) { - INT_SET(hdr_d->firstused, ARCH_CONVERT, + hdr_d->firstused = cpu_to_be16( XFS_LBSIZE(mp) - XFS_ATTR_LEAF_NAME_ALIGN); } hdr_d->usedbytes = 0; hdr_d->count = 0; hdr_d->holes = 0; - INT_SET(hdr_d->freemap[0].base, ARCH_CONVERT, - sizeof(xfs_attr_leaf_hdr_t)); - INT_SET(hdr_d->freemap[0].size, ARCH_CONVERT, - INT_GET(hdr_d->firstused, ARCH_CONVERT) - - INT_GET(hdr_d->freemap[0].base, ARCH_CONVERT)); + hdr_d->freemap[0].base = cpu_to_be16(sizeof(xfs_attr_leaf_hdr_t)); + hdr_d->freemap[0].size = cpu_to_be16(be16_to_cpu(hdr_d->firstused) - + sizeof(xfs_attr_leaf_hdr_t)); /* * Copy all entry's in the same (sorted) order, * but allocate name/value pairs packed and in sequence. 
*/ xfs_attr_leaf_moveents(leaf_s, 0, leaf_d, 0, - (int)INT_GET(hdr_s->count, ARCH_CONVERT), mp); - + be16_to_cpu(hdr_s->count), mp); xfs_da_log_buf(trans, bp, 0, XFS_LBSIZE(mp) - 1); kmem_free(tmpbuffer, XFS_LBSIZE(mp)); @@ -1279,10 +1250,8 @@ xfs_attr_leaf_rebalance(xfs_da_state_t *state, xfs_da_state_blk_t *blk1, ASSERT(blk2->magic == XFS_ATTR_LEAF_MAGIC); leaf1 = blk1->bp->data; leaf2 = blk2->bp->data; - ASSERT(INT_GET(leaf1->hdr.info.magic, ARCH_CONVERT) - == XFS_ATTR_LEAF_MAGIC); - ASSERT(INT_GET(leaf2->hdr.info.magic, ARCH_CONVERT) - == XFS_ATTR_LEAF_MAGIC); + ASSERT(be16_to_cpu(leaf1->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC); + ASSERT(be16_to_cpu(leaf2->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC); args = state->args; /* @@ -1319,22 +1288,21 @@ xfs_attr_leaf_rebalance(xfs_da_state_t *state, xfs_da_state_blk_t *blk1, /* * Move any entries required from leaf to leaf: */ - if (count < INT_GET(hdr1->count, ARCH_CONVERT)) { + if (count < be16_to_cpu(hdr1->count)) { /* * Figure the total bytes to be added to the destination leaf. */ /* number entries being moved */ - count = INT_GET(hdr1->count, ARCH_CONVERT) - count; - space = INT_GET(hdr1->usedbytes, ARCH_CONVERT) - totallen; + count = be16_to_cpu(hdr1->count) - count; + space = be16_to_cpu(hdr1->usedbytes) - totallen; space += count * sizeof(xfs_attr_leaf_entry_t); /* * leaf2 is the destination, compact it if it looks tight. */ - max = INT_GET(hdr2->firstused, ARCH_CONVERT) + max = be16_to_cpu(hdr2->firstused) - sizeof(xfs_attr_leaf_hdr_t); - max -= INT_GET(hdr2->count, ARCH_CONVERT) - * sizeof(xfs_attr_leaf_entry_t); + max -= be16_to_cpu(hdr2->count) * sizeof(xfs_attr_leaf_entry_t); if (space > max) { xfs_attr_leaf_compact(args->trans, blk2->bp); } @@ -1342,13 +1310,12 @@ xfs_attr_leaf_rebalance(xfs_da_state_t *state, xfs_da_state_blk_t *blk1, /* * Move high entries from leaf1 to low end of leaf2. */ - xfs_attr_leaf_moveents(leaf1, - INT_GET(hdr1->count, ARCH_CONVERT)-count, + xfs_attr_leaf_moveents(leaf1, be16_to_cpu(hdr1->count) - count, leaf2, 0, count, state->mp); xfs_da_log_buf(args->trans, blk1->bp, 0, state->blocksize-1); xfs_da_log_buf(args->trans, blk2->bp, 0, state->blocksize-1); - } else if (count > INT_GET(hdr1->count, ARCH_CONVERT)) { + } else if (count > be16_to_cpu(hdr1->count)) { /* * I assert that since all callers pass in an empty * second buffer, this code should never execute. @@ -1358,17 +1325,16 @@ xfs_attr_leaf_rebalance(xfs_da_state_t *state, xfs_da_state_blk_t *blk1, * Figure the total bytes to be added to the destination leaf. */ /* number entries being moved */ - count -= INT_GET(hdr1->count, ARCH_CONVERT); - space = totallen - INT_GET(hdr1->usedbytes, ARCH_CONVERT); + count -= be16_to_cpu(hdr1->count); + space = totallen - be16_to_cpu(hdr1->usedbytes); space += count * sizeof(xfs_attr_leaf_entry_t); /* * leaf1 is the destination, compact it if it looks tight. */ - max = INT_GET(hdr1->firstused, ARCH_CONVERT) + max = be16_to_cpu(hdr1->firstused) - sizeof(xfs_attr_leaf_hdr_t); - max -= INT_GET(hdr1->count, ARCH_CONVERT) - * sizeof(xfs_attr_leaf_entry_t); + max -= be16_to_cpu(hdr1->count) * sizeof(xfs_attr_leaf_entry_t); if (space > max) { xfs_attr_leaf_compact(args->trans, blk1->bp); } @@ -1377,8 +1343,7 @@ xfs_attr_leaf_rebalance(xfs_da_state_t *state, xfs_da_state_blk_t *blk1, * Move low entries from leaf2 to high end of leaf1. 
*/ xfs_attr_leaf_moveents(leaf2, 0, leaf1, - (int)INT_GET(hdr1->count, ARCH_CONVERT), count, - state->mp); + be16_to_cpu(hdr1->count), count, state->mp); xfs_da_log_buf(args->trans, blk1->bp, 0, state->blocksize-1); xfs_da_log_buf(args->trans, blk2->bp, 0, state->blocksize-1); @@ -1387,12 +1352,10 @@ xfs_attr_leaf_rebalance(xfs_da_state_t *state, xfs_da_state_blk_t *blk1, /* * Copy out last hashval in each block for B-tree code. */ - blk1->hashval = - INT_GET(leaf1->entries[INT_GET(leaf1->hdr.count, - ARCH_CONVERT)-1].hashval, ARCH_CONVERT); - blk2->hashval = - INT_GET(leaf2->entries[INT_GET(leaf2->hdr.count, - ARCH_CONVERT)-1].hashval, ARCH_CONVERT); + blk1->hashval = be32_to_cpu( + leaf1->entries[be16_to_cpu(leaf1->hdr.count)-1].hashval); + blk2->hashval = be32_to_cpu( + leaf2->entries[be16_to_cpu(leaf2->hdr.count)-1].hashval); /* * Adjust the expected index for insertion. @@ -1406,13 +1369,12 @@ xfs_attr_leaf_rebalance(xfs_da_state_t *state, xfs_da_state_blk_t *blk1, * inserting. The index/blkno fields refer to the "old" entry, * while the index2/blkno2 fields refer to the "new" entry. */ - if (blk1->index > INT_GET(leaf1->hdr.count, ARCH_CONVERT)) { + if (blk1->index > be16_to_cpu(leaf1->hdr.count)) { ASSERT(state->inleaf == 0); - blk2->index = blk1->index - - INT_GET(leaf1->hdr.count, ARCH_CONVERT); + blk2->index = blk1->index - be16_to_cpu(leaf1->hdr.count); args->index = args->index2 = blk2->index; args->blkno = args->blkno2 = blk2->blkno; - } else if (blk1->index == INT_GET(leaf1->hdr.count, ARCH_CONVERT)) { + } else if (blk1->index == be16_to_cpu(leaf1->hdr.count)) { if (state->inleaf) { args->index = blk1->index; args->blkno = blk1->blkno; @@ -1420,7 +1382,7 @@ xfs_attr_leaf_rebalance(xfs_da_state_t *state, xfs_da_state_blk_t *blk1, args->blkno2 = blk2->blkno; } else { blk2->index = blk1->index - - INT_GET(leaf1->hdr.count, ARCH_CONVERT); + - be16_to_cpu(leaf1->hdr.count); args->index = args->index2 = blk2->index; args->blkno = args->blkno2 = blk2->blkno; } @@ -1464,15 +1426,14 @@ xfs_attr_leaf_figure_balance(xfs_da_state_t *state, * Examine entries until we reduce the absolute difference in * byte usage between the two blocks to a minimum. */ - max = INT_GET(hdr1->count, ARCH_CONVERT) - + INT_GET(hdr2->count, ARCH_CONVERT); + max = be16_to_cpu(hdr1->count) + be16_to_cpu(hdr2->count); half = (max+1) * sizeof(*entry); - half += INT_GET(hdr1->usedbytes, ARCH_CONVERT) - + INT_GET(hdr2->usedbytes, ARCH_CONVERT) - + xfs_attr_leaf_newentsize( - state->args->namelen, - state->args->valuelen, - state->blocksize, NULL); + half += be16_to_cpu(hdr1->usedbytes) + + be16_to_cpu(hdr2->usedbytes) + + xfs_attr_leaf_newentsize( + state->args->namelen, + state->args->valuelen, + state->blocksize, NULL); half /= 2; lastdelta = state->blocksize; entry = &leaf1->entries[0]; @@ -1498,7 +1459,7 @@ xfs_attr_leaf_figure_balance(xfs_da_state_t *state, /* * Wrap around into the second block if necessary. 
*/ - if (count == INT_GET(hdr1->count, ARCH_CONVERT)) { + if (count == be16_to_cpu(hdr1->count)) { leaf1 = leaf2; entry = &leaf1->entries[0]; index = 0; @@ -1566,12 +1527,12 @@ xfs_attr_leaf_toosmall(xfs_da_state_t *state, int *action) */ blk = &state->path.blk[ state->path.active-1 ]; info = blk->bp->data; - ASSERT(INT_GET(info->magic, ARCH_CONVERT) == XFS_ATTR_LEAF_MAGIC); + ASSERT(be16_to_cpu(info->magic) == XFS_ATTR_LEAF_MAGIC); leaf = (xfs_attr_leafblock_t *)info; - count = INT_GET(leaf->hdr.count, ARCH_CONVERT); + count = be16_to_cpu(leaf->hdr.count); bytes = sizeof(xfs_attr_leaf_hdr_t) + count * sizeof(xfs_attr_leaf_entry_t) + - INT_GET(leaf->hdr.usedbytes, ARCH_CONVERT); + be16_to_cpu(leaf->hdr.usedbytes); if (bytes > (state->blocksize >> 1)) { *action = 0; /* blk over 50%, don't try to join */ return(0); @@ -1588,7 +1549,7 @@ xfs_attr_leaf_toosmall(xfs_da_state_t *state, int *action) * Make altpath point to the block we want to keep and * path point to the block we want to drop (this one). */ - forward = info->forw; + forward = (info->forw != 0); memcpy(&state->altpath, &state->path, sizeof(state->path)); error = xfs_da_path_shift(state, &state->altpath, forward, 0, &retval); @@ -1610,13 +1571,12 @@ xfs_attr_leaf_toosmall(xfs_da_state_t *state, int *action) * to shrink an attribute list over time. */ /* start with smaller blk num */ - forward = (INT_GET(info->forw, ARCH_CONVERT) - < INT_GET(info->back, ARCH_CONVERT)); + forward = (be32_to_cpu(info->forw) < be32_to_cpu(info->back)); for (i = 0; i < 2; forward = !forward, i++) { if (forward) - blkno = INT_GET(info->forw, ARCH_CONVERT); + blkno = be32_to_cpu(info->forw); else - blkno = INT_GET(info->back, ARCH_CONVERT); + blkno = be32_to_cpu(info->back); if (blkno == 0) continue; error = xfs_da_read_buf(state->args->trans, state->args->dp, @@ -1626,14 +1586,13 @@ xfs_attr_leaf_toosmall(xfs_da_state_t *state, int *action) ASSERT(bp != NULL); leaf = (xfs_attr_leafblock_t *)info; - count = INT_GET(leaf->hdr.count, ARCH_CONVERT); + count = be16_to_cpu(leaf->hdr.count); bytes = state->blocksize - (state->blocksize>>2); - bytes -= INT_GET(leaf->hdr.usedbytes, ARCH_CONVERT); + bytes -= be16_to_cpu(leaf->hdr.usedbytes); leaf = bp->data; - ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) - == XFS_ATTR_LEAF_MAGIC); - count += INT_GET(leaf->hdr.count, ARCH_CONVERT); - bytes -= INT_GET(leaf->hdr.usedbytes, ARCH_CONVERT); + ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC); + count += be16_to_cpu(leaf->hdr.count); + bytes -= be16_to_cpu(leaf->hdr.usedbytes); bytes -= count * sizeof(xfs_attr_leaf_entry_t); bytes -= sizeof(xfs_attr_leaf_hdr_t); xfs_da_brelse(state->args->trans, bp); @@ -1685,21 +1644,18 @@ xfs_attr_leaf_remove(xfs_dabuf_t *bp, xfs_da_args_t *args) xfs_mount_t *mp; leaf = bp->data; - ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) - == XFS_ATTR_LEAF_MAGIC); + ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC); hdr = &leaf->hdr; mp = args->trans->t_mountp; - ASSERT((INT_GET(hdr->count, ARCH_CONVERT) > 0) - && (INT_GET(hdr->count, ARCH_CONVERT) < (XFS_LBSIZE(mp)/8))); + ASSERT((be16_to_cpu(hdr->count) > 0) + && (be16_to_cpu(hdr->count) < (XFS_LBSIZE(mp)/8))); ASSERT((args->index >= 0) - && (args->index < INT_GET(hdr->count, ARCH_CONVERT))); - ASSERT(INT_GET(hdr->firstused, ARCH_CONVERT) - >= ((INT_GET(hdr->count, ARCH_CONVERT) - * sizeof(*entry))+sizeof(*hdr))); + && (args->index < be16_to_cpu(hdr->count))); + ASSERT(be16_to_cpu(hdr->firstused) >= + ((be16_to_cpu(hdr->count) * sizeof(*entry)) + 
sizeof(*hdr))); entry = &leaf->entries[args->index]; - ASSERT(INT_GET(entry->nameidx, ARCH_CONVERT) - >= INT_GET(hdr->firstused, ARCH_CONVERT)); - ASSERT(INT_GET(entry->nameidx, ARCH_CONVERT) < XFS_LBSIZE(mp)); + ASSERT(be16_to_cpu(entry->nameidx) >= be16_to_cpu(hdr->firstused)); + ASSERT(be16_to_cpu(entry->nameidx) < XFS_LBSIZE(mp)); /* * Scan through free region table: @@ -1707,33 +1663,30 @@ xfs_attr_leaf_remove(xfs_dabuf_t *bp, xfs_da_args_t *args) * find smallest free region in case we need to replace it, * adjust any map that borders the entry table, */ - tablesize = INT_GET(hdr->count, ARCH_CONVERT) - * sizeof(xfs_attr_leaf_entry_t) + tablesize = be16_to_cpu(hdr->count) * sizeof(xfs_attr_leaf_entry_t) + sizeof(xfs_attr_leaf_hdr_t); map = &hdr->freemap[0]; - tmp = INT_GET(map->size, ARCH_CONVERT); + tmp = be16_to_cpu(map->size); before = after = -1; smallest = XFS_ATTR_LEAF_MAPSIZE - 1; entsize = xfs_attr_leaf_entsize(leaf, args->index); for (i = 0; i < XFS_ATTR_LEAF_MAPSIZE; map++, i++) { - ASSERT(INT_GET(map->base, ARCH_CONVERT) < XFS_LBSIZE(mp)); - ASSERT(INT_GET(map->size, ARCH_CONVERT) < XFS_LBSIZE(mp)); - if (INT_GET(map->base, ARCH_CONVERT) == tablesize) { - INT_MOD(map->base, ARCH_CONVERT, - -sizeof(xfs_attr_leaf_entry_t)); - INT_MOD(map->size, ARCH_CONVERT, - sizeof(xfs_attr_leaf_entry_t)); + ASSERT(be16_to_cpu(map->base) < XFS_LBSIZE(mp)); + ASSERT(be16_to_cpu(map->size) < XFS_LBSIZE(mp)); + if (be16_to_cpu(map->base) == tablesize) { + be16_add(&map->base, + -((int)sizeof(xfs_attr_leaf_entry_t))); + be16_add(&map->size, sizeof(xfs_attr_leaf_entry_t)); } - if ((INT_GET(map->base, ARCH_CONVERT) - + INT_GET(map->size, ARCH_CONVERT)) - == INT_GET(entry->nameidx, ARCH_CONVERT)) { + if ((be16_to_cpu(map->base) + be16_to_cpu(map->size)) + == be16_to_cpu(entry->nameidx)) { before = i; - } else if (INT_GET(map->base, ARCH_CONVERT) - == (INT_GET(entry->nameidx, ARCH_CONVERT) + entsize)) { + } else if (be16_to_cpu(map->base) + == (be16_to_cpu(entry->nameidx) + entsize)) { after = i; - } else if (INT_GET(map->size, ARCH_CONVERT) < tmp) { - tmp = INT_GET(map->size, ARCH_CONVERT); + } else if (be16_to_cpu(map->size) < tmp) { + tmp = be16_to_cpu(map->size); smallest = i; } } @@ -1745,38 +1698,35 @@ xfs_attr_leaf_remove(xfs_dabuf_t *bp, xfs_da_args_t *args) if ((before >= 0) || (after >= 0)) { if ((before >= 0) && (after >= 0)) { map = &hdr->freemap[before]; - INT_MOD(map->size, ARCH_CONVERT, entsize); - INT_MOD(map->size, ARCH_CONVERT, - INT_GET(hdr->freemap[after].size, - ARCH_CONVERT)); + be16_add(&map->size, entsize); + be16_add(&map->size, + be16_to_cpu(hdr->freemap[after].size)); hdr->freemap[after].base = 0; hdr->freemap[after].size = 0; } else if (before >= 0) { map = &hdr->freemap[before]; - INT_MOD(map->size, ARCH_CONVERT, entsize); + be16_add(&map->size, entsize); } else { map = &hdr->freemap[after]; /* both on-disk, don't endian flip twice */ map->base = entry->nameidx; - INT_MOD(map->size, ARCH_CONVERT, entsize); + be16_add(&map->size, entsize); } } else { /* * Replace smallest region (if it is smaller than free'd entry) */ map = &hdr->freemap[smallest]; - if (INT_GET(map->size, ARCH_CONVERT) < entsize) { - INT_SET(map->base, ARCH_CONVERT, - INT_GET(entry->nameidx, ARCH_CONVERT)); - INT_SET(map->size, ARCH_CONVERT, entsize); + if (be16_to_cpu(map->size) < entsize) { + map->base = cpu_to_be16(be16_to_cpu(entry->nameidx)); + map->size = cpu_to_be16(entsize); } } /* * Did we remove the first entry? 
*/ - if (INT_GET(entry->nameidx, ARCH_CONVERT) - == INT_GET(hdr->firstused, ARCH_CONVERT)) + if (be16_to_cpu(entry->nameidx) == be16_to_cpu(hdr->firstused)) smallest = 1; else smallest = 0; @@ -1785,18 +1735,18 @@ xfs_attr_leaf_remove(xfs_dabuf_t *bp, xfs_da_args_t *args) * Compress the remaining entries and zero out the removed stuff. */ memset(XFS_ATTR_LEAF_NAME(leaf, args->index), 0, entsize); - INT_MOD(hdr->usedbytes, ARCH_CONVERT, -entsize); + be16_add(&hdr->usedbytes, -entsize); xfs_da_log_buf(args->trans, bp, XFS_DA_LOGRANGE(leaf, XFS_ATTR_LEAF_NAME(leaf, args->index), entsize)); - tmp = (INT_GET(hdr->count, ARCH_CONVERT) - args->index) + tmp = (be16_to_cpu(hdr->count) - args->index) * sizeof(xfs_attr_leaf_entry_t); memmove((char *)entry, (char *)(entry+1), tmp); - INT_MOD(hdr->count, ARCH_CONVERT, -1); + be16_add(&hdr->count, -1); xfs_da_log_buf(args->trans, bp, XFS_DA_LOGRANGE(leaf, entry, tmp + sizeof(*entry))); - entry = &leaf->entries[INT_GET(hdr->count, ARCH_CONVERT)]; + entry = &leaf->entries[be16_to_cpu(hdr->count)]; memset((char *)entry, 0, sizeof(xfs_attr_leaf_entry_t)); /* @@ -1808,18 +1758,17 @@ xfs_attr_leaf_remove(xfs_dabuf_t *bp, xfs_da_args_t *args) if (smallest) { tmp = XFS_LBSIZE(mp); entry = &leaf->entries[0]; - for (i = INT_GET(hdr->count, ARCH_CONVERT)-1; - i >= 0; entry++, i--) { - ASSERT(INT_GET(entry->nameidx, ARCH_CONVERT) - >= INT_GET(hdr->firstused, ARCH_CONVERT)); - ASSERT(INT_GET(entry->nameidx, ARCH_CONVERT) - < XFS_LBSIZE(mp)); - if (INT_GET(entry->nameidx, ARCH_CONVERT) < tmp) - tmp = INT_GET(entry->nameidx, ARCH_CONVERT); + for (i = be16_to_cpu(hdr->count)-1; i >= 0; entry++, i--) { + ASSERT(be16_to_cpu(entry->nameidx) >= + be16_to_cpu(hdr->firstused)); + ASSERT(be16_to_cpu(entry->nameidx) < XFS_LBSIZE(mp)); + + if (be16_to_cpu(entry->nameidx) < tmp) + tmp = be16_to_cpu(entry->nameidx); } - INT_SET(hdr->firstused, ARCH_CONVERT, tmp); + hdr->firstused = cpu_to_be16(tmp); if (!hdr->firstused) { - INT_SET(hdr->firstused, ARCH_CONVERT, + hdr->firstused = cpu_to_be16( tmp - XFS_ATTR_LEAF_NAME_ALIGN); } } else { @@ -1833,9 +1782,8 @@ xfs_attr_leaf_remove(xfs_dabuf_t *bp, xfs_da_args_t *args) * "join" the leaf with a sibling if so. */ tmp = sizeof(xfs_attr_leaf_hdr_t); - tmp += INT_GET(leaf->hdr.count, ARCH_CONVERT) - * sizeof(xfs_attr_leaf_entry_t); - tmp += INT_GET(leaf->hdr.usedbytes, ARCH_CONVERT); + tmp += be16_to_cpu(leaf->hdr.count) * sizeof(xfs_attr_leaf_entry_t); + tmp += be16_to_cpu(leaf->hdr.usedbytes); return(tmp < mp->m_attr_magicpct); /* leaf is < 37% full */ } @@ -1859,20 +1807,16 @@ xfs_attr_leaf_unbalance(xfs_da_state_t *state, xfs_da_state_blk_t *drop_blk, ASSERT(save_blk->magic == XFS_ATTR_LEAF_MAGIC); drop_leaf = drop_blk->bp->data; save_leaf = save_blk->bp->data; - ASSERT(INT_GET(drop_leaf->hdr.info.magic, ARCH_CONVERT) - == XFS_ATTR_LEAF_MAGIC); - ASSERT(INT_GET(save_leaf->hdr.info.magic, ARCH_CONVERT) - == XFS_ATTR_LEAF_MAGIC); + ASSERT(be16_to_cpu(drop_leaf->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC); + ASSERT(be16_to_cpu(save_leaf->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC); drop_hdr = &drop_leaf->hdr; save_hdr = &save_leaf->hdr; /* * Save last hashval from dying block for later Btree fixup. */ - drop_blk->hashval = - INT_GET(drop_leaf->entries[INT_GET(drop_leaf->hdr.count, - ARCH_CONVERT)-1].hashval, - ARCH_CONVERT); + drop_blk->hashval = be32_to_cpu( + drop_leaf->entries[be16_to_cpu(drop_leaf->hdr.count)-1].hashval); /* * Check if we need a temp buffer, or can we do it in place. 
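Several hunks in this function keep the "both on-disk, don't endian flip twice" comments. The idiom they refer to, sketched here with the __be16 leaf fields used above purely as illustration, is that two on-disk fields may be copied directly, while a CPU-order value must be converted exactly once:

	map->base = entry->nameidx;		/* __be16 to __be16: no conversion */
	map->size = cpu_to_be16(entsize);	/* CPU int to __be16: convert once */
	/* Applying cpu_to_be16() to a value that is already big-endian,
	 * e.g. cpu_to_be16(entry->nameidx), would leave the stored field
	 * byte-swapped on little-endian hosts. */
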
@@ -1886,12 +1830,11 @@ xfs_attr_leaf_unbalance(xfs_da_state_t *state, xfs_da_state_blk_t *drop_blk, */ if (xfs_attr_leaf_order(save_blk->bp, drop_blk->bp)) { xfs_attr_leaf_moveents(drop_leaf, 0, save_leaf, 0, - (int)INT_GET(drop_hdr->count, ARCH_CONVERT), mp); + be16_to_cpu(drop_hdr->count), mp); } else { xfs_attr_leaf_moveents(drop_leaf, 0, save_leaf, - INT_GET(save_hdr->count, ARCH_CONVERT), - (int)INT_GET(drop_hdr->count, ARCH_CONVERT), - mp); + be16_to_cpu(save_hdr->count), + be16_to_cpu(drop_hdr->count), mp); } } else { /* @@ -1905,28 +1848,24 @@ xfs_attr_leaf_unbalance(xfs_da_state_t *state, xfs_da_state_blk_t *drop_blk, tmp_hdr = &tmp_leaf->hdr; tmp_hdr->info = save_hdr->info; /* struct copy */ tmp_hdr->count = 0; - INT_SET(tmp_hdr->firstused, ARCH_CONVERT, state->blocksize); + tmp_hdr->firstused = cpu_to_be16(state->blocksize); if (!tmp_hdr->firstused) { - INT_SET(tmp_hdr->firstused, ARCH_CONVERT, + tmp_hdr->firstused = cpu_to_be16( state->blocksize - XFS_ATTR_LEAF_NAME_ALIGN); } tmp_hdr->usedbytes = 0; if (xfs_attr_leaf_order(save_blk->bp, drop_blk->bp)) { xfs_attr_leaf_moveents(drop_leaf, 0, tmp_leaf, 0, - (int)INT_GET(drop_hdr->count, ARCH_CONVERT), - mp); + be16_to_cpu(drop_hdr->count), mp); xfs_attr_leaf_moveents(save_leaf, 0, tmp_leaf, - INT_GET(tmp_leaf->hdr.count, ARCH_CONVERT), - (int)INT_GET(save_hdr->count, ARCH_CONVERT), - mp); + be16_to_cpu(tmp_leaf->hdr.count), + be16_to_cpu(save_hdr->count), mp); } else { xfs_attr_leaf_moveents(save_leaf, 0, tmp_leaf, 0, - (int)INT_GET(save_hdr->count, ARCH_CONVERT), - mp); + be16_to_cpu(save_hdr->count), mp); xfs_attr_leaf_moveents(drop_leaf, 0, tmp_leaf, - INT_GET(tmp_leaf->hdr.count, ARCH_CONVERT), - (int)INT_GET(drop_hdr->count, ARCH_CONVERT), - mp); + be16_to_cpu(tmp_leaf->hdr.count), + be16_to_cpu(drop_hdr->count), mp); } memcpy((char *)save_leaf, (char *)tmp_leaf, state->blocksize); kmem_free(tmpbuffer, state->blocksize); @@ -1938,10 +1877,8 @@ xfs_attr_leaf_unbalance(xfs_da_state_t *state, xfs_da_state_blk_t *drop_blk, /* * Copy out last hashval in each block for B-tree code. */ - save_blk->hashval = - INT_GET(save_leaf->entries[INT_GET(save_leaf->hdr.count, - ARCH_CONVERT)-1].hashval, - ARCH_CONVERT); + save_blk->hashval = be32_to_cpu( + save_leaf->entries[be16_to_cpu(save_leaf->hdr.count)-1].hashval); } /*======================================================================== @@ -1972,48 +1909,45 @@ xfs_attr_leaf_lookup_int(xfs_dabuf_t *bp, xfs_da_args_t *args) xfs_dahash_t hashval; leaf = bp->data; - ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) - == XFS_ATTR_LEAF_MAGIC); - ASSERT(INT_GET(leaf->hdr.count, ARCH_CONVERT) + ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC); + ASSERT(be16_to_cpu(leaf->hdr.count) < (XFS_LBSIZE(args->dp->i_mount)/8)); /* * Binary search. 
(note: small blocks will skip this loop) */ hashval = args->hashval; - probe = span = INT_GET(leaf->hdr.count, ARCH_CONVERT) / 2; + probe = span = be16_to_cpu(leaf->hdr.count) / 2; for (entry = &leaf->entries[probe]; span > 4; entry = &leaf->entries[probe]) { span /= 2; - if (INT_GET(entry->hashval, ARCH_CONVERT) < hashval) + if (be32_to_cpu(entry->hashval) < hashval) probe += span; - else if (INT_GET(entry->hashval, ARCH_CONVERT) > hashval) + else if (be32_to_cpu(entry->hashval) > hashval) probe -= span; else break; } ASSERT((probe >= 0) && (!leaf->hdr.count - || (probe < INT_GET(leaf->hdr.count, ARCH_CONVERT)))); - ASSERT((span <= 4) || (INT_GET(entry->hashval, ARCH_CONVERT) - == hashval)); + || (probe < be16_to_cpu(leaf->hdr.count)))); + ASSERT((span <= 4) || (be32_to_cpu(entry->hashval) == hashval)); /* * Since we may have duplicate hashval's, find the first matching * hashval in the leaf. */ - while ((probe > 0) && (INT_GET(entry->hashval, ARCH_CONVERT) - >= hashval)) { + while ((probe > 0) && (be32_to_cpu(entry->hashval) >= hashval)) { entry--; probe--; } - while ((probe < INT_GET(leaf->hdr.count, ARCH_CONVERT)) - && (INT_GET(entry->hashval, ARCH_CONVERT) < hashval)) { + while ((probe < be16_to_cpu(leaf->hdr.count)) && + (be32_to_cpu(entry->hashval) < hashval)) { entry++; probe++; } - if ((probe == INT_GET(leaf->hdr.count, ARCH_CONVERT)) - || (INT_GET(entry->hashval, ARCH_CONVERT) != hashval)) { + if ((probe == be16_to_cpu(leaf->hdr.count)) || + (be32_to_cpu(entry->hashval) != hashval)) { args->index = probe; return(XFS_ERROR(ENOATTR)); } @@ -2021,8 +1955,8 @@ xfs_attr_leaf_lookup_int(xfs_dabuf_t *bp, xfs_da_args_t *args) /* * Duplicate keys may be present, so search all of them for a match. */ - for ( ; (probe < INT_GET(leaf->hdr.count, ARCH_CONVERT)) - && (INT_GET(entry->hashval, ARCH_CONVERT) == hashval); + for ( ; (probe < be16_to_cpu(leaf->hdr.count)) && + (be32_to_cpu(entry->hashval) == hashval); entry++, probe++) { /* * GROT: Add code to remove incomplete entries. 
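The lookup conversion above keeps the original structure: a coarse binary search on the big-endian hash values, a rewind to the first entry carrying that hash, then a forward scan over duplicates comparing the actual names. Condensed to its shape, using the same leaf/entry fields, as a reading aid rather than a drop-in replacement:

	probe = span = be16_to_cpu(leaf->hdr.count) / 2;
	while (span > 4) {			/* coarse binary search */
		span /= 2;
		if (be32_to_cpu(leaf->entries[probe].hashval) < hashval)
			probe += span;
		else if (be32_to_cpu(leaf->entries[probe].hashval) > hashval)
			probe -= span;
		else
			break;
	}
	while (probe > 0 &&			/* back up to the first duplicate */
	       be32_to_cpu(leaf->entries[probe].hashval) >= hashval)
		probe--;
	/* then step forward while the hashval matches, comparing names */
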
@@ -2064,11 +1998,9 @@ xfs_attr_leaf_lookup_int(xfs_dabuf_t *bp, xfs_da_args_t *args) ((entry->flags & XFS_ATTR_ROOT) != 0)) continue; args->index = probe; - args->rmtblkno - = INT_GET(name_rmt->valueblk, ARCH_CONVERT); + args->rmtblkno = be32_to_cpu(name_rmt->valueblk); args->rmtblkcnt = XFS_B_TO_FSB(args->dp->i_mount, - INT_GET(name_rmt->valuelen, - ARCH_CONVERT)); + be32_to_cpu(name_rmt->valuelen)); return(XFS_ERROR(EEXIST)); } } @@ -2090,18 +2022,17 @@ xfs_attr_leaf_getvalue(xfs_dabuf_t *bp, xfs_da_args_t *args) xfs_attr_leaf_name_remote_t *name_rmt; leaf = bp->data; - ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) - == XFS_ATTR_LEAF_MAGIC); - ASSERT(INT_GET(leaf->hdr.count, ARCH_CONVERT) + ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC); + ASSERT(be16_to_cpu(leaf->hdr.count) < (XFS_LBSIZE(args->dp->i_mount)/8)); - ASSERT(args->index < ((int)INT_GET(leaf->hdr.count, ARCH_CONVERT))); + ASSERT(args->index < be16_to_cpu(leaf->hdr.count)); entry = &leaf->entries[args->index]; if (entry->flags & XFS_ATTR_LOCAL) { name_loc = XFS_ATTR_LEAF_NAME_LOCAL(leaf, args->index); ASSERT(name_loc->namelen == args->namelen); ASSERT(memcmp(args->name, name_loc->nameval, args->namelen) == 0); - valuelen = INT_GET(name_loc->valuelen, ARCH_CONVERT); + valuelen = be16_to_cpu(name_loc->valuelen); if (args->flags & ATTR_KERNOVAL) { args->valuelen = valuelen; return(0); @@ -2116,8 +2047,8 @@ xfs_attr_leaf_getvalue(xfs_dabuf_t *bp, xfs_da_args_t *args) name_rmt = XFS_ATTR_LEAF_NAME_REMOTE(leaf, args->index); ASSERT(name_rmt->namelen == args->namelen); ASSERT(memcmp(args->name, name_rmt->name, args->namelen) == 0); - valuelen = INT_GET(name_rmt->valuelen, ARCH_CONVERT); - args->rmtblkno = INT_GET(name_rmt->valueblk, ARCH_CONVERT); + valuelen = be32_to_cpu(name_rmt->valuelen); + args->rmtblkno = be32_to_cpu(name_rmt->valueblk); args->rmtblkcnt = XFS_B_TO_FSB(args->dp->i_mount, valuelen); if (args->flags & ATTR_KERNOVAL) { args->valuelen = valuelen; @@ -2159,32 +2090,29 @@ xfs_attr_leaf_moveents(xfs_attr_leafblock_t *leaf_s, int start_s, /* * Set up environment. 
*/ - ASSERT(INT_GET(leaf_s->hdr.info.magic, ARCH_CONVERT) - == XFS_ATTR_LEAF_MAGIC); - ASSERT(INT_GET(leaf_d->hdr.info.magic, ARCH_CONVERT) - == XFS_ATTR_LEAF_MAGIC); + ASSERT(be16_to_cpu(leaf_s->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC); + ASSERT(be16_to_cpu(leaf_d->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC); hdr_s = &leaf_s->hdr; hdr_d = &leaf_d->hdr; - ASSERT((INT_GET(hdr_s->count, ARCH_CONVERT) > 0) - && (INT_GET(hdr_s->count, ARCH_CONVERT) - < (XFS_LBSIZE(mp)/8))); - ASSERT(INT_GET(hdr_s->firstused, ARCH_CONVERT) >= - ((INT_GET(hdr_s->count, ARCH_CONVERT) + ASSERT((be16_to_cpu(hdr_s->count) > 0) && + (be16_to_cpu(hdr_s->count) < (XFS_LBSIZE(mp)/8))); + ASSERT(be16_to_cpu(hdr_s->firstused) >= + ((be16_to_cpu(hdr_s->count) * sizeof(*entry_s))+sizeof(*hdr_s))); - ASSERT(INT_GET(hdr_d->count, ARCH_CONVERT) < (XFS_LBSIZE(mp)/8)); - ASSERT(INT_GET(hdr_d->firstused, ARCH_CONVERT) >= - ((INT_GET(hdr_d->count, ARCH_CONVERT) + ASSERT(be16_to_cpu(hdr_d->count) < (XFS_LBSIZE(mp)/8)); + ASSERT(be16_to_cpu(hdr_d->firstused) >= + ((be16_to_cpu(hdr_d->count) * sizeof(*entry_d))+sizeof(*hdr_d))); - ASSERT(start_s < INT_GET(hdr_s->count, ARCH_CONVERT)); - ASSERT(start_d <= INT_GET(hdr_d->count, ARCH_CONVERT)); - ASSERT(count <= INT_GET(hdr_s->count, ARCH_CONVERT)); + ASSERT(start_s < be16_to_cpu(hdr_s->count)); + ASSERT(start_d <= be16_to_cpu(hdr_d->count)); + ASSERT(count <= be16_to_cpu(hdr_s->count)); /* * Move the entries in the destination leaf up to make a hole? */ - if (start_d < INT_GET(hdr_d->count, ARCH_CONVERT)) { - tmp = INT_GET(hdr_d->count, ARCH_CONVERT) - start_d; + if (start_d < be16_to_cpu(hdr_d->count)) { + tmp = be16_to_cpu(hdr_d->count) - start_d; tmp *= sizeof(xfs_attr_leaf_entry_t); entry_s = &leaf_d->entries[start_d]; entry_d = &leaf_d->entries[start_d + count]; @@ -2199,8 +2127,8 @@ xfs_attr_leaf_moveents(xfs_attr_leafblock_t *leaf_s, int start_s, entry_d = &leaf_d->entries[start_d]; desti = start_d; for (i = 0; i < count; entry_s++, entry_d++, desti++, i++) { - ASSERT(INT_GET(entry_s->nameidx, ARCH_CONVERT) - >= INT_GET(hdr_s->firstused, ARCH_CONVERT)); + ASSERT(be16_to_cpu(entry_s->nameidx) + >= be16_to_cpu(hdr_s->firstused)); tmp = xfs_attr_leaf_entsize(leaf_s, start_s + i); #ifdef GROT /* @@ -2210,35 +2138,35 @@ xfs_attr_leaf_moveents(xfs_attr_leafblock_t *leaf_s, int start_s, */ if (entry_s->flags & XFS_ATTR_INCOMPLETE) { /* skip partials? 
*/ memset(XFS_ATTR_LEAF_NAME(leaf_s, start_s + i), 0, tmp); - INT_MOD(hdr_s->usedbytes, ARCH_CONVERT, -tmp); - INT_MOD(hdr_s->count, ARCH_CONVERT, -1); + be16_add(&hdr_s->usedbytes, -tmp); + be16_add(&hdr_s->count, -1); entry_d--; /* to compensate for ++ in loop hdr */ desti--; if ((start_s + i) < offset) result++; /* insertion index adjustment */ } else { #endif /* GROT */ - INT_MOD(hdr_d->firstused, ARCH_CONVERT, -tmp); + be16_add(&hdr_d->firstused, -tmp); /* both on-disk, don't endian flip twice */ entry_d->hashval = entry_s->hashval; /* both on-disk, don't endian flip twice */ entry_d->nameidx = hdr_d->firstused; entry_d->flags = entry_s->flags; - ASSERT(INT_GET(entry_d->nameidx, ARCH_CONVERT) + tmp + ASSERT(be16_to_cpu(entry_d->nameidx) + tmp <= XFS_LBSIZE(mp)); memmove(XFS_ATTR_LEAF_NAME(leaf_d, desti), XFS_ATTR_LEAF_NAME(leaf_s, start_s + i), tmp); - ASSERT(INT_GET(entry_s->nameidx, ARCH_CONVERT) + tmp + ASSERT(be16_to_cpu(entry_s->nameidx) + tmp <= XFS_LBSIZE(mp)); memset(XFS_ATTR_LEAF_NAME(leaf_s, start_s + i), 0, tmp); - INT_MOD(hdr_s->usedbytes, ARCH_CONVERT, -tmp); - INT_MOD(hdr_d->usedbytes, ARCH_CONVERT, tmp); - INT_MOD(hdr_s->count, ARCH_CONVERT, -1); - INT_MOD(hdr_d->count, ARCH_CONVERT, 1); - tmp = INT_GET(hdr_d->count, ARCH_CONVERT) + be16_add(&hdr_s->usedbytes, -tmp); + be16_add(&hdr_d->usedbytes, tmp); + be16_add(&hdr_s->count, -1); + be16_add(&hdr_d->count, 1); + tmp = be16_to_cpu(hdr_d->count) * sizeof(xfs_attr_leaf_entry_t) + sizeof(xfs_attr_leaf_hdr_t); - ASSERT(INT_GET(hdr_d->firstused, ARCH_CONVERT) >= tmp); + ASSERT(be16_to_cpu(hdr_d->firstused) >= tmp); #ifdef GROT } #endif /* GROT */ @@ -2247,7 +2175,7 @@ xfs_attr_leaf_moveents(xfs_attr_leafblock_t *leaf_s, int start_s, /* * Zero out the entries we just copied. */ - if (start_s == INT_GET(hdr_s->count, ARCH_CONVERT)) { + if (start_s == be16_to_cpu(hdr_s->count)) { tmp = count * sizeof(xfs_attr_leaf_entry_t); entry_s = &leaf_s->entries[start_s]; ASSERT(((char *)entry_s + tmp) <= @@ -2258,15 +2186,14 @@ xfs_attr_leaf_moveents(xfs_attr_leafblock_t *leaf_s, int start_s, * Move the remaining entries down to fill the hole, * then zero the entries at the top. 
*/ - tmp = INT_GET(hdr_s->count, ARCH_CONVERT) - count; + tmp = be16_to_cpu(hdr_s->count) - count; tmp *= sizeof(xfs_attr_leaf_entry_t); entry_s = &leaf_s->entries[start_s + count]; entry_d = &leaf_s->entries[start_s]; memmove((char *)entry_d, (char *)entry_s, tmp); tmp = count * sizeof(xfs_attr_leaf_entry_t); - entry_s = &leaf_s->entries[INT_GET(hdr_s->count, - ARCH_CONVERT)]; + entry_s = &leaf_s->entries[be16_to_cpu(hdr_s->count)]; ASSERT(((char *)entry_s + tmp) <= ((char *)leaf_s + XFS_LBSIZE(mp))); memset((char *)entry_s, 0, tmp); @@ -2275,14 +2202,11 @@ xfs_attr_leaf_moveents(xfs_attr_leafblock_t *leaf_s, int start_s, /* * Fill in the freemap information */ - INT_SET(hdr_d->freemap[0].base, ARCH_CONVERT, - sizeof(xfs_attr_leaf_hdr_t)); - INT_MOD(hdr_d->freemap[0].base, ARCH_CONVERT, - INT_GET(hdr_d->count, ARCH_CONVERT) - * sizeof(xfs_attr_leaf_entry_t)); - INT_SET(hdr_d->freemap[0].size, ARCH_CONVERT, - INT_GET(hdr_d->firstused, ARCH_CONVERT) - - INT_GET(hdr_d->freemap[0].base, ARCH_CONVERT)); + hdr_d->freemap[0].base = cpu_to_be16(sizeof(xfs_attr_leaf_hdr_t)); + be16_add(&hdr_d->freemap[0].base, be16_to_cpu(hdr_d->count) * + sizeof(xfs_attr_leaf_entry_t)); + hdr_d->freemap[0].size = cpu_to_be16(be16_to_cpu(hdr_d->firstused) + - be16_to_cpu(hdr_d->freemap[0].base)); hdr_d->freemap[1].base = 0; hdr_d->freemap[2].base = 0; hdr_d->freemap[1].size = 0; @@ -2301,18 +2225,16 @@ xfs_attr_leaf_order(xfs_dabuf_t *leaf1_bp, xfs_dabuf_t *leaf2_bp) leaf1 = leaf1_bp->data; leaf2 = leaf2_bp->data; - ASSERT((INT_GET(leaf1->hdr.info.magic, ARCH_CONVERT) - == XFS_ATTR_LEAF_MAGIC) && - (INT_GET(leaf2->hdr.info.magic, ARCH_CONVERT) - == XFS_ATTR_LEAF_MAGIC)); - if ( (INT_GET(leaf1->hdr.count, ARCH_CONVERT) > 0) - && (INT_GET(leaf2->hdr.count, ARCH_CONVERT) > 0) - && ( (INT_GET(leaf2->entries[ 0 ].hashval, ARCH_CONVERT) < - INT_GET(leaf1->entries[ 0 ].hashval, ARCH_CONVERT)) - || (INT_GET(leaf2->entries[INT_GET(leaf2->hdr.count, - ARCH_CONVERT)-1].hashval, ARCH_CONVERT) < - INT_GET(leaf1->entries[INT_GET(leaf1->hdr.count, - ARCH_CONVERT)-1].hashval, ARCH_CONVERT))) ) { + ASSERT((be16_to_cpu(leaf1->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC) && + (be16_to_cpu(leaf2->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC)); + if ((be16_to_cpu(leaf1->hdr.count) > 0) && + (be16_to_cpu(leaf2->hdr.count) > 0) && + ((be32_to_cpu(leaf2->entries[0].hashval) < + be32_to_cpu(leaf1->entries[0].hashval)) || + (be32_to_cpu(leaf2->entries[ + be16_to_cpu(leaf2->hdr.count)-1].hashval) < + be32_to_cpu(leaf1->entries[ + be16_to_cpu(leaf1->hdr.count)-1].hashval)))) { return(1); } return(0); @@ -2327,14 +2249,12 @@ xfs_attr_leaf_lasthash(xfs_dabuf_t *bp, int *count) xfs_attr_leafblock_t *leaf; leaf = bp->data; - ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) - == XFS_ATTR_LEAF_MAGIC); + ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC); if (count) - *count = INT_GET(leaf->hdr.count, ARCH_CONVERT); + *count = be16_to_cpu(leaf->hdr.count); if (!leaf->hdr.count) return(0); - return(INT_GET(leaf->entries[INT_GET(leaf->hdr.count, - ARCH_CONVERT)-1].hashval, ARCH_CONVERT)); + return be32_to_cpu(leaf->entries[be16_to_cpu(leaf->hdr.count)-1].hashval); } /* @@ -2348,13 +2268,11 @@ xfs_attr_leaf_entsize(xfs_attr_leafblock_t *leaf, int index) xfs_attr_leaf_name_remote_t *name_rmt; int size; - ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) - == XFS_ATTR_LEAF_MAGIC); + ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC); if (leaf->entries[index].flags & XFS_ATTR_LOCAL) { name_loc = XFS_ATTR_LEAF_NAME_LOCAL(leaf, index); 
size = XFS_ATTR_LEAF_ENTSIZE_LOCAL(name_loc->namelen, - INT_GET(name_loc->valuelen, - ARCH_CONVERT)); + be16_to_cpu(name_loc->valuelen)); } else { name_rmt = XFS_ATTR_LEAF_NAME_REMOTE(leaf, index); size = XFS_ATTR_LEAF_ENTSIZE_REMOTE(name_rmt->namelen); @@ -2412,22 +2330,20 @@ xfs_attr_leaf_list_int(xfs_dabuf_t *bp, xfs_attr_list_context_t *context) */ if (context->resynch) { entry = &leaf->entries[0]; - for (i = 0; i < INT_GET(leaf->hdr.count, ARCH_CONVERT); - entry++, i++) { - if (INT_GET(entry->hashval, ARCH_CONVERT) - == cursor->hashval) { + for (i = 0; i < be16_to_cpu(leaf->hdr.count); entry++, i++) { + if (be32_to_cpu(entry->hashval) == cursor->hashval) { if (cursor->offset == context->dupcnt) { context->dupcnt = 0; break; } context->dupcnt++; - } else if (INT_GET(entry->hashval, ARCH_CONVERT) - > cursor->hashval) { + } else if (be32_to_cpu(entry->hashval) > + cursor->hashval) { context->dupcnt = 0; break; } } - if (i == INT_GET(leaf->hdr.count, ARCH_CONVERT)) { + if (i == be16_to_cpu(leaf->hdr.count)) { xfs_attr_trace_l_c("not found", context); return(0); } @@ -2441,12 +2357,12 @@ xfs_attr_leaf_list_int(xfs_dabuf_t *bp, xfs_attr_list_context_t *context) * We have found our place, start copying out the new attributes. */ retval = 0; - for ( ; (i < INT_GET(leaf->hdr.count, ARCH_CONVERT)) + for ( ; (i < be16_to_cpu(leaf->hdr.count)) && (retval == 0); entry++, i++) { attrnames_t *namesp; - if (INT_GET(entry->hashval, ARCH_CONVERT) != cursor->hashval) { - cursor->hashval = INT_GET(entry->hashval, ARCH_CONVERT); + if (be32_to_cpu(entry->hashval) != cursor->hashval) { + cursor->hashval = be32_to_cpu(entry->hashval); cursor->offset = 0; } @@ -2475,8 +2391,7 @@ xfs_attr_leaf_list_int(xfs_dabuf_t *bp, xfs_attr_list_context_t *context) retval = xfs_attr_put_listent(context, namesp, (char *)name_loc->nameval, (int)name_loc->namelen, - (int)INT_GET(name_loc->valuelen, - ARCH_CONVERT)); + be16_to_cpu(name_loc->valuelen)); } } else { name_rmt = XFS_ATTR_LEAF_NAME_REMOTE(leaf, i); @@ -2488,8 +2403,7 @@ xfs_attr_leaf_list_int(xfs_dabuf_t *bp, xfs_attr_list_context_t *context) retval = xfs_attr_put_listent(context, namesp, (char *)name_rmt->name, (int)name_rmt->namelen, - (int)INT_GET(name_rmt->valuelen, - ARCH_CONVERT)); + be32_to_cpu(name_rmt->valuelen)); } } if (retval == 0) { @@ -2596,9 +2510,8 @@ xfs_attr_leaf_clearflag(xfs_da_args_t *args) ASSERT(bp != NULL); leaf = bp->data; - ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) - == XFS_ATTR_LEAF_MAGIC); - ASSERT(args->index < INT_GET(leaf->hdr.count, ARCH_CONVERT)); + ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC); + ASSERT(args->index < be16_to_cpu(leaf->hdr.count)); ASSERT(args->index >= 0); entry = &leaf->entries[ args->index ]; ASSERT(entry->flags & XFS_ATTR_INCOMPLETE); @@ -2613,7 +2526,7 @@ xfs_attr_leaf_clearflag(xfs_da_args_t *args) namelen = name_rmt->namelen; name = (char *)name_rmt->name; } - ASSERT(INT_GET(entry->hashval, ARCH_CONVERT) == args->hashval); + ASSERT(be32_to_cpu(entry->hashval) == args->hashval); ASSERT(namelen == args->namelen); ASSERT(memcmp(name, args->name, namelen) == 0); #endif /* DEBUG */ @@ -2625,8 +2538,8 @@ xfs_attr_leaf_clearflag(xfs_da_args_t *args) if (args->rmtblkno) { ASSERT((entry->flags & XFS_ATTR_LOCAL) == 0); name_rmt = XFS_ATTR_LEAF_NAME_REMOTE(leaf, args->index); - INT_SET(name_rmt->valueblk, ARCH_CONVERT, args->rmtblkno); - INT_SET(name_rmt->valuelen, ARCH_CONVERT, args->valuelen); + name_rmt->valueblk = cpu_to_be32(args->rmtblkno); + name_rmt->valuelen = 
cpu_to_be32(args->valuelen); xfs_da_log_buf(args->trans, bp, XFS_DA_LOGRANGE(leaf, name_rmt, sizeof(*name_rmt))); } @@ -2663,9 +2576,8 @@ xfs_attr_leaf_setflag(xfs_da_args_t *args) ASSERT(bp != NULL); leaf = bp->data; - ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) - == XFS_ATTR_LEAF_MAGIC); - ASSERT(args->index < INT_GET(leaf->hdr.count, ARCH_CONVERT)); + ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC); + ASSERT(args->index < be16_to_cpu(leaf->hdr.count)); ASSERT(args->index >= 0); entry = &leaf->entries[ args->index ]; @@ -2736,16 +2648,14 @@ xfs_attr_leaf_flipflags(xfs_da_args_t *args) } leaf1 = bp1->data; - ASSERT(INT_GET(leaf1->hdr.info.magic, ARCH_CONVERT) - == XFS_ATTR_LEAF_MAGIC); - ASSERT(args->index < INT_GET(leaf1->hdr.count, ARCH_CONVERT)); + ASSERT(be16_to_cpu(leaf1->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC); + ASSERT(args->index < be16_to_cpu(leaf1->hdr.count)); ASSERT(args->index >= 0); entry1 = &leaf1->entries[ args->index ]; leaf2 = bp2->data; - ASSERT(INT_GET(leaf2->hdr.info.magic, ARCH_CONVERT) - == XFS_ATTR_LEAF_MAGIC); - ASSERT(args->index2 < INT_GET(leaf2->hdr.count, ARCH_CONVERT)); + ASSERT(be16_to_cpu(leaf2->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC); + ASSERT(args->index2 < be16_to_cpu(leaf2->hdr.count)); ASSERT(args->index2 >= 0); entry2 = &leaf2->entries[ args->index2 ]; @@ -2768,7 +2678,7 @@ xfs_attr_leaf_flipflags(xfs_da_args_t *args) namelen2 = name_rmt->namelen; name2 = (char *)name_rmt->name; } - ASSERT(INT_GET(entry1->hashval, ARCH_CONVERT) == INT_GET(entry2->hashval, ARCH_CONVERT)); + ASSERT(be32_to_cpu(entry1->hashval) == be32_to_cpu(entry2->hashval)); ASSERT(namelen1 == namelen2); ASSERT(memcmp(name1, name2, namelen1) == 0); #endif /* DEBUG */ @@ -2782,8 +2692,8 @@ xfs_attr_leaf_flipflags(xfs_da_args_t *args) if (args->rmtblkno) { ASSERT((entry1->flags & XFS_ATTR_LOCAL) == 0); name_rmt = XFS_ATTR_LEAF_NAME_REMOTE(leaf1, args->index); - INT_SET(name_rmt->valueblk, ARCH_CONVERT, args->rmtblkno); - INT_SET(name_rmt->valuelen, ARCH_CONVERT, args->valuelen); + name_rmt->valueblk = cpu_to_be32(args->rmtblkno); + name_rmt->valuelen = cpu_to_be32(args->valuelen); xfs_da_log_buf(args->trans, bp1, XFS_DA_LOGRANGE(leaf1, name_rmt, sizeof(*name_rmt))); } @@ -2842,9 +2752,9 @@ xfs_attr_root_inactive(xfs_trans_t **trans, xfs_inode_t *dp) * This is a depth-first traversal! */ info = bp->data; - if (INT_GET(info->magic, ARCH_CONVERT) == XFS_DA_NODE_MAGIC) { + if (be16_to_cpu(info->magic) == XFS_DA_NODE_MAGIC) { error = xfs_attr_node_inactive(trans, dp, bp, 1); - } else if (INT_GET(info->magic, ARCH_CONVERT) == XFS_ATTR_LEAF_MAGIC) { + } else if (be16_to_cpu(info->magic) == XFS_ATTR_LEAF_MAGIC) { error = xfs_attr_leaf_inactive(trans, dp, bp); } else { error = XFS_ERROR(EIO); @@ -2892,15 +2802,14 @@ xfs_attr_node_inactive(xfs_trans_t **trans, xfs_inode_t *dp, xfs_dabuf_t *bp, } node = bp->data; - ASSERT(INT_GET(node->hdr.info.magic, ARCH_CONVERT) - == XFS_DA_NODE_MAGIC); + ASSERT(be16_to_cpu(node->hdr.info.magic) == XFS_DA_NODE_MAGIC); parent_blkno = xfs_da_blkno(bp); /* save for re-read later */ - count = INT_GET(node->hdr.count, ARCH_CONVERT); + count = be16_to_cpu(node->hdr.count); if (!count) { xfs_da_brelse(*trans, bp); return(0); } - child_fsb = INT_GET(node->btree[0].before, ARCH_CONVERT); + child_fsb = be32_to_cpu(node->btree[0].before); xfs_da_brelse(*trans, bp); /* no locks for later trans */ /* @@ -2927,12 +2836,10 @@ xfs_attr_node_inactive(xfs_trans_t **trans, xfs_inode_t *dp, xfs_dabuf_t *bp, * Invalidate the subtree, however we have to. 
*/ info = child_bp->data; - if (INT_GET(info->magic, ARCH_CONVERT) - == XFS_DA_NODE_MAGIC) { + if (be16_to_cpu(info->magic) == XFS_DA_NODE_MAGIC) { error = xfs_attr_node_inactive(trans, dp, child_bp, level+1); - } else if (INT_GET(info->magic, ARCH_CONVERT) - == XFS_ATTR_LEAF_MAGIC) { + } else if (be16_to_cpu(info->magic) == XFS_ATTR_LEAF_MAGIC) { error = xfs_attr_leaf_inactive(trans, dp, child_bp); } else { @@ -2962,7 +2869,7 @@ xfs_attr_node_inactive(xfs_trans_t **trans, xfs_inode_t *dp, xfs_dabuf_t *bp, &bp, XFS_ATTR_FORK); if (error) return(error); - child_fsb = INT_GET(node->btree[i+1].before, ARCH_CONVERT); + child_fsb = be32_to_cpu(node->btree[i+1].before); xfs_da_brelse(*trans, bp); } /* @@ -2991,17 +2898,16 @@ xfs_attr_leaf_inactive(xfs_trans_t **trans, xfs_inode_t *dp, xfs_dabuf_t *bp) int error, count, size, tmp, i; leaf = bp->data; - ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) - == XFS_ATTR_LEAF_MAGIC); + ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC); /* * Count the number of "remote" value extents. */ count = 0; entry = &leaf->entries[0]; - for (i = 0; i < INT_GET(leaf->hdr.count, ARCH_CONVERT); entry++, i++) { - if ( INT_GET(entry->nameidx, ARCH_CONVERT) - && ((entry->flags & XFS_ATTR_LOCAL) == 0)) { + for (i = 0; i < be16_to_cpu(leaf->hdr.count); entry++, i++) { + if (be16_to_cpu(entry->nameidx) && + ((entry->flags & XFS_ATTR_LOCAL) == 0)) { name_rmt = XFS_ATTR_LEAF_NAME_REMOTE(leaf, i); if (name_rmt->valueblk) count++; @@ -3027,17 +2933,14 @@ xfs_attr_leaf_inactive(xfs_trans_t **trans, xfs_inode_t *dp, xfs_dabuf_t *bp) */ lp = list; entry = &leaf->entries[0]; - for (i = 0; i < INT_GET(leaf->hdr.count, ARCH_CONVERT); entry++, i++) { - if ( INT_GET(entry->nameidx, ARCH_CONVERT) - && ((entry->flags & XFS_ATTR_LOCAL) == 0)) { + for (i = 0; i < be16_to_cpu(leaf->hdr.count); entry++, i++) { + if (be16_to_cpu(entry->nameidx) && + ((entry->flags & XFS_ATTR_LOCAL) == 0)) { name_rmt = XFS_ATTR_LEAF_NAME_REMOTE(leaf, i); if (name_rmt->valueblk) { - /* both on-disk, don't endian flip twice */ - lp->valueblk = name_rmt->valueblk; - INT_SET(lp->valuelen, ARCH_CONVERT, - XFS_B_TO_FSB(dp->i_mount, - INT_GET(name_rmt->valuelen, - ARCH_CONVERT))); + lp->valueblk = be32_to_cpu(name_rmt->valueblk); + lp->valuelen = XFS_B_TO_FSB(dp->i_mount, + be32_to_cpu(name_rmt->valuelen)); lp++; } } @@ -3050,10 +2953,8 @@ xfs_attr_leaf_inactive(xfs_trans_t **trans, xfs_inode_t *dp, xfs_dabuf_t *bp) error = 0; for (lp = list, i = 0; i < count; i++, lp++) { tmp = xfs_attr_leaf_freextent(trans, dp, - INT_GET(lp->valueblk, - ARCH_CONVERT), - INT_GET(lp->valuelen, - ARCH_CONVERT)); + lp->valueblk, lp->valuelen); + if (error == 0) error = tmp; /* save only the 1st errno */ } diff --git a/fs/xfs/xfs_attr_leaf.h b/fs/xfs/xfs_attr_leaf.h index 541e34109bb..51c3ee156b2 100644 --- a/fs/xfs/xfs_attr_leaf.h +++ b/fs/xfs/xfs_attr_leaf.h @@ -73,39 +73,39 @@ struct xfs_trans; #define XFS_ATTR_LEAF_MAPSIZE 3 /* how many freespace slots */ typedef struct xfs_attr_leaf_map { /* RLE map of free bytes */ - __uint16_t base; /* base of free region */ - __uint16_t size; /* length of free region */ + __be16 base; /* base of free region */ + __be16 size; /* length of free region */ } xfs_attr_leaf_map_t; typedef struct xfs_attr_leaf_hdr { /* constant-structure header block */ xfs_da_blkinfo_t info; /* block type, links, etc. 
*/ - __uint16_t count; /* count of active leaf_entry's */ - __uint16_t usedbytes; /* num bytes of names/values stored */ - __uint16_t firstused; /* first used byte in name area */ - __uint8_t holes; /* != 0 if blk needs compaction */ - __uint8_t pad1; + __be16 count; /* count of active leaf_entry's */ + __be16 usedbytes; /* num bytes of names/values stored */ + __be16 firstused; /* first used byte in name area */ + __u8 holes; /* != 0 if blk needs compaction */ + __u8 pad1; xfs_attr_leaf_map_t freemap[XFS_ATTR_LEAF_MAPSIZE]; /* N largest free regions */ } xfs_attr_leaf_hdr_t; typedef struct xfs_attr_leaf_entry { /* sorted on key, not name */ - xfs_dahash_t hashval; /* hash value of name */ - __uint16_t nameidx; /* index into buffer of name/value */ - __uint8_t flags; /* LOCAL/ROOT/SECURE/INCOMPLETE flag */ - __uint8_t pad2; /* unused pad byte */ + __be32 hashval; /* hash value of name */ + __be16 nameidx; /* index into buffer of name/value */ + __u8 flags; /* LOCAL/ROOT/SECURE/INCOMPLETE flag */ + __u8 pad2; /* unused pad byte */ } xfs_attr_leaf_entry_t; typedef struct xfs_attr_leaf_name_local { - __uint16_t valuelen; /* number of bytes in value */ - __uint8_t namelen; /* length of name bytes */ - __uint8_t nameval[1]; /* name/value bytes */ + __be16 valuelen; /* number of bytes in value */ + __u8 namelen; /* length of name bytes */ + __u8 nameval[1]; /* name/value bytes */ } xfs_attr_leaf_name_local_t; typedef struct xfs_attr_leaf_name_remote { - xfs_dablk_t valueblk; /* block number of value bytes */ - __uint32_t valuelen; /* number of bytes in value */ - __uint8_t namelen; /* length of name bytes */ - __uint8_t name[1]; /* name bytes */ + __be32 valueblk; /* block number of value bytes */ + __be32 valuelen; /* number of bytes in value */ + __u8 namelen; /* length of name bytes */ + __u8 name[1]; /* name bytes */ } xfs_attr_leaf_name_remote_t; typedef struct xfs_attr_leafblock { @@ -143,8 +143,8 @@ typedef struct xfs_attr_leafblock { static inline xfs_attr_leaf_name_remote_t * xfs_attr_leaf_name_remote(xfs_attr_leafblock_t *leafp, int idx) { - return (xfs_attr_leaf_name_remote_t *) &((char *) - (leafp))[INT_GET((leafp)->entries[idx].nameidx, ARCH_CONVERT)]; + return (xfs_attr_leaf_name_remote_t *) + &((char *)leafp)[be16_to_cpu(leafp->entries[idx].nameidx)]; } #define XFS_ATTR_LEAF_NAME_LOCAL(leafp,idx) \ @@ -152,16 +152,15 @@ xfs_attr_leaf_name_remote(xfs_attr_leafblock_t *leafp, int idx) static inline xfs_attr_leaf_name_local_t * xfs_attr_leaf_name_local(xfs_attr_leafblock_t *leafp, int idx) { - return (xfs_attr_leaf_name_local_t *) &((char *) - (leafp))[INT_GET((leafp)->entries[idx].nameidx, ARCH_CONVERT)]; + return (xfs_attr_leaf_name_local_t *) + &((char *)leafp)[be16_to_cpu(leafp->entries[idx].nameidx)]; } #define XFS_ATTR_LEAF_NAME(leafp,idx) \ xfs_attr_leaf_name(leafp,idx) static inline char *xfs_attr_leaf_name(xfs_attr_leafblock_t *leafp, int idx) { - return (&((char *) - (leafp))[INT_GET((leafp)->entries[idx].nameidx, ARCH_CONVERT)]); + return &((char *)leafp)[be16_to_cpu(leafp->entries[idx].nameidx)]; } /* diff --git a/fs/xfs/xfs_attr_sf.h b/fs/xfs/xfs_attr_sf.h index ffed6ca81a5..f67f917803b 100644 --- a/fs/xfs/xfs_attr_sf.h +++ b/fs/xfs/xfs_attr_sf.h @@ -32,8 +32,8 @@ struct xfs_inode; */ typedef struct xfs_attr_shortform { struct xfs_attr_sf_hdr { /* constant-structure header block */ - __uint16_t totsize; /* total bytes in shortform list */ - __uint8_t count; /* count of active entries */ + __be16 totsize; /* total bytes in shortform list */ + __u8 count; /* count of 
active entries */ } hdr; struct xfs_attr_sf_entry { __uint8_t namelen; /* actual length of name (no NULL) */ @@ -66,8 +66,8 @@ typedef struct xfs_attr_sf_sort { #define XFS_ATTR_SF_NEXTENTRY(sfep) /* next entry in struct */ \ ((xfs_attr_sf_entry_t *)((char *)(sfep) + XFS_ATTR_SF_ENTSIZE(sfep))) #define XFS_ATTR_SF_TOTSIZE(dp) /* total space in use */ \ - (INT_GET(((xfs_attr_shortform_t *) \ - ((dp)->i_afp->if_u1.if_data))->hdr.totsize, ARCH_CONVERT)) + (be16_to_cpu(((xfs_attr_shortform_t *) \ + ((dp)->i_afp->if_u1.if_data))->hdr.totsize)) #if defined(XFS_ATTR_TRACE) /* diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c index 70625e577c7..2d702e4a74a 100644 --- a/fs/xfs/xfs_bmap.c +++ b/fs/xfs/xfs_bmap.c @@ -89,7 +89,7 @@ xfs_bmap_add_attrfork_local( int *flags); /* inode logging flags */ /* - * Called by xfs_bmapi to update extent list structure and the btree + * Called by xfs_bmapi to update file extent records and the btree * after allocating space (or doing a delayed allocation). */ STATIC int /* error */ @@ -97,7 +97,7 @@ xfs_bmap_add_extent( xfs_inode_t *ip, /* incore inode pointer */ xfs_extnum_t idx, /* extent number to update/insert */ xfs_btree_cur_t **curp, /* if *curp is null, not a btree */ - xfs_bmbt_irec_t *new, /* new data to put in extent list */ + xfs_bmbt_irec_t *new, /* new data to add to file extents */ xfs_fsblock_t *first, /* pointer to firstblock variable */ xfs_bmap_free_t *flist, /* list of extents to be freed */ int *logflagsp, /* inode logging flags */ @@ -113,7 +113,7 @@ xfs_bmap_add_extent_delay_real( xfs_inode_t *ip, /* incore inode pointer */ xfs_extnum_t idx, /* extent number to update/insert */ xfs_btree_cur_t **curp, /* if *curp is null, not a btree */ - xfs_bmbt_irec_t *new, /* new data to put in extent list */ + xfs_bmbt_irec_t *new, /* new data to add to file extents */ xfs_filblks_t *dnew, /* new delayed-alloc indirect blocks */ xfs_fsblock_t *first, /* pointer to firstblock variable */ xfs_bmap_free_t *flist, /* list of extents to be freed */ @@ -129,7 +129,7 @@ xfs_bmap_add_extent_hole_delay( xfs_inode_t *ip, /* incore inode pointer */ xfs_extnum_t idx, /* extent number to update/insert */ xfs_btree_cur_t *cur, /* if null, not a btree */ - xfs_bmbt_irec_t *new, /* new data to put in extent list */ + xfs_bmbt_irec_t *new, /* new data to add to file extents */ int *logflagsp,/* inode logging flags */ int rsvd); /* OK to allocate reserved blocks */ @@ -142,7 +142,7 @@ xfs_bmap_add_extent_hole_real( xfs_inode_t *ip, /* incore inode pointer */ xfs_extnum_t idx, /* extent number to update/insert */ xfs_btree_cur_t *cur, /* if null, not a btree */ - xfs_bmbt_irec_t *new, /* new data to put in extent list */ + xfs_bmbt_irec_t *new, /* new data to add to file extents */ int *logflagsp, /* inode logging flags */ int whichfork); /* data or attr fork */ @@ -155,7 +155,7 @@ xfs_bmap_add_extent_unwritten_real( xfs_inode_t *ip, /* incore inode pointer */ xfs_extnum_t idx, /* extent number to update/insert */ xfs_btree_cur_t **curp, /* if *curp is null, not a btree */ - xfs_bmbt_irec_t *new, /* new data to put in extent list */ + xfs_bmbt_irec_t *new, /* new data to add to file extents */ int *logflagsp); /* inode logging flags */ /* @@ -169,7 +169,7 @@ xfs_bmap_alloc( /* * Transform a btree format file with only one leaf node, where the * extents list will fit in the inode, into an extents format file. 
- * Since the extent list is already in-core, all we have to do is + * Since the file extents are already in-core, all we have to do is * give up the space for the btree root and pitch the leaf block. */ STATIC int /* error */ @@ -191,7 +191,7 @@ xfs_bmap_check_extents( #endif /* - * Called by xfs_bmapi to update extent list structure and the btree + * Called by xfs_bmapi to update file extent records and the btree * after removing space (or undoing a delayed allocation). */ STATIC int /* error */ @@ -201,7 +201,7 @@ xfs_bmap_del_extent( xfs_extnum_t idx, /* extent number to update/insert */ xfs_bmap_free_t *flist, /* list of extents to be freed */ xfs_btree_cur_t *cur, /* if null, not a btree */ - xfs_bmbt_irec_t *new, /* new data to put in extent list */ + xfs_bmbt_irec_t *new, /* new data to add to file extents */ int *logflagsp,/* inode logging flags */ int whichfork, /* data or attr fork */ int rsvd); /* OK to allocate reserved blocks */ @@ -217,18 +217,6 @@ xfs_bmap_del_free( xfs_bmap_free_item_t *free); /* list item to be freed */ /* - * Remove count entries from the extents array for inode "ip", starting - * at index "idx". Copies the remaining items down over the deleted ones, - * and gives back the excess memory. - */ -STATIC void -xfs_bmap_delete_exlist( - xfs_inode_t *ip, /* incode inode pointer */ - xfs_extnum_t idx, /* starting delete index */ - xfs_extnum_t count, /* count of items to delete */ - int whichfork); /* data or attr fork */ - -/* * Convert an extents-format file into a btree-format file. * The new file will have a root block (in the inode) and a single child block. */ @@ -244,18 +232,6 @@ xfs_bmap_extents_to_btree( int whichfork); /* data or attr fork */ /* - * Insert new item(s) in the extent list for inode "ip". - * Count new items are inserted at offset idx. - */ -STATIC void -xfs_bmap_insert_exlist( - xfs_inode_t *ip, /* incore inode pointer */ - xfs_extnum_t idx, /* starting index of new items */ - xfs_extnum_t count, /* number of inserted items */ - xfs_bmbt_irec_t *new, /* items to insert */ - int whichfork); /* data or attr fork */ - -/* * Convert a local file to an extents file. * This code is sort of bogus, since the file data needs to get * logged so it won't be lost. The bmap-level manipulations are ok, though. @@ -316,7 +292,7 @@ xfs_bmap_trace_addentry( int whichfork); /* data or attr fork */ /* - * Add bmap trace entry prior to a call to xfs_bmap_delete_exlist. + * Add bmap trace entry prior to a call to xfs_iext_remove. */ STATIC void xfs_bmap_trace_delete( @@ -328,7 +304,7 @@ xfs_bmap_trace_delete( int whichfork); /* data or attr fork */ /* - * Add bmap trace entry prior to a call to xfs_bmap_insert_exlist, or + * Add bmap trace entry prior to a call to xfs_iext_insert, or * reading in the extents list from the disk (in the btree). */ STATIC void @@ -343,7 +319,7 @@ xfs_bmap_trace_insert( int whichfork); /* data or attr fork */ /* - * Add bmap trace entry after updating an extent list entry in place. + * Add bmap trace entry after updating an extent record in place. */ STATIC void xfs_bmap_trace_post_update( @@ -354,7 +330,7 @@ xfs_bmap_trace_post_update( int whichfork); /* data or attr fork */ /* - * Add bmap trace entry prior to updating an extent list entry in place. + * Add bmap trace entry prior to updating an extent record in place. 
*/ STATIC void xfs_bmap_trace_pre_update( @@ -413,19 +389,24 @@ STATIC int xfs_bmap_count_tree( xfs_mount_t *mp, xfs_trans_t *tp, + xfs_ifork_t *ifp, xfs_fsblock_t blockno, int levelin, int *count); STATIC int xfs_bmap_count_leaves( - xfs_bmbt_rec_t *frp, + xfs_ifork_t *ifp, + xfs_extnum_t idx, int numrecs, int *count); STATIC int xfs_bmap_disk_count_leaves( - xfs_bmbt_rec_t *frp, + xfs_ifork_t *ifp, + xfs_mount_t *mp, + xfs_extnum_t idx, + xfs_bmbt_block_t *block, int numrecs, int *count); @@ -537,7 +518,7 @@ xfs_bmap_add_attrfork_local( } /* - * Called by xfs_bmapi to update extent list structure and the btree + * Called by xfs_bmapi to update file extent records and the btree * after allocating space (or doing a delayed allocation). */ STATIC int /* error */ @@ -545,7 +526,7 @@ xfs_bmap_add_extent( xfs_inode_t *ip, /* incore inode pointer */ xfs_extnum_t idx, /* extent number to update/insert */ xfs_btree_cur_t **curp, /* if *curp is null, not a btree */ - xfs_bmbt_irec_t *new, /* new data to put in extent list */ + xfs_bmbt_irec_t *new, /* new data to add to file extents */ xfs_fsblock_t *first, /* pointer to firstblock variable */ xfs_bmap_free_t *flist, /* list of extents to be freed */ int *logflagsp, /* inode logging flags */ @@ -578,7 +559,7 @@ xfs_bmap_add_extent( if (nextents == 0) { xfs_bmap_trace_insert(fname, "insert empty", ip, 0, 1, new, NULL, whichfork); - xfs_bmap_insert_exlist(ip, 0, 1, new, whichfork); + xfs_iext_insert(ifp, 0, 1, new); ASSERT(cur == NULL); ifp->if_lastex = 0; if (!ISNULLSTARTBLOCK(new->br_startblock)) { @@ -614,7 +595,7 @@ xfs_bmap_add_extent( /* * Get the record referred to by idx. */ - xfs_bmbt_get_all(&ifp->if_u1.if_extents[idx], &prev); + xfs_bmbt_get_all(xfs_iext_get_ext(ifp, idx), &prev); /* * If it's a real allocation record, and the new allocation ends * after the start of the referred to record, then we're filling @@ -714,14 +695,13 @@ xfs_bmap_add_extent_delay_real( xfs_inode_t *ip, /* incore inode pointer */ xfs_extnum_t idx, /* extent number to update/insert */ xfs_btree_cur_t **curp, /* if *curp is null, not a btree */ - xfs_bmbt_irec_t *new, /* new data to put in extent list */ + xfs_bmbt_irec_t *new, /* new data to add to file extents */ xfs_filblks_t *dnew, /* new delayed-alloc indirect blocks */ xfs_fsblock_t *first, /* pointer to firstblock variable */ xfs_bmap_free_t *flist, /* list of extents to be freed */ int *logflagsp, /* inode logging flags */ int rsvd) /* OK to use reserved data block allocation */ { - xfs_bmbt_rec_t *base; /* base of extent entry list */ xfs_btree_cur_t *cur; /* btree cursor */ int diff; /* temp value */ xfs_bmbt_rec_t *ep; /* extent entry for idx */ @@ -730,6 +710,7 @@ xfs_bmap_add_extent_delay_real( static char fname[] = "xfs_bmap_add_extent_delay_real"; #endif int i; /* temp state */ + xfs_ifork_t *ifp; /* inode fork pointer */ xfs_fileoff_t new_endoff; /* end offset of new entry */ xfs_bmbt_irec_t r[3]; /* neighbor extent entries */ /* left is 0, right is 1, prev is 2 */ @@ -763,8 +744,8 @@ xfs_bmap_add_extent_delay_real( * Set up a bunch of variables to make the tests simpler. */ cur = *curp; - base = ip->i_df.if_u1.if_extents; - ep = &base[idx]; + ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK); + ep = xfs_iext_get_ext(ifp, idx); xfs_bmbt_get_all(ep, &PREV); new_endoff = new->br_startoff + new->br_blockcount; ASSERT(PREV.br_startoff <= new->br_startoff); @@ -781,7 +762,7 @@ xfs_bmap_add_extent_delay_real( * Don't set contiguous if the combined extent would be too large. 
*/ if (STATE_SET_TEST(LEFT_VALID, idx > 0)) { - xfs_bmbt_get_all(ep - 1, &LEFT); + xfs_bmbt_get_all(xfs_iext_get_ext(ifp, idx - 1), &LEFT); STATE_SET(LEFT_DELAY, ISNULLSTARTBLOCK(LEFT.br_startblock)); } STATE_SET(LEFT_CONTIG, @@ -798,7 +779,7 @@ xfs_bmap_add_extent_delay_real( if (STATE_SET_TEST(RIGHT_VALID, idx < ip->i_df.if_bytes / (uint)sizeof(xfs_bmbt_rec_t) - 1)) { - xfs_bmbt_get_all(ep + 1, &RIGHT); + xfs_bmbt_get_all(xfs_iext_get_ext(ifp, idx + 1), &RIGHT); STATE_SET(RIGHT_DELAY, ISNULLSTARTBLOCK(RIGHT.br_startblock)); } STATE_SET(RIGHT_CONTIG, @@ -825,14 +806,14 @@ xfs_bmap_add_extent_delay_real( */ xfs_bmap_trace_pre_update(fname, "LF|RF|LC|RC", ip, idx - 1, XFS_DATA_FORK); - xfs_bmbt_set_blockcount(ep - 1, + xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, idx - 1), LEFT.br_blockcount + PREV.br_blockcount + RIGHT.br_blockcount); xfs_bmap_trace_post_update(fname, "LF|RF|LC|RC", ip, idx - 1, XFS_DATA_FORK); xfs_bmap_trace_delete(fname, "LF|RF|LC|RC", ip, idx, 2, XFS_DATA_FORK); - xfs_bmap_delete_exlist(ip, idx, 2, XFS_DATA_FORK); + xfs_iext_remove(ifp, idx, 2); ip->i_df.if_lastex = idx - 1; ip->i_d.di_nextents--; if (cur == NULL) @@ -867,14 +848,14 @@ xfs_bmap_add_extent_delay_real( */ xfs_bmap_trace_pre_update(fname, "LF|RF|LC", ip, idx - 1, XFS_DATA_FORK); - xfs_bmbt_set_blockcount(ep - 1, + xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, idx - 1), LEFT.br_blockcount + PREV.br_blockcount); xfs_bmap_trace_post_update(fname, "LF|RF|LC", ip, idx - 1, XFS_DATA_FORK); ip->i_df.if_lastex = idx - 1; xfs_bmap_trace_delete(fname, "LF|RF|LC", ip, idx, 1, XFS_DATA_FORK); - xfs_bmap_delete_exlist(ip, idx, 1, XFS_DATA_FORK); + xfs_iext_remove(ifp, idx, 1); if (cur == NULL) rval = XFS_ILOG_DEXT; else { @@ -908,7 +889,7 @@ xfs_bmap_add_extent_delay_real( ip->i_df.if_lastex = idx; xfs_bmap_trace_delete(fname, "LF|RF|RC", ip, idx + 1, 1, XFS_DATA_FORK); - xfs_bmap_delete_exlist(ip, idx + 1, 1, XFS_DATA_FORK); + xfs_iext_remove(ifp, idx + 1, 1); if (cur == NULL) rval = XFS_ILOG_DEXT; else { @@ -964,7 +945,7 @@ xfs_bmap_add_extent_delay_real( */ xfs_bmap_trace_pre_update(fname, "LF|LC", ip, idx - 1, XFS_DATA_FORK); - xfs_bmbt_set_blockcount(ep - 1, + xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, idx - 1), LEFT.br_blockcount + new->br_blockcount); xfs_bmbt_set_startoff(ep, PREV.br_startoff + new->br_blockcount); @@ -1010,7 +991,7 @@ xfs_bmap_add_extent_delay_real( xfs_bmbt_set_blockcount(ep, temp); xfs_bmap_trace_insert(fname, "LF", ip, idx, 1, new, NULL, XFS_DATA_FORK); - xfs_bmap_insert_exlist(ip, idx, 1, new, XFS_DATA_FORK); + xfs_iext_insert(ifp, idx, 1, new); ip->i_df.if_lastex = idx; ip->i_d.di_nextents++; if (cur == NULL) @@ -1039,8 +1020,7 @@ xfs_bmap_add_extent_delay_real( temp = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp), STARTBLOCKVAL(PREV.br_startblock) - (cur ? 
cur->bc_private.b.allocated : 0)); - base = ip->i_df.if_u1.if_extents; - ep = &base[idx + 1]; + ep = xfs_iext_get_ext(ifp, idx + 1); xfs_bmbt_set_startblock(ep, NULLSTARTBLOCK((int)temp)); xfs_bmap_trace_post_update(fname, "LF", ip, idx + 1, XFS_DATA_FORK); @@ -1058,7 +1038,8 @@ xfs_bmap_add_extent_delay_real( xfs_bmap_trace_pre_update(fname, "RF|RC", ip, idx + 1, XFS_DATA_FORK); xfs_bmbt_set_blockcount(ep, temp); - xfs_bmbt_set_allf(ep + 1, new->br_startoff, new->br_startblock, + xfs_bmbt_set_allf(xfs_iext_get_ext(ifp, idx + 1), + new->br_startoff, new->br_startblock, new->br_blockcount + RIGHT.br_blockcount, RIGHT.br_state); xfs_bmap_trace_post_update(fname, "RF|RC", ip, idx + 1, @@ -1098,7 +1079,7 @@ xfs_bmap_add_extent_delay_real( xfs_bmbt_set_blockcount(ep, temp); xfs_bmap_trace_insert(fname, "RF", ip, idx + 1, 1, new, NULL, XFS_DATA_FORK); - xfs_bmap_insert_exlist(ip, idx + 1, 1, new, XFS_DATA_FORK); + xfs_iext_insert(ifp, idx + 1, 1, new); ip->i_df.if_lastex = idx + 1; ip->i_d.di_nextents++; if (cur == NULL) @@ -1127,8 +1108,7 @@ xfs_bmap_add_extent_delay_real( temp = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp), STARTBLOCKVAL(PREV.br_startblock) - (cur ? cur->bc_private.b.allocated : 0)); - base = ip->i_df.if_u1.if_extents; - ep = &base[idx]; + ep = xfs_iext_get_ext(ifp, idx); xfs_bmbt_set_startblock(ep, NULLSTARTBLOCK((int)temp)); xfs_bmap_trace_post_update(fname, "RF", ip, idx, XFS_DATA_FORK); *dnew = temp; @@ -1149,7 +1129,7 @@ xfs_bmap_add_extent_delay_real( r[1].br_blockcount = temp2; xfs_bmap_trace_insert(fname, "0", ip, idx + 1, 2, &r[0], &r[1], XFS_DATA_FORK); - xfs_bmap_insert_exlist(ip, idx + 1, 2, &r[0], XFS_DATA_FORK); + xfs_iext_insert(ifp, idx + 1, 2, &r[0]); ip->i_df.if_lastex = idx + 1; ip->i_d.di_nextents++; if (cur == NULL) @@ -1204,13 +1184,13 @@ xfs_bmap_add_extent_delay_real( } } } - base = ip->i_df.if_u1.if_extents; - ep = &base[idx]; + ep = xfs_iext_get_ext(ifp, idx); xfs_bmbt_set_startblock(ep, NULLSTARTBLOCK((int)temp)); xfs_bmap_trace_post_update(fname, "0", ip, idx, XFS_DATA_FORK); xfs_bmap_trace_pre_update(fname, "0", ip, idx + 2, XFS_DATA_FORK); - xfs_bmbt_set_startblock(ep + 2, NULLSTARTBLOCK((int)temp2)); + xfs_bmbt_set_startblock(xfs_iext_get_ext(ifp, idx + 2), + NULLSTARTBLOCK((int)temp2)); xfs_bmap_trace_post_update(fname, "0", ip, idx + 2, XFS_DATA_FORK); *dnew = temp + temp2; @@ -1254,10 +1234,9 @@ xfs_bmap_add_extent_unwritten_real( xfs_inode_t *ip, /* incore inode pointer */ xfs_extnum_t idx, /* extent number to update/insert */ xfs_btree_cur_t **curp, /* if *curp is null, not a btree */ - xfs_bmbt_irec_t *new, /* new data to put in extent list */ + xfs_bmbt_irec_t *new, /* new data to add to file extents */ int *logflagsp) /* inode logging flags */ { - xfs_bmbt_rec_t *base; /* base of extent entry list */ xfs_btree_cur_t *cur; /* btree cursor */ xfs_bmbt_rec_t *ep; /* extent entry for idx */ int error; /* error return value */ @@ -1265,6 +1244,7 @@ xfs_bmap_add_extent_unwritten_real( static char fname[] = "xfs_bmap_add_extent_unwritten_real"; #endif int i; /* temp state */ + xfs_ifork_t *ifp; /* inode fork pointer */ xfs_fileoff_t new_endoff; /* end offset of new entry */ xfs_exntst_t newext; /* new extent state */ xfs_exntst_t oldext; /* old extent state */ @@ -1298,8 +1278,8 @@ xfs_bmap_add_extent_unwritten_real( */ error = 0; cur = *curp; - base = ip->i_df.if_u1.if_extents; - ep = &base[idx]; + ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK); + ep = xfs_iext_get_ext(ifp, idx); xfs_bmbt_get_all(ep, &PREV); newext = new->br_state; oldext = (newext 
== XFS_EXT_UNWRITTEN) ? @@ -1320,7 +1300,7 @@ xfs_bmap_add_extent_unwritten_real( * Don't set contiguous if the combined extent would be too large. */ if (STATE_SET_TEST(LEFT_VALID, idx > 0)) { - xfs_bmbt_get_all(ep - 1, &LEFT); + xfs_bmbt_get_all(xfs_iext_get_ext(ifp, idx - 1), &LEFT); STATE_SET(LEFT_DELAY, ISNULLSTARTBLOCK(LEFT.br_startblock)); } STATE_SET(LEFT_CONTIG, @@ -1337,7 +1317,7 @@ xfs_bmap_add_extent_unwritten_real( if (STATE_SET_TEST(RIGHT_VALID, idx < ip->i_df.if_bytes / (uint)sizeof(xfs_bmbt_rec_t) - 1)) { - xfs_bmbt_get_all(ep + 1, &RIGHT); + xfs_bmbt_get_all(xfs_iext_get_ext(ifp, idx + 1), &RIGHT); STATE_SET(RIGHT_DELAY, ISNULLSTARTBLOCK(RIGHT.br_startblock)); } STATE_SET(RIGHT_CONTIG, @@ -1363,14 +1343,14 @@ xfs_bmap_add_extent_unwritten_real( */ xfs_bmap_trace_pre_update(fname, "LF|RF|LC|RC", ip, idx - 1, XFS_DATA_FORK); - xfs_bmbt_set_blockcount(ep - 1, + xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, idx - 1), LEFT.br_blockcount + PREV.br_blockcount + RIGHT.br_blockcount); xfs_bmap_trace_post_update(fname, "LF|RF|LC|RC", ip, idx - 1, XFS_DATA_FORK); xfs_bmap_trace_delete(fname, "LF|RF|LC|RC", ip, idx, 2, XFS_DATA_FORK); - xfs_bmap_delete_exlist(ip, idx, 2, XFS_DATA_FORK); + xfs_iext_remove(ifp, idx, 2); ip->i_df.if_lastex = idx - 1; ip->i_d.di_nextents -= 2; if (cur == NULL) @@ -1409,14 +1389,14 @@ xfs_bmap_add_extent_unwritten_real( */ xfs_bmap_trace_pre_update(fname, "LF|RF|LC", ip, idx - 1, XFS_DATA_FORK); - xfs_bmbt_set_blockcount(ep - 1, + xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, idx - 1), LEFT.br_blockcount + PREV.br_blockcount); xfs_bmap_trace_post_update(fname, "LF|RF|LC", ip, idx - 1, XFS_DATA_FORK); ip->i_df.if_lastex = idx - 1; xfs_bmap_trace_delete(fname, "LF|RF|LC", ip, idx, 1, XFS_DATA_FORK); - xfs_bmap_delete_exlist(ip, idx, 1, XFS_DATA_FORK); + xfs_iext_remove(ifp, idx, 1); ip->i_d.di_nextents--; if (cur == NULL) rval = XFS_ILOG_CORE | XFS_ILOG_DEXT; @@ -1456,7 +1436,7 @@ xfs_bmap_add_extent_unwritten_real( ip->i_df.if_lastex = idx; xfs_bmap_trace_delete(fname, "LF|RF|RC", ip, idx + 1, 1, XFS_DATA_FORK); - xfs_bmap_delete_exlist(ip, idx + 1, 1, XFS_DATA_FORK); + xfs_iext_remove(ifp, idx + 1, 1); ip->i_d.di_nextents--; if (cur == NULL) rval = XFS_ILOG_CORE | XFS_ILOG_DEXT; @@ -1516,7 +1496,7 @@ xfs_bmap_add_extent_unwritten_real( */ xfs_bmap_trace_pre_update(fname, "LF|LC", ip, idx - 1, XFS_DATA_FORK); - xfs_bmbt_set_blockcount(ep - 1, + xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, idx - 1), LEFT.br_blockcount + new->br_blockcount); xfs_bmbt_set_startoff(ep, PREV.br_startoff + new->br_blockcount); @@ -1571,7 +1551,7 @@ xfs_bmap_add_extent_unwritten_real( xfs_bmap_trace_post_update(fname, "LF", ip, idx, XFS_DATA_FORK); xfs_bmap_trace_insert(fname, "LF", ip, idx, 1, new, NULL, XFS_DATA_FORK); - xfs_bmap_insert_exlist(ip, idx, 1, new, XFS_DATA_FORK); + xfs_iext_insert(ifp, idx, 1, new); ip->i_df.if_lastex = idx; ip->i_d.di_nextents++; if (cur == NULL) @@ -1609,7 +1589,8 @@ xfs_bmap_add_extent_unwritten_real( PREV.br_blockcount - new->br_blockcount); xfs_bmap_trace_post_update(fname, "RF|RC", ip, idx, XFS_DATA_FORK); - xfs_bmbt_set_allf(ep + 1, new->br_startoff, new->br_startblock, + xfs_bmbt_set_allf(xfs_iext_get_ext(ifp, idx + 1), + new->br_startoff, new->br_startblock, new->br_blockcount + RIGHT.br_blockcount, newext); xfs_bmap_trace_post_update(fname, "RF|RC", ip, idx + 1, XFS_DATA_FORK); @@ -1649,7 +1630,7 @@ xfs_bmap_add_extent_unwritten_real( xfs_bmap_trace_post_update(fname, "RF", ip, idx, XFS_DATA_FORK); xfs_bmap_trace_insert(fname, "RF", ip, 
idx + 1, 1, new, NULL, XFS_DATA_FORK); - xfs_bmap_insert_exlist(ip, idx + 1, 1, new, XFS_DATA_FORK); + xfs_iext_insert(ifp, idx + 1, 1, new); ip->i_df.if_lastex = idx + 1; ip->i_d.di_nextents++; if (cur == NULL) @@ -1696,7 +1677,7 @@ xfs_bmap_add_extent_unwritten_real( r[1].br_state = oldext; xfs_bmap_trace_insert(fname, "0", ip, idx + 1, 2, &r[0], &r[1], XFS_DATA_FORK); - xfs_bmap_insert_exlist(ip, idx + 1, 2, &r[0], XFS_DATA_FORK); + xfs_iext_insert(ifp, idx + 1, 2, &r[0]); ip->i_df.if_lastex = idx + 1; ip->i_d.di_nextents += 2; if (cur == NULL) @@ -1770,15 +1751,15 @@ xfs_bmap_add_extent_hole_delay( xfs_inode_t *ip, /* incore inode pointer */ xfs_extnum_t idx, /* extent number to update/insert */ xfs_btree_cur_t *cur, /* if null, not a btree */ - xfs_bmbt_irec_t *new, /* new data to put in extent list */ + xfs_bmbt_irec_t *new, /* new data to add to file extents */ int *logflagsp, /* inode logging flags */ int rsvd) /* OK to allocate reserved blocks */ { - xfs_bmbt_rec_t *base; /* base of extent entry list */ - xfs_bmbt_rec_t *ep; /* extent list entry for idx */ + xfs_bmbt_rec_t *ep; /* extent record for idx */ #ifdef XFS_BMAP_TRACE static char fname[] = "xfs_bmap_add_extent_hole_delay"; #endif + xfs_ifork_t *ifp; /* inode fork pointer */ xfs_bmbt_irec_t left; /* left neighbor extent entry */ xfs_filblks_t newlen=0; /* new indirect size */ xfs_filblks_t oldlen=0; /* old indirect size */ @@ -1799,15 +1780,15 @@ xfs_bmap_add_extent_hole_delay( ((state &= ~MASK(b)), 0)) #define SWITCH_STATE (state & MASK2(LEFT_CONTIG, RIGHT_CONTIG)) - base = ip->i_df.if_u1.if_extents; - ep = &base[idx]; + ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK); + ep = xfs_iext_get_ext(ifp, idx); state = 0; ASSERT(ISNULLSTARTBLOCK(new->br_startblock)); /* * Check and set flags if this segment has a left neighbor */ if (STATE_SET_TEST(LEFT_VALID, idx > 0)) { - xfs_bmbt_get_all(ep - 1, &left); + xfs_bmbt_get_all(xfs_iext_get_ext(ifp, idx - 1), &left); STATE_SET(LEFT_DELAY, ISNULLSTARTBLOCK(left.br_startblock)); } /* @@ -1844,23 +1825,24 @@ xfs_bmap_add_extent_hole_delay( /* * New allocation is contiguous with delayed allocations * on the left and on the right. - * Merge all three into a single extent list entry. + * Merge all three into a single extent record. 
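The LF|RF|LC|RC cases walked through above are selected from a small bit-mask state word built out of the filling and contiguity tests. A sketch of that dispatch style with invented flag names (the kernel code uses its own STATE_SET/MASK macros):

#include <stdio.h>

enum {
        S_LEFT_CONTIG  = 1 << 0,        /* new extent abuts its left neighbor */
        S_RIGHT_CONTIG = 1 << 1,        /* new extent abuts its right neighbor */
};

static void
classify_merge(int left_contig, int right_contig)
{
        int state = 0;

        if (left_contig)
                state |= S_LEFT_CONTIG;
        if (right_contig)
                state |= S_RIGHT_CONTIG;

        switch (state) {
        case S_LEFT_CONTIG | S_RIGHT_CONTIG:
                puts("merge with both neighbors, remove two records");
                break;
        case S_LEFT_CONTIG:
                puts("extend the left neighbor");
                break;
        case S_RIGHT_CONTIG:
                puts("extend the right neighbor");
                break;
        default:
                puts("insert a new record");
                break;
        }
}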
*/ temp = left.br_blockcount + new->br_blockcount + right.br_blockcount; xfs_bmap_trace_pre_update(fname, "LC|RC", ip, idx - 1, XFS_DATA_FORK); - xfs_bmbt_set_blockcount(ep - 1, temp); + xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, idx - 1), temp); oldlen = STARTBLOCKVAL(left.br_startblock) + STARTBLOCKVAL(new->br_startblock) + STARTBLOCKVAL(right.br_startblock); newlen = xfs_bmap_worst_indlen(ip, temp); - xfs_bmbt_set_startblock(ep - 1, NULLSTARTBLOCK((int)newlen)); + xfs_bmbt_set_startblock(xfs_iext_get_ext(ifp, idx - 1), + NULLSTARTBLOCK((int)newlen)); xfs_bmap_trace_post_update(fname, "LC|RC", ip, idx - 1, XFS_DATA_FORK); xfs_bmap_trace_delete(fname, "LC|RC", ip, idx, 1, XFS_DATA_FORK); - xfs_bmap_delete_exlist(ip, idx, 1, XFS_DATA_FORK); + xfs_iext_remove(ifp, idx, 1); ip->i_df.if_lastex = idx - 1; break; @@ -1873,11 +1855,12 @@ xfs_bmap_add_extent_hole_delay( temp = left.br_blockcount + new->br_blockcount; xfs_bmap_trace_pre_update(fname, "LC", ip, idx - 1, XFS_DATA_FORK); - xfs_bmbt_set_blockcount(ep - 1, temp); + xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, idx - 1), temp); oldlen = STARTBLOCKVAL(left.br_startblock) + STARTBLOCKVAL(new->br_startblock); newlen = xfs_bmap_worst_indlen(ip, temp); - xfs_bmbt_set_startblock(ep - 1, NULLSTARTBLOCK((int)newlen)); + xfs_bmbt_set_startblock(xfs_iext_get_ext(ifp, idx - 1), + NULLSTARTBLOCK((int)newlen)); xfs_bmap_trace_post_update(fname, "LC", ip, idx - 1, XFS_DATA_FORK); ip->i_df.if_lastex = idx - 1; @@ -1909,7 +1892,7 @@ xfs_bmap_add_extent_hole_delay( oldlen = newlen = 0; xfs_bmap_trace_insert(fname, "0", ip, idx, 1, new, NULL, XFS_DATA_FORK); - xfs_bmap_insert_exlist(ip, idx, 1, new, XFS_DATA_FORK); + xfs_iext_insert(ifp, idx, 1, new); ip->i_df.if_lastex = idx; break; } @@ -1940,7 +1923,7 @@ xfs_bmap_add_extent_hole_real( xfs_inode_t *ip, /* incore inode pointer */ xfs_extnum_t idx, /* extent number to update/insert */ xfs_btree_cur_t *cur, /* if null, not a btree */ - xfs_bmbt_irec_t *new, /* new data to put in extent list */ + xfs_bmbt_irec_t *new, /* new data to add to file extents */ int *logflagsp, /* inode logging flags */ int whichfork) /* data or attr fork */ { @@ -1970,13 +1953,13 @@ xfs_bmap_add_extent_hole_real( ifp = XFS_IFORK_PTR(ip, whichfork); ASSERT(idx <= ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t)); - ep = &ifp->if_u1.if_extents[idx]; + ep = xfs_iext_get_ext(ifp, idx); state = 0; /* * Check and set flags if this segment has a left neighbor. */ if (STATE_SET_TEST(LEFT_VALID, idx > 0)) { - xfs_bmbt_get_all(ep - 1, &left); + xfs_bmbt_get_all(xfs_iext_get_ext(ifp, idx - 1), &left); STATE_SET(LEFT_DELAY, ISNULLSTARTBLOCK(left.br_startblock)); } /* @@ -2019,18 +2002,18 @@ xfs_bmap_add_extent_hole_real( /* * New allocation is contiguous with real allocations on the * left and on the right. - * Merge all three into a single extent list entry. + * Merge all three into a single extent record. */ xfs_bmap_trace_pre_update(fname, "LC|RC", ip, idx - 1, whichfork); - xfs_bmbt_set_blockcount(ep - 1, + xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, idx - 1), left.br_blockcount + new->br_blockcount + right.br_blockcount); xfs_bmap_trace_post_update(fname, "LC|RC", ip, idx - 1, whichfork); xfs_bmap_trace_delete(fname, "LC|RC", ip, idx, 1, whichfork); - xfs_bmap_delete_exlist(ip, idx, 1, whichfork); + xfs_iext_remove(ifp, idx, 1); ifp->if_lastex = idx - 1; XFS_IFORK_NEXT_SET(ip, whichfork, XFS_IFORK_NEXTENTS(ip, whichfork) - 1); @@ -2062,7 +2045,7 @@ xfs_bmap_add_extent_hole_real( * Merge the new allocation with the left neighbor. 
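When neighboring delayed extents are merged, the worst-case indirect-block reservation is recomputed for the combined length instead of summing the per-extent reservations, and the excess can be returned. A hedged arithmetic sketch; worst_indlen() below is a placeholder, not the real xfs_bmap_worst_indlen():

#include <stdint.h>

/* Placeholder for the filesystem's worst-case indirect block estimate. */
static uint64_t
worst_indlen(uint64_t data_blocks)
{
        /* assume at most one indirect block per 128 data blocks, plus one */
        return data_blocks / 128 + 1;
}

/*
 * Recompute the reservation for a merged delayed extent.  The combined
 * worst case is usually smaller than the sum of the individual
 * reservations, so the difference can go back to the free-block counter.
 */
static uint64_t
merged_reservation(uint64_t left_resv, uint64_t mid_resv, uint64_t right_resv,
                   uint64_t merged_len, uint64_t *excess)
{
        uint64_t oldlen = left_resv + mid_resv + right_resv;
        uint64_t newlen = worst_indlen(merged_len);

        *excess = oldlen > newlen ? oldlen - newlen : 0;
        return newlen;
}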
*/ xfs_bmap_trace_pre_update(fname, "LC", ip, idx - 1, whichfork); - xfs_bmbt_set_blockcount(ep - 1, + xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, idx - 1), left.br_blockcount + new->br_blockcount); xfs_bmap_trace_post_update(fname, "LC", ip, idx - 1, whichfork); ifp->if_lastex = idx - 1; @@ -2116,7 +2099,7 @@ xfs_bmap_add_extent_hole_real( */ xfs_bmap_trace_insert(fname, "0", ip, idx, 1, new, NULL, whichfork); - xfs_bmap_insert_exlist(ip, idx, 1, new, whichfork); + xfs_iext_insert(ifp, idx, 1, new); ifp->if_lastex = idx; XFS_IFORK_NEXT_SET(ip, whichfork, XFS_IFORK_NEXTENTS(ip, whichfork) + 1); @@ -2311,25 +2294,15 @@ xfs_bmap_extsize_align( #define XFS_ALLOC_GAP_UNITS 4 -/* - * xfs_bmap_alloc is called by xfs_bmapi to allocate an extent for a file. - * It figures out where to ask the underlying allocator to put the new extent. - */ STATIC int -xfs_bmap_alloc( +xfs_bmap_adjacent( xfs_bmalloca_t *ap) /* bmap alloc argument struct */ { xfs_fsblock_t adjust; /* adjustment to block numbers */ - xfs_alloctype_t atype=0; /* type for allocation routines */ - int error; /* error return value */ xfs_agnumber_t fb_agno; /* ag number of ap->firstblock */ xfs_mount_t *mp; /* mount point structure */ int nullfb; /* true if ap->firstblock isn't set */ int rt; /* true if inode is realtime */ - xfs_extlen_t prod = 0; /* product factor for allocators */ - xfs_extlen_t ralen = 0; /* realtime allocation length */ - xfs_extlen_t align; /* minimum allocation alignment */ - xfs_rtblock_t rtx; #define ISVALID(x,y) \ (rt ? \ @@ -2338,75 +2311,10 @@ xfs_bmap_alloc( XFS_FSB_TO_AGNO(mp, x) < mp->m_sb.sb_agcount && \ XFS_FSB_TO_AGBNO(mp, x) < mp->m_sb.sb_agblocks) - /* - * Set up variables. - */ mp = ap->ip->i_mount; nullfb = ap->firstblock == NULLFSBLOCK; rt = XFS_IS_REALTIME_INODE(ap->ip) && ap->userdata; fb_agno = nullfb ? NULLAGNUMBER : XFS_FSB_TO_AGNO(mp, ap->firstblock); - if (rt) { - align = ap->ip->i_d.di_extsize ? - ap->ip->i_d.di_extsize : mp->m_sb.sb_rextsize; - /* Set prod to match the extent size */ - prod = align / mp->m_sb.sb_rextsize; - - error = xfs_bmap_extsize_align(mp, ap->gotp, ap->prevp, - align, rt, ap->eof, 0, - ap->conv, &ap->off, &ap->alen); - if (error) - return error; - ASSERT(ap->alen); - ASSERT(ap->alen % mp->m_sb.sb_rextsize == 0); - - /* - * If the offset & length are not perfectly aligned - * then kill prod, it will just get us in trouble. - */ - if (do_mod(ap->off, align) || ap->alen % align) - prod = 1; - /* - * Set ralen to be the actual requested length in rtextents. - */ - ralen = ap->alen / mp->m_sb.sb_rextsize; - /* - * If the old value was close enough to MAXEXTLEN that - * we rounded up to it, cut it back so it's valid again. - * Note that if it's a really large request (bigger than - * MAXEXTLEN), we don't hear about that number, and can't - * adjust the starting point to match it. - */ - if (ralen * mp->m_sb.sb_rextsize >= MAXEXTLEN) - ralen = MAXEXTLEN / mp->m_sb.sb_rextsize; - /* - * If it's an allocation to an empty file at offset 0, - * pick an extent that will space things out in the rt area. - */ - if (ap->eof && ap->off == 0) { - error = xfs_rtpick_extent(mp, ap->tp, ralen, &rtx); - if (error) - return error; - ap->rval = rtx * mp->m_sb.sb_rextsize; - } else - ap->rval = 0; - } else { - align = (ap->userdata && ap->ip->i_d.di_extsize && - (ap->ip->i_d.di_flags & XFS_DIFLAG_EXTSIZE)) ? 
- ap->ip->i_d.di_extsize : 0; - if (unlikely(align)) { - error = xfs_bmap_extsize_align(mp, ap->gotp, ap->prevp, - align, rt, - ap->eof, 0, ap->conv, - &ap->off, &ap->alen); - ASSERT(!error); - ASSERT(ap->alen); - } - if (nullfb) - ap->rval = XFS_INO_TO_FSB(mp, ap->ip->i_ino); - else - ap->rval = ap->firstblock; - } - /* * If allocating at eof, and there's a previous real block, * try to use it's last block as our starting point. @@ -2531,287 +2439,384 @@ xfs_bmap_alloc( else if (gotbno != NULLFSBLOCK) ap->rval = gotbno; } +#undef ISVALID + return 0; +} + +STATIC int +xfs_bmap_rtalloc( + xfs_bmalloca_t *ap) /* bmap alloc argument struct */ +{ + xfs_alloctype_t atype = 0; /* type for allocation routines */ + int error; /* error return value */ + xfs_mount_t *mp; /* mount point structure */ + xfs_extlen_t prod = 0; /* product factor for allocators */ + xfs_extlen_t ralen = 0; /* realtime allocation length */ + xfs_extlen_t align; /* minimum allocation alignment */ + xfs_rtblock_t rtx; /* realtime extent number */ + xfs_rtblock_t rtb; + + mp = ap->ip->i_mount; + align = ap->ip->i_d.di_extsize ? + ap->ip->i_d.di_extsize : mp->m_sb.sb_rextsize; + prod = align / mp->m_sb.sb_rextsize; + error = xfs_bmap_extsize_align(mp, ap->gotp, ap->prevp, + align, 1, ap->eof, 0, + ap->conv, &ap->off, &ap->alen); + if (error) + return error; + ASSERT(ap->alen); + ASSERT(ap->alen % mp->m_sb.sb_rextsize == 0); + + /* + * If the offset & length are not perfectly aligned + * then kill prod, it will just get us in trouble. + */ + if (do_mod(ap->off, align) || ap->alen % align) + prod = 1; + /* + * Set ralen to be the actual requested length in rtextents. + */ + ralen = ap->alen / mp->m_sb.sb_rextsize; + /* + * If the old value was close enough to MAXEXTLEN that + * we rounded up to it, cut it back so it's valid again. + * Note that if it's a really large request (bigger than + * MAXEXTLEN), we don't hear about that number, and can't + * adjust the starting point to match it. + */ + if (ralen * mp->m_sb.sb_rextsize >= MAXEXTLEN) + ralen = MAXEXTLEN / mp->m_sb.sb_rextsize; + /* + * If it's an allocation to an empty file at offset 0, + * pick an extent that will space things out in the rt area. + */ + if (ap->eof && ap->off == 0) { + error = xfs_rtpick_extent(mp, ap->tp, ralen, &rtx); + if (error) + return error; + ap->rval = rtx * mp->m_sb.sb_rextsize; + } else { + ap->rval = 0; + } + + xfs_bmap_adjacent(ap); + + /* + * Realtime allocation, done through xfs_rtallocate_extent. + */ + atype = ap->rval == 0 ? XFS_ALLOCTYPE_ANY_AG : XFS_ALLOCTYPE_NEAR_BNO; + do_div(ap->rval, mp->m_sb.sb_rextsize); + rtb = ap->rval; + ap->alen = ralen; + if ((error = xfs_rtallocate_extent(ap->tp, ap->rval, 1, ap->alen, + &ralen, atype, ap->wasdel, prod, &rtb))) + return error; + if (rtb == NULLFSBLOCK && prod > 1 && + (error = xfs_rtallocate_extent(ap->tp, ap->rval, 1, + ap->alen, &ralen, atype, + ap->wasdel, 1, &rtb))) + return error; + ap->rval = rtb; + if (ap->rval != NULLFSBLOCK) { + ap->rval *= mp->m_sb.sb_rextsize; + ralen *= mp->m_sb.sb_rextsize; + ap->alen = ralen; + ap->ip->i_d.di_nblocks += ralen; + xfs_trans_log_inode(ap->tp, ap->ip, XFS_ILOG_CORE); + if (ap->wasdel) + ap->ip->i_delayed_blks -= ralen; + /* + * Adjust the disk quota also. This was reserved + * earlier. + */ + XFS_TRANS_MOD_DQUOT_BYINO(mp, ap->tp, ap->ip, + ap->wasdel ? 
XFS_TRANS_DQ_DELRTBCOUNT : + XFS_TRANS_DQ_RTBCOUNT, (long) ralen); + } else { + ap->alen = 0; + } + return 0; +} + +STATIC int +xfs_bmap_btalloc( + xfs_bmalloca_t *ap) /* bmap alloc argument struct */ +{ + xfs_mount_t *mp; /* mount point structure */ + xfs_alloctype_t atype = 0; /* type for allocation routines */ + xfs_extlen_t align; /* minimum allocation alignment */ + xfs_agnumber_t ag; + xfs_agnumber_t fb_agno; /* ag number of ap->firstblock */ + xfs_agnumber_t startag; + xfs_alloc_arg_t args; + xfs_extlen_t blen; + xfs_extlen_t delta; + xfs_extlen_t longest; + xfs_extlen_t need; + xfs_extlen_t nextminlen = 0; + xfs_perag_t *pag; + int nullfb; /* true if ap->firstblock isn't set */ + int isaligned; + int notinit; + int tryagain; + int error; + + mp = ap->ip->i_mount; + align = (ap->userdata && ap->ip->i_d.di_extsize && + (ap->ip->i_d.di_flags & XFS_DIFLAG_EXTSIZE)) ? + ap->ip->i_d.di_extsize : 0; + if (unlikely(align)) { + error = xfs_bmap_extsize_align(mp, ap->gotp, ap->prevp, + align, 0, ap->eof, 0, ap->conv, + &ap->off, &ap->alen); + ASSERT(!error); + ASSERT(ap->alen); + } + nullfb = ap->firstblock == NULLFSBLOCK; + fb_agno = nullfb ? NULLAGNUMBER : XFS_FSB_TO_AGNO(mp, ap->firstblock); + if (nullfb) + ap->rval = XFS_INO_TO_FSB(mp, ap->ip->i_ino); + else + ap->rval = ap->firstblock; + + xfs_bmap_adjacent(ap); + /* * If allowed, use ap->rval; otherwise must use firstblock since * it's in the right allocation group. */ - if (nullfb || rt || XFS_FSB_TO_AGNO(mp, ap->rval) == fb_agno) + if (nullfb || XFS_FSB_TO_AGNO(mp, ap->rval) == fb_agno) ; else ap->rval = ap->firstblock; /* - * Realtime allocation, done through xfs_rtallocate_extent. + * Normal allocation, done through xfs_alloc_vextent. */ - if (rt) { -#ifndef __KERNEL__ - ASSERT(0); -#else - xfs_rtblock_t rtb; - - atype = ap->rval == 0 ? - XFS_ALLOCTYPE_ANY_AG : XFS_ALLOCTYPE_NEAR_BNO; - do_div(ap->rval, mp->m_sb.sb_rextsize); - rtb = ap->rval; - ap->alen = ralen; - if ((error = xfs_rtallocate_extent(ap->tp, ap->rval, 1, ap->alen, - &ralen, atype, ap->wasdel, prod, &rtb))) - return error; - if (rtb == NULLFSBLOCK && prod > 1 && - (error = xfs_rtallocate_extent(ap->tp, ap->rval, 1, - ap->alen, &ralen, atype, - ap->wasdel, 1, &rtb))) - return error; - ap->rval = rtb; - if (ap->rval != NULLFSBLOCK) { - ap->rval *= mp->m_sb.sb_rextsize; - ralen *= mp->m_sb.sb_rextsize; - ap->alen = ralen; - ap->ip->i_d.di_nblocks += ralen; - xfs_trans_log_inode(ap->tp, ap->ip, XFS_ILOG_CORE); - if (ap->wasdel) - ap->ip->i_delayed_blks -= ralen; + tryagain = isaligned = 0; + args.tp = ap->tp; + args.mp = mp; + args.fsbno = ap->rval; + args.maxlen = MIN(ap->alen, mp->m_sb.sb_agblocks); + blen = 0; + if (nullfb) { + args.type = XFS_ALLOCTYPE_START_BNO; + args.total = ap->total; + /* + * Find the longest available space. + * We're going to try for the whole allocation at once. + */ + startag = ag = XFS_FSB_TO_AGNO(mp, args.fsbno); + notinit = 0; + down_read(&mp->m_peraglock); + while (blen < ap->alen) { + pag = &mp->m_perag[ag]; + if (!pag->pagf_init && + (error = xfs_alloc_pagf_init(mp, args.tp, + ag, XFS_ALLOC_FLAG_TRYLOCK))) { + up_read(&mp->m_peraglock); + return error; + } /* - * Adjust the disk quota also. This was reserved - * earlier. + * See xfs_alloc_fix_freelist... */ - XFS_TRANS_MOD_DQUOT_BYINO(mp, ap->tp, ap->ip, - ap->wasdel ? XFS_TRANS_DQ_DELRTBCOUNT : - XFS_TRANS_DQ_RTBCOUNT, - (long) ralen); - } else - ap->alen = 0; -#endif /* __KERNEL__ */ + if (pag->pagf_init) { + need = XFS_MIN_FREELIST_PAG(pag, mp); + delta = need > pag->pagf_flcount ? 
+ need - pag->pagf_flcount : 0; + longest = (pag->pagf_longest > delta) ? + (pag->pagf_longest - delta) : + (pag->pagf_flcount > 0 || + pag->pagf_longest > 0); + if (blen < longest) + blen = longest; + } else + notinit = 1; + if (++ag == mp->m_sb.sb_agcount) + ag = 0; + if (ag == startag) + break; + } + up_read(&mp->m_peraglock); + /* + * Since the above loop did a BUF_TRYLOCK, it is + * possible that there is space for this request. + */ + if (notinit || blen < ap->minlen) + args.minlen = ap->minlen; + /* + * If the best seen length is less than the request + * length, use the best as the minimum. + */ + else if (blen < ap->alen) + args.minlen = blen; + /* + * Otherwise we've seen an extent as big as alen, + * use that as the minimum. + */ + else + args.minlen = ap->alen; + } else if (ap->low) { + args.type = XFS_ALLOCTYPE_FIRST_AG; + args.total = args.minlen = ap->minlen; + } else { + args.type = XFS_ALLOCTYPE_NEAR_BNO; + args.total = ap->total; + args.minlen = ap->minlen; + } + if (unlikely(ap->userdata && ap->ip->i_d.di_extsize && + (ap->ip->i_d.di_flags & XFS_DIFLAG_EXTSIZE))) { + args.prod = ap->ip->i_d.di_extsize; + if ((args.mod = (xfs_extlen_t)do_mod(ap->off, args.prod))) + args.mod = (xfs_extlen_t)(args.prod - args.mod); + } else if (unlikely(mp->m_sb.sb_blocksize >= NBPP)) { + args.prod = 1; + args.mod = 0; + } else { + args.prod = NBPP >> mp->m_sb.sb_blocklog; + if ((args.mod = (xfs_extlen_t)(do_mod(ap->off, args.prod)))) + args.mod = (xfs_extlen_t)(args.prod - args.mod); } /* - * Normal allocation, done through xfs_alloc_vextent. + * If we are not low on available data blocks, and the + * underlying logical volume manager is a stripe, and + * the file offset is zero then try to allocate data + * blocks on stripe unit boundary. + * NOTE: ap->aeof is only set if the allocation length + * is >= the stripe unit and the allocation offset is + * at the end of file. */ - else { - xfs_agnumber_t ag; - xfs_alloc_arg_t args; - xfs_extlen_t blen; - xfs_extlen_t delta; - int isaligned; - xfs_extlen_t longest; - xfs_extlen_t need; - xfs_extlen_t nextminlen=0; - int notinit; - xfs_perag_t *pag; - xfs_agnumber_t startag; - int tryagain; - - tryagain = isaligned = 0; - args.tp = ap->tp; - args.mp = mp; - args.fsbno = ap->rval; - args.maxlen = MIN(ap->alen, mp->m_sb.sb_agblocks); - blen = 0; - if (nullfb) { - args.type = XFS_ALLOCTYPE_START_BNO; - args.total = ap->total; - /* - * Find the longest available space. - * We're going to try for the whole allocation at once. - */ - startag = ag = XFS_FSB_TO_AGNO(mp, args.fsbno); - notinit = 0; - down_read(&mp->m_peraglock); - while (blen < ap->alen) { - pag = &mp->m_perag[ag]; - if (!pag->pagf_init && - (error = xfs_alloc_pagf_init(mp, args.tp, - ag, XFS_ALLOC_FLAG_TRYLOCK))) { - up_read(&mp->m_peraglock); - return error; - } - /* - * See xfs_alloc_fix_freelist... - */ - if (pag->pagf_init) { - need = XFS_MIN_FREELIST_PAG(pag, mp); - delta = need > pag->pagf_flcount ? - need - pag->pagf_flcount : 0; - longest = (pag->pagf_longest > delta) ? - (pag->pagf_longest - delta) : - (pag->pagf_flcount > 0 || - pag->pagf_longest > 0); - if (blen < longest) - blen = longest; - } else - notinit = 1; - if (++ag == mp->m_sb.sb_agcount) - ag = 0; - if (ag == startag) - break; - } - up_read(&mp->m_peraglock); + if (!ap->low && ap->aeof) { + if (!ap->off) { + args.alignment = mp->m_dalign; + atype = args.type; + isaligned = 1; /* - * Since the above loop did a BUF_TRYLOCK, it is - * possible that there is space for this request. 
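The scan above walks every allocation group once, under the per-AG lock, remembering the longest free extent it has seen so that a realistic minlen can be chosen for the whole allocation. A simplified sketch over a hypothetical per-AG summary array (locking and the freelist adjustment are omitted):

#include <stddef.h>
#include <stdint.h>

/* Hypothetical per-AG summary, loosely modelled on the fields used above. */
struct hyp_perag {
        int      initialized;   /* free-space headers read in? */
        uint64_t longest;       /* longest known free extent (blocks) */
};

/*
 * Scan every AG once, starting at start_ag and wrapping around, and
 * return the longest free extent seen.  AGs whose headers are not yet
 * initialized are skipped, so the caller must still cope with the real
 * allocation finding more (or less) space than reported here.
 */
static uint64_t
longest_free_extent(const struct hyp_perag *pags, size_t agcount,
                    size_t start_ag)
{
        uint64_t best = 0;
        size_t ag = start_ag;

        if (agcount == 0)
                return 0;
        do {
                if (pags[ag].initialized && pags[ag].longest > best)
                        best = pags[ag].longest;
                if (++ag == agcount)
                        ag = 0;
        } while (ag != start_ag);
        return best;
}

The caller then picks args.minlen from this result: its own minlen if the best length seen is smaller, the best length seen if that is smaller than the requested length, and the full request otherwise.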
+ * Adjust for alignment */ - if (notinit || blen < ap->minlen) - args.minlen = ap->minlen; + if (blen > args.alignment && blen <= ap->alen) + args.minlen = blen - args.alignment; + args.minalignslop = 0; + } else { /* - * If the best seen length is less than the request - * length, use the best as the minimum. + * First try an exact bno allocation. + * If it fails then do a near or start bno + * allocation with alignment turned on. */ - else if (blen < ap->alen) - args.minlen = blen; + atype = args.type; + tryagain = 1; + args.type = XFS_ALLOCTYPE_THIS_BNO; + args.alignment = 1; /* - * Otherwise we've seen an extent as big as alen, - * use that as the minimum. + * Compute the minlen+alignment for the + * next case. Set slop so that the value + * of minlen+alignment+slop doesn't go up + * between the calls. */ + if (blen > mp->m_dalign && blen <= ap->alen) + nextminlen = blen - mp->m_dalign; else - args.minlen = ap->alen; - } else if (ap->low) { - args.type = XFS_ALLOCTYPE_FIRST_AG; - args.total = args.minlen = ap->minlen; - } else { - args.type = XFS_ALLOCTYPE_NEAR_BNO; - args.total = ap->total; - args.minlen = ap->minlen; - } - if (unlikely(ap->userdata && ap->ip->i_d.di_extsize && - (ap->ip->i_d.di_flags & XFS_DIFLAG_EXTSIZE))) { - args.prod = ap->ip->i_d.di_extsize; - if ((args.mod = (xfs_extlen_t)do_mod(ap->off, args.prod))) - args.mod = (xfs_extlen_t)(args.prod - args.mod); - } else if (unlikely(mp->m_sb.sb_blocksize >= NBPP)) { - args.prod = 1; - args.mod = 0; - } else { - args.prod = NBPP >> mp->m_sb.sb_blocklog; - if ((args.mod = (xfs_extlen_t)(do_mod(ap->off, args.prod)))) - args.mod = (xfs_extlen_t)(args.prod - args.mod); + nextminlen = args.minlen; + if (nextminlen + mp->m_dalign > args.minlen + 1) + args.minalignslop = + nextminlen + mp->m_dalign - + args.minlen - 1; + else + args.minalignslop = 0; } + } else { + args.alignment = 1; + args.minalignslop = 0; + } + args.minleft = ap->minleft; + args.wasdel = ap->wasdel; + args.isfl = 0; + args.userdata = ap->userdata; + if ((error = xfs_alloc_vextent(&args))) + return error; + if (tryagain && args.fsbno == NULLFSBLOCK) { /* - * If we are not low on available data blocks, and the - * underlying logical volume manager is a stripe, and - * the file offset is zero then try to allocate data - * blocks on stripe unit boundary. - * NOTE: ap->aeof is only set if the allocation length - * is >= the stripe unit and the allocation offset is - * at the end of file. + * Exact allocation failed. Now try with alignment + * turned on. */ - if (!ap->low && ap->aeof) { - if (!ap->off) { - args.alignment = mp->m_dalign; - atype = args.type; - isaligned = 1; - /* - * Adjust for alignment - */ - if (blen > args.alignment && blen <= ap->alen) - args.minlen = blen - args.alignment; - args.minalignslop = 0; - } else { - /* - * First try an exact bno allocation. - * If it fails then do a near or start bno - * allocation with alignment turned on. - */ - atype = args.type; - tryagain = 1; - args.type = XFS_ALLOCTYPE_THIS_BNO; - args.alignment = 1; - /* - * Compute the minlen+alignment for the - * next case. Set slop so that the value - * of minlen+alignment+slop doesn't go up - * between the calls. 
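The slop computation above keeps minlen + alignment + slop from growing between the exact-bno attempt (alignment of 1) and the aligned retry, so whatever space the first call leaves behind still covers the second. A small arithmetic sketch of that invariant; all names are illustrative:

#include <stdint.h>

struct align_plan {
        uint64_t minlen;        /* minimum length for the exact-bno try */
        uint64_t nextminlen;    /* minimum length for the aligned retry */
        uint64_t slop;          /* extra reserved so the retry still fits */
};

/*
 * Choose the retry minlen and the alignment slop so that
 *      minlen + 1 + slop >= nextminlen + alignment
 * i.e. the space set aside for the first (alignment == 1) attempt also
 * covers the worst case of the aligned second attempt.
 */
static struct align_plan
plan_aligned_retry(uint64_t minlen, uint64_t best_free, uint64_t want,
                   uint64_t alignment)
{
        struct align_plan p = { .minlen = minlen };

        if (best_free > alignment && best_free <= want)
                p.nextminlen = best_free - alignment;
        else
                p.nextminlen = minlen;

        if (p.nextminlen + alignment > minlen + 1)
                p.slop = p.nextminlen + alignment - minlen - 1;
        else
                p.slop = 0;

        return p;
}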
- */ - if (blen > mp->m_dalign && blen <= ap->alen) - nextminlen = blen - mp->m_dalign; - else - nextminlen = args.minlen; - if (nextminlen + mp->m_dalign > args.minlen + 1) - args.minalignslop = - nextminlen + mp->m_dalign - - args.minlen - 1; - else - args.minalignslop = 0; - } - } else { - args.alignment = 1; - args.minalignslop = 0; - } - args.minleft = ap->minleft; - args.wasdel = ap->wasdel; - args.isfl = 0; - args.userdata = ap->userdata; + args.type = atype; + args.fsbno = ap->rval; + args.alignment = mp->m_dalign; + args.minlen = nextminlen; + args.minalignslop = 0; + isaligned = 1; + if ((error = xfs_alloc_vextent(&args))) + return error; + } + if (isaligned && args.fsbno == NULLFSBLOCK) { + /* + * allocation failed, so turn off alignment and + * try again. + */ + args.type = atype; + args.fsbno = ap->rval; + args.alignment = 0; + if ((error = xfs_alloc_vextent(&args))) + return error; + } + if (args.fsbno == NULLFSBLOCK && nullfb && + args.minlen > ap->minlen) { + args.minlen = ap->minlen; + args.type = XFS_ALLOCTYPE_START_BNO; + args.fsbno = ap->rval; if ((error = xfs_alloc_vextent(&args))) return error; - if (tryagain && args.fsbno == NULLFSBLOCK) { - /* - * Exact allocation failed. Now try with alignment - * turned on. - */ - args.type = atype; - args.fsbno = ap->rval; - args.alignment = mp->m_dalign; - args.minlen = nextminlen; - args.minalignslop = 0; - isaligned = 1; - if ((error = xfs_alloc_vextent(&args))) - return error; - } - if (isaligned && args.fsbno == NULLFSBLOCK) { - /* - * allocation failed, so turn off alignment and - * try again. - */ - args.type = atype; - args.fsbno = ap->rval; - args.alignment = 0; - if ((error = xfs_alloc_vextent(&args))) - return error; - } - if (args.fsbno == NULLFSBLOCK && nullfb && - args.minlen > ap->minlen) { - args.minlen = ap->minlen; - args.type = XFS_ALLOCTYPE_START_BNO; - args.fsbno = ap->rval; - if ((error = xfs_alloc_vextent(&args))) - return error; - } - if (args.fsbno == NULLFSBLOCK && nullfb) { - args.fsbno = 0; - args.type = XFS_ALLOCTYPE_FIRST_AG; - args.total = ap->minlen; - args.minleft = 0; - if ((error = xfs_alloc_vextent(&args))) - return error; - ap->low = 1; - } - if (args.fsbno != NULLFSBLOCK) { - ap->firstblock = ap->rval = args.fsbno; - ASSERT(nullfb || fb_agno == args.agno || - (ap->low && fb_agno < args.agno)); - ap->alen = args.len; - ap->ip->i_d.di_nblocks += args.len; - xfs_trans_log_inode(ap->tp, ap->ip, XFS_ILOG_CORE); - if (ap->wasdel) - ap->ip->i_delayed_blks -= args.len; - /* - * Adjust the disk quota also. This was reserved - * earlier. - */ - XFS_TRANS_MOD_DQUOT_BYINO(mp, ap->tp, ap->ip, - ap->wasdel ? XFS_TRANS_DQ_DELBCOUNT : - XFS_TRANS_DQ_BCOUNT, - (long) args.len); - } else { - ap->rval = NULLFSBLOCK; - ap->alen = 0; - } + } + if (args.fsbno == NULLFSBLOCK && nullfb) { + args.fsbno = 0; + args.type = XFS_ALLOCTYPE_FIRST_AG; + args.total = ap->minlen; + args.minleft = 0; + if ((error = xfs_alloc_vextent(&args))) + return error; + ap->low = 1; + } + if (args.fsbno != NULLFSBLOCK) { + ap->firstblock = ap->rval = args.fsbno; + ASSERT(nullfb || fb_agno == args.agno || + (ap->low && fb_agno < args.agno)); + ap->alen = args.len; + ap->ip->i_d.di_nblocks += args.len; + xfs_trans_log_inode(ap->tp, ap->ip, XFS_ILOG_CORE); + if (ap->wasdel) + ap->ip->i_delayed_blks -= args.len; + /* + * Adjust the disk quota also. This was reserved + * earlier. + */ + XFS_TRANS_MOD_DQUOT_BYINO(mp, ap->tp, ap->ip, + ap->wasdel ? 
XFS_TRANS_DQ_DELBCOUNT : + XFS_TRANS_DQ_BCOUNT, + (long) args.len); + } else { + ap->rval = NULLFSBLOCK; + ap->alen = 0; } return 0; -#undef ISVALID +} + +/* + * xfs_bmap_alloc is called by xfs_bmapi to allocate an extent for a file. + * It figures out where to ask the underlying allocator to put the new extent. + */ +STATIC int +xfs_bmap_alloc( + xfs_bmalloca_t *ap) /* bmap alloc argument struct */ +{ + if ((ap->ip->i_d.di_flags & XFS_DIFLAG_REALTIME) && ap->userdata) + return xfs_bmap_rtalloc(ap); + return xfs_bmap_btalloc(ap); } /* * Transform a btree format file with only one leaf node, where the * extents list will fit in the inode, into an extents format file. - * Since the extent list is already in-core, all we have to do is + * Since the file extents are already in-core, all we have to do is * give up the space for the btree root and pitch the leaf block. */ STATIC int /* error */ @@ -2868,7 +2873,7 @@ xfs_bmap_btree_to_extents( } /* - * Called by xfs_bmapi to update extent list structure and the btree + * Called by xfs_bmapi to update file extent records and the btree * after removing space (or undoing a delayed allocation). */ STATIC int /* error */ @@ -2878,7 +2883,7 @@ xfs_bmap_del_extent( xfs_extnum_t idx, /* extent number to update/delete */ xfs_bmap_free_t *flist, /* list of extents to be freed */ xfs_btree_cur_t *cur, /* if null, not a btree */ - xfs_bmbt_irec_t *del, /* data to remove from extent list */ + xfs_bmbt_irec_t *del, /* data to remove from extents */ int *logflagsp, /* inode logging flags */ int whichfork, /* data or attr fork */ int rsvd) /* OK to allocate reserved blocks */ @@ -2903,7 +2908,6 @@ xfs_bmap_del_extent( xfs_filblks_t nblks; /* quota/sb block count */ xfs_bmbt_irec_t new; /* new record to be inserted */ /* REFERENCED */ - xfs_extnum_t nextents; /* number of extents in list */ uint qfield; /* quota field to update */ xfs_filblks_t temp; /* for indirect length calculations */ xfs_filblks_t temp2; /* for indirect length calculations */ @@ -2911,10 +2915,10 @@ xfs_bmap_del_extent( XFS_STATS_INC(xs_del_exlist); mp = ip->i_mount; ifp = XFS_IFORK_PTR(ip, whichfork); - nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t); - ASSERT(idx >= 0 && idx < nextents); + ASSERT((idx >= 0) && (idx < ifp->if_bytes / + (uint)sizeof(xfs_bmbt_rec_t))); ASSERT(del->br_blockcount > 0); - ep = &ifp->if_u1.if_extents[idx]; + ep = xfs_iext_get_ext(ifp, idx); xfs_bmbt_get_all(ep, &got); ASSERT(got.br_startoff <= del->br_startoff); del_endoff = del->br_startoff + del->br_blockcount; @@ -2990,7 +2994,7 @@ xfs_bmap_del_extent( * Matches the whole extent. Delete the entry. */ xfs_bmap_trace_delete(fname, "3", ip, idx, 1, whichfork); - xfs_bmap_delete_exlist(ip, idx, 1, whichfork); + xfs_iext_remove(ifp, idx, 1); ifp->if_lastex = idx; if (delay) break; @@ -3160,7 +3164,7 @@ xfs_bmap_del_extent( xfs_bmap_trace_post_update(fname, "0", ip, idx, whichfork); xfs_bmap_trace_insert(fname, "0", ip, idx + 1, 1, &new, NULL, whichfork); - xfs_bmap_insert_exlist(ip, idx + 1, 1, &new, whichfork); + xfs_iext_insert(ifp, idx + 1, 1, &new); ifp->if_lastex = idx + 1; break; } @@ -3213,31 +3217,6 @@ xfs_bmap_del_free( } /* - * Remove count entries from the extents array for inode "ip", starting - * at index "idx". Copies the remaining items down over the deleted ones, - * and gives back the excess memory. 
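With the realtime and btree-managed paths split into xfs_bmap_rtalloc() and xfs_bmap_btalloc(), xfs_bmap_alloc() above is reduced to a dispatcher. A generic sketch of the same shape, with hypothetical names and stubbed-out callees:

#include <stdbool.h>

struct hyp_alloc_args;          /* opaque argument block (hypothetical) */

/* Stubs standing in for the device-specific allocation paths. */
static int rt_alloc(struct hyp_alloc_args *ap) { (void)ap; return 0; }
static int bt_alloc(struct hyp_alloc_args *ap) { (void)ap; return 0; }

/*
 * Dispatch on the file's properties: realtime user data goes to the
 * realtime allocator, everything else to the ordinary block allocator.
 */
static int
alloc_dispatch(struct hyp_alloc_args *ap, bool is_realtime, bool is_userdata)
{
        if (is_realtime && is_userdata)
                return rt_alloc(ap);
        return bt_alloc(ap);
}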
- */ -STATIC void -xfs_bmap_delete_exlist( - xfs_inode_t *ip, /* incore inode pointer */ - xfs_extnum_t idx, /* starting delete index */ - xfs_extnum_t count, /* count of items to delete */ - int whichfork) /* data or attr fork */ -{ - xfs_bmbt_rec_t *base; /* base of extent list */ - xfs_ifork_t *ifp; /* inode fork pointer */ - xfs_extnum_t nextents; /* number of extents in list after */ - - ifp = XFS_IFORK_PTR(ip, whichfork); - ASSERT(ifp->if_flags & XFS_IFEXTENTS); - base = ifp->if_u1.if_extents; - nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t) - count; - memmove(&base[idx], &base[idx + count], - (nextents - idx) * sizeof(*base)); - xfs_iext_realloc(ip, -count, whichfork); -} - -/* * Convert an extents-format file into a btree-format file. * The new file will have a root block (in the inode) and a single child block. */ @@ -3258,13 +3237,13 @@ xfs_bmap_extents_to_btree( xfs_bmbt_rec_t *arp; /* child record pointer */ xfs_bmbt_block_t *block; /* btree root block */ xfs_btree_cur_t *cur; /* bmap btree cursor */ - xfs_bmbt_rec_t *ep; /* extent list pointer */ + xfs_bmbt_rec_t *ep; /* extent record pointer */ int error; /* error return value */ - xfs_extnum_t i, cnt; /* extent list index */ + xfs_extnum_t i, cnt; /* extent record index */ xfs_ifork_t *ifp; /* inode fork pointer */ xfs_bmbt_key_t *kp; /* root block key pointer */ xfs_mount_t *mp; /* mount structure */ - xfs_extnum_t nextents; /* extent list size */ + xfs_extnum_t nextents; /* number of file extents */ xfs_bmbt_ptr_t *pp; /* root block address pointer */ ifp = XFS_IFORK_PTR(ip, whichfork); @@ -3343,7 +3322,8 @@ xfs_bmap_extents_to_btree( ablock->bb_rightsib = cpu_to_be64(NULLDFSBNO); arp = XFS_BMAP_REC_IADDR(ablock, 1, cur); nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t); - for (ep = ifp->if_u1.if_extents, cnt = i = 0; i < nextents; i++, ep++) { + for (cnt = i = 0; i < nextents; i++) { + ep = xfs_iext_get_ext(ifp, i); if (!ISNULLSTARTBLOCK(xfs_bmbt_get_startblock(ep))) { arp->l0 = INT_GET(ep->l0, ARCH_CONVERT); arp->l1 = INT_GET(ep->l1, ARCH_CONVERT); @@ -3373,34 +3353,6 @@ xfs_bmap_extents_to_btree( } /* - * Insert new item(s) in the extent list for inode "ip". - * Count new items are inserted at offset idx. - */ -STATIC void -xfs_bmap_insert_exlist( - xfs_inode_t *ip, /* incore inode pointer */ - xfs_extnum_t idx, /* starting index of new items */ - xfs_extnum_t count, /* number of inserted items */ - xfs_bmbt_irec_t *new, /* items to insert */ - int whichfork) /* data or attr fork */ -{ - xfs_bmbt_rec_t *base; /* extent list base */ - xfs_ifork_t *ifp; /* inode fork pointer */ - xfs_extnum_t nextents; /* extent list size */ - xfs_extnum_t to; /* extent list index */ - - ifp = XFS_IFORK_PTR(ip, whichfork); - ASSERT(ifp->if_flags & XFS_IFEXTENTS); - xfs_iext_realloc(ip, count, whichfork); - base = ifp->if_u1.if_extents; - nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t); - memmove(&base[idx + count], &base[idx], - (nextents - (idx + count)) * sizeof(*base)); - for (to = idx; to < idx + count; to++, new++) - xfs_bmbt_set_all(&base[to], new); -} - -/* * Helper routine to reset inode di_forkoff field when switching * attribute fork from local to extent format - we reset it where * possible to make space available for inline data fork extents. 
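The removed exlist helpers open-coded insertion and deletion on one flat record array. A stand-alone sketch of what such helpers do, with hypothetical types; the xfs_iext_* replacements hide this behind the fork abstraction and may manage the storage differently:

#include <stdlib.h>
#include <string.h>

typedef struct { unsigned long long l0, l1; } rec_t;

/* Insert 'count' records at index 'idx' (idx <= *nrecs), shifting later
 * entries up to make room. */
static int
rec_array_insert(rec_t **arr, size_t *nrecs, size_t idx,
                 const rec_t *new_recs, size_t count)
{
        rec_t *grown = realloc(*arr, (*nrecs + count) * sizeof(rec_t));

        if (!grown)
                return -1;
        *arr = grown;
        memmove(&grown[idx + count], &grown[idx],
                (*nrecs - idx) * sizeof(rec_t));
        memcpy(&grown[idx], new_recs, count * sizeof(rec_t));
        *nrecs += count;
        return 0;
}

/* Delete 'count' records at index 'idx', shifting later entries down. */
static void
rec_array_remove(rec_t *arr, size_t *nrecs, size_t idx, size_t count)
{
        memmove(&arr[idx], &arr[idx + count],
                (*nrecs - idx - count) * sizeof(rec_t));
        *nrecs -= count;
        /* a real implementation would also shrink the allocation here */
}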
@@ -3457,12 +3409,13 @@ xfs_bmap_local_to_extents( error = 0; if (ifp->if_bytes) { xfs_alloc_arg_t args; /* allocation arguments */ - xfs_buf_t *bp; /* buffer for extent list block */ - xfs_bmbt_rec_t *ep; /* extent list pointer */ + xfs_buf_t *bp; /* buffer for extent block */ + xfs_bmbt_rec_t *ep; /* extent record pointer */ args.tp = tp; args.mp = ip->i_mount; - ASSERT(ifp->if_flags & XFS_IFINLINE); + ASSERT((ifp->if_flags & + (XFS_IFINLINE|XFS_IFEXTENTS|XFS_IFEXTIREC)) == XFS_IFINLINE); /* * Allocate a block. We know we need only one, since the * file currently fits in an inode. @@ -3492,8 +3445,8 @@ xfs_bmap_local_to_extents( xfs_trans_log_buf(tp, bp, 0, ifp->if_bytes - 1); xfs_bmap_forkoff_reset(args.mp, ip, whichfork); xfs_idata_realloc(ip, -ifp->if_bytes, whichfork); - xfs_iext_realloc(ip, 1, whichfork); - ep = ifp->if_u1.if_extents; + xfs_iext_add(ifp, 0, 1); + ep = xfs_iext_get_ext(ifp, 0); xfs_bmbt_set_allf(ep, 0, args.fsbno, 1, XFS_EXT_NORM); xfs_bmap_trace_post_update(fname, "new", ip, 0, whichfork); XFS_IFORK_NEXT_SET(ip, whichfork, 1); @@ -3518,7 +3471,7 @@ xfs_bmbt_rec_t * /* pointer to found extent entry */ xfs_bmap_do_search_extents( xfs_bmbt_rec_t *base, /* base of extent list */ xfs_extnum_t lastx, /* last extent index used */ - xfs_extnum_t nextents, /* extent list size */ + xfs_extnum_t nextents, /* number of file extents */ xfs_fileoff_t bno, /* block number searched for */ int *eofp, /* out: end of file found */ xfs_extnum_t *lastxp, /* out: last extent index */ @@ -3569,9 +3522,9 @@ xfs_bmap_do_search_extents( got.br_blockcount = xfs_bmbt_get_blockcount(ep); *eofp = 0; } else { - /* binary search the extents array */ low = 0; high = nextents - 1; + /* binary search the extents array */ while (low <= high) { XFS_STATS_INC(xs_cmp_exlist); lastx = (low + high) >> 1; @@ -3622,6 +3575,57 @@ xfs_bmap_do_search_extents( } /* + * Search the extent records for the entry containing block bno. + * If bno lies in a hole, point to the next entry. If bno lies + * past eof, *eofp will be set, and *prevp will contain the last + * entry (null if none). Else, *lastxp will be set to the index + * of the found entry; *gotp will contain the entry. + */ +xfs_bmbt_rec_t * /* pointer to found extent entry */ +xfs_bmap_search_multi_extents( + xfs_ifork_t *ifp, /* inode fork pointer */ + xfs_fileoff_t bno, /* block number searched for */ + int *eofp, /* out: end of file found */ + xfs_extnum_t *lastxp, /* out: last extent index */ + xfs_bmbt_irec_t *gotp, /* out: extent entry found */ + xfs_bmbt_irec_t *prevp) /* out: previous extent entry found */ +{ + xfs_bmbt_rec_t *ep; /* extent record pointer */ + xfs_extnum_t lastx; /* last extent index */ + + /* + * Initialize the extent entry structure to catch access to + * uninitialized br_startblock field. + */ + gotp->br_startoff = 0xffa5a5a5a5a5a5a5LL; + gotp->br_blockcount = 0xa55a5a5a5a5a5a5aLL; + gotp->br_state = XFS_EXT_INVALID; +#if XFS_BIG_BLKNOS + gotp->br_startblock = 0xffffa5a5a5a5a5a5LL; +#else + gotp->br_startblock = 0xffffa5a5; +#endif + prevp->br_startoff = NULLFILEOFF; + + ep = xfs_iext_bno_to_ext(ifp, bno, &lastx); + if (lastx > 0) { + xfs_bmbt_get_all(xfs_iext_get_ext(ifp, lastx - 1), prevp); + } + if (lastx < (ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t))) { + xfs_bmbt_get_all(ep, gotp); + *eofp = 0; + } else { + if (lastx > 0) { + *gotp = *prevp; + } + *eofp = 1; + ep = NULL; + } + *lastxp = lastx; + return ep; +} + +/* * Search the extents list for the inode, for the extent containing bno. 
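The lookup helpers above locate the extent record containing a file offset, falling back to the next record when the offset sits in a hole. A sketch of that search as a plain lower-bound binary search over a simplified record type (the sentinel poisoning of the output record is omitted):

#include <stddef.h>
#include <stdint.h>

struct hyp_ext {
        uint64_t startoff;      /* first file block covered */
        uint64_t blockcount;    /* number of blocks covered */
};

/*
 * Binary-search a sorted extent array for the record containing 'bno'.
 * Returns the index of that record, or, if bno falls in a hole, the
 * index of the next record (== nrecs when bno is past the last extent).
 */
static size_t
ext_lookup(const struct hyp_ext *ext, size_t nrecs, uint64_t bno)
{
        size_t low = 0, high = nrecs;

        while (low < high) {
                size_t mid = low + (high - low) / 2;

                if (ext[mid].startoff + ext[mid].blockcount <= bno)
                        low = mid + 1;  /* record ends at or before bno */
                else
                        high = mid;     /* record may contain bno */
        }
        return low;
}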
* If bno lies in a hole, point to the next entry. If bno lies past eof, * *eofp will be set, and *prevp will contain the last entry (null if none). @@ -3639,20 +3643,14 @@ xfs_bmap_search_extents( xfs_bmbt_irec_t *prevp) /* out: previous extent entry found */ { xfs_ifork_t *ifp; /* inode fork pointer */ - xfs_bmbt_rec_t *base; /* base of extent list */ - xfs_extnum_t lastx; /* last extent index used */ - xfs_extnum_t nextents; /* extent list size */ - xfs_bmbt_rec_t *ep; /* extent list entry pointer */ + xfs_bmbt_rec_t *ep; /* extent record pointer */ int rt; /* realtime flag */ XFS_STATS_INC(xs_look_exlist); ifp = XFS_IFORK_PTR(ip, whichfork); - lastx = ifp->if_lastex; - nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t); - base = &ifp->if_u1.if_extents[0]; - ep = xfs_bmap_do_search_extents(base, lastx, nextents, bno, eofp, - lastxp, gotp, prevp); + ep = xfs_bmap_search_multi_extents(ifp, bno, eofp, lastxp, gotp, prevp); + rt = (whichfork == XFS_DATA_FORK) && XFS_IS_REALTIME_INODE(ip); if (unlikely(!rt && !gotp->br_startblock && (*lastxp != NULLEXTNUM))) { cmn_err(CE_PANIC,"Access to block zero: fs: <%s> inode: %lld " @@ -3732,7 +3730,7 @@ xfs_bmap_trace_addentry( } /* - * Add bmap trace entry prior to a call to xfs_bmap_delete_exlist. + * Add bmap trace entry prior to a call to xfs_iext_remove. */ STATIC void xfs_bmap_trace_delete( @@ -3747,13 +3745,13 @@ xfs_bmap_trace_delete( ifp = XFS_IFORK_PTR(ip, whichfork); xfs_bmap_trace_addentry(XFS_BMAP_KTRACE_DELETE, fname, desc, ip, idx, - cnt, &ifp->if_u1.if_extents[idx], - cnt == 2 ? &ifp->if_u1.if_extents[idx + 1] : NULL, + cnt, xfs_iext_get_ext(ifp, idx), + cnt == 2 ? xfs_iext_get_ext(ifp, idx + 1) : NULL, whichfork); } /* - * Add bmap trace entry prior to a call to xfs_bmap_insert_exlist, or + * Add bmap trace entry prior to a call to xfs_iext_insert, or * reading in the extents list from the disk (in the btree). */ STATIC void @@ -3783,7 +3781,7 @@ xfs_bmap_trace_insert( } /* - * Add bmap trace entry after updating an extent list entry in place. + * Add bmap trace entry after updating an extent record in place. */ STATIC void xfs_bmap_trace_post_update( @@ -3797,11 +3795,11 @@ xfs_bmap_trace_post_update( ifp = XFS_IFORK_PTR(ip, whichfork); xfs_bmap_trace_addentry(XFS_BMAP_KTRACE_POST_UP, fname, desc, ip, idx, - 1, &ifp->if_u1.if_extents[idx], NULL, whichfork); + 1, xfs_iext_get_ext(ifp, idx), NULL, whichfork); } /* - * Add bmap trace entry prior to updating an extent list entry in place. + * Add bmap trace entry prior to updating an extent record in place. 
*/ STATIC void xfs_bmap_trace_pre_update( @@ -3815,7 +3813,7 @@ xfs_bmap_trace_pre_update( ifp = XFS_IFORK_PTR(ip, whichfork); xfs_bmap_trace_addentry(XFS_BMAP_KTRACE_PRE_UP, fname, desc, ip, idx, 1, - &ifp->if_u1.if_extents[idx], NULL, whichfork); + xfs_iext_get_ext(ifp, idx), NULL, whichfork); } #endif /* XFS_BMAP_TRACE */ @@ -3892,7 +3890,7 @@ xfs_bmap_add_attrfork( int rsvd) /* xact may use reserved blks */ { xfs_fsblock_t firstblock; /* 1st block/ag allocated */ - xfs_bmap_free_t flist; /* freed extent list */ + xfs_bmap_free_t flist; /* freed extent records */ xfs_mount_t *mp; /* mount structure */ xfs_trans_t *tp; /* transaction pointer */ unsigned long s; /* spinlock spl value */ @@ -4146,7 +4144,7 @@ xfs_bmap_finish( xfs_efd_log_item_t *efd; /* extent free data */ xfs_efi_log_item_t *efi; /* extent free intention */ int error; /* error return value */ - xfs_bmap_free_item_t *free; /* free extent list item */ + xfs_bmap_free_item_t *free; /* free extent item */ unsigned int logres; /* new log reservation */ unsigned int logcount; /* new log count */ xfs_mount_t *mp; /* filesystem mount structure */ @@ -4242,9 +4240,9 @@ xfs_bmap_first_unused( xfs_fileoff_t *first_unused, /* unused block */ int whichfork) /* data or attr fork */ { - xfs_bmbt_rec_t *base; /* base of extent array */ xfs_bmbt_rec_t *ep; /* pointer to an extent entry */ int error; /* error return value */ + int idx; /* extent record index */ xfs_ifork_t *ifp; /* inode fork pointer */ xfs_fileoff_t lastaddr; /* last block number seen */ xfs_fileoff_t lowest; /* lowest useful block */ @@ -4265,10 +4263,8 @@ xfs_bmap_first_unused( return error; lowest = *first_unused; nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t); - base = &ifp->if_u1.if_extents[0]; - for (lastaddr = 0, max = lowest, ep = base; - ep < &base[nextents]; - ep++) { + for (idx = 0, lastaddr = 0, max = lowest; idx < nextents; idx++) { + ep = xfs_iext_get_ext(ifp, idx); off = xfs_bmbt_get_startoff(ep); /* * See if the hole before this extent will work. @@ -4287,8 +4283,8 @@ xfs_bmap_first_unused( /* * Returns the file-relative block number of the last block + 1 before * last_block (input value) in the file. - * This is not based on i_size, it is based on the extent list. - * Returns 0 for local files, as they do not have an extent list. + * This is not based on i_size, it is based on the extent records. + * Returns 0 for local files, as they do not have extent records. */ int /* error */ xfs_bmap_last_before( @@ -4335,8 +4331,8 @@ xfs_bmap_last_before( /* * Returns the file-relative block number of the first block past eof in - * the file. This is not based on i_size, it is based on the extent list. - * Returns 0 for local files, as they do not have an extent list. + * the file. This is not based on i_size, it is based on the extent records. + * Returns 0 for local files, as they do not have extent records. 
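xfs_bmap_first_unused() above walks the extent records looking for the first hole big enough for the request. A hedged sketch of that scan over a simplified record type:

#include <stddef.h>
#include <stdint.h>

struct hyp_ext {
        uint64_t startoff;      /* first file block covered */
        uint64_t blockcount;    /* number of blocks covered */
};

/*
 * Find the lowest file offset, at or above 'lowest', where a hole of at
 * least 'len' blocks exists between the sorted extent records.
 */
static uint64_t
first_unused(const struct hyp_ext *ext, size_t nrecs,
             uint64_t lowest, uint64_t len)
{
        uint64_t lastaddr = 0;
        uint64_t max = lowest;
        size_t idx;

        for (idx = 0; idx < nrecs; idx++) {
                uint64_t off = ext[idx].startoff;

                /* does the hole before this extent work? */
                if (off >= lowest + len && off - max >= len)
                        return max;
                lastaddr = off + ext[idx].blockcount;
                max = lastaddr > lowest ? lastaddr : lowest;
        }
        return max;     /* use the hole past the last extent */
}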
*/ int /* error */ xfs_bmap_last_offset( @@ -4345,7 +4341,6 @@ xfs_bmap_last_offset( xfs_fileoff_t *last_block, /* last block */ int whichfork) /* data or attr fork */ { - xfs_bmbt_rec_t *base; /* base of extent array */ xfs_bmbt_rec_t *ep; /* pointer to last extent */ int error; /* error return value */ xfs_ifork_t *ifp; /* inode fork pointer */ @@ -4368,9 +4363,7 @@ xfs_bmap_last_offset( *last_block = 0; return 0; } - base = &ifp->if_u1.if_extents[0]; - ASSERT(base != NULL); - ep = &base[nextents - 1]; + ep = xfs_iext_get_ext(ifp, nextents - 1); *last_block = xfs_bmbt_get_startoff(ep) + xfs_bmbt_get_blockcount(ep); return 0; } @@ -4400,7 +4393,7 @@ xfs_bmap_one_block( return 0; ifp = XFS_IFORK_PTR(ip, whichfork); ASSERT(ifp->if_flags & XFS_IFEXTENTS); - ep = ifp->if_u1.if_extents; + ep = xfs_iext_get_ext(ifp, 0); xfs_bmbt_get_all(ep, &s); rval = s.br_startoff == 0 && s.br_blockcount == 1; if (rval && whichfork == XFS_DATA_FORK) @@ -4435,7 +4428,6 @@ xfs_bmap_read_extents( xfs_bmbt_ptr_t *pp; /* pointer to block address */ /* REFERENCED */ xfs_extnum_t room; /* number of entries there's room for */ - xfs_bmbt_rec_t *trp; /* target record pointer */ bno = NULLFSBLOCK; mp = ip->i_mount; @@ -4478,16 +4470,16 @@ xfs_bmap_read_extents( /* * Here with bp and block set to the leftmost leaf node in the tree. */ - room = ifp->if_bytes / (uint)sizeof(*trp); - trp = ifp->if_u1.if_extents; + room = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t); i = 0; /* - * Loop over all leaf nodes. Copy information to the extent list. + * Loop over all leaf nodes. Copy information to the extent records. */ for (;;) { - xfs_bmbt_rec_t *frp, *temp; + xfs_bmbt_rec_t *frp, *trp; xfs_fsblock_t nextbno; xfs_extnum_t num_recs; + xfs_extnum_t start; num_recs = be16_to_cpu(block->bb_numrecs); @@ -4511,12 +4503,13 @@ xfs_bmap_read_extents( if (nextbno != NULLFSBLOCK) xfs_btree_reada_bufl(mp, nextbno, 1); /* - * Copy records into the extent list. + * Copy records into the extent records. */ frp = XFS_BTREE_REC_ADDR(mp->m_sb.sb_blocksize, xfs_bmbt, block, 1, mp->m_bmap_dmxr[0]); - temp = trp; - for (j = 0; j < num_recs; j++, frp++, trp++) { + start = i; + for (j = 0; j < num_recs; j++, i++, frp++) { + trp = xfs_iext_get_ext(ifp, i); trp->l0 = INT_GET(frp->l0, ARCH_CONVERT); trp->l1 = INT_GET(frp->l1, ARCH_CONVERT); } @@ -4526,14 +4519,14 @@ xfs_bmap_read_extents( * any "older" data bmap btree records for a * set bit in the "extent flag" position. */ - if (unlikely(xfs_check_nostate_extents(temp, num_recs))) { + if (unlikely(xfs_check_nostate_extents(ifp, + start, num_recs))) { XFS_ERROR_REPORT("xfs_bmap_read_extents(2)", XFS_ERRLEVEL_LOW, ip->i_mount); goto error0; } } - i += num_recs; xfs_trans_brelse(tp, bp); bno = nextbno; /* @@ -4546,7 +4539,7 @@ xfs_bmap_read_extents( return error; block = XFS_BUF_TO_BMBT_BLOCK(bp); } - ASSERT(i == ifp->if_bytes / (uint)sizeof(*trp)); + ASSERT(i == (ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t))); ASSERT(i == XFS_IFORK_NEXTENTS(ip, whichfork)); xfs_bmap_trace_exlist(fname, ip, i, whichfork); return 0; @@ -4557,7 +4550,7 @@ error0: #ifdef XFS_BMAP_TRACE /* - * Add bmap trace insert entries for all the contents of the extent list. + * Add bmap trace insert entries for all the contents of the extent records. 
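The read-in loop above copies each btree leaf's records into in-core extent slots one index at a time instead of advancing a raw destination pointer. A simplified sketch; disk_to_host64() stands in for the endian conversion the real code performs:

#include <stddef.h>
#include <stdint.h>

typedef struct { uint64_t l0, l1; } disk_rec_t;  /* on-disk layout */
typedef struct { uint64_t l0, l1; } core_rec_t;  /* in-core copy */

/* Placeholder for the on-disk-to-host conversion used by the real code. */
static uint64_t disk_to_host64(uint64_t x) { return x; /* byte swap elided */ }

/*
 * Copy one leaf block's records into the in-core extent array starting
 * at slot 'start'.  Returns the index one past the last slot written, so
 * the caller can continue with the next leaf block.
 */
static size_t
copy_leaf_records(core_rec_t *incore, size_t start,
                  const disk_rec_t *leaf, size_t num_recs)
{
        size_t i, j;

        for (i = start, j = 0; j < num_recs; i++, j++) {
                incore[i].l0 = disk_to_host64(leaf[j].l0);
                incore[i].l1 = disk_to_host64(leaf[j].l1);
        }
        return i;
}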
*/ void xfs_bmap_trace_exlist( @@ -4566,16 +4559,15 @@ xfs_bmap_trace_exlist( xfs_extnum_t cnt, /* count of entries in the list */ int whichfork) /* data or attr fork */ { - xfs_bmbt_rec_t *base; /* base of extent list */ - xfs_bmbt_rec_t *ep; /* current entry in extent list */ - xfs_extnum_t idx; /* extent list entry number */ + xfs_bmbt_rec_t *ep; /* current extent record */ + xfs_extnum_t idx; /* extent record index */ xfs_ifork_t *ifp; /* inode fork pointer */ - xfs_bmbt_irec_t s; /* extent list record */ + xfs_bmbt_irec_t s; /* file extent record */ ifp = XFS_IFORK_PTR(ip, whichfork); - ASSERT(cnt == ifp->if_bytes / (uint)sizeof(*base)); - base = ifp->if_u1.if_extents; - for (idx = 0, ep = base; idx < cnt; idx++, ep++) { + ASSERT(cnt == (ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t))); + for (idx = 0; idx < cnt; idx++) { + ep = xfs_iext_get_ext(ifp, idx); xfs_bmbt_get_all(ep, &s); xfs_bmap_trace_insert(fname, "exlist", ip, idx, 1, &s, NULL, whichfork); @@ -4661,14 +4653,10 @@ xfs_bmapi( xfs_bmalloca_t bma; /* args for xfs_bmap_alloc */ xfs_btree_cur_t *cur; /* bmap btree cursor */ xfs_fileoff_t end; /* end of mapped file region */ - int eof; /* we've hit the end of extent list */ - char contig; /* allocation must be one extent */ - char delay; /* this request is for delayed alloc */ - char exact; /* don't do all of wasdelayed extent */ - char convert; /* unwritten extent I/O completion */ - xfs_bmbt_rec_t *ep; /* extent list entry pointer */ + int eof; /* we've hit the end of extents */ + xfs_bmbt_rec_t *ep; /* extent record pointer */ int error; /* error return */ - xfs_bmbt_irec_t got; /* current extent list record */ + xfs_bmbt_irec_t got; /* current file extent record */ xfs_ifork_t *ifp; /* inode fork pointer */ xfs_extlen_t indlen; /* indirect blocks length */ xfs_extnum_t lastx; /* last useful extent number */ @@ -4680,17 +4668,13 @@ xfs_bmapi( int nallocs; /* number of extents alloc\'d */ xfs_extnum_t nextents; /* number of extents in file */ xfs_fileoff_t obno; /* old block number (offset) */ - xfs_bmbt_irec_t prev; /* previous extent list record */ + xfs_bmbt_irec_t prev; /* previous file extent record */ int tmp_logflags; /* temp flags holder */ int whichfork; /* data or attr fork */ char inhole; /* current location is hole in file */ - char stateless; /* ignore state flag set */ - char trim; /* output trimmed to match range */ - char userdata; /* allocating non-metadata */ char wasdelay; /* old extent was delayed */ char wr; /* this is a write request */ char rt; /* this is a realtime file */ - char rsvd; /* OK to allocate reserved blocks */ #ifdef DEBUG xfs_fileoff_t orig_bno; /* original block number value */ int orig_flags; /* original flags arg value */ @@ -4727,15 +4711,8 @@ xfs_bmapi( XFS_STATS_INC(xs_blk_mapw); else XFS_STATS_INC(xs_blk_mapr); - delay = (flags & XFS_BMAPI_DELAY) != 0; - trim = (flags & XFS_BMAPI_ENTIRE) == 0; - userdata = (flags & XFS_BMAPI_METADATA) == 0; - convert = (flags & XFS_BMAPI_CONVERT) != 0; - exact = (flags & XFS_BMAPI_EXACT) != 0; - rsvd = (flags & XFS_BMAPI_RSVBLOCKS) != 0; - contig = (flags & XFS_BMAPI_CONTIG) != 0; /* - * stateless is used to combine extents which + * IGSTATE flag is used to combine extents which * differ only due to the state of the extents. * This technique is used from xfs_getbmap() * when the caller does not wish to see the @@ -4751,10 +4728,9 @@ xfs_bmapi( * xfs_strat_comp(), where the xfs_bmapi() call * is transactioned, and the extents combined. 
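The rewrite above drops the per-call char locals (delay, trim, userdata, convert, ...) and tests the flag word wherever a decision is made. A sketch of the pattern with invented flag names:

#include <stdio.h>

enum {
        HYP_BMAPI_DELAY  = 1 << 0,      /* delayed allocation requested */
        HYP_BMAPI_ENTIRE = 1 << 1,      /* return the entire extent, untrimmed */
};

static void
map_blocks(int flags)
{
        /* test the flag word where it is needed, instead of copying each
         * bit into its own local variable up front */
        if (flags & HYP_BMAPI_DELAY)
                puts("reserve blocks now, allocate them later");

        if (!(flags & HYP_BMAPI_ENTIRE))
                puts("trim the returned mapping to the requested range");
}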
*/ - stateless = (flags & XFS_BMAPI_IGSTATE) != 0; - if (stateless && wr) /* if writing unwritten space, no */ - wr = 0; /* allocations are allowed */ - ASSERT(wr || !delay); + if ((flags & XFS_BMAPI_IGSTATE) && wr) /* if writing unwritten space */ + wr = 0; /* no allocations are allowed */ + ASSERT(wr || !(flags & XFS_BMAPI_DELAY)); logflags = 0; nallocs = 0; cur = NULL; @@ -4789,7 +4765,7 @@ xfs_bmapi( if (eof && !wr) got.br_startoff = end; inhole = eof || got.br_startoff > bno; - wasdelay = wr && !inhole && !delay && + wasdelay = wr && !inhole && !(flags & XFS_BMAPI_DELAY) && ISNULLSTARTBLOCK(got.br_startblock); /* * First, deal with the hole before the allocated space @@ -4801,11 +4777,11 @@ xfs_bmapi( * allocate the stuff asked for in this bmap call * but that wouldn't be as good. */ - if (wasdelay && !exact) { + if (wasdelay && !(flags & XFS_BMAPI_EXACT)) { alen = (xfs_extlen_t)got.br_blockcount; aoff = got.br_startoff; if (lastx != NULLEXTNUM && lastx) { - ep = &ifp->if_u1.if_extents[lastx - 1]; + ep = xfs_iext_get_ext(ifp, lastx - 1); xfs_bmbt_get_all(ep, &prev); } } else if (wasdelay) { @@ -4823,8 +4799,8 @@ xfs_bmapi( got.br_startoff - bno); aoff = bno; } - minlen = contig ? alen : 1; - if (delay) { + minlen = (flags & XFS_BMAPI_CONTIG) ? alen : 1; + if (flags & XFS_BMAPI_DELAY) { xfs_extlen_t extsz; /* Figure out the extent size, adjust alen */ @@ -4837,7 +4813,9 @@ xfs_bmapi( if (extsz) { error = xfs_bmap_extsize_align(mp, &got, &prev, extsz, - rt, eof, delay, convert, + rt, eof, + flags&XFS_BMAPI_DELAY, + flags&XFS_BMAPI_CONVERT, &aoff, &alen); ASSERT(!error); } @@ -4875,24 +4853,29 @@ xfs_bmapi( if (rt) { error = xfs_mod_incore_sb(mp, XFS_SBS_FREXTENTS, - -(extsz), rsvd); + -(extsz), (flags & + XFS_BMAPI_RSVBLOCKS)); } else { error = xfs_mod_incore_sb(mp, XFS_SBS_FDBLOCKS, - -(alen), rsvd); + -(alen), (flags & + XFS_BMAPI_RSVBLOCKS)); } if (!error) { error = xfs_mod_incore_sb(mp, XFS_SBS_FDBLOCKS, - -(indlen), rsvd); + -(indlen), (flags & + XFS_BMAPI_RSVBLOCKS)); if (error && rt) xfs_mod_incore_sb(mp, XFS_SBS_FREXTENTS, - extsz, rsvd); + extsz, (flags & + XFS_BMAPI_RSVBLOCKS)); else if (error) xfs_mod_incore_sb(mp, XFS_SBS_FDBLOCKS, - alen, rsvd); + alen, (flags & + XFS_BMAPI_RSVBLOCKS)); } if (error) { @@ -4925,7 +4908,7 @@ xfs_bmapi( /* Indicate if this is the first user data * in the file, or just any user data. */ - if (userdata) { + if (!(flags & XFS_BMAPI_METADATA)) { bma.userdata = (aoff == 0) ? XFS_ALLOC_INITIAL_USER_DATA : XFS_ALLOC_USERDATA; @@ -4937,7 +4920,7 @@ xfs_bmapi( bma.firstblock = *firstblock; bma.alen = alen; bma.off = aoff; - bma.conv = convert; + bma.conv = (flags & XFS_BMAPI_CONVERT); bma.wasdel = wasdelay; bma.minlen = minlen; bma.low = flist->xbf_low; @@ -4948,7 +4931,8 @@ xfs_bmapi( * is larger than a stripe unit. 
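The delayed-allocation path above reserves the data blocks and then the worst-case indirect blocks, rolling the first reservation back if the second one fails. A sketch of that two-step reserve-with-rollback; the two counters are illustrative, the real code adjusts in-core superblock counters through xfs_mod_incore_sb():

#include <stdint.h>

/* Hypothetical free-space counters standing in for the superblock fields. */
static int64_t free_data_blocks;
static int64_t free_indirect_pool;

static int
take(int64_t *counter, int64_t amount)
{
        if (*counter < amount)
                return -1;      /* would go negative: refuse */
        *counter -= amount;
        return 0;
}

/*
 * Reserve the data blocks and the worst-case indirect blocks for a
 * delayed allocation.  If the second reservation fails, the first one is
 * rolled back so the counters stay consistent.
 */
static int
reserve_delalloc(int64_t data_blocks, int64_t indirect_blocks)
{
        if (take(&free_data_blocks, data_blocks))
                return -1;
        if (take(&free_indirect_pool, indirect_blocks)) {
                free_data_blocks += data_blocks;        /* roll back */
                return -1;
        }
        return 0;
}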
*/ if (mp->m_dalign && alen >= mp->m_dalign && - userdata && whichfork == XFS_DATA_FORK) { + (!(flags & XFS_BMAPI_METADATA)) && + (whichfork == XFS_DATA_FORK)) { if ((error = xfs_bmap_isaeof(ip, aoff, whichfork, &bma.aeof))) goto error0; @@ -5011,19 +4995,19 @@ xfs_bmapi( } error = xfs_bmap_add_extent(ip, lastx, &cur, &got, firstblock, flist, &tmp_logflags, whichfork, - rsvd); + (flags & XFS_BMAPI_RSVBLOCKS)); logflags |= tmp_logflags; if (error) goto error0; lastx = ifp->if_lastex; - ep = &ifp->if_u1.if_extents[lastx]; + ep = xfs_iext_get_ext(ifp, lastx); nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t); xfs_bmbt_get_all(ep, &got); ASSERT(got.br_startoff <= aoff); ASSERT(got.br_startoff + got.br_blockcount >= aoff + alen); #ifdef DEBUG - if (delay) { + if (flags & XFS_BMAPI_DELAY) { ASSERT(ISNULLSTARTBLOCK(got.br_startblock)); ASSERT(STARTBLOCKVAL(got.br_startblock) > 0); } @@ -5052,14 +5036,15 @@ xfs_bmapi( * Then deal with the allocated space we found. */ ASSERT(ep != NULL); - if (trim && (got.br_startoff + got.br_blockcount > obno)) { + if (!(flags & XFS_BMAPI_ENTIRE) && + (got.br_startoff + got.br_blockcount > obno)) { if (obno > bno) bno = obno; ASSERT((bno >= obno) || (n == 0)); ASSERT(bno < end); mval->br_startoff = bno; if (ISNULLSTARTBLOCK(got.br_startblock)) { - ASSERT(!wr || delay); + ASSERT(!wr || (flags & XFS_BMAPI_DELAY)); mval->br_startblock = DELAYSTARTBLOCK; } else mval->br_startblock = @@ -5081,7 +5066,7 @@ xfs_bmapi( } else { *mval = got; if (ISNULLSTARTBLOCK(mval->br_startblock)) { - ASSERT(!wr || delay); + ASSERT(!wr || (flags & XFS_BMAPI_DELAY)); mval->br_startblock = DELAYSTARTBLOCK; } } @@ -5107,12 +5092,12 @@ xfs_bmapi( mval->br_state = XFS_EXT_NORM; error = xfs_bmap_add_extent(ip, lastx, &cur, mval, firstblock, flist, &tmp_logflags, whichfork, - rsvd); + (flags & XFS_BMAPI_RSVBLOCKS)); logflags |= tmp_logflags; if (error) goto error0; lastx = ifp->if_lastex; - ep = &ifp->if_u1.if_extents[lastx]; + ep = xfs_iext_get_ext(ifp, lastx); nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t); xfs_bmbt_get_all(ep, &got); /* @@ -5124,9 +5109,10 @@ xfs_bmapi( continue; } - ASSERT(!trim || + ASSERT((flags & XFS_BMAPI_ENTIRE) || ((mval->br_startoff + mval->br_blockcount) <= end)); - ASSERT(!trim || (mval->br_blockcount <= len) || + ASSERT((flags & XFS_BMAPI_ENTIRE) || + (mval->br_blockcount <= len) || (mval->br_startoff < obno)); bno = mval->br_startoff + mval->br_blockcount; len = end - bno; @@ -5141,7 +5127,8 @@ xfs_bmapi( mval[-1].br_startblock != HOLESTARTBLOCK && mval->br_startblock == mval[-1].br_startblock + mval[-1].br_blockcount && - (stateless || mval[-1].br_state == mval->br_state)) { + ((flags & XFS_BMAPI_IGSTATE) || + mval[-1].br_state == mval->br_state)) { ASSERT(mval->br_startoff == mval[-1].br_startoff + mval[-1].br_blockcount); mval[-1].br_blockcount += mval->br_blockcount; @@ -5168,8 +5155,7 @@ xfs_bmapi( /* * Else go on to the next record. */ - ep++; - lastx++; + ep = xfs_iext_get_ext(ifp, ++lastx); if (lastx >= nextents) { eof = 1; prev = got; @@ -5199,7 +5185,7 @@ xfs_bmapi( error0: /* * Log everything. Do this after conversion, there's no point in - * logging the extent list if we've converted to btree format. + * logging the extent records if we've converted to btree format. */ if ((logflags & XFS_ILOG_FEXT(whichfork)) && XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS) @@ -5252,12 +5238,12 @@ xfs_bmapi_single( xfs_fsblock_t *fsb, /* output: mapped block */ xfs_fileoff_t bno) /* starting file offs. 
mapped */ { - int eof; /* we've hit the end of extent list */ + int eof; /* we've hit the end of extents */ int error; /* error return */ - xfs_bmbt_irec_t got; /* current extent list record */ + xfs_bmbt_irec_t got; /* current file extent record */ xfs_ifork_t *ifp; /* inode fork pointer */ xfs_extnum_t lastx; /* last useful extent number */ - xfs_bmbt_irec_t prev; /* previous extent list record */ + xfs_bmbt_irec_t prev; /* previous file extent record */ ifp = XFS_IFORK_PTR(ip, whichfork); if (unlikely( @@ -5312,18 +5298,18 @@ xfs_bunmapi( xfs_btree_cur_t *cur; /* bmap btree cursor */ xfs_bmbt_irec_t del; /* extent being deleted */ int eof; /* is deleting at eof */ - xfs_bmbt_rec_t *ep; /* extent list entry pointer */ + xfs_bmbt_rec_t *ep; /* extent record pointer */ int error; /* error return value */ xfs_extnum_t extno; /* extent number in list */ - xfs_bmbt_irec_t got; /* current extent list entry */ + xfs_bmbt_irec_t got; /* current extent record */ xfs_ifork_t *ifp; /* inode fork pointer */ int isrt; /* freeing in rt area */ xfs_extnum_t lastx; /* last extent index used */ int logflags; /* transaction logging flags */ xfs_extlen_t mod; /* rt extent offset */ xfs_mount_t *mp; /* mount structure */ - xfs_extnum_t nextents; /* size of extent list */ - xfs_bmbt_irec_t prev; /* previous extent list entry */ + xfs_extnum_t nextents; /* number of file extents */ + xfs_bmbt_irec_t prev; /* previous extent record */ xfs_fileoff_t start; /* first file offset deleted */ int tmp_logflags; /* partial logging flags */ int wasdel; /* was a delayed alloc extent */ @@ -5369,7 +5355,7 @@ xfs_bunmapi( * file, back up to the last block if so... */ if (eof) { - ep = &ifp->if_u1.if_extents[--lastx]; + ep = xfs_iext_get_ext(ifp, --lastx); xfs_bmbt_get_all(ep, &got); bno = got.br_startoff + got.br_blockcount - 1; } @@ -5393,7 +5379,7 @@ xfs_bunmapi( if (got.br_startoff > bno) { if (--lastx < 0) break; - ep--; + ep = xfs_iext_get_ext(ifp, lastx); xfs_bmbt_get_all(ep, &got); } /* @@ -5440,7 +5426,8 @@ xfs_bunmapi( del.br_blockcount : mod; if (bno < got.br_startoff) { if (--lastx >= 0) - xfs_bmbt_get_all(--ep, &got); + xfs_bmbt_get_all(xfs_iext_get_ext( + ifp, lastx), &got); } continue; } @@ -5500,7 +5487,8 @@ xfs_bunmapi( * try again. */ ASSERT(lastx > 0); - xfs_bmbt_get_all(ep - 1, &prev); + xfs_bmbt_get_all(xfs_iext_get_ext(ifp, + lastx - 1), &prev); ASSERT(prev.br_state == XFS_EXT_NORM); ASSERT(!ISNULLSTARTBLOCK(prev.br_startblock)); ASSERT(del.br_startblock == @@ -5587,12 +5575,12 @@ nodelete: * If not done go on to the next (previous) record. * Reset ep in case the extents array was re-alloced. */ - ep = &ifp->if_u1.if_extents[lastx]; + ep = xfs_iext_get_ext(ifp, lastx); if (bno != (xfs_fileoff_t)-1 && bno >= start) { if (lastx >= XFS_IFORK_NEXTENTS(ip, whichfork) || xfs_bmbt_get_startoff(ep) > bno) { - lastx--; - ep--; + if (--lastx >= 0) + ep = xfs_iext_get_ext(ifp, lastx); } if (lastx >= 0) xfs_bmbt_get_all(ep, &got); @@ -5636,7 +5624,7 @@ nodelete: error0: /* * Log everything. Do this after conversion, there's no point in - * logging the extent list if we've converted to btree format. + * logging the extent records if we've converted to btree format. 
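The recurring change in the xfs_bmap.c hunks is that direct pointer arithmetic on ifp->if_u1.if_extents gives way to the xfs_iext_get_ext(ifp, idx) accessor. The toy types below are stand-ins, not the kernel's xfs_ifork_t; they only sketch why an indexed accessor is preferred once the backing store may be reallocated or split across buffers.

/* Sketch: going through an indexed accessor instead of raw pointer
 * arithmetic over the in-core extent array.  Types and names are
 * simplified stand-ins for this example only. */
#include <stdio.h>

typedef struct { unsigned long startoff, blockcount; } rec_t;

typedef struct {
	rec_t	*recs;		/* backing store; could later be chunked */
	int	nextents;
} ifork_t;

static rec_t *iext_get_ext(ifork_t *ifp, int idx)
{
	/* the single place that knows how records are laid out */
	return &ifp->recs[idx];
}

int main(void)
{
	rec_t data[3] = { {0, 4}, {4, 8}, {16, 2} };
	ifork_t fork = { data, 3 };
	rec_t *ep;
	int idx;

	/* the old code walked "ep++"; here the index stays authoritative */
	for (idx = 0; idx < fork.nextents; idx++) {
		ep = iext_get_ext(&fork, idx);
		printf("extent %d: off=%lu len=%lu\n",
		       idx, ep->startoff, ep->blockcount);
	}
	return 0;
}

Because every lookup goes through one function, the in-core extent list can later be reallocated or indirected without touching the many call sites changed above.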
*/ if ((logflags & XFS_ILOG_FEXT(whichfork)) && XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS) @@ -5892,9 +5880,9 @@ xfs_bmap_isaeof( { int error; /* error return value */ xfs_ifork_t *ifp; /* inode fork pointer */ - xfs_bmbt_rec_t *lastrec; /* extent list entry pointer */ - xfs_extnum_t nextents; /* size of extent list */ - xfs_bmbt_irec_t s; /* expanded extent list entry */ + xfs_bmbt_rec_t *lastrec; /* extent record pointer */ + xfs_extnum_t nextents; /* number of file extents */ + xfs_bmbt_irec_t s; /* expanded extent record */ ASSERT(whichfork == XFS_DATA_FORK); ifp = XFS_IFORK_PTR(ip, whichfork); @@ -5909,7 +5897,7 @@ xfs_bmap_isaeof( /* * Go to the last extent */ - lastrec = &ifp->if_u1.if_extents[nextents - 1]; + lastrec = xfs_iext_get_ext(ifp, nextents - 1); xfs_bmbt_get_all(lastrec, &s); /* * Check we are allocating in the last extent (for delayed allocations) @@ -5936,8 +5924,8 @@ xfs_bmap_eof( xfs_fsblock_t blockcount; /* extent block count */ int error; /* error return value */ xfs_ifork_t *ifp; /* inode fork pointer */ - xfs_bmbt_rec_t *lastrec; /* extent list entry pointer */ - xfs_extnum_t nextents; /* size of extent list */ + xfs_bmbt_rec_t *lastrec; /* extent record pointer */ + xfs_extnum_t nextents; /* number of file extents */ xfs_fileoff_t startoff; /* extent starting file offset */ ASSERT(whichfork == XFS_DATA_FORK); @@ -5953,7 +5941,7 @@ xfs_bmap_eof( /* * Go to the last extent */ - lastrec = &ifp->if_u1.if_extents[nextents - 1]; + lastrec = xfs_iext_get_ext(ifp, nextents - 1); startoff = xfs_bmbt_get_startoff(lastrec); blockcount = xfs_bmbt_get_blockcount(lastrec); *eof = endoff >= startoff + blockcount; @@ -5969,18 +5957,21 @@ xfs_bmap_check_extents( xfs_inode_t *ip, /* incore inode pointer */ int whichfork) /* data or attr fork */ { - xfs_bmbt_rec_t *base; /* base of extents list */ xfs_bmbt_rec_t *ep; /* current extent entry */ + xfs_extnum_t idx; /* extent record index */ xfs_ifork_t *ifp; /* inode fork pointer */ xfs_extnum_t nextents; /* number of extents in list */ + xfs_bmbt_rec_t *nextp; /* next extent entry */ ifp = XFS_IFORK_PTR(ip, whichfork); ASSERT(ifp->if_flags & XFS_IFEXTENTS); - base = ifp->if_u1.if_extents; nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t); - for (ep = base; ep < &base[nextents - 1]; ep++) { + ep = xfs_iext_get_ext(ifp, 0); + for (idx = 0; idx < nextents - 1; idx++) { + nextp = xfs_iext_get_ext(ifp, idx + 1); xfs_btree_check_rec(XFS_BTNUM_BMAP, (void *)ep, - (void *)(ep + 1)); + (void *)(nextp)); + ep = nextp; } } @@ -6119,12 +6110,14 @@ xfs_bmap_check_leaf_extents( xfs_fsblock_t bno; /* block # of "block" */ xfs_buf_t *bp; /* buffer for "block" */ int error; /* error return value */ - xfs_extnum_t i=0; /* index into the extents list */ + xfs_extnum_t i=0, j; /* index into the extents list */ xfs_ifork_t *ifp; /* fork structure */ int level; /* btree level, for checking */ xfs_mount_t *mp; /* file system mount structure */ xfs_bmbt_ptr_t *pp; /* pointer to block address */ - xfs_bmbt_rec_t *ep, *lastp; /* extent pointers in block entry */ + xfs_bmbt_rec_t *ep; /* pointer to current extent */ + xfs_bmbt_rec_t *lastp; /* pointer to previous extent */ + xfs_bmbt_rec_t *nextp; /* pointer to next extent */ int bp_release = 0; if (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE) { @@ -6194,7 +6187,6 @@ xfs_bmap_check_leaf_extents( */ lastp = NULL; for (;;) { - xfs_bmbt_rec_t *frp; xfs_fsblock_t nextbno; xfs_extnum_t num_recs; @@ -6213,18 +6205,20 @@ xfs_bmap_check_leaf_extents( * conform with the first entry in 
this one. */ - frp = XFS_BTREE_REC_ADDR(mp->m_sb.sb_blocksize, xfs_bmbt, + ep = XFS_BTREE_REC_ADDR(mp->m_sb.sb_blocksize, xfs_bmbt, block, 1, mp->m_bmap_dmxr[0]); - - for (ep = frp;ep < frp + (num_recs - 1); ep++) { + for (j = 1; j < num_recs; j++) { + nextp = XFS_BTREE_REC_ADDR(mp->m_sb.sb_blocksize, xfs_bmbt, + block, j + 1, mp->m_bmap_dmxr[0]); if (lastp) { xfs_btree_check_rec(XFS_BTNUM_BMAP, (void *)lastp, (void *)ep); } xfs_btree_check_rec(XFS_BTNUM_BMAP, (void *)ep, - (void *)(ep + 1)); + (void *)(nextp)); + lastp = ep; + ep = nextp; } - lastp = frp + num_recs - 1; /* For the next iteration */ i += num_recs; if (bp_release) { @@ -6288,7 +6282,7 @@ xfs_bmap_count_blocks( mp = ip->i_mount; ifp = XFS_IFORK_PTR(ip, whichfork); if ( XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS ) { - if (unlikely(xfs_bmap_count_leaves(ifp->if_u1.if_extents, + if (unlikely(xfs_bmap_count_leaves(ifp, 0, ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t), count) < 0)) { XFS_ERROR_REPORT("xfs_bmap_count_blocks(1)", @@ -6310,7 +6304,7 @@ xfs_bmap_count_blocks( ASSERT(XFS_FSB_TO_AGBNO(mp, INT_GET(*pp, ARCH_CONVERT)) < mp->m_sb.sb_agblocks); bno = INT_GET(*pp, ARCH_CONVERT); - if (unlikely(xfs_bmap_count_tree(mp, tp, bno, level, count) < 0)) { + if (unlikely(xfs_bmap_count_tree(mp, tp, ifp, bno, level, count) < 0)) { XFS_ERROR_REPORT("xfs_bmap_count_blocks(2)", XFS_ERRLEVEL_LOW, mp); return XFS_ERROR(EFSCORRUPTED); @@ -6327,6 +6321,7 @@ int /* error */ xfs_bmap_count_tree( xfs_mount_t *mp, /* file system mount point */ xfs_trans_t *tp, /* transaction pointer */ + xfs_ifork_t *ifp, /* inode fork pointer */ xfs_fsblock_t blockno, /* file system block number */ int levelin, /* level in btree */ int *count) /* Count of blocks */ @@ -6339,7 +6334,6 @@ xfs_bmap_count_tree( xfs_fsblock_t nextbno; xfs_bmbt_block_t *block, *nextblock; int numrecs; - xfs_bmbt_rec_t *frp; if ((error = xfs_btree_read_bufl(mp, tp, bno, 0, &bp, XFS_BMAP_BTREE_REF))) return error; @@ -6364,7 +6358,7 @@ xfs_bmap_count_tree( xfs_bmbt, block, 1, mp->m_bmap_dmxr[1]); bno = INT_GET(*pp, ARCH_CONVERT); if (unlikely((error = - xfs_bmap_count_tree(mp, tp, bno, level, count)) < 0)) { + xfs_bmap_count_tree(mp, tp, ifp, bno, level, count)) < 0)) { xfs_trans_brelse(tp, bp); XFS_ERROR_REPORT("xfs_bmap_count_tree(1)", XFS_ERRLEVEL_LOW, mp); @@ -6376,9 +6370,8 @@ xfs_bmap_count_tree( for (;;) { nextbno = be64_to_cpu(block->bb_rightsib); numrecs = be16_to_cpu(block->bb_numrecs); - frp = XFS_BTREE_REC_ADDR(mp->m_sb.sb_blocksize, - xfs_bmbt, block, 1, mp->m_bmap_dmxr[0]); - if (unlikely(xfs_bmap_disk_count_leaves(frp, numrecs, count) < 0)) { + if (unlikely(xfs_bmap_disk_count_leaves(ifp, mp, + 0, block, numrecs, count) < 0)) { xfs_trans_brelse(tp, bp); XFS_ERROR_REPORT("xfs_bmap_count_tree(2)", XFS_ERRLEVEL_LOW, mp); @@ -6399,33 +6392,45 @@ xfs_bmap_count_tree( } /* - * Count leaf blocks given a pointer to an extent list. + * Count leaf blocks given a range of extent records. */ int xfs_bmap_count_leaves( - xfs_bmbt_rec_t *frp, + xfs_ifork_t *ifp, + xfs_extnum_t idx, int numrecs, int *count) { int b; + xfs_bmbt_rec_t *frp; - for ( b = 1; b <= numrecs; b++, frp++) + for (b = 0; b < numrecs; b++) { + frp = xfs_iext_get_ext(ifp, idx + b); *count += xfs_bmbt_get_blockcount(frp); + } return 0; } /* - * Count leaf blocks given a pointer to an extent list originally in btree format. + * Count leaf blocks given a range of extent records originally + * in btree format. 
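The same indexing style carries over to the ordering checks in xfs_bmap_check_extents() and xfs_bmap_check_leaf_extents() above: instead of comparing ep against ep + 1, the current and next records are fetched by index. A simplified, self-contained version of that check, with rec_get() standing in for xfs_iext_get_ext()/XFS_BTREE_REC_ADDR and a plain assert in place of xfs_btree_check_rec():

/* Simplified ordering check over indexed records; all names here are
 * illustrative stand-ins, not kernel interfaces. */
#include <assert.h>
#include <stdio.h>

typedef struct { unsigned long startoff, blockcount; } rec_t;

static const rec_t *rec_get(const rec_t *recs, int idx)
{
	return &recs[idx];
}

static void check_ordered(const rec_t *recs, int nrecs)
{
	const rec_t *ep, *nextp;
	int idx;

	if (nrecs < 2)
		return;
	ep = rec_get(recs, 0);
	for (idx = 0; idx < nrecs - 1; idx++) {
		nextp = rec_get(recs, idx + 1);
		/* each record must end at or before the next one starts */
		assert(ep->startoff + ep->blockcount <= nextp->startoff);
		ep = nextp;
	}
}

int main(void)
{
	rec_t recs[] = { {0, 4}, {4, 8}, {16, 2} };

	check_ordered(recs, 3);
	printf("records are in order\n");
	return 0;
}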
*/ int xfs_bmap_disk_count_leaves( - xfs_bmbt_rec_t *frp, + xfs_ifork_t *ifp, + xfs_mount_t *mp, + xfs_extnum_t idx, + xfs_bmbt_block_t *block, int numrecs, int *count) { int b; + xfs_bmbt_rec_t *frp; - for ( b = 1; b <= numrecs; b++, frp++) + for (b = 1; b <= numrecs; b++) { + frp = XFS_BTREE_REC_ADDR(mp->m_sb.sb_blocksize, + xfs_bmbt, block, idx + b, mp->m_bmap_dmxr[0]); *count += xfs_bmbt_disk_get_blockcount(frp); + } return 0; } diff --git a/fs/xfs/xfs_bmap.h b/fs/xfs/xfs_bmap.h index 12cc63dfc2c..011ccaa9a1c 100644 --- a/fs/xfs/xfs_bmap.h +++ b/fs/xfs/xfs_bmap.h @@ -20,6 +20,7 @@ struct getbmap; struct xfs_bmbt_irec; +struct xfs_ifork; struct xfs_inode; struct xfs_mount; struct xfs_trans; @@ -347,9 +348,28 @@ xfs_bmap_count_blocks( */ int xfs_check_nostate_extents( - xfs_bmbt_rec_t *ep, + struct xfs_ifork *ifp, + xfs_extnum_t idx, xfs_extnum_t num); +/* + * Call xfs_bmap_do_search_extents() to search for the extent + * record containing block bno. If in multi-level in-core extent + * allocation mode, find and extract the target extent buffer, + * otherwise just use the direct extent list. + */ +xfs_bmbt_rec_t * +xfs_bmap_search_multi_extents(struct xfs_ifork *, xfs_fileoff_t, int *, + xfs_extnum_t *, xfs_bmbt_irec_t *, xfs_bmbt_irec_t *); + +/* + * Search an extent list for the extent which includes block + * bno. + */ +xfs_bmbt_rec_t *xfs_bmap_do_search_extents(xfs_bmbt_rec_t *, + xfs_extnum_t, xfs_extnum_t, xfs_fileoff_t, int *, + xfs_extnum_t *, xfs_bmbt_irec_t *, xfs_bmbt_irec_t *); + #endif /* __KERNEL__ */ #endif /* __XFS_BMAP_H__ */ diff --git a/fs/xfs/xfs_bmap_btree.c b/fs/xfs/xfs_bmap_btree.c index 3f1383d160e..bea44709afb 100644 --- a/fs/xfs/xfs_bmap_btree.c +++ b/fs/xfs/xfs_bmap_btree.c @@ -2754,7 +2754,7 @@ xfs_bmbt_update( } /* - * Check an extent list, which has just been read, for + * Check extent records, which have just been read, for * any bit in the extent flag field. ASSERT on debug * kernels, as this condition should not occur. * Return an error condition (1) if any flags found, @@ -2763,10 +2763,14 @@ xfs_bmbt_update( int xfs_check_nostate_extents( - xfs_bmbt_rec_t *ep, + xfs_ifork_t *ifp, + xfs_extnum_t idx, xfs_extnum_t num) { - for (; num > 0; num--, ep++) { + xfs_bmbt_rec_t *ep; + + for (; num > 0; num--, idx++) { + ep = xfs_iext_get_ext(ifp, idx); if ((ep->l0 >> (64 - BMBT_EXNTFLAG_BITLEN)) != 0) { ASSERT(0); diff --git a/fs/xfs/xfs_bmap_btree.h b/fs/xfs/xfs_bmap_btree.h index e095a2d344a..6478cfa0e53 100644 --- a/fs/xfs/xfs_bmap_btree.h +++ b/fs/xfs/xfs_bmap_btree.h @@ -372,14 +372,6 @@ extern int xfs_bmbt_get_rec(struct xfs_btree_cur *, xfs_fileoff_t *, xfs_exntst_t *, int *); #endif -/* - * Search an extent list for the extent which includes block - * bno. 
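For reference, the xfs_check_nostate_extents() change above keeps the same test, (ep->l0 >> (64 - BMBT_EXNTFLAG_BITLEN)) != 0, but fetches each record through the fork and index rather than a raw pointer. The sketch below models only the bit test on plain 64-bit words; FLAG_BITS = 1 is an assumption made for the sketch, as are all the names here.

/* Toy version of scanning packed 64-bit records for a state flag held
 * in the top bit(s) of the word. */
#include <stdint.h>
#include <stdio.h>

#define FLAG_BITS	1	/* assumed width of the flag field */

static int any_flag_set(const uint64_t *l0, int num)
{
	int i;

	for (i = 0; i < num; i++) {
		if ((l0[i] >> (64 - FLAG_BITS)) != 0)
			return 1;	/* found a record with the flag set */
	}
	return 0;
}

int main(void)
{
	uint64_t recs[3] = { 3, 42, 7 };

	printf("flag set: %d\n", any_flag_set(recs, 3));	/* 0 */
	recs[1] |= 1ULL << 63;					/* mark one record */
	printf("flag set: %d\n", any_flag_set(recs, 3));	/* 1 */
	return 0;
}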
- */ -xfs_bmbt_rec_t *xfs_bmap_do_search_extents(xfs_bmbt_rec_t *, - xfs_extnum_t, xfs_extnum_t, xfs_fileoff_t, int *, - xfs_extnum_t *, xfs_bmbt_irec_t *, xfs_bmbt_irec_t *); - #endif /* __KERNEL__ */ #endif /* __XFS_BMAP_BTREE_H__ */ diff --git a/fs/xfs/xfs_clnt.h b/fs/xfs/xfs_clnt.h index f57cc9ac875..022fff62085 100644 --- a/fs/xfs/xfs_clnt.h +++ b/fs/xfs/xfs_clnt.h @@ -68,8 +68,6 @@ struct xfs_mount_args { * enforcement */ #define XFSMNT_PQUOTAENF 0x00000040 /* IRIX project quota limit * enforcement */ -#define XFSMNT_NOATIME 0x00000100 /* don't modify access - * times on reads */ #define XFSMNT_NOALIGN 0x00000200 /* don't allocate at * stripe boundaries*/ #define XFSMNT_RETERR 0x00000400 /* return error to user */ diff --git a/fs/xfs/xfs_da_btree.c b/fs/xfs/xfs_da_btree.c index 473671fa5c1..4bae3a76c67 100644 --- a/fs/xfs/xfs_da_btree.c +++ b/fs/xfs/xfs_da_btree.c @@ -126,10 +126,10 @@ xfs_da_node_create(xfs_da_args_t *args, xfs_dablk_t blkno, int level, node = bp->data; node->hdr.info.forw = 0; node->hdr.info.back = 0; - INT_SET(node->hdr.info.magic, ARCH_CONVERT, XFS_DA_NODE_MAGIC); + node->hdr.info.magic = cpu_to_be16(XFS_DA_NODE_MAGIC); node->hdr.info.pad = 0; node->hdr.count = 0; - INT_SET(node->hdr.level, ARCH_CONVERT, level); + node->hdr.level = cpu_to_be16(level); xfs_da_log_buf(tp, bp, XFS_DA_LOGRANGE(node, &node->hdr, sizeof(node->hdr))); @@ -290,28 +290,28 @@ xfs_da_split(xfs_da_state_t *state) node = oldblk->bp->data; if (node->hdr.info.forw) { - if (INT_GET(node->hdr.info.forw, ARCH_CONVERT) == addblk->blkno) { + if (be32_to_cpu(node->hdr.info.forw) == addblk->blkno) { bp = addblk->bp; } else { ASSERT(state->extravalid); bp = state->extrablk.bp; } node = bp->data; - INT_SET(node->hdr.info.back, ARCH_CONVERT, oldblk->blkno); + node->hdr.info.back = cpu_to_be32(oldblk->blkno); xfs_da_log_buf(state->args->trans, bp, XFS_DA_LOGRANGE(node, &node->hdr.info, sizeof(node->hdr.info))); } node = oldblk->bp->data; - if (INT_GET(node->hdr.info.back, ARCH_CONVERT)) { - if (INT_GET(node->hdr.info.back, ARCH_CONVERT) == addblk->blkno) { + if (node->hdr.info.back) { + if (be32_to_cpu(node->hdr.info.back) == addblk->blkno) { bp = addblk->bp; } else { ASSERT(state->extravalid); bp = state->extrablk.bp; } node = bp->data; - INT_SET(node->hdr.info.forw, ARCH_CONVERT, oldblk->blkno); + node->hdr.info.forw = cpu_to_be32(oldblk->blkno); xfs_da_log_buf(state->args->trans, bp, XFS_DA_LOGRANGE(node, &node->hdr.info, sizeof(node->hdr.info))); @@ -359,14 +359,14 @@ xfs_da_root_split(xfs_da_state_t *state, xfs_da_state_blk_t *blk1, ASSERT(bp != NULL); node = bp->data; oldroot = blk1->bp->data; - if (INT_GET(oldroot->hdr.info.magic, ARCH_CONVERT) == XFS_DA_NODE_MAGIC) { - size = (int)((char *)&oldroot->btree[INT_GET(oldroot->hdr.count, ARCH_CONVERT)] - + if (be16_to_cpu(oldroot->hdr.info.magic) == XFS_DA_NODE_MAGIC) { + size = (int)((char *)&oldroot->btree[be16_to_cpu(oldroot->hdr.count)] - (char *)oldroot); } else { ASSERT(XFS_DIR_IS_V2(mp)); - ASSERT(INT_GET(oldroot->hdr.info.magic, ARCH_CONVERT) == XFS_DIR2_LEAFN_MAGIC); + ASSERT(be16_to_cpu(oldroot->hdr.info.magic) == XFS_DIR2_LEAFN_MAGIC); leaf = (xfs_dir2_leaf_t *)oldroot; - size = (int)((char *)&leaf->ents[INT_GET(leaf->hdr.count, ARCH_CONVERT)] - + size = (int)((char *)&leaf->ents[be16_to_cpu(leaf->hdr.count)] - (char *)leaf); } memcpy(node, oldroot, size); @@ -381,18 +381,18 @@ xfs_da_root_split(xfs_da_state_t *state, xfs_da_state_blk_t *blk1, error = xfs_da_node_create(args, args->whichfork == XFS_DATA_FORK && XFS_DIR_IS_V2(mp) ? 
mp->m_dirleafblk : 0, - INT_GET(node->hdr.level, ARCH_CONVERT) + 1, &bp, args->whichfork); + be16_to_cpu(node->hdr.level) + 1, &bp, args->whichfork); if (error) return(error); node = bp->data; - INT_SET(node->btree[0].hashval, ARCH_CONVERT, blk1->hashval); - INT_SET(node->btree[0].before, ARCH_CONVERT, blk1->blkno); - INT_SET(node->btree[1].hashval, ARCH_CONVERT, blk2->hashval); - INT_SET(node->btree[1].before, ARCH_CONVERT, blk2->blkno); - INT_SET(node->hdr.count, ARCH_CONVERT, 2); + node->btree[0].hashval = cpu_to_be32(blk1->hashval); + node->btree[0].before = cpu_to_be32(blk1->blkno); + node->btree[1].hashval = cpu_to_be32(blk2->hashval); + node->btree[1].before = cpu_to_be32(blk2->blkno); + node->hdr.count = cpu_to_be16(2); #ifdef DEBUG - if (INT_GET(oldroot->hdr.info.magic, ARCH_CONVERT) == XFS_DIR2_LEAFN_MAGIC) { + if (be16_to_cpu(oldroot->hdr.info.magic) == XFS_DIR2_LEAFN_MAGIC) { ASSERT(blk1->blkno >= mp->m_dirleafblk && blk1->blkno < mp->m_dirfreeblk); ASSERT(blk2->blkno >= mp->m_dirleafblk && @@ -424,7 +424,7 @@ xfs_da_node_split(xfs_da_state_t *state, xfs_da_state_blk_t *oldblk, int useextra; node = oldblk->bp->data; - ASSERT(INT_GET(node->hdr.info.magic, ARCH_CONVERT) == XFS_DA_NODE_MAGIC); + ASSERT(be16_to_cpu(node->hdr.info.magic) == XFS_DA_NODE_MAGIC); /* * With V2 the extra block is data or freespace. @@ -435,7 +435,7 @@ xfs_da_node_split(xfs_da_state_t *state, xfs_da_state_blk_t *oldblk, /* * Do we have to split the node? */ - if ((INT_GET(node->hdr.count, ARCH_CONVERT) + newcount) > state->node_ents) { + if ((be16_to_cpu(node->hdr.count) + newcount) > state->node_ents) { /* * Allocate a new node, add to the doubly linked chain of * nodes, then move some of our excess entries into it. @@ -472,7 +472,7 @@ xfs_da_node_split(xfs_da_state_t *state, xfs_da_state_blk_t *oldblk, * If we had double-split op below us, then add the extra block too. */ node = oldblk->bp->data; - if (oldblk->index <= INT_GET(node->hdr.count, ARCH_CONVERT)) { + if (oldblk->index <= be16_to_cpu(node->hdr.count)) { oldblk->index++; xfs_da_node_add(state, oldblk, addblk); if (useextra) { @@ -516,17 +516,17 @@ xfs_da_node_rebalance(xfs_da_state_t *state, xfs_da_state_blk_t *blk1, * Figure out how many entries need to move, and in which direction. * Swap the nodes around if that makes it simpler. 
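From this point on, the xfs_da_btree.c hunks replace the INT_GET/INT_SET/INT_MOD(..., ARCH_CONVERT) accessors with __be16/__be32 typed fields read and written through be16_to_cpu(), cpu_to_be16() and be16_add(). The userspace model below illustrates the discipline only; the demo_* helpers are local reimplementations written for this sketch (the kernel supplies its own, which compile to no-ops on big-endian hosts), and demo_node_hdr is not the real xfs_da_node_hdr.

/* Userspace model of the on-disk endian discipline: fields are kept in
 * big-endian form and converted at every access. */
#include <stdint.h>
#include <stdio.h>

typedef uint16_t be16;			/* stand-in for __be16 */

static uint16_t demo_be16_to_cpu(be16 v)
{
	return (uint16_t)((v >> 8) | (v << 8));	/* assumes little-endian host */
}

static be16 demo_cpu_to_be16(uint16_t v)
{
	return (be16)((v >> 8) | (v << 8));
}

static void demo_be16_add(be16 *p, int d)
{
	*p = demo_cpu_to_be16((uint16_t)(demo_be16_to_cpu(*p) + d));
}

struct demo_node_hdr {
	be16 count;	/* count of active entries, stored big-endian */
	be16 level;	/* level above leaves */
};

int main(void)
{
	struct demo_node_hdr hdr;

	hdr.count = demo_cpu_to_be16(2);
	hdr.level = demo_cpu_to_be16(1);
	demo_be16_add(&hdr.count, 1);	/* like be16_add(&node->hdr.count, 1) */
	printf("count=%u level=%u\n",
	       demo_be16_to_cpu(hdr.count), demo_be16_to_cpu(hdr.level));
	return 0;
}

Typing the fields as __be16/__be32, as the xfs_da_btree.h hunk below does, lets sparse flag any access that forgets the conversion.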
*/ - if ((INT_GET(node1->hdr.count, ARCH_CONVERT) > 0) && (INT_GET(node2->hdr.count, ARCH_CONVERT) > 0) && - ((INT_GET(node2->btree[ 0 ].hashval, ARCH_CONVERT) < INT_GET(node1->btree[ 0 ].hashval, ARCH_CONVERT)) || - (INT_GET(node2->btree[ INT_GET(node2->hdr.count, ARCH_CONVERT)-1 ].hashval, ARCH_CONVERT) < - INT_GET(node1->btree[ INT_GET(node1->hdr.count, ARCH_CONVERT)-1 ].hashval, ARCH_CONVERT)))) { + if ((be16_to_cpu(node1->hdr.count) > 0) && (be16_to_cpu(node2->hdr.count) > 0) && + ((be32_to_cpu(node2->btree[0].hashval) < be32_to_cpu(node1->btree[0].hashval)) || + (be32_to_cpu(node2->btree[be16_to_cpu(node2->hdr.count)-1].hashval) < + be32_to_cpu(node1->btree[be16_to_cpu(node1->hdr.count)-1].hashval)))) { tmpnode = node1; node1 = node2; node2 = tmpnode; } - ASSERT(INT_GET(node1->hdr.info.magic, ARCH_CONVERT) == XFS_DA_NODE_MAGIC); - ASSERT(INT_GET(node2->hdr.info.magic, ARCH_CONVERT) == XFS_DA_NODE_MAGIC); - count = (INT_GET(node1->hdr.count, ARCH_CONVERT) - INT_GET(node2->hdr.count, ARCH_CONVERT)) / 2; + ASSERT(be16_to_cpu(node1->hdr.info.magic) == XFS_DA_NODE_MAGIC); + ASSERT(be16_to_cpu(node2->hdr.info.magic) == XFS_DA_NODE_MAGIC); + count = (be16_to_cpu(node1->hdr.count) - be16_to_cpu(node2->hdr.count)) / 2; if (count == 0) return; tp = state->args->trans; @@ -537,7 +537,7 @@ xfs_da_node_rebalance(xfs_da_state_t *state, xfs_da_state_blk_t *blk1, /* * Move elements in node2 up to make a hole. */ - if ((tmp = INT_GET(node2->hdr.count, ARCH_CONVERT)) > 0) { + if ((tmp = be16_to_cpu(node2->hdr.count)) > 0) { tmp *= (uint)sizeof(xfs_da_node_entry_t); btree_s = &node2->btree[0]; btree_d = &node2->btree[count]; @@ -548,13 +548,12 @@ xfs_da_node_rebalance(xfs_da_state_t *state, xfs_da_state_blk_t *blk1, * Move the req'd B-tree elements from high in node1 to * low in node2. */ - INT_MOD(node2->hdr.count, ARCH_CONVERT, count); + be16_add(&node2->hdr.count, count); tmp = count * (uint)sizeof(xfs_da_node_entry_t); - btree_s = &node1->btree[INT_GET(node1->hdr.count, ARCH_CONVERT) - count]; + btree_s = &node1->btree[be16_to_cpu(node1->hdr.count) - count]; btree_d = &node2->btree[0]; memcpy(btree_d, btree_s, tmp); - INT_MOD(node1->hdr.count, ARCH_CONVERT, -(count)); - + be16_add(&node1->hdr.count, -count); } else { /* * Move the req'd B-tree elements from low in node2 to @@ -563,21 +562,21 @@ xfs_da_node_rebalance(xfs_da_state_t *state, xfs_da_state_blk_t *blk1, count = -count; tmp = count * (uint)sizeof(xfs_da_node_entry_t); btree_s = &node2->btree[0]; - btree_d = &node1->btree[INT_GET(node1->hdr.count, ARCH_CONVERT)]; + btree_d = &node1->btree[be16_to_cpu(node1->hdr.count)]; memcpy(btree_d, btree_s, tmp); - INT_MOD(node1->hdr.count, ARCH_CONVERT, count); + be16_add(&node1->hdr.count, count); xfs_da_log_buf(tp, blk1->bp, XFS_DA_LOGRANGE(node1, btree_d, tmp)); /* * Move elements in node2 down to fill the hole. 
*/ - tmp = INT_GET(node2->hdr.count, ARCH_CONVERT) - count; + tmp = be16_to_cpu(node2->hdr.count) - count; tmp *= (uint)sizeof(xfs_da_node_entry_t); btree_s = &node2->btree[count]; btree_d = &node2->btree[0]; memmove(btree_d, btree_s, tmp); - INT_MOD(node2->hdr.count, ARCH_CONVERT, -(count)); + be16_add(&node2->hdr.count, -count); } /* @@ -588,7 +587,7 @@ xfs_da_node_rebalance(xfs_da_state_t *state, xfs_da_state_blk_t *blk1, xfs_da_log_buf(tp, blk2->bp, XFS_DA_LOGRANGE(node2, &node2->hdr, sizeof(node2->hdr) + - sizeof(node2->btree[0]) * INT_GET(node2->hdr.count, ARCH_CONVERT))); + sizeof(node2->btree[0]) * be16_to_cpu(node2->hdr.count))); /* * Record the last hashval from each block for upward propagation. @@ -596,15 +595,15 @@ xfs_da_node_rebalance(xfs_da_state_t *state, xfs_da_state_blk_t *blk1, */ node1 = blk1->bp->data; node2 = blk2->bp->data; - blk1->hashval = INT_GET(node1->btree[ INT_GET(node1->hdr.count, ARCH_CONVERT)-1 ].hashval, ARCH_CONVERT); - blk2->hashval = INT_GET(node2->btree[ INT_GET(node2->hdr.count, ARCH_CONVERT)-1 ].hashval, ARCH_CONVERT); + blk1->hashval = be32_to_cpu(node1->btree[be16_to_cpu(node1->hdr.count)-1].hashval); + blk2->hashval = be32_to_cpu(node2->btree[be16_to_cpu(node2->hdr.count)-1].hashval); /* * Adjust the expected index for insertion. */ - if (blk1->index >= INT_GET(node1->hdr.count, ARCH_CONVERT)) { - blk2->index = blk1->index - INT_GET(node1->hdr.count, ARCH_CONVERT); - blk1->index = INT_GET(node1->hdr.count, ARCH_CONVERT) + 1; /* make it invalid */ + if (blk1->index >= be16_to_cpu(node1->hdr.count)) { + blk2->index = blk1->index - be16_to_cpu(node1->hdr.count); + blk1->index = be16_to_cpu(node1->hdr.count) + 1; /* make it invalid */ } } @@ -622,8 +621,8 @@ xfs_da_node_add(xfs_da_state_t *state, xfs_da_state_blk_t *oldblk, node = oldblk->bp->data; mp = state->mp; - ASSERT(INT_GET(node->hdr.info.magic, ARCH_CONVERT) == XFS_DA_NODE_MAGIC); - ASSERT((oldblk->index >= 0) && (oldblk->index <= INT_GET(node->hdr.count, ARCH_CONVERT))); + ASSERT(be16_to_cpu(node->hdr.info.magic) == XFS_DA_NODE_MAGIC); + ASSERT((oldblk->index >= 0) && (oldblk->index <= be16_to_cpu(node->hdr.count))); ASSERT(newblk->blkno != 0); if (state->args->whichfork == XFS_DATA_FORK && XFS_DIR_IS_V2(mp)) ASSERT(newblk->blkno >= mp->m_dirleafblk && @@ -634,22 +633,22 @@ xfs_da_node_add(xfs_da_state_t *state, xfs_da_state_blk_t *oldblk, */ tmp = 0; btree = &node->btree[ oldblk->index ]; - if (oldblk->index < INT_GET(node->hdr.count, ARCH_CONVERT)) { - tmp = (INT_GET(node->hdr.count, ARCH_CONVERT) - oldblk->index) * (uint)sizeof(*btree); + if (oldblk->index < be16_to_cpu(node->hdr.count)) { + tmp = (be16_to_cpu(node->hdr.count) - oldblk->index) * (uint)sizeof(*btree); memmove(btree + 1, btree, tmp); } - INT_SET(btree->hashval, ARCH_CONVERT, newblk->hashval); - INT_SET(btree->before, ARCH_CONVERT, newblk->blkno); + btree->hashval = cpu_to_be32(newblk->hashval); + btree->before = cpu_to_be32(newblk->blkno); xfs_da_log_buf(state->args->trans, oldblk->bp, XFS_DA_LOGRANGE(node, btree, tmp + sizeof(*btree))); - INT_MOD(node->hdr.count, ARCH_CONVERT, +1); + be16_add(&node->hdr.count, 1); xfs_da_log_buf(state->args->trans, oldblk->bp, XFS_DA_LOGRANGE(node, &node->hdr, sizeof(node->hdr))); /* * Copy the last hash value from the oldblk to propagate upwards. 
*/ - oldblk->hashval = INT_GET(node->btree[ INT_GET(node->hdr.count, ARCH_CONVERT)-1 ].hashval, ARCH_CONVERT); + oldblk->hashval = be32_to_cpu(node->btree[be16_to_cpu(node->hdr.count)-1 ].hashval); } /*======================================================================== @@ -768,21 +767,21 @@ xfs_da_root_join(xfs_da_state_t *state, xfs_da_state_blk_t *root_blk) ASSERT(args != NULL); ASSERT(root_blk->magic == XFS_DA_NODE_MAGIC); oldroot = root_blk->bp->data; - ASSERT(INT_GET(oldroot->hdr.info.magic, ARCH_CONVERT) == XFS_DA_NODE_MAGIC); + ASSERT(be16_to_cpu(oldroot->hdr.info.magic) == XFS_DA_NODE_MAGIC); ASSERT(!oldroot->hdr.info.forw); ASSERT(!oldroot->hdr.info.back); /* * If the root has more than one child, then don't do anything. */ - if (INT_GET(oldroot->hdr.count, ARCH_CONVERT) > 1) + if (be16_to_cpu(oldroot->hdr.count) > 1) return(0); /* * Read in the (only) child block, then copy those bytes into * the root block's buffer and free the original child block. */ - child = INT_GET(oldroot->btree[ 0 ].before, ARCH_CONVERT); + child = be32_to_cpu(oldroot->btree[0].before); ASSERT(child != 0); error = xfs_da_read_buf(args->trans, args->dp, child, -1, &bp, args->whichfork); @@ -790,11 +789,11 @@ xfs_da_root_join(xfs_da_state_t *state, xfs_da_state_blk_t *root_blk) return(error); ASSERT(bp != NULL); blkinfo = bp->data; - if (INT_GET(oldroot->hdr.level, ARCH_CONVERT) == 1) { - ASSERT(INT_GET(blkinfo->magic, ARCH_CONVERT) == XFS_DIRX_LEAF_MAGIC(state->mp) || - INT_GET(blkinfo->magic, ARCH_CONVERT) == XFS_ATTR_LEAF_MAGIC); + if (be16_to_cpu(oldroot->hdr.level) == 1) { + ASSERT(be16_to_cpu(blkinfo->magic) == XFS_DIRX_LEAF_MAGIC(state->mp) || + be16_to_cpu(blkinfo->magic) == XFS_ATTR_LEAF_MAGIC); } else { - ASSERT(INT_GET(blkinfo->magic, ARCH_CONVERT) == XFS_DA_NODE_MAGIC); + ASSERT(be16_to_cpu(blkinfo->magic) == XFS_DA_NODE_MAGIC); } ASSERT(!blkinfo->forw); ASSERT(!blkinfo->back); @@ -830,9 +829,9 @@ xfs_da_node_toosmall(xfs_da_state_t *state, int *action) */ blk = &state->path.blk[ state->path.active-1 ]; info = blk->bp->data; - ASSERT(INT_GET(info->magic, ARCH_CONVERT) == XFS_DA_NODE_MAGIC); + ASSERT(be16_to_cpu(info->magic) == XFS_DA_NODE_MAGIC); node = (xfs_da_intnode_t *)info; - count = INT_GET(node->hdr.count, ARCH_CONVERT); + count = be16_to_cpu(node->hdr.count); if (count > (state->node_ents >> 1)) { *action = 0; /* blk over 50%, don't try to join */ return(0); /* blk over 50%, don't try to join */ @@ -849,7 +848,7 @@ xfs_da_node_toosmall(xfs_da_state_t *state, int *action) * Make altpath point to the block we want to keep and * path point to the block we want to drop (this one). */ - forward = info->forw; + forward = (info->forw != 0); memcpy(&state->altpath, &state->path, sizeof(state->path)); error = xfs_da_path_shift(state, &state->altpath, forward, 0, &retval); @@ -871,13 +870,12 @@ xfs_da_node_toosmall(xfs_da_state_t *state, int *action) * to shrink a directory over time. 
*/ /* start with smaller blk num */ - forward = (INT_GET(info->forw, ARCH_CONVERT) - < INT_GET(info->back, ARCH_CONVERT)); + forward = (be32_to_cpu(info->forw) < be32_to_cpu(info->back)); for (i = 0; i < 2; forward = !forward, i++) { if (forward) - blkno = INT_GET(info->forw, ARCH_CONVERT); + blkno = be32_to_cpu(info->forw); else - blkno = INT_GET(info->back, ARCH_CONVERT); + blkno = be32_to_cpu(info->back); if (blkno == 0) continue; error = xfs_da_read_buf(state->args->trans, state->args->dp, @@ -889,10 +887,10 @@ xfs_da_node_toosmall(xfs_da_state_t *state, int *action) node = (xfs_da_intnode_t *)info; count = state->node_ents; count -= state->node_ents >> 2; - count -= INT_GET(node->hdr.count, ARCH_CONVERT); + count -= be16_to_cpu(node->hdr.count); node = bp->data; - ASSERT(INT_GET(node->hdr.info.magic, ARCH_CONVERT) == XFS_DA_NODE_MAGIC); - count -= INT_GET(node->hdr.count, ARCH_CONVERT); + ASSERT(be16_to_cpu(node->hdr.info.magic) == XFS_DA_NODE_MAGIC); + count -= be16_to_cpu(node->hdr.count); xfs_da_brelse(state->args->trans, bp); if (count >= 0) break; /* fits with at least 25% to spare */ @@ -973,16 +971,16 @@ xfs_da_fixhashpath(xfs_da_state_t *state, xfs_da_state_path_t *path) } for (blk--, level--; level >= 0; blk--, level--) { node = blk->bp->data; - ASSERT(INT_GET(node->hdr.info.magic, ARCH_CONVERT) == XFS_DA_NODE_MAGIC); + ASSERT(be16_to_cpu(node->hdr.info.magic) == XFS_DA_NODE_MAGIC); btree = &node->btree[ blk->index ]; - if (INT_GET(btree->hashval, ARCH_CONVERT) == lasthash) + if (be32_to_cpu(btree->hashval) == lasthash) break; blk->hashval = lasthash; - INT_SET(btree->hashval, ARCH_CONVERT, lasthash); + btree->hashval = cpu_to_be32(lasthash); xfs_da_log_buf(state->args->trans, blk->bp, XFS_DA_LOGRANGE(node, btree, sizeof(*btree))); - lasthash = INT_GET(node->btree[ INT_GET(node->hdr.count, ARCH_CONVERT)-1 ].hashval, ARCH_CONVERT); + lasthash = be32_to_cpu(node->btree[be16_to_cpu(node->hdr.count)-1].hashval); } } @@ -997,25 +995,25 @@ xfs_da_node_remove(xfs_da_state_t *state, xfs_da_state_blk_t *drop_blk) int tmp; node = drop_blk->bp->data; - ASSERT(drop_blk->index < INT_GET(node->hdr.count, ARCH_CONVERT)); + ASSERT(drop_blk->index < be16_to_cpu(node->hdr.count)); ASSERT(drop_blk->index >= 0); /* * Copy over the offending entry, or just zero it out. */ btree = &node->btree[drop_blk->index]; - if (drop_blk->index < (INT_GET(node->hdr.count, ARCH_CONVERT)-1)) { - tmp = INT_GET(node->hdr.count, ARCH_CONVERT) - drop_blk->index - 1; + if (drop_blk->index < (be16_to_cpu(node->hdr.count)-1)) { + tmp = be16_to_cpu(node->hdr.count) - drop_blk->index - 1; tmp *= (uint)sizeof(xfs_da_node_entry_t); memmove(btree, btree + 1, tmp); xfs_da_log_buf(state->args->trans, drop_blk->bp, XFS_DA_LOGRANGE(node, btree, tmp)); - btree = &node->btree[ INT_GET(node->hdr.count, ARCH_CONVERT)-1 ]; + btree = &node->btree[be16_to_cpu(node->hdr.count)-1]; } memset((char *)btree, 0, sizeof(xfs_da_node_entry_t)); xfs_da_log_buf(state->args->trans, drop_blk->bp, XFS_DA_LOGRANGE(node, btree, sizeof(*btree))); - INT_MOD(node->hdr.count, ARCH_CONVERT, -1); + be16_add(&node->hdr.count, -1); xfs_da_log_buf(state->args->trans, drop_blk->bp, XFS_DA_LOGRANGE(node, &node->hdr, sizeof(node->hdr))); @@ -1023,7 +1021,7 @@ xfs_da_node_remove(xfs_da_state_t *state, xfs_da_state_blk_t *drop_blk) * Copy the last hash value from the block to propagate upwards. 
*/ btree--; - drop_blk->hashval = INT_GET(btree->hashval, ARCH_CONVERT); + drop_blk->hashval = be32_to_cpu(btree->hashval); } /* @@ -1041,40 +1039,40 @@ xfs_da_node_unbalance(xfs_da_state_t *state, xfs_da_state_blk_t *drop_blk, drop_node = drop_blk->bp->data; save_node = save_blk->bp->data; - ASSERT(INT_GET(drop_node->hdr.info.magic, ARCH_CONVERT) == XFS_DA_NODE_MAGIC); - ASSERT(INT_GET(save_node->hdr.info.magic, ARCH_CONVERT) == XFS_DA_NODE_MAGIC); + ASSERT(be16_to_cpu(drop_node->hdr.info.magic) == XFS_DA_NODE_MAGIC); + ASSERT(be16_to_cpu(save_node->hdr.info.magic) == XFS_DA_NODE_MAGIC); tp = state->args->trans; /* * If the dying block has lower hashvals, then move all the * elements in the remaining block up to make a hole. */ - if ((INT_GET(drop_node->btree[ 0 ].hashval, ARCH_CONVERT) < INT_GET(save_node->btree[ 0 ].hashval, ARCH_CONVERT)) || - (INT_GET(drop_node->btree[ INT_GET(drop_node->hdr.count, ARCH_CONVERT)-1 ].hashval, ARCH_CONVERT) < - INT_GET(save_node->btree[ INT_GET(save_node->hdr.count, ARCH_CONVERT)-1 ].hashval, ARCH_CONVERT))) + if ((be32_to_cpu(drop_node->btree[0].hashval) < be32_to_cpu(save_node->btree[ 0 ].hashval)) || + (be32_to_cpu(drop_node->btree[be16_to_cpu(drop_node->hdr.count)-1].hashval) < + be32_to_cpu(save_node->btree[be16_to_cpu(save_node->hdr.count)-1].hashval))) { - btree = &save_node->btree[ INT_GET(drop_node->hdr.count, ARCH_CONVERT) ]; - tmp = INT_GET(save_node->hdr.count, ARCH_CONVERT) * (uint)sizeof(xfs_da_node_entry_t); + btree = &save_node->btree[be16_to_cpu(drop_node->hdr.count)]; + tmp = be16_to_cpu(save_node->hdr.count) * (uint)sizeof(xfs_da_node_entry_t); memmove(btree, &save_node->btree[0], tmp); btree = &save_node->btree[0]; xfs_da_log_buf(tp, save_blk->bp, XFS_DA_LOGRANGE(save_node, btree, - (INT_GET(save_node->hdr.count, ARCH_CONVERT) + INT_GET(drop_node->hdr.count, ARCH_CONVERT)) * + (be16_to_cpu(save_node->hdr.count) + be16_to_cpu(drop_node->hdr.count)) * sizeof(xfs_da_node_entry_t))); } else { - btree = &save_node->btree[ INT_GET(save_node->hdr.count, ARCH_CONVERT) ]; + btree = &save_node->btree[be16_to_cpu(save_node->hdr.count)]; xfs_da_log_buf(tp, save_blk->bp, XFS_DA_LOGRANGE(save_node, btree, - INT_GET(drop_node->hdr.count, ARCH_CONVERT) * + be16_to_cpu(drop_node->hdr.count) * sizeof(xfs_da_node_entry_t))); } /* * Move all the B-tree elements from drop_blk to save_blk. */ - tmp = INT_GET(drop_node->hdr.count, ARCH_CONVERT) * (uint)sizeof(xfs_da_node_entry_t); + tmp = be16_to_cpu(drop_node->hdr.count) * (uint)sizeof(xfs_da_node_entry_t); memcpy(btree, &drop_node->btree[0], tmp); - INT_MOD(save_node->hdr.count, ARCH_CONVERT, INT_GET(drop_node->hdr.count, ARCH_CONVERT)); + be16_add(&save_node->hdr.count, be16_to_cpu(drop_node->hdr.count)); xfs_da_log_buf(tp, save_blk->bp, XFS_DA_LOGRANGE(save_node, &save_node->hdr, @@ -1083,7 +1081,7 @@ xfs_da_node_unbalance(xfs_da_state_t *state, xfs_da_state_blk_t *drop_blk, /* * Save the last hashval in the remaining block for upward propagation. 
*/ - save_blk->hashval = INT_GET(save_node->btree[ INT_GET(save_node->hdr.count, ARCH_CONVERT)-1 ].hashval, ARCH_CONVERT); + save_blk->hashval = be32_to_cpu(save_node->btree[be16_to_cpu(save_node->hdr.count)-1].hashval); } /*======================================================================== @@ -1138,46 +1136,46 @@ xfs_da_node_lookup_int(xfs_da_state_t *state, int *result) return(error); } curr = blk->bp->data; - ASSERT(INT_GET(curr->magic, ARCH_CONVERT) == XFS_DA_NODE_MAGIC || - INT_GET(curr->magic, ARCH_CONVERT) == XFS_DIRX_LEAF_MAGIC(state->mp) || - INT_GET(curr->magic, ARCH_CONVERT) == XFS_ATTR_LEAF_MAGIC); + ASSERT(be16_to_cpu(curr->magic) == XFS_DA_NODE_MAGIC || + be16_to_cpu(curr->magic) == XFS_DIRX_LEAF_MAGIC(state->mp) || + be16_to_cpu(curr->magic) == XFS_ATTR_LEAF_MAGIC); /* * Search an intermediate node for a match. */ - blk->magic = INT_GET(curr->magic, ARCH_CONVERT); - if (INT_GET(curr->magic, ARCH_CONVERT) == XFS_DA_NODE_MAGIC) { + blk->magic = be16_to_cpu(curr->magic); + if (blk->magic == XFS_DA_NODE_MAGIC) { node = blk->bp->data; - blk->hashval = INT_GET(node->btree[ INT_GET(node->hdr.count, ARCH_CONVERT)-1 ].hashval, ARCH_CONVERT); + blk->hashval = be32_to_cpu(node->btree[be16_to_cpu(node->hdr.count)-1].hashval); /* * Binary search. (note: small blocks will skip loop) */ - max = INT_GET(node->hdr.count, ARCH_CONVERT); + max = be16_to_cpu(node->hdr.count); probe = span = max / 2; hashval = args->hashval; for (btree = &node->btree[probe]; span > 4; btree = &node->btree[probe]) { span /= 2; - if (INT_GET(btree->hashval, ARCH_CONVERT) < hashval) + if (be32_to_cpu(btree->hashval) < hashval) probe += span; - else if (INT_GET(btree->hashval, ARCH_CONVERT) > hashval) + else if (be32_to_cpu(btree->hashval) > hashval) probe -= span; else break; } ASSERT((probe >= 0) && (probe < max)); - ASSERT((span <= 4) || (INT_GET(btree->hashval, ARCH_CONVERT) == hashval)); + ASSERT((span <= 4) || (be32_to_cpu(btree->hashval) == hashval)); /* * Since we may have duplicate hashval's, find the first * matching hashval in the node. 
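The lookup loop above (xfs_da_node_lookup_int) keeps its strategy through the endian conversion: a coarse binary search that stops once the span is 4 or less, followed by a short scan back and forward so the probe lands on the first entry whose hashval is >= the target, ensuring duplicate hashvals are always entered at their first occurrence. A standalone model of that search over a plain sorted array (find_first_ge() and the array are invented for the sketch):

/* Coarse binary search, then scan to the first entry >= target. */
#include <stdint.h>
#include <stdio.h>

static int find_first_ge(const uint32_t *hash, int n, uint32_t target)
{
	int probe, span;

	probe = span = n / 2;
	while (span > 4) {
		span /= 2;
		if (hash[probe] < target)
			probe += span;
		else if (hash[probe] > target)
			probe -= span;
		else
			break;
	}
	/* back up past any duplicates of the target ... */
	while (probe > 0 && hash[probe] >= target)
		probe--;
	/* ... then forward to the first entry >= target */
	while (probe < n && hash[probe] < target)
		probe++;
	return probe;		/* == n when every entry is < target */
}

int main(void)
{
	uint32_t h[] = { 10, 20, 20, 20, 30, 40, 50, 60, 70, 80, 90, 95 };
	int n = (int)(sizeof(h) / sizeof(h[0]));

	printf("first >= 20 at index %d\n", find_first_ge(h, n, 20)); /* 1 */
	printf("first >= 33 at index %d\n", find_first_ge(h, n, 33)); /* 5 */
	return 0;
}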
*/ - while ((probe > 0) && (INT_GET(btree->hashval, ARCH_CONVERT) >= hashval)) { + while ((probe > 0) && (be32_to_cpu(btree->hashval) >= hashval)) { btree--; probe--; } - while ((probe < max) && (INT_GET(btree->hashval, ARCH_CONVERT) < hashval)) { + while ((probe < max) && (be32_to_cpu(btree->hashval) < hashval)) { btree++; probe++; } @@ -1187,21 +1185,21 @@ xfs_da_node_lookup_int(xfs_da_state_t *state, int *result) */ if (probe == max) { blk->index = max-1; - blkno = INT_GET(node->btree[ max-1 ].before, ARCH_CONVERT); + blkno = be32_to_cpu(node->btree[max-1].before); } else { blk->index = probe; - blkno = INT_GET(btree->before, ARCH_CONVERT); + blkno = be32_to_cpu(btree->before); } } - else if (INT_GET(curr->magic, ARCH_CONVERT) == XFS_ATTR_LEAF_MAGIC) { + else if (be16_to_cpu(curr->magic) == XFS_ATTR_LEAF_MAGIC) { blk->hashval = xfs_attr_leaf_lasthash(blk->bp, NULL); break; } - else if (INT_GET(curr->magic, ARCH_CONVERT) == XFS_DIR_LEAF_MAGIC) { + else if (be16_to_cpu(curr->magic) == XFS_DIR_LEAF_MAGIC) { blk->hashval = xfs_dir_leaf_lasthash(blk->bp, NULL); break; } - else if (INT_GET(curr->magic, ARCH_CONVERT) == XFS_DIR2_LEAFN_MAGIC) { + else if (be16_to_cpu(curr->magic) == XFS_DIR2_LEAFN_MAGIC) { blk->hashval = xfs_dir2_leafn_lasthash(blk->bp, NULL); break; } @@ -1274,8 +1272,8 @@ xfs_da_blk_link(xfs_da_state_t *state, xfs_da_state_blk_t *old_blk, ASSERT(old_blk->magic == XFS_DA_NODE_MAGIC || old_blk->magic == XFS_DIRX_LEAF_MAGIC(state->mp) || old_blk->magic == XFS_ATTR_LEAF_MAGIC); - ASSERT(old_blk->magic == INT_GET(old_info->magic, ARCH_CONVERT)); - ASSERT(new_blk->magic == INT_GET(new_info->magic, ARCH_CONVERT)); + ASSERT(old_blk->magic == be16_to_cpu(old_info->magic)); + ASSERT(new_blk->magic == be16_to_cpu(new_info->magic)); ASSERT(old_blk->magic == new_blk->magic); switch (old_blk->magic) { @@ -1302,47 +1300,44 @@ xfs_da_blk_link(xfs_da_state_t *state, xfs_da_state_blk_t *old_blk, /* * Link new block in before existing block. */ - INT_SET(new_info->forw, ARCH_CONVERT, old_blk->blkno); - new_info->back = old_info->back; /* INT_: direct copy */ - if (INT_GET(old_info->back, ARCH_CONVERT)) { + new_info->forw = cpu_to_be32(old_blk->blkno); + new_info->back = old_info->back; + if (old_info->back) { error = xfs_da_read_buf(args->trans, args->dp, - INT_GET(old_info->back, - ARCH_CONVERT), -1, &bp, - args->whichfork); + be32_to_cpu(old_info->back), + -1, &bp, args->whichfork); if (error) return(error); ASSERT(bp != NULL); tmp_info = bp->data; - ASSERT(INT_GET(tmp_info->magic, ARCH_CONVERT) == INT_GET(old_info->magic, ARCH_CONVERT)); - ASSERT(INT_GET(tmp_info->forw, ARCH_CONVERT) == old_blk->blkno); - INT_SET(tmp_info->forw, ARCH_CONVERT, new_blk->blkno); + ASSERT(be16_to_cpu(tmp_info->magic) == be16_to_cpu(old_info->magic)); + ASSERT(be32_to_cpu(tmp_info->forw) == old_blk->blkno); + tmp_info->forw = cpu_to_be32(new_blk->blkno); xfs_da_log_buf(args->trans, bp, 0, sizeof(*tmp_info)-1); xfs_da_buf_done(bp); } - INT_SET(old_info->back, ARCH_CONVERT, new_blk->blkno); + old_info->back = cpu_to_be32(new_blk->blkno); } else { /* * Link new block in after existing block. 
*/ - new_info->forw = old_info->forw; /* INT_: direct copy */ - INT_SET(new_info->back, ARCH_CONVERT, old_blk->blkno); - if (INT_GET(old_info->forw, ARCH_CONVERT)) { + new_info->forw = old_info->forw; + new_info->back = cpu_to_be32(old_blk->blkno); + if (old_info->forw) { error = xfs_da_read_buf(args->trans, args->dp, - INT_GET(old_info->forw, ARCH_CONVERT), -1, &bp, - args->whichfork); + be32_to_cpu(old_info->forw), + -1, &bp, args->whichfork); if (error) return(error); ASSERT(bp != NULL); tmp_info = bp->data; - ASSERT(INT_GET(tmp_info->magic, ARCH_CONVERT) - == INT_GET(old_info->magic, ARCH_CONVERT)); - ASSERT(INT_GET(tmp_info->back, ARCH_CONVERT) - == old_blk->blkno); - INT_SET(tmp_info->back, ARCH_CONVERT, new_blk->blkno); + ASSERT(tmp_info->magic == old_info->magic); + ASSERT(be32_to_cpu(tmp_info->back) == old_blk->blkno); + tmp_info->back = cpu_to_be32(new_blk->blkno); xfs_da_log_buf(args->trans, bp, 0, sizeof(*tmp_info)-1); xfs_da_buf_done(bp); } - INT_SET(old_info->forw, ARCH_CONVERT, new_blk->blkno); + old_info->forw = cpu_to_be32(new_blk->blkno); } xfs_da_log_buf(args->trans, old_blk->bp, 0, sizeof(*tmp_info) - 1); @@ -1360,13 +1355,13 @@ xfs_da_node_order(xfs_dabuf_t *node1_bp, xfs_dabuf_t *node2_bp) node1 = node1_bp->data; node2 = node2_bp->data; - ASSERT((INT_GET(node1->hdr.info.magic, ARCH_CONVERT) == XFS_DA_NODE_MAGIC) && - (INT_GET(node2->hdr.info.magic, ARCH_CONVERT) == XFS_DA_NODE_MAGIC)); - if ((INT_GET(node1->hdr.count, ARCH_CONVERT) > 0) && (INT_GET(node2->hdr.count, ARCH_CONVERT) > 0) && - ((INT_GET(node2->btree[ 0 ].hashval, ARCH_CONVERT) < - INT_GET(node1->btree[ 0 ].hashval, ARCH_CONVERT)) || - (INT_GET(node2->btree[ INT_GET(node2->hdr.count, ARCH_CONVERT)-1 ].hashval, ARCH_CONVERT) < - INT_GET(node1->btree[ INT_GET(node1->hdr.count, ARCH_CONVERT)-1 ].hashval, ARCH_CONVERT)))) { + ASSERT((be16_to_cpu(node1->hdr.info.magic) == XFS_DA_NODE_MAGIC) && + (be16_to_cpu(node2->hdr.info.magic) == XFS_DA_NODE_MAGIC)); + if ((be16_to_cpu(node1->hdr.count) > 0) && (be16_to_cpu(node2->hdr.count) > 0) && + ((be32_to_cpu(node2->btree[0].hashval) < + be32_to_cpu(node1->btree[0].hashval)) || + (be32_to_cpu(node2->btree[be16_to_cpu(node2->hdr.count)-1].hashval) < + be32_to_cpu(node1->btree[be16_to_cpu(node1->hdr.count)-1].hashval)))) { return(1); } return(0); @@ -1381,12 +1376,12 @@ xfs_da_node_lasthash(xfs_dabuf_t *bp, int *count) xfs_da_intnode_t *node; node = bp->data; - ASSERT(INT_GET(node->hdr.info.magic, ARCH_CONVERT) == XFS_DA_NODE_MAGIC); + ASSERT(be16_to_cpu(node->hdr.info.magic) == XFS_DA_NODE_MAGIC); if (count) - *count = INT_GET(node->hdr.count, ARCH_CONVERT); + *count = be16_to_cpu(node->hdr.count); if (!node->hdr.count) return(0); - return(INT_GET(node->btree[ INT_GET(node->hdr.count, ARCH_CONVERT)-1 ].hashval, ARCH_CONVERT)); + return be32_to_cpu(node->btree[be16_to_cpu(node->hdr.count)-1].hashval); } /* @@ -1411,50 +1406,47 @@ xfs_da_blk_unlink(xfs_da_state_t *state, xfs_da_state_blk_t *drop_blk, ASSERT(save_blk->magic == XFS_DA_NODE_MAGIC || save_blk->magic == XFS_DIRX_LEAF_MAGIC(state->mp) || save_blk->magic == XFS_ATTR_LEAF_MAGIC); - ASSERT(save_blk->magic == INT_GET(save_info->magic, ARCH_CONVERT)); - ASSERT(drop_blk->magic == INT_GET(drop_info->magic, ARCH_CONVERT)); + ASSERT(save_blk->magic == be16_to_cpu(save_info->magic)); + ASSERT(drop_blk->magic == be16_to_cpu(drop_info->magic)); ASSERT(save_blk->magic == drop_blk->magic); - ASSERT((INT_GET(save_info->forw, ARCH_CONVERT) == drop_blk->blkno) || - (INT_GET(save_info->back, ARCH_CONVERT) == drop_blk->blkno)); 
- ASSERT((INT_GET(drop_info->forw, ARCH_CONVERT) == save_blk->blkno) || - (INT_GET(drop_info->back, ARCH_CONVERT) == save_blk->blkno)); + ASSERT((be32_to_cpu(save_info->forw) == drop_blk->blkno) || + (be32_to_cpu(save_info->back) == drop_blk->blkno)); + ASSERT((be32_to_cpu(drop_info->forw) == save_blk->blkno) || + (be32_to_cpu(drop_info->back) == save_blk->blkno)); /* * Unlink the leaf block from the doubly linked chain of leaves. */ - if (INT_GET(save_info->back, ARCH_CONVERT) == drop_blk->blkno) { - save_info->back = drop_info->back; /* INT_: direct copy */ - if (INT_GET(drop_info->back, ARCH_CONVERT)) { + if (be32_to_cpu(save_info->back) == drop_blk->blkno) { + save_info->back = drop_info->back; + if (drop_info->back) { error = xfs_da_read_buf(args->trans, args->dp, - INT_GET(drop_info->back, - ARCH_CONVERT), -1, &bp, - args->whichfork); + be32_to_cpu(drop_info->back), + -1, &bp, args->whichfork); if (error) return(error); ASSERT(bp != NULL); tmp_info = bp->data; - ASSERT(INT_GET(tmp_info->magic, ARCH_CONVERT) == INT_GET(save_info->magic, ARCH_CONVERT)); - ASSERT(INT_GET(tmp_info->forw, ARCH_CONVERT) == drop_blk->blkno); - INT_SET(tmp_info->forw, ARCH_CONVERT, save_blk->blkno); + ASSERT(tmp_info->magic == save_info->magic); + ASSERT(be32_to_cpu(tmp_info->forw) == drop_blk->blkno); + tmp_info->forw = cpu_to_be32(save_blk->blkno); xfs_da_log_buf(args->trans, bp, 0, sizeof(*tmp_info) - 1); xfs_da_buf_done(bp); } } else { - save_info->forw = drop_info->forw; /* INT_: direct copy */ - if (INT_GET(drop_info->forw, ARCH_CONVERT)) { + save_info->forw = drop_info->forw; + if (drop_info->forw) { error = xfs_da_read_buf(args->trans, args->dp, - INT_GET(drop_info->forw, ARCH_CONVERT), -1, &bp, - args->whichfork); + be32_to_cpu(drop_info->forw), + -1, &bp, args->whichfork); if (error) return(error); ASSERT(bp != NULL); tmp_info = bp->data; - ASSERT(INT_GET(tmp_info->magic, ARCH_CONVERT) - == INT_GET(save_info->magic, ARCH_CONVERT)); - ASSERT(INT_GET(tmp_info->back, ARCH_CONVERT) - == drop_blk->blkno); - INT_SET(tmp_info->back, ARCH_CONVERT, save_blk->blkno); + ASSERT(tmp_info->magic == save_info->magic); + ASSERT(be32_to_cpu(tmp_info->back) == drop_blk->blkno); + tmp_info->back = cpu_to_be32(save_blk->blkno); xfs_da_log_buf(args->trans, bp, 0, sizeof(*tmp_info) - 1); xfs_da_buf_done(bp); @@ -1497,14 +1489,14 @@ xfs_da_path_shift(xfs_da_state_t *state, xfs_da_state_path_t *path, for (blk = &path->blk[level]; level >= 0; blk--, level--) { ASSERT(blk->bp != NULL); node = blk->bp->data; - ASSERT(INT_GET(node->hdr.info.magic, ARCH_CONVERT) == XFS_DA_NODE_MAGIC); - if (forward && (blk->index < INT_GET(node->hdr.count, ARCH_CONVERT)-1)) { + ASSERT(be16_to_cpu(node->hdr.info.magic) == XFS_DA_NODE_MAGIC); + if (forward && (blk->index < be16_to_cpu(node->hdr.count)-1)) { blk->index++; - blkno = INT_GET(node->btree[ blk->index ].before, ARCH_CONVERT); + blkno = be32_to_cpu(node->btree[blk->index].before); break; } else if (!forward && (blk->index > 0)) { blk->index--; - blkno = INT_GET(node->btree[ blk->index ].before, ARCH_CONVERT); + blkno = be32_to_cpu(node->btree[blk->index].before); break; } } @@ -1536,18 +1528,18 @@ xfs_da_path_shift(xfs_da_state_t *state, xfs_da_state_path_t *path, return(error); ASSERT(blk->bp != NULL); info = blk->bp->data; - ASSERT(INT_GET(info->magic, ARCH_CONVERT) == XFS_DA_NODE_MAGIC || - INT_GET(info->magic, ARCH_CONVERT) == XFS_DIRX_LEAF_MAGIC(state->mp) || - INT_GET(info->magic, ARCH_CONVERT) == XFS_ATTR_LEAF_MAGIC); - blk->magic = INT_GET(info->magic, ARCH_CONVERT); - if 
(INT_GET(info->magic, ARCH_CONVERT) == XFS_DA_NODE_MAGIC) { + ASSERT(be16_to_cpu(info->magic) == XFS_DA_NODE_MAGIC || + be16_to_cpu(info->magic) == XFS_DIRX_LEAF_MAGIC(state->mp) || + be16_to_cpu(info->magic) == XFS_ATTR_LEAF_MAGIC); + blk->magic = be16_to_cpu(info->magic); + if (blk->magic == XFS_DA_NODE_MAGIC) { node = (xfs_da_intnode_t *)info; - blk->hashval = INT_GET(node->btree[ INT_GET(node->hdr.count, ARCH_CONVERT)-1 ].hashval, ARCH_CONVERT); + blk->hashval = be32_to_cpu(node->btree[be16_to_cpu(node->hdr.count)-1].hashval); if (forward) blk->index = 0; else - blk->index = INT_GET(node->hdr.count, ARCH_CONVERT)-1; - blkno = INT_GET(node->btree[ blk->index ].before, ARCH_CONVERT); + blk->index = be16_to_cpu(node->hdr.count)-1; + blkno = be32_to_cpu(node->btree[blk->index].before); } else { ASSERT(level == path->active-1); blk->index = 0; @@ -1788,40 +1780,40 @@ xfs_da_swap_lastblock(xfs_da_args_t *args, xfs_dablk_t *dead_blknop, /* * Get values from the moved block. */ - if (INT_GET(dead_info->magic, ARCH_CONVERT) == XFS_DIR_LEAF_MAGIC) { + if (be16_to_cpu(dead_info->magic) == XFS_DIR_LEAF_MAGIC) { ASSERT(XFS_DIR_IS_V1(mp)); dead_leaf = (xfs_dir_leafblock_t *)dead_info; dead_level = 0; dead_hash = INT_GET(dead_leaf->entries[INT_GET(dead_leaf->hdr.count, ARCH_CONVERT) - 1].hashval, ARCH_CONVERT); - } else if (INT_GET(dead_info->magic, ARCH_CONVERT) == XFS_DIR2_LEAFN_MAGIC) { + } else if (be16_to_cpu(dead_info->magic) == XFS_DIR2_LEAFN_MAGIC) { ASSERT(XFS_DIR_IS_V2(mp)); dead_leaf2 = (xfs_dir2_leaf_t *)dead_info; dead_level = 0; - dead_hash = INT_GET(dead_leaf2->ents[INT_GET(dead_leaf2->hdr.count, ARCH_CONVERT) - 1].hashval, ARCH_CONVERT); + dead_hash = be32_to_cpu(dead_leaf2->ents[be16_to_cpu(dead_leaf2->hdr.count) - 1].hashval); } else { - ASSERT(INT_GET(dead_info->magic, ARCH_CONVERT) == XFS_DA_NODE_MAGIC); + ASSERT(be16_to_cpu(dead_info->magic) == XFS_DA_NODE_MAGIC); dead_node = (xfs_da_intnode_t *)dead_info; - dead_level = INT_GET(dead_node->hdr.level, ARCH_CONVERT); - dead_hash = INT_GET(dead_node->btree[INT_GET(dead_node->hdr.count, ARCH_CONVERT) - 1].hashval, ARCH_CONVERT); + dead_level = be16_to_cpu(dead_node->hdr.level); + dead_hash = be32_to_cpu(dead_node->btree[be16_to_cpu(dead_node->hdr.count) - 1].hashval); } sib_buf = par_buf = NULL; /* * If the moved block has a left sibling, fix up the pointers. */ - if ((sib_blkno = INT_GET(dead_info->back, ARCH_CONVERT))) { + if ((sib_blkno = be32_to_cpu(dead_info->back))) { if ((error = xfs_da_read_buf(tp, ip, sib_blkno, -1, &sib_buf, w))) goto done; sib_info = sib_buf->data; if (unlikely( - INT_GET(sib_info->forw, ARCH_CONVERT) != last_blkno || - INT_GET(sib_info->magic, ARCH_CONVERT) != INT_GET(dead_info->magic, ARCH_CONVERT))) { + be32_to_cpu(sib_info->forw) != last_blkno || + sib_info->magic != dead_info->magic)) { XFS_ERROR_REPORT("xfs_da_swap_lastblock(2)", XFS_ERRLEVEL_LOW, mp); error = XFS_ERROR(EFSCORRUPTED); goto done; } - INT_SET(sib_info->forw, ARCH_CONVERT, dead_blkno); + sib_info->forw = cpu_to_be32(dead_blkno); xfs_da_log_buf(tp, sib_buf, XFS_DA_LOGRANGE(sib_info, &sib_info->forw, sizeof(sib_info->forw))); @@ -1831,20 +1823,19 @@ xfs_da_swap_lastblock(xfs_da_args_t *args, xfs_dablk_t *dead_blknop, /* * If the moved block has a right sibling, fix up the pointers. 
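In xfs_da_swap_lastblock() above, moving the last block into the dead block's slot means redirecting the left sibling's forward link; the next hunk does the same for the right sibling's back link, and later hunks fix the parent's before pointer. A pointer-free toy of the sibling fixup, using small block numbers instead of buffers (relocate() and struct blk are invented for the sketch):

/* Toy model of re-pointing siblings when a block moves: the left
 * sibling's forward link and the right sibling's back link must both
 * be redirected to the block's new number. */
#include <stdio.h>

#define NBLOCKS 8

struct blk {
	unsigned int forw;	/* next block number, 0 == none */
	unsigned int back;	/* previous block number, 0 == none */
};

static void relocate(struct blk *tbl, unsigned int oldno, unsigned int newno)
{
	tbl[newno] = tbl[oldno];	/* move the payload */

	if (tbl[newno].back)
		tbl[tbl[newno].back].forw = newno;
	if (tbl[newno].forw)
		tbl[tbl[newno].forw].back = newno;
}

int main(void)
{
	struct blk tbl[NBLOCKS] = { {0, 0} };

	/* chain: 1 <-> 2 <-> 3 */
	tbl[1] = (struct blk){ .forw = 2, .back = 0 };
	tbl[2] = (struct blk){ .forw = 3, .back = 1 };
	tbl[3] = (struct blk){ .forw = 0, .back = 2 };

	relocate(tbl, 2, 5);	/* move block 2 to slot 5 */
	printf("1.forw=%u 3.back=%u\n", tbl[1].forw, tbl[3].back); /* 5 5 */
	return 0;
}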
*/ - if ((sib_blkno = INT_GET(dead_info->forw, ARCH_CONVERT))) { + if ((sib_blkno = be32_to_cpu(dead_info->forw))) { if ((error = xfs_da_read_buf(tp, ip, sib_blkno, -1, &sib_buf, w))) goto done; sib_info = sib_buf->data; if (unlikely( - INT_GET(sib_info->back, ARCH_CONVERT) != last_blkno - || INT_GET(sib_info->magic, ARCH_CONVERT) - != INT_GET(dead_info->magic, ARCH_CONVERT))) { + be32_to_cpu(sib_info->back) != last_blkno || + sib_info->magic != dead_info->magic)) { XFS_ERROR_REPORT("xfs_da_swap_lastblock(3)", XFS_ERRLEVEL_LOW, mp); error = XFS_ERROR(EFSCORRUPTED); goto done; } - INT_SET(sib_info->back, ARCH_CONVERT, dead_blkno); + sib_info->back = cpu_to_be32(dead_blkno); xfs_da_log_buf(tp, sib_buf, XFS_DA_LOGRANGE(sib_info, &sib_info->back, sizeof(sib_info->back))); @@ -1861,26 +1852,26 @@ xfs_da_swap_lastblock(xfs_da_args_t *args, xfs_dablk_t *dead_blknop, goto done; par_node = par_buf->data; if (unlikely( - INT_GET(par_node->hdr.info.magic, ARCH_CONVERT) != XFS_DA_NODE_MAGIC || - (level >= 0 && level != INT_GET(par_node->hdr.level, ARCH_CONVERT) + 1))) { + be16_to_cpu(par_node->hdr.info.magic) != XFS_DA_NODE_MAGIC || + (level >= 0 && level != be16_to_cpu(par_node->hdr.level) + 1))) { XFS_ERROR_REPORT("xfs_da_swap_lastblock(4)", XFS_ERRLEVEL_LOW, mp); error = XFS_ERROR(EFSCORRUPTED); goto done; } - level = INT_GET(par_node->hdr.level, ARCH_CONVERT); + level = be16_to_cpu(par_node->hdr.level); for (entno = 0; - entno < INT_GET(par_node->hdr.count, ARCH_CONVERT) && - INT_GET(par_node->btree[entno].hashval, ARCH_CONVERT) < dead_hash; + entno < be16_to_cpu(par_node->hdr.count) && + be32_to_cpu(par_node->btree[entno].hashval) < dead_hash; entno++) continue; - if (unlikely(entno == INT_GET(par_node->hdr.count, ARCH_CONVERT))) { + if (unlikely(entno == be16_to_cpu(par_node->hdr.count))) { XFS_ERROR_REPORT("xfs_da_swap_lastblock(5)", XFS_ERRLEVEL_LOW, mp); error = XFS_ERROR(EFSCORRUPTED); goto done; } - par_blkno = INT_GET(par_node->btree[entno].before, ARCH_CONVERT); + par_blkno = be32_to_cpu(par_node->btree[entno].before); if (level == dead_level + 1) break; xfs_da_brelse(tp, par_buf); @@ -1892,13 +1883,13 @@ xfs_da_swap_lastblock(xfs_da_args_t *args, xfs_dablk_t *dead_blknop, */ for (;;) { for (; - entno < INT_GET(par_node->hdr.count, ARCH_CONVERT) && - INT_GET(par_node->btree[entno].before, ARCH_CONVERT) != last_blkno; + entno < be16_to_cpu(par_node->hdr.count) && + be32_to_cpu(par_node->btree[entno].before) != last_blkno; entno++) continue; - if (entno < INT_GET(par_node->hdr.count, ARCH_CONVERT)) + if (entno < be16_to_cpu(par_node->hdr.count)) break; - par_blkno = INT_GET(par_node->hdr.info.forw, ARCH_CONVERT); + par_blkno = be32_to_cpu(par_node->hdr.info.forw); xfs_da_brelse(tp, par_buf); par_buf = NULL; if (unlikely(par_blkno == 0)) { @@ -1911,8 +1902,8 @@ xfs_da_swap_lastblock(xfs_da_args_t *args, xfs_dablk_t *dead_blknop, goto done; par_node = par_buf->data; if (unlikely( - INT_GET(par_node->hdr.level, ARCH_CONVERT) != level || - INT_GET(par_node->hdr.info.magic, ARCH_CONVERT) != XFS_DA_NODE_MAGIC)) { + be16_to_cpu(par_node->hdr.level) != level || + be16_to_cpu(par_node->hdr.info.magic) != XFS_DA_NODE_MAGIC)) { XFS_ERROR_REPORT("xfs_da_swap_lastblock(7)", XFS_ERRLEVEL_LOW, mp); error = XFS_ERROR(EFSCORRUPTED); @@ -1923,7 +1914,7 @@ xfs_da_swap_lastblock(xfs_da_args_t *args, xfs_dablk_t *dead_blknop, /* * Update the parent entry pointing to the moved block. 
*/ - INT_SET(par_node->btree[entno].before, ARCH_CONVERT, dead_blkno); + par_node->btree[entno].before = cpu_to_be32(dead_blkno); xfs_da_log_buf(tp, par_buf, XFS_DA_LOGRANGE(par_node, &par_node->btree[entno].before, sizeof(par_node->btree[entno].before))); @@ -2203,8 +2194,8 @@ xfs_da_do_buf( info = rbp->data; data = rbp->data; free = rbp->data; - magic = INT_GET(info->magic, ARCH_CONVERT); - magic1 = INT_GET(data->hdr.magic, ARCH_CONVERT); + magic = be16_to_cpu(info->magic); + magic1 = be32_to_cpu(data->hdr.magic); if (unlikely( XFS_TEST_ERROR((magic != XFS_DA_NODE_MAGIC) && (magic != XFS_DIR_LEAF_MAGIC) && @@ -2213,7 +2204,7 @@ xfs_da_do_buf( (magic != XFS_DIR2_LEAFN_MAGIC) && (magic1 != XFS_DIR2_BLOCK_MAGIC) && (magic1 != XFS_DIR2_DATA_MAGIC) && - (INT_GET(free->hdr.magic, ARCH_CONVERT) != XFS_DIR2_FREE_MAGIC), + (be32_to_cpu(free->hdr.magic) != XFS_DIR2_FREE_MAGIC), mp, XFS_ERRTAG_DA_READ_BUF, XFS_RANDOM_DA_READ_BUF))) { xfs_buftrace("DA READ ERROR", rbp->bps[0]); diff --git a/fs/xfs/xfs_da_btree.h b/fs/xfs/xfs_da_btree.h index 41352113721..243a730d5ec 100644 --- a/fs/xfs/xfs_da_btree.h +++ b/fs/xfs/xfs_da_btree.h @@ -45,10 +45,10 @@ struct zone; (XFS_DIR_IS_V1(mp) ? XFS_DIR_LEAF_MAGIC : XFS_DIR2_LEAFN_MAGIC) typedef struct xfs_da_blkinfo { - xfs_dablk_t forw; /* previous block in list */ - xfs_dablk_t back; /* following block in list */ - __uint16_t magic; /* validity check on block */ - __uint16_t pad; /* unused */ + __be32 forw; /* previous block in list */ + __be32 back; /* following block in list */ + __be16 magic; /* validity check on block */ + __be16 pad; /* unused */ } xfs_da_blkinfo_t; /* @@ -65,12 +65,12 @@ typedef struct xfs_da_blkinfo { typedef struct xfs_da_intnode { struct xfs_da_node_hdr { /* constant-structure header block */ xfs_da_blkinfo_t info; /* block type, links, etc. 
*/ - __uint16_t count; /* count of active entries */ - __uint16_t level; /* level above leaves (leaf == 0) */ + __be16 count; /* count of active entries */ + __be16 level; /* level above leaves (leaf == 0) */ } hdr; struct xfs_da_node_entry { - xfs_dahash_t hashval; /* hash value for this descendant */ - xfs_dablk_t before; /* Btree block before this key */ + __be32 hashval; /* hash value for this descendant */ + __be32 before; /* Btree block before this key */ } btree[1]; /* variable sized array of keys */ } xfs_da_intnode_t; typedef struct xfs_da_node_hdr xfs_da_node_hdr_t; diff --git a/fs/xfs/xfs_dfrag.c b/fs/xfs/xfs_dfrag.c index c6191d00ad2..4968a6358e6 100644 --- a/fs/xfs/xfs_dfrag.c +++ b/fs/xfs/xfs_dfrag.c @@ -83,7 +83,7 @@ xfs_swapext( /* Pull information for the target fd */ if (((fp = fget((int)sxp->sx_fdtarget)) == NULL) || - ((vp = LINVFS_GET_VP(fp->f_dentry->d_inode)) == NULL)) { + ((vp = vn_from_inode(fp->f_dentry->d_inode)) == NULL)) { error = XFS_ERROR(EINVAL); goto error0; } @@ -95,7 +95,7 @@ xfs_swapext( } if (((tfp = fget((int)sxp->sx_fdtmp)) == NULL) || - ((tvp = LINVFS_GET_VP(tfp->f_dentry->d_inode)) == NULL)) { + ((tvp = vn_from_inode(tfp->f_dentry->d_inode)) == NULL)) { error = XFS_ERROR(EINVAL); goto error0; } diff --git a/fs/xfs/xfs_dir.c b/fs/xfs/xfs_dir.c index bb87d2a700a..9cc702a839a 100644 --- a/fs/xfs/xfs_dir.c +++ b/fs/xfs/xfs_dir.c @@ -634,7 +634,7 @@ xfs_dir_leaf_removename(xfs_da_args_t *args, int *count, int *totallen) return(retval); ASSERT(bp != NULL); leaf = bp->data; - ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) == XFS_DIR_LEAF_MAGIC); + ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_DIR_LEAF_MAGIC); retval = xfs_dir_leaf_lookup_int(bp, args, &index); if (retval == EEXIST) { (void)xfs_dir_leaf_remove(args->trans, bp, index); @@ -912,7 +912,7 @@ xfs_dir_node_getdents(xfs_trans_t *trans, xfs_inode_t *dp, uio_t *uio, return(error); if (bp) leaf = bp->data; - if (bp && INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) != XFS_DIR_LEAF_MAGIC) { + if (bp && be16_to_cpu(leaf->hdr.info.magic) != XFS_DIR_LEAF_MAGIC) { xfs_dir_trace_g_dub("node: block not a leaf", dp, uio, bno); xfs_da_brelse(trans, bp); @@ -949,17 +949,17 @@ xfs_dir_node_getdents(xfs_trans_t *trans, xfs_inode_t *dp, uio_t *uio, if (bp == NULL) return(XFS_ERROR(EFSCORRUPTED)); node = bp->data; - if (INT_GET(node->hdr.info.magic, ARCH_CONVERT) != XFS_DA_NODE_MAGIC) + if (be16_to_cpu(node->hdr.info.magic) != XFS_DA_NODE_MAGIC) break; btree = &node->btree[0]; xfs_dir_trace_g_dun("node: node detail", dp, uio, node); - for (i = 0; i < INT_GET(node->hdr.count, ARCH_CONVERT); btree++, i++) { - if (INT_GET(btree->hashval, ARCH_CONVERT) >= cookhash) { - bno = INT_GET(btree->before, ARCH_CONVERT); + for (i = 0; i < be16_to_cpu(node->hdr.count); btree++, i++) { + if (be32_to_cpu(btree->hashval) >= cookhash) { + bno = be32_to_cpu(btree->before); break; } } - if (i == INT_GET(node->hdr.count, ARCH_CONVERT)) { + if (i == be16_to_cpu(node->hdr.count)) { xfs_da_brelse(trans, bp); xfs_dir_trace_g_du("node: hash beyond EOF", dp, uio); @@ -982,7 +982,7 @@ xfs_dir_node_getdents(xfs_trans_t *trans, xfs_inode_t *dp, uio_t *uio, */ for (;;) { leaf = bp->data; - if (unlikely(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) != XFS_DIR_LEAF_MAGIC)) { + if (unlikely(be16_to_cpu(leaf->hdr.info.magic) != XFS_DIR_LEAF_MAGIC)) { xfs_dir_trace_g_dul("node: not a leaf", dp, uio, leaf); xfs_da_brelse(trans, bp); XFS_CORRUPTION_ERROR("xfs_dir_node_getdents(1)", @@ -990,7 +990,7 @@ xfs_dir_node_getdents(xfs_trans_t *trans, 
xfs_inode_t *dp, uio_t *uio, return XFS_ERROR(EFSCORRUPTED); } xfs_dir_trace_g_dul("node: leaf detail", dp, uio, leaf); - if ((nextbno = INT_GET(leaf->hdr.info.forw, ARCH_CONVERT))) { + if ((nextbno = be32_to_cpu(leaf->hdr.info.forw))) { nextda = xfs_da_reada_buf(trans, dp, nextbno, XFS_DATA_FORK); } else @@ -1118,21 +1118,20 @@ void xfs_dir_trace_g_dun(char *where, xfs_inode_t *dp, uio_t *uio, xfs_da_intnode_t *node) { - int last = INT_GET(node->hdr.count, ARCH_CONVERT) - 1; + int last = be16_to_cpu(node->hdr.count) - 1; xfs_dir_trace_enter(XFS_DIR_KTRACE_G_DUN, where, (void *)dp, (void *)dp->i_mount, (void *)((unsigned long)(uio->uio_offset >> 32)), (void *)((unsigned long)(uio->uio_offset & 0xFFFFFFFF)), (void *)(unsigned long)uio->uio_resid, + (void *)(unsigned long)be32_to_cpu(node->hdr.info.forw), (void *)(unsigned long) - INT_GET(node->hdr.info.forw, ARCH_CONVERT), + be16_to_cpu(node->hdr.count), (void *)(unsigned long) - INT_GET(node->hdr.count, ARCH_CONVERT), + be32_to_cpu(node->btree[0].hashval), (void *)(unsigned long) - INT_GET(node->btree[0].hashval, ARCH_CONVERT), - (void *)(unsigned long) - INT_GET(node->btree[last].hashval, ARCH_CONVERT), + be32_to_cpu(node->btree[last].hashval), NULL, NULL, NULL); } @@ -1150,8 +1149,7 @@ xfs_dir_trace_g_dul(char *where, xfs_inode_t *dp, uio_t *uio, (void *)((unsigned long)(uio->uio_offset >> 32)), (void *)((unsigned long)(uio->uio_offset & 0xFFFFFFFF)), (void *)(unsigned long)uio->uio_resid, - (void *)(unsigned long) - INT_GET(leaf->hdr.info.forw, ARCH_CONVERT), + (void *)(unsigned long)be32_to_cpu(leaf->hdr.info.forw), (void *)(unsigned long) INT_GET(leaf->hdr.count, ARCH_CONVERT), (void *)(unsigned long) diff --git a/fs/xfs/xfs_dir2.h b/fs/xfs/xfs_dir2.h index 3158f5dc431..7dd364b1e03 100644 --- a/fs/xfs/xfs_dir2.h +++ b/fs/xfs/xfs_dir2.h @@ -55,16 +55,16 @@ typedef __uint32_t xfs_dir2_db_t; /* * Byte offset in a directory. */ -typedef xfs_off_t xfs_dir2_off_t; +typedef xfs_off_t xfs_dir2_off_t; /* * For getdents, argument struct for put routines. */ typedef int (*xfs_dir2_put_t)(struct xfs_dir2_put_args *pa); typedef struct xfs_dir2_put_args { - xfs_off_t cook; /* cookie of (next) entry */ + xfs_off_t cook; /* cookie of (next) entry */ xfs_intino_t ino; /* inode number */ - struct xfs_dirent *dbp; /* buffer pointer */ + xfs_dirent_t *dbp; /* buffer pointer */ char *name; /* directory entry name */ int namelen; /* length of name */ int done; /* output: set if value was stored */ @@ -75,18 +75,13 @@ typedef struct xfs_dir2_put_args { /* * Other interfaces used by the rest of the dir v2 code. 
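The xfs_da_btree.h hunk above retypes the on-disk xfs_da_blkinfo and node header fields from host-order integers accessed through INT_GET/INT_SET with ARCH_CONVERT to explicit __be16/__be32 fields read with be16_to_cpu()/be32_to_cpu() and written with cpu_to_be16()/cpu_to_be32(). A minimal userspace sketch of that pattern follows; the __be* typedefs and byte-swap helpers here are hand-rolled stand-ins that assume a little-endian host, not the kernel definitions.

#include <stdint.h>
#include <stdio.h>

typedef uint16_t __be16;            /* stand-in for the sparse-annotated kernel type */
typedef uint32_t __be32;

/* stand-in helpers; they assume a little-endian host, unlike the kernel versions */
static uint16_t be16_to_cpu(__be16 v) { return (uint16_t)((v >> 8) | (v << 8)); }
static __be16   cpu_to_be16(uint16_t v) { return (__be16)((v >> 8) | (v << 8)); }
static uint32_t be32_to_cpu(__be32 v)
{
	return ((v & 0x000000ffu) << 24) | ((v & 0x0000ff00u) << 8) |
	       ((v & 0x00ff0000u) >> 8)  | ((v & 0xff000000u) >> 24);
}
static __be32   cpu_to_be32(uint32_t v) { return be32_to_cpu(v); }  /* swap is its own inverse */

/* shape of the header after the conversion: every field is big-endian on disk */
struct blkinfo {
	__be32 forw;
	__be32 back;
	__be16 magic;
	__be16 pad;
};

int main(void)
{
	struct blkinfo info;

	info.forw  = cpu_to_be32(1234);         /* store in on-disk (big-endian) order */
	info.magic = cpu_to_be16(0xfebe);       /* an example magic value */
	/* read back through the accessors, never directly */
	printf("forw=%u magic=0x%x\n",
	       (unsigned)be32_to_cpu(info.forw), (unsigned)be16_to_cpu(info.magic));
	return 0;
}

Keeping the disk fields in a distinct __be* type is what lets sparse flag any access that bypasses the conversion helpers, which is the point of dropping the INT_GET/ARCH_CONVERT macros.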
*/ -extern int - xfs_dir2_grow_inode(struct xfs_da_args *args, int space, - xfs_dir2_db_t *dbp); - -extern int - xfs_dir2_isblock(struct xfs_trans *tp, struct xfs_inode *dp, int *vp); - -extern int - xfs_dir2_isleaf(struct xfs_trans *tp, struct xfs_inode *dp, int *vp); - -extern int - xfs_dir2_shrink_inode(struct xfs_da_args *args, xfs_dir2_db_t db, - struct xfs_dabuf *bp); +extern int xfs_dir2_grow_inode(struct xfs_da_args *args, int space, + xfs_dir2_db_t *dbp); +extern int xfs_dir2_isblock(struct xfs_trans *tp, struct xfs_inode *dp, + int *vp); +extern int xfs_dir2_isleaf(struct xfs_trans *tp, struct xfs_inode *dp, + int *vp); +extern int xfs_dir2_shrink_inode(struct xfs_da_args *args, xfs_dir2_db_t db, + struct xfs_dabuf *bp); #endif /* __XFS_DIR2_H__ */ diff --git a/fs/xfs/xfs_dir2_block.c b/fs/xfs/xfs_dir2_block.c index 31bc99faa70..bd5cee6aa51 100644 --- a/fs/xfs/xfs_dir2_block.c +++ b/fs/xfs/xfs_dir2_block.c @@ -81,7 +81,7 @@ xfs_dir2_block_addname( xfs_mount_t *mp; /* filesystem mount point */ int needlog; /* need to log header */ int needscan; /* need to rescan freespace */ - xfs_dir2_data_off_t *tagp; /* pointer to tag value */ + __be16 *tagp; /* pointer to tag value */ xfs_trans_t *tp; /* transaction structure */ xfs_dir2_trace_args("block_addname", args); @@ -100,8 +100,7 @@ xfs_dir2_block_addname( /* * Check the magic number, corrupted if wrong. */ - if (unlikely(INT_GET(block->hdr.magic, ARCH_CONVERT) - != XFS_DIR2_BLOCK_MAGIC)) { + if (unlikely(be32_to_cpu(block->hdr.magic) != XFS_DIR2_BLOCK_MAGIC)) { XFS_CORRUPTION_ERROR("xfs_dir2_block_addname", XFS_ERRLEVEL_LOW, mp, block); xfs_da_brelse(tp, bp); @@ -121,38 +120,38 @@ xfs_dir2_block_addname( /* * Tag just before the first leaf entry. */ - tagp = (xfs_dir2_data_off_t *)blp - 1; + tagp = (__be16 *)blp - 1; /* * Data object just before the first leaf entry. */ - enddup = (xfs_dir2_data_unused_t *)((char *)block + INT_GET(*tagp, ARCH_CONVERT)); + enddup = (xfs_dir2_data_unused_t *)((char *)block + be16_to_cpu(*tagp)); /* * If it's not free then can't do this add without cleaning up: * the space before the first leaf entry needs to be free so it * can be expanded to hold the pointer to the new entry. */ - if (INT_GET(enddup->freetag, ARCH_CONVERT) != XFS_DIR2_DATA_FREE_TAG) + if (be16_to_cpu(enddup->freetag) != XFS_DIR2_DATA_FREE_TAG) dup = enddup = NULL; /* * Check out the biggest freespace and see if it's the same one. */ else { dup = (xfs_dir2_data_unused_t *) - ((char *)block + INT_GET(bf[0].offset, ARCH_CONVERT)); + ((char *)block + be16_to_cpu(bf[0].offset)); if (dup == enddup) { /* * It is the biggest freespace, is it too small * to hold the new leaf too? */ - if (INT_GET(dup->length, ARCH_CONVERT) < len + (uint)sizeof(*blp)) { + if (be16_to_cpu(dup->length) < len + (uint)sizeof(*blp)) { /* * Yes, we use the second-largest * entry instead if it works. */ - if (INT_GET(bf[1].length, ARCH_CONVERT) >= len) + if (be16_to_cpu(bf[1].length) >= len) dup = (xfs_dir2_data_unused_t *) ((char *)block + - INT_GET(bf[1].offset, ARCH_CONVERT)); + be16_to_cpu(bf[1].offset)); else dup = NULL; } @@ -161,7 +160,7 @@ xfs_dir2_block_addname( * Not the same free entry, * just check its length. */ - if (INT_GET(dup->length, ARCH_CONVERT) < len) { + if (be16_to_cpu(dup->length) < len) { dup = NULL; } } @@ -172,9 +171,9 @@ xfs_dir2_block_addname( * If there are stale entries we'll use one for the leaf. * Is the biggest entry enough to avoid compaction? 
*/ - else if (INT_GET(bf[0].length, ARCH_CONVERT) >= len) { + else if (be16_to_cpu(bf[0].length) >= len) { dup = (xfs_dir2_data_unused_t *) - ((char *)block + INT_GET(bf[0].offset, ARCH_CONVERT)); + ((char *)block + be16_to_cpu(bf[0].offset)); compact = 0; } /* @@ -184,20 +183,20 @@ xfs_dir2_block_addname( /* * Tag just before the first leaf entry. */ - tagp = (xfs_dir2_data_off_t *)blp - 1; + tagp = (__be16 *)blp - 1; /* * Data object just before the first leaf entry. */ - dup = (xfs_dir2_data_unused_t *)((char *)block + INT_GET(*tagp, ARCH_CONVERT)); + dup = (xfs_dir2_data_unused_t *)((char *)block + be16_to_cpu(*tagp)); /* * If it's not free then the data will go where the * leaf data starts now, if it works at all. */ - if (INT_GET(dup->freetag, ARCH_CONVERT) == XFS_DIR2_DATA_FREE_TAG) { - if (INT_GET(dup->length, ARCH_CONVERT) + (INT_GET(btp->stale, ARCH_CONVERT) - 1) * + if (be16_to_cpu(dup->freetag) == XFS_DIR2_DATA_FREE_TAG) { + if (be16_to_cpu(dup->length) + (be32_to_cpu(btp->stale) - 1) * (uint)sizeof(*blp) < len) dup = NULL; - } else if ((INT_GET(btp->stale, ARCH_CONVERT) - 1) * (uint)sizeof(*blp) < len) + } else if ((be32_to_cpu(btp->stale) - 1) * (uint)sizeof(*blp) < len) dup = NULL; else dup = (xfs_dir2_data_unused_t *)blp; @@ -243,11 +242,11 @@ xfs_dir2_block_addname( int fromidx; /* source leaf index */ int toidx; /* target leaf index */ - for (fromidx = toidx = INT_GET(btp->count, ARCH_CONVERT) - 1, + for (fromidx = toidx = be32_to_cpu(btp->count) - 1, highstale = lfloghigh = -1; fromidx >= 0; fromidx--) { - if (INT_GET(blp[fromidx].address, ARCH_CONVERT) == XFS_DIR2_NULL_DATAPTR) { + if (be32_to_cpu(blp[fromidx].address) == XFS_DIR2_NULL_DATAPTR) { if (highstale == -1) highstale = toidx; else { @@ -260,15 +259,15 @@ xfs_dir2_block_addname( blp[toidx] = blp[fromidx]; toidx--; } - lfloglow = toidx + 1 - (INT_GET(btp->stale, ARCH_CONVERT) - 1); - lfloghigh -= INT_GET(btp->stale, ARCH_CONVERT) - 1; - INT_MOD(btp->count, ARCH_CONVERT, -(INT_GET(btp->stale, ARCH_CONVERT) - 1)); + lfloglow = toidx + 1 - (be32_to_cpu(btp->stale) - 1); + lfloghigh -= be32_to_cpu(btp->stale) - 1; + be32_add(&btp->count, -(be32_to_cpu(btp->stale) - 1)); xfs_dir2_data_make_free(tp, bp, (xfs_dir2_data_aoff_t)((char *)blp - (char *)block), - (xfs_dir2_data_aoff_t)((INT_GET(btp->stale, ARCH_CONVERT) - 1) * sizeof(*blp)), + (xfs_dir2_data_aoff_t)((be32_to_cpu(btp->stale) - 1) * sizeof(*blp)), &needlog, &needscan); - blp += INT_GET(btp->stale, ARCH_CONVERT) - 1; - INT_SET(btp->stale, ARCH_CONVERT, 1); + blp += be32_to_cpu(btp->stale) - 1; + btp->stale = cpu_to_be32(1); /* * If we now need to rebuild the bestfree map, do so. * This needs to happen before the next call to use_free. @@ -283,23 +282,23 @@ xfs_dir2_block_addname( * Set leaf logging boundaries to impossible state. * For the no-stale case they're set explicitly. */ - else if (INT_GET(btp->stale, ARCH_CONVERT)) { - lfloglow = INT_GET(btp->count, ARCH_CONVERT); + else if (btp->stale) { + lfloglow = be32_to_cpu(btp->count); lfloghigh = -1; } /* * Find the slot that's first lower than our hash value, -1 if none. 
*/ - for (low = 0, high = INT_GET(btp->count, ARCH_CONVERT) - 1; low <= high; ) { + for (low = 0, high = be32_to_cpu(btp->count) - 1; low <= high; ) { mid = (low + high) >> 1; - if ((hash = INT_GET(blp[mid].hashval, ARCH_CONVERT)) == args->hashval) + if ((hash = be32_to_cpu(blp[mid].hashval)) == args->hashval) break; if (hash < args->hashval) low = mid + 1; else high = mid - 1; } - while (mid >= 0 && INT_GET(blp[mid].hashval, ARCH_CONVERT) >= args->hashval) { + while (mid >= 0 && be32_to_cpu(blp[mid].hashval) >= args->hashval) { mid--; } /* @@ -311,14 +310,14 @@ xfs_dir2_block_addname( */ xfs_dir2_data_use_free(tp, bp, enddup, (xfs_dir2_data_aoff_t) - ((char *)enddup - (char *)block + INT_GET(enddup->length, ARCH_CONVERT) - + ((char *)enddup - (char *)block + be16_to_cpu(enddup->length) - sizeof(*blp)), (xfs_dir2_data_aoff_t)sizeof(*blp), &needlog, &needscan); /* * Update the tail (entry count). */ - INT_MOD(btp->count, ARCH_CONVERT, +1); + be32_add(&btp->count, 1); /* * If we now need to rebuild the bestfree map, do so. * This needs to happen before the next call to use_free. @@ -346,12 +345,12 @@ xfs_dir2_block_addname( else { for (lowstale = mid; lowstale >= 0 && - INT_GET(blp[lowstale].address, ARCH_CONVERT) != XFS_DIR2_NULL_DATAPTR; + be32_to_cpu(blp[lowstale].address) != XFS_DIR2_NULL_DATAPTR; lowstale--) continue; for (highstale = mid + 1; - highstale < INT_GET(btp->count, ARCH_CONVERT) && - INT_GET(blp[highstale].address, ARCH_CONVERT) != XFS_DIR2_NULL_DATAPTR && + highstale < be32_to_cpu(btp->count) && + be32_to_cpu(blp[highstale].address) != XFS_DIR2_NULL_DATAPTR && (lowstale < 0 || mid - lowstale > highstale - mid); highstale++) continue; @@ -359,7 +358,7 @@ xfs_dir2_block_addname( * Move entries toward the low-numbered stale entry. */ if (lowstale >= 0 && - (highstale == INT_GET(btp->count, ARCH_CONVERT) || + (highstale == be32_to_cpu(btp->count) || mid - lowstale <= highstale - mid)) { if (mid - lowstale) memmove(&blp[lowstale], &blp[lowstale + 1], @@ -371,7 +370,7 @@ xfs_dir2_block_addname( * Move entries toward the high-numbered stale entry. */ else { - ASSERT(highstale < INT_GET(btp->count, ARCH_CONVERT)); + ASSERT(highstale < be32_to_cpu(btp->count)); mid++; if (highstale - mid) memmove(&blp[mid + 1], &blp[mid], @@ -379,7 +378,7 @@ xfs_dir2_block_addname( lfloglow = MIN(mid, lfloglow); lfloghigh = MAX(highstale, lfloghigh); } - INT_MOD(btp->stale, ARCH_CONVERT, -1); + be32_add(&btp->stale, -1); } /* * Point to the new data entry. @@ -388,8 +387,9 @@ xfs_dir2_block_addname( /* * Fill in the leaf entry. */ - INT_SET(blp[mid].hashval, ARCH_CONVERT, args->hashval); - INT_SET(blp[mid].address, ARCH_CONVERT, XFS_DIR2_BYTE_TO_DATAPTR(mp, (char *)dep - (char *)block)); + blp[mid].hashval = cpu_to_be32(args->hashval); + blp[mid].address = cpu_to_be32(XFS_DIR2_BYTE_TO_DATAPTR(mp, + (char *)dep - (char *)block)); xfs_dir2_block_log_leaf(tp, bp, lfloglow, lfloghigh); /* * Mark space for the data entry used. @@ -404,7 +404,7 @@ xfs_dir2_block_addname( dep->namelen = args->namelen; memcpy(dep->name, args->name, args->namelen); tagp = XFS_DIR2_DATA_ENTRY_TAG_P(dep); - INT_SET(*tagp, ARCH_CONVERT, (xfs_dir2_data_off_t)((char *)dep - (char *)block)); + *tagp = cpu_to_be16((char *)dep - (char *)block); /* * Clean up the bestfree array and log the header, tail, and entry. */ @@ -485,8 +485,8 @@ xfs_dir2_block_getdents( /* * Unused, skip it. 
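The binary search in xfs_dir2_block_addname() above now decodes each probed hashval with be32_to_cpu() before comparing it against the CPU-order args->hashval, and then backs up to the first entry carrying that hash. A standalone sketch of that search over big-endian hash values, again using stand-in types and helpers rather than the kernel ones:

#include <stdint.h>
#include <stdio.h>

typedef uint32_t __be32;                       /* stand-in type */

static uint32_t be32_to_cpu(__be32 v)          /* stand-in, little-endian host assumed */
{
	return ((v & 0x000000ffu) << 24) | ((v & 0x0000ff00u) << 8) |
	       ((v & 0x00ff0000u) >> 8)  | ((v & 0xff000000u) >> 24);
}
static __be32 cpu_to_be32(uint32_t v) { return be32_to_cpu(v); }

struct leaf_entry {                            /* simplified leaf entry: hash only */
	__be32 hashval;
};

/* index of the first entry whose decoded hash is >= want, or count if none */
static int hash_lower_bound(const struct leaf_entry *blp, int count, uint32_t want)
{
	int low = 0, high = count - 1, mid = 0;

	while (low <= high) {
		mid = (low + high) >> 1;
		uint32_t hash = be32_to_cpu(blp[mid].hashval);  /* convert on every probe */
		if (hash == want)
			break;
		if (hash < want)
			low = mid + 1;
		else
			high = mid - 1;
	}
	/* step back to the first duplicate, then forward past smaller hashes */
	while (mid > 0 && be32_to_cpu(blp[mid - 1].hashval) >= want)
		mid--;
	while (mid < count && be32_to_cpu(blp[mid].hashval) < want)
		mid++;
	return mid;
}

int main(void)
{
	struct leaf_entry ents[4];
	uint32_t vals[4] = { 10, 20, 20, 30 };
	for (int i = 0; i < 4; i++)
		ents[i].hashval = cpu_to_be32(vals[i]);   /* stored big-endian, as on disk */
	printf("first >= 20 at index %d\n", hash_lower_bound(ents, 4, 20));
	return 0;
}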
*/ - if (INT_GET(dup->freetag, ARCH_CONVERT) == XFS_DIR2_DATA_FREE_TAG) { - ptr += INT_GET(dup->length, ARCH_CONVERT); + if (be16_to_cpu(dup->freetag) == XFS_DIR2_DATA_FREE_TAG) { + ptr += be16_to_cpu(dup->length); continue; } @@ -622,7 +622,7 @@ xfs_dir2_block_lookup( * Get the offset from the leaf entry, to point to the data. */ dep = (xfs_dir2_data_entry_t *) - ((char *)block + XFS_DIR2_DATAPTR_TO_OFF(mp, INT_GET(blp[ent].address, ARCH_CONVERT))); + ((char *)block + XFS_DIR2_DATAPTR_TO_OFF(mp, be32_to_cpu(blp[ent].address))); /* * Fill in inode number, release the block. */ @@ -674,10 +674,10 @@ xfs_dir2_block_lookup_int( * Loop doing a binary search for our hash value. * Find our entry, ENOENT if it's not there. */ - for (low = 0, high = INT_GET(btp->count, ARCH_CONVERT) - 1; ; ) { + for (low = 0, high = be32_to_cpu(btp->count) - 1; ; ) { ASSERT(low <= high); mid = (low + high) >> 1; - if ((hash = INT_GET(blp[mid].hashval, ARCH_CONVERT)) == args->hashval) + if ((hash = be32_to_cpu(blp[mid].hashval)) == args->hashval) break; if (hash < args->hashval) low = mid + 1; @@ -692,7 +692,7 @@ xfs_dir2_block_lookup_int( /* * Back up to the first one with the right hash value. */ - while (mid > 0 && INT_GET(blp[mid - 1].hashval, ARCH_CONVERT) == args->hashval) { + while (mid > 0 && be32_to_cpu(blp[mid - 1].hashval) == args->hashval) { mid--; } /* @@ -700,7 +700,7 @@ xfs_dir2_block_lookup_int( * right hash value looking for our name. */ do { - if ((addr = INT_GET(blp[mid].address, ARCH_CONVERT)) == XFS_DIR2_NULL_DATAPTR) + if ((addr = be32_to_cpu(blp[mid].address)) == XFS_DIR2_NULL_DATAPTR) continue; /* * Get pointer to the entry from the leaf. @@ -717,7 +717,7 @@ xfs_dir2_block_lookup_int( *entno = mid; return 0; } - } while (++mid < INT_GET(btp->count, ARCH_CONVERT) && INT_GET(blp[mid].hashval, ARCH_CONVERT) == hash); + } while (++mid < be32_to_cpu(btp->count) && be32_to_cpu(blp[mid].hashval) == hash); /* * No match, release the buffer and return ENOENT. */ @@ -767,7 +767,7 @@ xfs_dir2_block_removename( * Point to the data entry using the leaf entry. */ dep = (xfs_dir2_data_entry_t *) - ((char *)block + XFS_DIR2_DATAPTR_TO_OFF(mp, INT_GET(blp[ent].address, ARCH_CONVERT))); + ((char *)block + XFS_DIR2_DATAPTR_TO_OFF(mp, be32_to_cpu(blp[ent].address))); /* * Mark the data entry's space free. */ @@ -778,12 +778,12 @@ xfs_dir2_block_removename( /* * Fix up the block tail. */ - INT_MOD(btp->stale, ARCH_CONVERT, +1); + be32_add(&btp->stale, 1); xfs_dir2_block_log_tail(tp, bp); /* * Remove the leaf entry by marking it stale. */ - INT_SET(blp[ent].address, ARCH_CONVERT, XFS_DIR2_NULL_DATAPTR); + blp[ent].address = cpu_to_be32(XFS_DIR2_NULL_DATAPTR); xfs_dir2_block_log_leaf(tp, bp, ent, ent); /* * Fix up bestfree, log the header if necessary. @@ -843,7 +843,7 @@ xfs_dir2_block_replace( * Point to the data entry we need to change. */ dep = (xfs_dir2_data_entry_t *) - ((char *)block + XFS_DIR2_DATAPTR_TO_OFF(mp, INT_GET(blp[ent].address, ARCH_CONVERT))); + ((char *)block + XFS_DIR2_DATAPTR_TO_OFF(mp, be32_to_cpu(blp[ent].address))); ASSERT(INT_GET(dep->inumber, ARCH_CONVERT) != args->inumber); /* * Change the inode number to the new value. @@ -868,8 +868,8 @@ xfs_dir2_block_sort( la = a; lb = b; - return INT_GET(la->hashval, ARCH_CONVERT) < INT_GET(lb->hashval, ARCH_CONVERT) ? -1 : - (INT_GET(la->hashval, ARCH_CONVERT) > INT_GET(lb->hashval, ARCH_CONVERT) ? 1 : 0); + return be32_to_cpu(la->hashval) < be32_to_cpu(lb->hashval) ? -1 : + (be32_to_cpu(la->hashval) > be32_to_cpu(lb->hashval) ? 
1 : 0); } /* @@ -881,7 +881,7 @@ xfs_dir2_leaf_to_block( xfs_dabuf_t *lbp, /* leaf buffer */ xfs_dabuf_t *dbp) /* data buffer */ { - xfs_dir2_data_off_t *bestsp; /* leaf bests table */ + __be16 *bestsp; /* leaf bests table */ xfs_dir2_block_t *block; /* block structure */ xfs_dir2_block_tail_t *btp; /* block tail */ xfs_inode_t *dp; /* incore directory inode */ @@ -896,7 +896,7 @@ xfs_dir2_leaf_to_block( int needscan; /* need to scan for bestfree */ xfs_dir2_sf_hdr_t sfh; /* shortform header */ int size; /* bytes used */ - xfs_dir2_data_off_t *tagp; /* end of entry (tag) */ + __be16 *tagp; /* end of entry (tag) */ int to; /* block/leaf to index */ xfs_trans_t *tp; /* transaction pointer */ @@ -905,7 +905,7 @@ xfs_dir2_leaf_to_block( tp = args->trans; mp = dp->i_mount; leaf = lbp->data; - ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) == XFS_DIR2_LEAF1_MAGIC); + ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_DIR2_LEAF1_MAGIC); ltp = XFS_DIR2_LEAF_TAIL_P(mp, leaf); /* * If there are data blocks other than the first one, take this @@ -915,11 +915,11 @@ xfs_dir2_leaf_to_block( */ while (dp->i_d.di_size > mp->m_dirblksize) { bestsp = XFS_DIR2_LEAF_BESTS_P(ltp); - if (INT_GET(bestsp[INT_GET(ltp->bestcount, ARCH_CONVERT) - 1], ARCH_CONVERT) == + if (be16_to_cpu(bestsp[be32_to_cpu(ltp->bestcount) - 1]) == mp->m_dirblksize - (uint)sizeof(block->hdr)) { if ((error = xfs_dir2_leaf_trim_data(args, lbp, - (xfs_dir2_db_t)(INT_GET(ltp->bestcount, ARCH_CONVERT) - 1)))) + (xfs_dir2_db_t)(be32_to_cpu(ltp->bestcount) - 1)))) goto out; } else { error = 0; @@ -935,28 +935,29 @@ xfs_dir2_leaf_to_block( goto out; } block = dbp->data; - ASSERT(INT_GET(block->hdr.magic, ARCH_CONVERT) == XFS_DIR2_DATA_MAGIC); + ASSERT(be32_to_cpu(block->hdr.magic) == XFS_DIR2_DATA_MAGIC); /* * Size of the "leaf" area in the block. */ size = (uint)sizeof(block->tail) + - (uint)sizeof(*lep) * (INT_GET(leaf->hdr.count, ARCH_CONVERT) - INT_GET(leaf->hdr.stale, ARCH_CONVERT)); + (uint)sizeof(*lep) * (be16_to_cpu(leaf->hdr.count) - be16_to_cpu(leaf->hdr.stale)); /* * Look at the last data entry. */ - tagp = (xfs_dir2_data_off_t *)((char *)block + mp->m_dirblksize) - 1; - dup = (xfs_dir2_data_unused_t *)((char *)block + INT_GET(*tagp, ARCH_CONVERT)); + tagp = (__be16 *)((char *)block + mp->m_dirblksize) - 1; + dup = (xfs_dir2_data_unused_t *)((char *)block + be16_to_cpu(*tagp)); /* * If it's not free or is too short we can't do it. */ - if (INT_GET(dup->freetag, ARCH_CONVERT) != XFS_DIR2_DATA_FREE_TAG || INT_GET(dup->length, ARCH_CONVERT) < size) { + if (be16_to_cpu(dup->freetag) != XFS_DIR2_DATA_FREE_TAG || + be16_to_cpu(dup->length) < size) { error = 0; goto out; } /* * Start converting it to block form. */ - INT_SET(block->hdr.magic, ARCH_CONVERT, XFS_DIR2_BLOCK_MAGIC); + block->hdr.magic = cpu_to_be32(XFS_DIR2_BLOCK_MAGIC); needlog = 1; needscan = 0; /* @@ -968,20 +969,20 @@ xfs_dir2_leaf_to_block( * Initialize the block tail. */ btp = XFS_DIR2_BLOCK_TAIL_P(mp, block); - INT_SET(btp->count, ARCH_CONVERT, INT_GET(leaf->hdr.count, ARCH_CONVERT) - INT_GET(leaf->hdr.stale, ARCH_CONVERT)); + btp->count = cpu_to_be32(be16_to_cpu(leaf->hdr.count) - be16_to_cpu(leaf->hdr.stale)); btp->stale = 0; xfs_dir2_block_log_tail(tp, dbp); /* * Initialize the block leaf area. We compact out stale entries. 
*/ lep = XFS_DIR2_BLOCK_LEAF_P(btp); - for (from = to = 0; from < INT_GET(leaf->hdr.count, ARCH_CONVERT); from++) { - if (INT_GET(leaf->ents[from].address, ARCH_CONVERT) == XFS_DIR2_NULL_DATAPTR) + for (from = to = 0; from < be16_to_cpu(leaf->hdr.count); from++) { + if (be32_to_cpu(leaf->ents[from].address) == XFS_DIR2_NULL_DATAPTR) continue; lep[to++] = leaf->ents[from]; } - ASSERT(to == INT_GET(btp->count, ARCH_CONVERT)); - xfs_dir2_block_log_leaf(tp, dbp, 0, INT_GET(btp->count, ARCH_CONVERT) - 1); + ASSERT(to == be32_to_cpu(btp->count)); + xfs_dir2_block_log_leaf(tp, dbp, 0, be32_to_cpu(btp->count) - 1); /* * Scan the bestfree if we need it and log the data block header. */ @@ -1043,7 +1044,7 @@ xfs_dir2_sf_to_block( int offset; /* target block offset */ xfs_dir2_sf_entry_t *sfep; /* sf entry pointer */ xfs_dir2_sf_t *sfp; /* shortform structure */ - xfs_dir2_data_off_t *tagp; /* end of data entry */ + __be16 *tagp; /* end of data entry */ xfs_trans_t *tp; /* transaction pointer */ xfs_dir2_trace_args("sf_to_block", args); @@ -1095,12 +1096,12 @@ xfs_dir2_sf_to_block( return error; } block = bp->data; - INT_SET(block->hdr.magic, ARCH_CONVERT, XFS_DIR2_BLOCK_MAGIC); + block->hdr.magic = cpu_to_be32(XFS_DIR2_BLOCK_MAGIC); /* * Compute size of block "tail" area. */ i = (uint)sizeof(*btp) + - (INT_GET(sfp->hdr.count, ARCH_CONVERT) + 2) * (uint)sizeof(xfs_dir2_leaf_entry_t); + (sfp->hdr.count + 2) * (uint)sizeof(xfs_dir2_leaf_entry_t); /* * The whole thing is initialized to free by the init routine. * Say we're using the leaf and tail area. @@ -1114,7 +1115,7 @@ xfs_dir2_sf_to_block( * Fill in the tail. */ btp = XFS_DIR2_BLOCK_TAIL_P(mp, block); - INT_SET(btp->count, ARCH_CONVERT, INT_GET(sfp->hdr.count, ARCH_CONVERT) + 2); /* ., .. */ + btp->count = cpu_to_be32(sfp->hdr.count + 2); /* ., .. */ btp->stale = 0; blp = XFS_DIR2_BLOCK_LEAF_P(btp); endoffset = (uint)((char *)blp - (char *)block); @@ -1123,7 +1124,7 @@ xfs_dir2_sf_to_block( */ xfs_dir2_data_use_free(tp, bp, dup, (xfs_dir2_data_aoff_t)((char *)dup - (char *)block), - INT_GET(dup->length, ARCH_CONVERT), &needlog, &needscan); + be16_to_cpu(dup->length), &needlog, &needscan); /* * Create entry for . */ @@ -1133,10 +1134,11 @@ xfs_dir2_sf_to_block( dep->namelen = 1; dep->name[0] = '.'; tagp = XFS_DIR2_DATA_ENTRY_TAG_P(dep); - INT_SET(*tagp, ARCH_CONVERT, (xfs_dir2_data_off_t)((char *)dep - (char *)block)); + *tagp = cpu_to_be16((char *)dep - (char *)block); xfs_dir2_data_log_entry(tp, bp, dep); - INT_SET(blp[0].hashval, ARCH_CONVERT, xfs_dir_hash_dot); - INT_SET(blp[0].address, ARCH_CONVERT, XFS_DIR2_BYTE_TO_DATAPTR(mp, (char *)dep - (char *)block)); + blp[0].hashval = cpu_to_be32(xfs_dir_hash_dot); + blp[0].address = cpu_to_be32(XFS_DIR2_BYTE_TO_DATAPTR(mp, + (char *)dep - (char *)block)); /* * Create entry for .. */ @@ -1146,15 +1148,16 @@ xfs_dir2_sf_to_block( dep->namelen = 2; dep->name[0] = dep->name[1] = '.'; tagp = XFS_DIR2_DATA_ENTRY_TAG_P(dep); - INT_SET(*tagp, ARCH_CONVERT, (xfs_dir2_data_off_t)((char *)dep - (char *)block)); + *tagp = cpu_to_be16((char *)dep - (char *)block); xfs_dir2_data_log_entry(tp, bp, dep); - INT_SET(blp[1].hashval, ARCH_CONVERT, xfs_dir_hash_dotdot); - INT_SET(blp[1].address, ARCH_CONVERT, XFS_DIR2_BYTE_TO_DATAPTR(mp, (char *)dep - (char *)block)); + blp[1].hashval = cpu_to_be32(xfs_dir_hash_dotdot); + blp[1].address = cpu_to_be32(XFS_DIR2_BYTE_TO_DATAPTR(mp, + (char *)dep - (char *)block)); offset = XFS_DIR2_DATA_FIRST_OFFSET; /* * Loop over existing entries, stuff them in. 
*/ - if ((i = 0) == INT_GET(sfp->hdr.count, ARCH_CONVERT)) + if ((i = 0) == sfp->hdr.count) sfep = NULL; else sfep = XFS_DIR2_SF_FIRSTENTRY(sfp); @@ -1176,15 +1179,14 @@ xfs_dir2_sf_to_block( if (offset < newoffset) { dup = (xfs_dir2_data_unused_t *) ((char *)block + offset); - INT_SET(dup->freetag, ARCH_CONVERT, XFS_DIR2_DATA_FREE_TAG); - INT_SET(dup->length, ARCH_CONVERT, newoffset - offset); - INT_SET(*XFS_DIR2_DATA_UNUSED_TAG_P(dup), ARCH_CONVERT, - (xfs_dir2_data_off_t) + dup->freetag = cpu_to_be16(XFS_DIR2_DATA_FREE_TAG); + dup->length = cpu_to_be16(newoffset - offset); + *XFS_DIR2_DATA_UNUSED_TAG_P(dup) = cpu_to_be16( ((char *)dup - (char *)block)); xfs_dir2_data_log_unused(tp, bp, dup); (void)xfs_dir2_data_freeinsert((xfs_dir2_data_t *)block, dup, &dummy); - offset += INT_GET(dup->length, ARCH_CONVERT); + offset += be16_to_cpu(dup->length); continue; } /* @@ -1196,13 +1198,14 @@ xfs_dir2_sf_to_block( dep->namelen = sfep->namelen; memcpy(dep->name, sfep->name, dep->namelen); tagp = XFS_DIR2_DATA_ENTRY_TAG_P(dep); - INT_SET(*tagp, ARCH_CONVERT, (xfs_dir2_data_off_t)((char *)dep - (char *)block)); + *tagp = cpu_to_be16((char *)dep - (char *)block); xfs_dir2_data_log_entry(tp, bp, dep); - INT_SET(blp[2 + i].hashval, ARCH_CONVERT, xfs_da_hashname((char *)sfep->name, sfep->namelen)); - INT_SET(blp[2 + i].address, ARCH_CONVERT, XFS_DIR2_BYTE_TO_DATAPTR(mp, + blp[2 + i].hashval = cpu_to_be32(xfs_da_hashname( + (char *)sfep->name, sfep->namelen)); + blp[2 + i].address = cpu_to_be32(XFS_DIR2_BYTE_TO_DATAPTR(mp, (char *)dep - (char *)block)); offset = (int)((char *)(tagp + 1) - (char *)block); - if (++i == INT_GET(sfp->hdr.count, ARCH_CONVERT)) + if (++i == sfp->hdr.count) sfep = NULL; else sfep = XFS_DIR2_SF_NEXTENTRY(sfp, sfep); @@ -1212,13 +1215,13 @@ xfs_dir2_sf_to_block( /* * Sort the leaf entries by hash value. */ - xfs_sort(blp, INT_GET(btp->count, ARCH_CONVERT), sizeof(*blp), xfs_dir2_block_sort); + xfs_sort(blp, be32_to_cpu(btp->count), sizeof(*blp), xfs_dir2_block_sort); /* * Log the leaf entry area and tail. * Already logged the header in data_init, ignore needlog. 
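In the sf_to_block conversion above, every data entry ends with a two-byte tag recording the entry's own offset within the block, now written as *tagp = cpu_to_be16((char *)dep - (char *)block). A small illustration of that tag layout; the entry size here is an assumed constant for the example only, whereas the real code derives the size and tag position from XFS_DIR2_DATA_ENTSIZE and XFS_DIR2_DATA_ENTRY_TAG_P:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

typedef uint16_t __be16;                               /* stand-in type */
static uint16_t be16_to_cpu(__be16 v) { return (uint16_t)((v >> 8) | (v << 8)); }
static __be16   cpu_to_be16(uint16_t v) { return (__be16)((v >> 8) | (v << 8)); }

/* toy directory block: a flat byte array standing in for the on-disk buffer */
static unsigned char block[512];

#define ENTRY_SIZE 16                                  /* assumed size, illustration only */

static __be16 *entry_tag_p(unsigned char *entry)
{
	/* the tag occupies the last two bytes of the entry, as in the real format */
	return (__be16 *)(entry + ENTRY_SIZE - sizeof(__be16));
}

int main(void)
{
	unsigned char *dep = block + 64;               /* pretend the entry starts at offset 64 */

	memcpy(dep, "example", 8);                     /* payload bytes */
	*entry_tag_p(dep) = cpu_to_be16((uint16_t)(dep - block));   /* store own offset */

	printf("tag points back to offset %u\n", (unsigned)be16_to_cpu(*entry_tag_p(dep)));
	return 0;
}

The tag lets code that walks the block backwards (as in xfs_dir2_data_make_free above) locate the start of the previous entry from its end.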
*/ ASSERT(needscan == 0); - xfs_dir2_block_log_leaf(tp, bp, 0, INT_GET(btp->count, ARCH_CONVERT) - 1); + xfs_dir2_block_log_leaf(tp, bp, 0, be32_to_cpu(btp->count) - 1); xfs_dir2_block_log_tail(tp, bp); xfs_dir2_data_check(dp, bp); xfs_da_buf_done(bp); diff --git a/fs/xfs/xfs_dir2_block.h b/fs/xfs/xfs_dir2_block.h index a2e5cb98a83..6722effd0b2 100644 --- a/fs/xfs/xfs_dir2_block.h +++ b/fs/xfs/xfs_dir2_block.h @@ -43,8 +43,8 @@ struct xfs_trans; #define XFS_DIR2_BLOCK_MAGIC 0x58443242 /* XD2B: for one block dirs */ typedef struct xfs_dir2_block_tail { - __uint32_t count; /* count of leaf entries */ - __uint32_t stale; /* count of stale lf entries */ + __be32 count; /* count of leaf entries */ + __be32 stale; /* count of stale lf entries */ } xfs_dir2_block_tail_t; /* @@ -75,8 +75,7 @@ xfs_dir2_block_tail_p(struct xfs_mount *mp, xfs_dir2_block_t *block) static inline struct xfs_dir2_leaf_entry * xfs_dir2_block_leaf_p(xfs_dir2_block_tail_t *btp) { - return (((struct xfs_dir2_leaf_entry *) - (btp)) - INT_GET((btp)->count, ARCH_CONVERT)); + return ((struct xfs_dir2_leaf_entry *)btp) - be32_to_cpu(btp->count); } /* diff --git a/fs/xfs/xfs_dir2_data.c b/fs/xfs/xfs_dir2_data.c index 5b7c47e2f14..bb3d03ff002 100644 --- a/fs/xfs/xfs_dir2_data.c +++ b/fs/xfs/xfs_dir2_data.c @@ -70,11 +70,11 @@ xfs_dir2_data_check( mp = dp->i_mount; d = bp->data; - ASSERT(INT_GET(d->hdr.magic, ARCH_CONVERT) == XFS_DIR2_DATA_MAGIC || - INT_GET(d->hdr.magic, ARCH_CONVERT) == XFS_DIR2_BLOCK_MAGIC); + ASSERT(be32_to_cpu(d->hdr.magic) == XFS_DIR2_DATA_MAGIC || + be32_to_cpu(d->hdr.magic) == XFS_DIR2_BLOCK_MAGIC); bf = d->hdr.bestfree; p = (char *)d->u; - if (INT_GET(d->hdr.magic, ARCH_CONVERT) == XFS_DIR2_BLOCK_MAGIC) { + if (be32_to_cpu(d->hdr.magic) == XFS_DIR2_BLOCK_MAGIC) { btp = XFS_DIR2_BLOCK_TAIL_P(mp, (xfs_dir2_block_t *)d); lep = XFS_DIR2_BLOCK_LEAF_P(btp); endp = (char *)lep; @@ -96,8 +96,8 @@ xfs_dir2_data_check( ASSERT(!bf[2].offset); freeseen |= 1 << 2; } - ASSERT(INT_GET(bf[0].length, ARCH_CONVERT) >= INT_GET(bf[1].length, ARCH_CONVERT)); - ASSERT(INT_GET(bf[1].length, ARCH_CONVERT) >= INT_GET(bf[2].length, ARCH_CONVERT)); + ASSERT(be16_to_cpu(bf[0].length) >= be16_to_cpu(bf[1].length)); + ASSERT(be16_to_cpu(bf[1].length) >= be16_to_cpu(bf[2].length)); /* * Loop over the data/unused entries. */ @@ -108,18 +108,20 @@ xfs_dir2_data_check( * If we find it, account for that, else make sure it * doesn't need to be there. 
*/ - if (INT_GET(dup->freetag, ARCH_CONVERT) == XFS_DIR2_DATA_FREE_TAG) { + if (be16_to_cpu(dup->freetag) == XFS_DIR2_DATA_FREE_TAG) { ASSERT(lastfree == 0); - ASSERT(INT_GET(*XFS_DIR2_DATA_UNUSED_TAG_P(dup), ARCH_CONVERT) == + ASSERT(be16_to_cpu(*XFS_DIR2_DATA_UNUSED_TAG_P(dup)) == (char *)dup - (char *)d); dfp = xfs_dir2_data_freefind(d, dup); if (dfp) { i = (int)(dfp - bf); ASSERT((freeseen & (1 << i)) == 0); freeseen |= 1 << i; - } else - ASSERT(INT_GET(dup->length, ARCH_CONVERT) <= INT_GET(bf[2].length, ARCH_CONVERT)); - p += INT_GET(dup->length, ARCH_CONVERT); + } else { + ASSERT(be16_to_cpu(dup->length) <= + be16_to_cpu(bf[2].length)); + } + p += be16_to_cpu(dup->length); lastfree = 1; continue; } @@ -132,21 +134,21 @@ xfs_dir2_data_check( dep = (xfs_dir2_data_entry_t *)p; ASSERT(dep->namelen != 0); ASSERT(xfs_dir_ino_validate(mp, INT_GET(dep->inumber, ARCH_CONVERT)) == 0); - ASSERT(INT_GET(*XFS_DIR2_DATA_ENTRY_TAG_P(dep), ARCH_CONVERT) == + ASSERT(be16_to_cpu(*XFS_DIR2_DATA_ENTRY_TAG_P(dep)) == (char *)dep - (char *)d); count++; lastfree = 0; - if (INT_GET(d->hdr.magic, ARCH_CONVERT) == XFS_DIR2_BLOCK_MAGIC) { + if (be32_to_cpu(d->hdr.magic) == XFS_DIR2_BLOCK_MAGIC) { addr = XFS_DIR2_DB_OFF_TO_DATAPTR(mp, mp->m_dirdatablk, (xfs_dir2_data_aoff_t) ((char *)dep - (char *)d)); hash = xfs_da_hashname((char *)dep->name, dep->namelen); - for (i = 0; i < INT_GET(btp->count, ARCH_CONVERT); i++) { - if (INT_GET(lep[i].address, ARCH_CONVERT) == addr && - INT_GET(lep[i].hashval, ARCH_CONVERT) == hash) + for (i = 0; i < be32_to_cpu(btp->count); i++) { + if (be32_to_cpu(lep[i].address) == addr && + be32_to_cpu(lep[i].hashval) == hash) break; } - ASSERT(i < INT_GET(btp->count, ARCH_CONVERT)); + ASSERT(i < be32_to_cpu(btp->count)); } p += XFS_DIR2_DATA_ENTSIZE(dep->namelen); } @@ -154,15 +156,15 @@ xfs_dir2_data_check( * Need to have seen all the entries and all the bestfree slots. */ ASSERT(freeseen == 7); - if (INT_GET(d->hdr.magic, ARCH_CONVERT) == XFS_DIR2_BLOCK_MAGIC) { - for (i = stale = 0; i < INT_GET(btp->count, ARCH_CONVERT); i++) { - if (INT_GET(lep[i].address, ARCH_CONVERT) == XFS_DIR2_NULL_DATAPTR) + if (be32_to_cpu(d->hdr.magic) == XFS_DIR2_BLOCK_MAGIC) { + for (i = stale = 0; i < be32_to_cpu(btp->count); i++) { + if (be32_to_cpu(lep[i].address) == XFS_DIR2_NULL_DATAPTR) stale++; if (i > 0) - ASSERT(INT_GET(lep[i].hashval, ARCH_CONVERT) >= INT_GET(lep[i - 1].hashval, ARCH_CONVERT)); + ASSERT(be32_to_cpu(lep[i].hashval) >= be32_to_cpu(lep[i - 1].hashval)); } - ASSERT(count == INT_GET(btp->count, ARCH_CONVERT) - INT_GET(btp->stale, ARCH_CONVERT)); - ASSERT(stale == INT_GET(btp->stale, ARCH_CONVERT)); + ASSERT(count == be32_to_cpu(btp->count) - be32_to_cpu(btp->stale)); + ASSERT(stale == be32_to_cpu(btp->stale)); } } #endif @@ -190,8 +192,8 @@ xfs_dir2_data_freefind( * Check order, non-overlapping entries, and if we find the * one we're looking for it has to be exact. 
*/ - ASSERT(INT_GET(d->hdr.magic, ARCH_CONVERT) == XFS_DIR2_DATA_MAGIC || - INT_GET(d->hdr.magic, ARCH_CONVERT) == XFS_DIR2_BLOCK_MAGIC); + ASSERT(be32_to_cpu(d->hdr.magic) == XFS_DIR2_DATA_MAGIC || + be32_to_cpu(d->hdr.magic) == XFS_DIR2_BLOCK_MAGIC); for (dfp = &d->hdr.bestfree[0], seenzero = matched = 0; dfp < &d->hdr.bestfree[XFS_DIR2_DATA_FD_COUNT]; dfp++) { @@ -201,23 +203,24 @@ xfs_dir2_data_freefind( continue; } ASSERT(seenzero == 0); - if (INT_GET(dfp->offset, ARCH_CONVERT) == off) { + if (be16_to_cpu(dfp->offset) == off) { matched = 1; - ASSERT(INT_GET(dfp->length, ARCH_CONVERT) == INT_GET(dup->length, ARCH_CONVERT)); - } else if (off < INT_GET(dfp->offset, ARCH_CONVERT)) - ASSERT(off + INT_GET(dup->length, ARCH_CONVERT) <= INT_GET(dfp->offset, ARCH_CONVERT)); + ASSERT(dfp->length == dup->length); + } else if (off < be16_to_cpu(dfp->offset)) + ASSERT(off + be16_to_cpu(dup->length) <= be16_to_cpu(dfp->offset)); else - ASSERT(INT_GET(dfp->offset, ARCH_CONVERT) + INT_GET(dfp->length, ARCH_CONVERT) <= off); - ASSERT(matched || INT_GET(dfp->length, ARCH_CONVERT) >= INT_GET(dup->length, ARCH_CONVERT)); + ASSERT(be16_to_cpu(dfp->offset) + be16_to_cpu(dfp->length) <= off); + ASSERT(matched || be16_to_cpu(dfp->length) >= be16_to_cpu(dup->length)); if (dfp > &d->hdr.bestfree[0]) - ASSERT(INT_GET(dfp[-1].length, ARCH_CONVERT) >= INT_GET(dfp[0].length, ARCH_CONVERT)); + ASSERT(be16_to_cpu(dfp[-1].length) >= be16_to_cpu(dfp[0].length)); } #endif /* * If this is smaller than the smallest bestfree entry, * it can't be there since they're sorted. */ - if (INT_GET(dup->length, ARCH_CONVERT) < INT_GET(d->hdr.bestfree[XFS_DIR2_DATA_FD_COUNT - 1].length, ARCH_CONVERT)) + if (be16_to_cpu(dup->length) < + be16_to_cpu(d->hdr.bestfree[XFS_DIR2_DATA_FD_COUNT - 1].length)) return NULL; /* * Look at the three bestfree entries for our guy. @@ -227,7 +230,7 @@ xfs_dir2_data_freefind( dfp++) { if (!dfp->offset) return NULL; - if (INT_GET(dfp->offset, ARCH_CONVERT) == off) + if (be16_to_cpu(dfp->offset) == off) return dfp; } /* @@ -249,29 +252,29 @@ xfs_dir2_data_freeinsert( xfs_dir2_data_free_t new; /* new bestfree entry */ #ifdef __KERNEL__ - ASSERT(INT_GET(d->hdr.magic, ARCH_CONVERT) == XFS_DIR2_DATA_MAGIC || - INT_GET(d->hdr.magic, ARCH_CONVERT) == XFS_DIR2_BLOCK_MAGIC); + ASSERT(be32_to_cpu(d->hdr.magic) == XFS_DIR2_DATA_MAGIC || + be32_to_cpu(d->hdr.magic) == XFS_DIR2_BLOCK_MAGIC); #endif dfp = d->hdr.bestfree; - INT_COPY(new.length, dup->length, ARCH_CONVERT); - INT_SET(new.offset, ARCH_CONVERT, (xfs_dir2_data_off_t)((char *)dup - (char *)d)); + new.length = dup->length; + new.offset = cpu_to_be16((char *)dup - (char *)d); /* * Insert at position 0, 1, or 2; or not at all. 
*/ - if (INT_GET(new.length, ARCH_CONVERT) > INT_GET(dfp[0].length, ARCH_CONVERT)) { + if (be16_to_cpu(new.length) > be16_to_cpu(dfp[0].length)) { dfp[2] = dfp[1]; dfp[1] = dfp[0]; dfp[0] = new; *loghead = 1; return &dfp[0]; } - if (INT_GET(new.length, ARCH_CONVERT) > INT_GET(dfp[1].length, ARCH_CONVERT)) { + if (be16_to_cpu(new.length) > be16_to_cpu(dfp[1].length)) { dfp[2] = dfp[1]; dfp[1] = new; *loghead = 1; return &dfp[1]; } - if (INT_GET(new.length, ARCH_CONVERT) > INT_GET(dfp[2].length, ARCH_CONVERT)) { + if (be16_to_cpu(new.length) > be16_to_cpu(dfp[2].length)) { dfp[2] = new; *loghead = 1; return &dfp[2]; @@ -289,8 +292,8 @@ xfs_dir2_data_freeremove( int *loghead) /* out: log data header */ { #ifdef __KERNEL__ - ASSERT(INT_GET(d->hdr.magic, ARCH_CONVERT) == XFS_DIR2_DATA_MAGIC || - INT_GET(d->hdr.magic, ARCH_CONVERT) == XFS_DIR2_BLOCK_MAGIC); + ASSERT(be32_to_cpu(d->hdr.magic) == XFS_DIR2_DATA_MAGIC || + be32_to_cpu(d->hdr.magic) == XFS_DIR2_BLOCK_MAGIC); #endif /* * It's the first entry, slide the next 2 up. @@ -334,8 +337,8 @@ xfs_dir2_data_freescan( char *p; /* current entry pointer */ #ifdef __KERNEL__ - ASSERT(INT_GET(d->hdr.magic, ARCH_CONVERT) == XFS_DIR2_DATA_MAGIC || - INT_GET(d->hdr.magic, ARCH_CONVERT) == XFS_DIR2_BLOCK_MAGIC); + ASSERT(be32_to_cpu(d->hdr.magic) == XFS_DIR2_DATA_MAGIC || + be32_to_cpu(d->hdr.magic) == XFS_DIR2_BLOCK_MAGIC); #endif /* * Start by clearing the table. @@ -348,7 +351,7 @@ xfs_dir2_data_freescan( p = (char *)d->u; if (aendp) endp = aendp; - else if (INT_GET(d->hdr.magic, ARCH_CONVERT) == XFS_DIR2_BLOCK_MAGIC) { + else if (be32_to_cpu(d->hdr.magic) == XFS_DIR2_BLOCK_MAGIC) { btp = XFS_DIR2_BLOCK_TAIL_P(mp, (xfs_dir2_block_t *)d); endp = (char *)XFS_DIR2_BLOCK_LEAF_P(btp); } else @@ -361,11 +364,11 @@ xfs_dir2_data_freescan( /* * If it's a free entry, insert it. */ - if (INT_GET(dup->freetag, ARCH_CONVERT) == XFS_DIR2_DATA_FREE_TAG) { + if (be16_to_cpu(dup->freetag) == XFS_DIR2_DATA_FREE_TAG) { ASSERT((char *)dup - (char *)d == - INT_GET(*XFS_DIR2_DATA_UNUSED_TAG_P(dup), ARCH_CONVERT)); + be16_to_cpu(*XFS_DIR2_DATA_UNUSED_TAG_P(dup))); xfs_dir2_data_freeinsert(d, dup, loghead); - p += INT_GET(dup->length, ARCH_CONVERT); + p += be16_to_cpu(dup->length); } /* * For active entries, check their tags and skip them. @@ -373,7 +376,7 @@ xfs_dir2_data_freescan( else { dep = (xfs_dir2_data_entry_t *)p; ASSERT((char *)dep - (char *)d == - INT_GET(*XFS_DIR2_DATA_ENTRY_TAG_P(dep), ARCH_CONVERT)); + be16_to_cpu(*XFS_DIR2_DATA_ENTRY_TAG_P(dep))); p += XFS_DIR2_DATA_ENTSIZE(dep->namelen); } } @@ -415,8 +418,8 @@ xfs_dir2_data_init( * Initialize the header. */ d = bp->data; - INT_SET(d->hdr.magic, ARCH_CONVERT, XFS_DIR2_DATA_MAGIC); - INT_SET(d->hdr.bestfree[0].offset, ARCH_CONVERT, (xfs_dir2_data_off_t)sizeof(d->hdr)); + d->hdr.magic = cpu_to_be32(XFS_DIR2_DATA_MAGIC); + d->hdr.bestfree[0].offset = cpu_to_be16(sizeof(d->hdr)); for (i = 1; i < XFS_DIR2_DATA_FD_COUNT; i++) { d->hdr.bestfree[i].length = 0; d->hdr.bestfree[i].offset = 0; @@ -425,13 +428,12 @@ xfs_dir2_data_init( * Set up an unused entry for the block's body. 
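xfs_dir2_data_freeinsert() above keeps the three bestfree slots ordered by decoded length, largest first, sliding the smaller slots down before storing the new cpu_to_be16() encoded entry. A compact sketch of that insertion rule, with stand-in types and helpers and a simplified return convention:

#include <stdint.h>
#include <stdio.h>

typedef uint16_t __be16;                              /* stand-in type */
static uint16_t be16_to_cpu(__be16 v) { return (uint16_t)((v >> 8) | (v << 8)); }
static __be16   cpu_to_be16(uint16_t v) { return (__be16)((v >> 8) | (v << 8)); }

struct freeent {                                      /* mirrors xfs_dir2_data_free_t */
	__be16 offset;
	__be16 length;
};

/* keep bf[0..2] sorted by decoded length, largest first; ignore entries too small to track */
static int bestfree_insert(struct freeent bf[3], uint16_t offset, uint16_t length)
{
	struct freeent ent = { cpu_to_be16(offset), cpu_to_be16(length) };
	int i;

	for (i = 0; i < 3; i++) {
		if (length > be16_to_cpu(bf[i].length)) {
			/* slide the smaller entries down one slot and insert here */
			for (int j = 2; j > i; j--)
				bf[j] = bf[j - 1];
			bf[i] = ent;
			return i;
		}
	}
	return -1;                                    /* smaller than all three: not tracked */
}

int main(void)
{
	struct freeent bf[3] = { { 0, 0 }, { 0, 0 }, { 0, 0 } };

	bestfree_insert(bf, 16, 200);
	bestfree_insert(bf, 240, 500);
	bestfree_insert(bf, 400, 64);
	for (int i = 0; i < 3; i++)
		printf("bf[%d]: off=%u len=%u\n", i,
		       (unsigned)be16_to_cpu(bf[i].offset), (unsigned)be16_to_cpu(bf[i].length));
	return 0;
}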
*/ dup = &d->u[0].unused; - INT_SET(dup->freetag, ARCH_CONVERT, XFS_DIR2_DATA_FREE_TAG); + dup->freetag = cpu_to_be16(XFS_DIR2_DATA_FREE_TAG); t=mp->m_dirblksize - (uint)sizeof(d->hdr); - INT_SET(d->hdr.bestfree[0].length, ARCH_CONVERT, t); - INT_SET(dup->length, ARCH_CONVERT, t); - INT_SET(*XFS_DIR2_DATA_UNUSED_TAG_P(dup), ARCH_CONVERT, - (xfs_dir2_data_off_t)((char *)dup - (char *)d)); + d->hdr.bestfree[0].length = cpu_to_be16(t); + dup->length = cpu_to_be16(t); + *XFS_DIR2_DATA_UNUSED_TAG_P(dup) = cpu_to_be16((char *)dup - (char *)d); /* * Log it and return it. */ @@ -453,8 +455,8 @@ xfs_dir2_data_log_entry( xfs_dir2_data_t *d; /* data block pointer */ d = bp->data; - ASSERT(INT_GET(d->hdr.magic, ARCH_CONVERT) == XFS_DIR2_DATA_MAGIC || - INT_GET(d->hdr.magic, ARCH_CONVERT) == XFS_DIR2_BLOCK_MAGIC); + ASSERT(be32_to_cpu(d->hdr.magic) == XFS_DIR2_DATA_MAGIC || + be32_to_cpu(d->hdr.magic) == XFS_DIR2_BLOCK_MAGIC); xfs_da_log_buf(tp, bp, (uint)((char *)dep - (char *)d), (uint)((char *)(XFS_DIR2_DATA_ENTRY_TAG_P(dep) + 1) - (char *)d - 1)); @@ -471,8 +473,8 @@ xfs_dir2_data_log_header( xfs_dir2_data_t *d; /* data block pointer */ d = bp->data; - ASSERT(INT_GET(d->hdr.magic, ARCH_CONVERT) == XFS_DIR2_DATA_MAGIC || - INT_GET(d->hdr.magic, ARCH_CONVERT) == XFS_DIR2_BLOCK_MAGIC); + ASSERT(be32_to_cpu(d->hdr.magic) == XFS_DIR2_DATA_MAGIC || + be32_to_cpu(d->hdr.magic) == XFS_DIR2_BLOCK_MAGIC); xfs_da_log_buf(tp, bp, (uint)((char *)&d->hdr - (char *)d), (uint)(sizeof(d->hdr) - 1)); } @@ -489,8 +491,8 @@ xfs_dir2_data_log_unused( xfs_dir2_data_t *d; /* data block pointer */ d = bp->data; - ASSERT(INT_GET(d->hdr.magic, ARCH_CONVERT) == XFS_DIR2_DATA_MAGIC || - INT_GET(d->hdr.magic, ARCH_CONVERT) == XFS_DIR2_BLOCK_MAGIC); + ASSERT(be32_to_cpu(d->hdr.magic) == XFS_DIR2_DATA_MAGIC || + be32_to_cpu(d->hdr.magic) == XFS_DIR2_BLOCK_MAGIC); /* * Log the first part of the unused entry. */ @@ -533,12 +535,12 @@ xfs_dir2_data_make_free( /* * Figure out where the end of the data area is. */ - if (INT_GET(d->hdr.magic, ARCH_CONVERT) == XFS_DIR2_DATA_MAGIC) + if (be32_to_cpu(d->hdr.magic) == XFS_DIR2_DATA_MAGIC) endptr = (char *)d + mp->m_dirblksize; else { xfs_dir2_block_tail_t *btp; /* block tail */ - ASSERT(INT_GET(d->hdr.magic, ARCH_CONVERT) == XFS_DIR2_BLOCK_MAGIC); + ASSERT(be32_to_cpu(d->hdr.magic) == XFS_DIR2_BLOCK_MAGIC); btp = XFS_DIR2_BLOCK_TAIL_P(mp, (xfs_dir2_block_t *)d); endptr = (char *)XFS_DIR2_BLOCK_LEAF_P(btp); } @@ -547,11 +549,11 @@ xfs_dir2_data_make_free( * the previous entry and see if it's free. */ if (offset > sizeof(d->hdr)) { - xfs_dir2_data_off_t *tagp; /* tag just before us */ + __be16 *tagp; /* tag just before us */ - tagp = (xfs_dir2_data_off_t *)((char *)d + offset) - 1; - prevdup = (xfs_dir2_data_unused_t *)((char *)d + INT_GET(*tagp, ARCH_CONVERT)); - if (INT_GET(prevdup->freetag, ARCH_CONVERT) != XFS_DIR2_DATA_FREE_TAG) + tagp = (__be16 *)((char *)d + offset) - 1; + prevdup = (xfs_dir2_data_unused_t *)((char *)d + be16_to_cpu(*tagp)); + if (be16_to_cpu(prevdup->freetag) != XFS_DIR2_DATA_FREE_TAG) prevdup = NULL; } else prevdup = NULL; @@ -562,7 +564,7 @@ xfs_dir2_data_make_free( if ((char *)d + offset + len < endptr) { postdup = (xfs_dir2_data_unused_t *)((char *)d + offset + len); - if (INT_GET(postdup->freetag, ARCH_CONVERT) != XFS_DIR2_DATA_FREE_TAG) + if (be16_to_cpu(postdup->freetag) != XFS_DIR2_DATA_FREE_TAG) postdup = NULL; } else postdup = NULL; @@ -586,13 +588,13 @@ xfs_dir2_data_make_free( * since the third bestfree is there, there might be more * entries. 
*/ - needscan = d->hdr.bestfree[2].length; + needscan = (d->hdr.bestfree[2].length != 0); /* * Fix up the new big freespace. */ - INT_MOD(prevdup->length, ARCH_CONVERT, len + INT_GET(postdup->length, ARCH_CONVERT)); - INT_SET(*XFS_DIR2_DATA_UNUSED_TAG_P(prevdup), ARCH_CONVERT, - (xfs_dir2_data_off_t)((char *)prevdup - (char *)d)); + be16_add(&prevdup->length, len + be16_to_cpu(postdup->length)); + *XFS_DIR2_DATA_UNUSED_TAG_P(prevdup) = + cpu_to_be16((char *)prevdup - (char *)d); xfs_dir2_data_log_unused(tp, bp, prevdup); if (!needscan) { /* @@ -614,7 +616,7 @@ xfs_dir2_data_make_free( */ dfp = xfs_dir2_data_freeinsert(d, prevdup, needlogp); ASSERT(dfp == &d->hdr.bestfree[0]); - ASSERT(INT_GET(dfp->length, ARCH_CONVERT) == INT_GET(prevdup->length, ARCH_CONVERT)); + ASSERT(dfp->length == prevdup->length); ASSERT(!dfp[1].length); ASSERT(!dfp[2].length); } @@ -624,9 +626,9 @@ xfs_dir2_data_make_free( */ else if (prevdup) { dfp = xfs_dir2_data_freefind(d, prevdup); - INT_MOD(prevdup->length, ARCH_CONVERT, len); - INT_SET(*XFS_DIR2_DATA_UNUSED_TAG_P(prevdup), ARCH_CONVERT, - (xfs_dir2_data_off_t)((char *)prevdup - (char *)d)); + be16_add(&prevdup->length, len); + *XFS_DIR2_DATA_UNUSED_TAG_P(prevdup) = + cpu_to_be16((char *)prevdup - (char *)d); xfs_dir2_data_log_unused(tp, bp, prevdup); /* * If the previous entry was in the table, the new entry @@ -640,8 +642,10 @@ xfs_dir2_data_make_free( /* * Otherwise we need a scan if the new entry is big enough. */ - else - needscan = INT_GET(prevdup->length, ARCH_CONVERT) > INT_GET(d->hdr.bestfree[2].length, ARCH_CONVERT); + else { + needscan = be16_to_cpu(prevdup->length) > + be16_to_cpu(d->hdr.bestfree[2].length); + } } /* * The following entry is free, merge with it. @@ -649,10 +653,10 @@ xfs_dir2_data_make_free( else if (postdup) { dfp = xfs_dir2_data_freefind(d, postdup); newdup = (xfs_dir2_data_unused_t *)((char *)d + offset); - INT_SET(newdup->freetag, ARCH_CONVERT, XFS_DIR2_DATA_FREE_TAG); - INT_SET(newdup->length, ARCH_CONVERT, len + INT_GET(postdup->length, ARCH_CONVERT)); - INT_SET(*XFS_DIR2_DATA_UNUSED_TAG_P(newdup), ARCH_CONVERT, - (xfs_dir2_data_off_t)((char *)newdup - (char *)d)); + newdup->freetag = cpu_to_be16(XFS_DIR2_DATA_FREE_TAG); + newdup->length = cpu_to_be16(len + be16_to_cpu(postdup->length)); + *XFS_DIR2_DATA_UNUSED_TAG_P(newdup) = + cpu_to_be16((char *)newdup - (char *)d); xfs_dir2_data_log_unused(tp, bp, newdup); /* * If the following entry was in the table, the new entry @@ -666,18 +670,20 @@ xfs_dir2_data_make_free( /* * Otherwise we need a scan if the new entry is big enough. */ - else - needscan = INT_GET(newdup->length, ARCH_CONVERT) > INT_GET(d->hdr.bestfree[2].length, ARCH_CONVERT); + else { + needscan = be16_to_cpu(newdup->length) > + be16_to_cpu(d->hdr.bestfree[2].length); + } } /* * Neither neighbor is free. Make a new entry. 
*/ else { newdup = (xfs_dir2_data_unused_t *)((char *)d + offset); - INT_SET(newdup->freetag, ARCH_CONVERT, XFS_DIR2_DATA_FREE_TAG); - INT_SET(newdup->length, ARCH_CONVERT, len); - INT_SET(*XFS_DIR2_DATA_UNUSED_TAG_P(newdup), ARCH_CONVERT, - (xfs_dir2_data_off_t)((char *)newdup - (char *)d)); + newdup->freetag = cpu_to_be16(XFS_DIR2_DATA_FREE_TAG); + newdup->length = cpu_to_be16(len); + *XFS_DIR2_DATA_UNUSED_TAG_P(newdup) = + cpu_to_be16((char *)newdup - (char *)d); xfs_dir2_data_log_unused(tp, bp, newdup); (void)xfs_dir2_data_freeinsert(d, newdup, needlogp); } @@ -707,18 +713,18 @@ xfs_dir2_data_use_free( int oldlen; /* old unused entry's length */ d = bp->data; - ASSERT(INT_GET(d->hdr.magic, ARCH_CONVERT) == XFS_DIR2_DATA_MAGIC || - INT_GET(d->hdr.magic, ARCH_CONVERT) == XFS_DIR2_BLOCK_MAGIC); - ASSERT(INT_GET(dup->freetag, ARCH_CONVERT) == XFS_DIR2_DATA_FREE_TAG); + ASSERT(be32_to_cpu(d->hdr.magic) == XFS_DIR2_DATA_MAGIC || + be32_to_cpu(d->hdr.magic) == XFS_DIR2_BLOCK_MAGIC); + ASSERT(be16_to_cpu(dup->freetag) == XFS_DIR2_DATA_FREE_TAG); ASSERT(offset >= (char *)dup - (char *)d); - ASSERT(offset + len <= (char *)dup + INT_GET(dup->length, ARCH_CONVERT) - (char *)d); - ASSERT((char *)dup - (char *)d == INT_GET(*XFS_DIR2_DATA_UNUSED_TAG_P(dup), ARCH_CONVERT)); + ASSERT(offset + len <= (char *)dup + be16_to_cpu(dup->length) - (char *)d); + ASSERT((char *)dup - (char *)d == be16_to_cpu(*XFS_DIR2_DATA_UNUSED_TAG_P(dup))); /* * Look up the entry in the bestfree table. */ dfp = xfs_dir2_data_freefind(d, dup); - oldlen = INT_GET(dup->length, ARCH_CONVERT); - ASSERT(dfp || oldlen <= INT_GET(d->hdr.bestfree[2].length, ARCH_CONVERT)); + oldlen = be16_to_cpu(dup->length); + ASSERT(dfp || oldlen <= be16_to_cpu(d->hdr.bestfree[2].length)); /* * Check for alignment with front and back of the entry. */ @@ -732,7 +738,7 @@ xfs_dir2_data_use_free( */ if (matchfront && matchback) { if (dfp) { - needscan = d->hdr.bestfree[2].offset; + needscan = (d->hdr.bestfree[2].offset != 0); if (!needscan) xfs_dir2_data_freeremove(d, dfp, needlogp); } @@ -743,10 +749,10 @@ xfs_dir2_data_use_free( */ else if (matchfront) { newdup = (xfs_dir2_data_unused_t *)((char *)d + offset + len); - INT_SET(newdup->freetag, ARCH_CONVERT, XFS_DIR2_DATA_FREE_TAG); - INT_SET(newdup->length, ARCH_CONVERT, oldlen - len); - INT_SET(*XFS_DIR2_DATA_UNUSED_TAG_P(newdup), ARCH_CONVERT, - (xfs_dir2_data_off_t)((char *)newdup - (char *)d)); + newdup->freetag = cpu_to_be16(XFS_DIR2_DATA_FREE_TAG); + newdup->length = cpu_to_be16(oldlen - len); + *XFS_DIR2_DATA_UNUSED_TAG_P(newdup) = + cpu_to_be16((char *)newdup - (char *)d); xfs_dir2_data_log_unused(tp, bp, newdup); /* * If it was in the table, remove it and add the new one. 
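xfs_dir2_data_use_free() above distinguishes four cases when claiming space out of an unused region: the claim covers the whole region, lines up with its front, lines up with its back, or sits in the middle and splits it in two. The sketch below reproduces just that case split on plain CPU-order extents; the byte-order handling and the bestfree bookkeeping of the real routine are deliberately left out.

#include <stdio.h>

/* a free region inside a directory data block, offsets in CPU order for brevity */
struct extent { int off, len; };

/*
 * Claim [off, off+len) out of the free extent *fr.
 * Returns the number of free pieces left (0, 1 or 2); a second piece,
 * if any, is stored in *rest.  Mirrors the matchfront/matchback cases
 * of xfs_dir2_data_use_free without the bestfree updates.
 */
static int use_free(struct extent *fr, int off, int len, struct extent *rest)
{
	int matchfront = (off == fr->off);
	int matchback  = (off + len == fr->off + fr->len);

	if (matchfront && matchback)          /* exact fit: the free extent disappears */
		return 0;
	if (matchfront) {                     /* shrink from the front */
		fr->off += len;
		fr->len -= len;
		return 1;
	}
	if (matchback) {                      /* shrink from the back */
		fr->len -= len;
		return 1;
	}
	/* middle: split into two free pieces */
	rest->off = off + len;
	rest->len = fr->off + fr->len - rest->off;
	fr->len = off - fr->off;
	return 2;
}

int main(void)
{
	struct extent fr = { 64, 128 }, rest;
	int n = use_free(&fr, 96, 16, &rest);   /* claim 16 bytes from the middle */

	printf("pieces=%d first=[%d,%d)", n, fr.off, fr.off + fr.len);
	if (n == 2)
		printf(" second=[%d,%d)", rest.off, rest.off + rest.len);
	printf("\n");
	return 0;
}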
@@ -755,8 +761,8 @@ xfs_dir2_data_use_free( xfs_dir2_data_freeremove(d, dfp, needlogp); dfp = xfs_dir2_data_freeinsert(d, newdup, needlogp); ASSERT(dfp != NULL); - ASSERT(INT_GET(dfp->length, ARCH_CONVERT) == INT_GET(newdup->length, ARCH_CONVERT)); - ASSERT(INT_GET(dfp->offset, ARCH_CONVERT) == (char *)newdup - (char *)d); + ASSERT(dfp->length == newdup->length); + ASSERT(be16_to_cpu(dfp->offset) == (char *)newdup - (char *)d); /* * If we got inserted at the last slot, * that means we don't know if there was a better @@ -771,10 +777,9 @@ xfs_dir2_data_use_free( */ else if (matchback) { newdup = dup; - INT_SET(newdup->length, ARCH_CONVERT, (xfs_dir2_data_off_t) - (((char *)d + offset) - (char *)newdup)); - INT_SET(*XFS_DIR2_DATA_UNUSED_TAG_P(newdup), ARCH_CONVERT, - (xfs_dir2_data_off_t)((char *)newdup - (char *)d)); + newdup->length = cpu_to_be16(((char *)d + offset) - (char *)newdup); + *XFS_DIR2_DATA_UNUSED_TAG_P(newdup) = + cpu_to_be16((char *)newdup - (char *)d); xfs_dir2_data_log_unused(tp, bp, newdup); /* * If it was in the table, remove it and add the new one. @@ -783,8 +788,8 @@ xfs_dir2_data_use_free( xfs_dir2_data_freeremove(d, dfp, needlogp); dfp = xfs_dir2_data_freeinsert(d, newdup, needlogp); ASSERT(dfp != NULL); - ASSERT(INT_GET(dfp->length, ARCH_CONVERT) == INT_GET(newdup->length, ARCH_CONVERT)); - ASSERT(INT_GET(dfp->offset, ARCH_CONVERT) == (char *)newdup - (char *)d); + ASSERT(dfp->length == newdup->length); + ASSERT(be16_to_cpu(dfp->offset) == (char *)newdup - (char *)d); /* * If we got inserted at the last slot, * that means we don't know if there was a better @@ -799,16 +804,15 @@ xfs_dir2_data_use_free( */ else { newdup = dup; - INT_SET(newdup->length, ARCH_CONVERT, (xfs_dir2_data_off_t) - (((char *)d + offset) - (char *)newdup)); - INT_SET(*XFS_DIR2_DATA_UNUSED_TAG_P(newdup), ARCH_CONVERT, - (xfs_dir2_data_off_t)((char *)newdup - (char *)d)); + newdup->length = cpu_to_be16(((char *)d + offset) - (char *)newdup); + *XFS_DIR2_DATA_UNUSED_TAG_P(newdup) = + cpu_to_be16((char *)newdup - (char *)d); xfs_dir2_data_log_unused(tp, bp, newdup); newdup2 = (xfs_dir2_data_unused_t *)((char *)d + offset + len); - INT_SET(newdup2->freetag, ARCH_CONVERT, XFS_DIR2_DATA_FREE_TAG); - INT_SET(newdup2->length, ARCH_CONVERT, oldlen - len - INT_GET(newdup->length, ARCH_CONVERT)); - INT_SET(*XFS_DIR2_DATA_UNUSED_TAG_P(newdup2), ARCH_CONVERT, - (xfs_dir2_data_off_t)((char *)newdup2 - (char *)d)); + newdup2->freetag = cpu_to_be16(XFS_DIR2_DATA_FREE_TAG); + newdup2->length = cpu_to_be16(oldlen - len - be16_to_cpu(newdup->length)); + *XFS_DIR2_DATA_UNUSED_TAG_P(newdup2) = + cpu_to_be16((char *)newdup2 - (char *)d); xfs_dir2_data_log_unused(tp, bp, newdup2); /* * If the old entry was in the table, we need to scan @@ -819,7 +823,7 @@ xfs_dir2_data_use_free( * the 2 new will work. */ if (dfp) { - needscan = d->hdr.bestfree[2].length; + needscan = (d->hdr.bestfree[2].length != 0); if (!needscan) { xfs_dir2_data_freeremove(d, dfp, needlogp); (void)xfs_dir2_data_freeinsert(d, newdup, diff --git a/fs/xfs/xfs_dir2_data.h b/fs/xfs/xfs_dir2_data.h index 5e3a7f9ec73..0847cbb53e1 100644 --- a/fs/xfs/xfs_dir2_data.h +++ b/fs/xfs/xfs_dir2_data.h @@ -65,8 +65,8 @@ struct xfs_trans; * The freespace will be formatted as a xfs_dir2_data_unused_t. 
*/ typedef struct xfs_dir2_data_free { - xfs_dir2_data_off_t offset; /* start of freespace */ - xfs_dir2_data_off_t length; /* length of freespace */ + __be16 offset; /* start of freespace */ + __be16 length; /* length of freespace */ } xfs_dir2_data_free_t; /* @@ -75,7 +75,7 @@ typedef struct xfs_dir2_data_free { * The code knows that XFS_DIR2_DATA_FD_COUNT is 3. */ typedef struct xfs_dir2_data_hdr { - __uint32_t magic; /* XFS_DIR2_DATA_MAGIC */ + __be32 magic; /* XFS_DIR2_DATA_MAGIC */ /* or XFS_DIR2_BLOCK_MAGIC */ xfs_dir2_data_free_t bestfree[XFS_DIR2_DATA_FD_COUNT]; } xfs_dir2_data_hdr_t; @@ -97,10 +97,10 @@ typedef struct xfs_dir2_data_entry { * Tag appears as the last 2 bytes. */ typedef struct xfs_dir2_data_unused { - __uint16_t freetag; /* XFS_DIR2_DATA_FREE_TAG */ - xfs_dir2_data_off_t length; /* total free length */ + __be16 freetag; /* XFS_DIR2_DATA_FREE_TAG */ + __be16 length; /* total free length */ /* variable offset */ - xfs_dir2_data_off_t tag; /* starting offset of us */ + __be16 tag; /* starting offset of us */ } xfs_dir2_data_unused_t; typedef union { @@ -134,12 +134,11 @@ static inline int xfs_dir2_data_entsize(int n) * Pointer to an entry's tag word. */ #define XFS_DIR2_DATA_ENTRY_TAG_P(dep) xfs_dir2_data_entry_tag_p(dep) -static inline xfs_dir2_data_off_t * +static inline __be16 * xfs_dir2_data_entry_tag_p(xfs_dir2_data_entry_t *dep) { - return (xfs_dir2_data_off_t *) \ - ((char *)(dep) + XFS_DIR2_DATA_ENTSIZE((dep)->namelen) - \ - (uint)sizeof(xfs_dir2_data_off_t)); + return (__be16 *)((char *)dep + + XFS_DIR2_DATA_ENTSIZE(dep->namelen) - sizeof(__be16)); } /* @@ -147,12 +146,11 @@ xfs_dir2_data_entry_tag_p(xfs_dir2_data_entry_t *dep) */ #define XFS_DIR2_DATA_UNUSED_TAG_P(dup) \ xfs_dir2_data_unused_tag_p(dup) -static inline xfs_dir2_data_off_t * +static inline __be16 * xfs_dir2_data_unused_tag_p(xfs_dir2_data_unused_t *dup) { - return (xfs_dir2_data_off_t *) \ - ((char *)(dup) + INT_GET((dup)->length, ARCH_CONVERT) \ - - (uint)sizeof(xfs_dir2_data_off_t)); + return (__be16 *)((char *)dup + + be16_to_cpu(dup->length) - sizeof(__be16)); } /* diff --git a/fs/xfs/xfs_dir2_leaf.c b/fs/xfs/xfs_dir2_leaf.c index d342b6b5523..08648b18265 100644 --- a/fs/xfs/xfs_dir2_leaf.c +++ b/fs/xfs/xfs_dir2_leaf.c @@ -66,7 +66,7 @@ xfs_dir2_block_to_leaf( xfs_da_args_t *args, /* operation arguments */ xfs_dabuf_t *dbp) /* input block's buffer */ { - xfs_dir2_data_off_t *bestsp; /* leaf's bestsp entries */ + __be16 *bestsp; /* leaf's bestsp entries */ xfs_dablk_t blkno; /* leaf block's bno */ xfs_dir2_block_t *block; /* block structure */ xfs_dir2_leaf_entry_t *blp; /* block's leaf entries */ @@ -111,14 +111,14 @@ xfs_dir2_block_to_leaf( /* * Set the counts in the leaf header. */ - INT_COPY(leaf->hdr.count, btp->count, ARCH_CONVERT); /* INT_: type change */ - INT_COPY(leaf->hdr.stale, btp->stale, ARCH_CONVERT); /* INT_: type change */ + leaf->hdr.count = cpu_to_be16(be32_to_cpu(btp->count)); + leaf->hdr.stale = cpu_to_be16(be32_to_cpu(btp->stale)); /* * Could compact these but I think we always do the conversion * after squeezing out stale entries. 
*/ - memcpy(leaf->ents, blp, INT_GET(btp->count, ARCH_CONVERT) * sizeof(xfs_dir2_leaf_entry_t)); - xfs_dir2_leaf_log_ents(tp, lbp, 0, INT_GET(leaf->hdr.count, ARCH_CONVERT) - 1); + memcpy(leaf->ents, blp, be32_to_cpu(btp->count) * sizeof(xfs_dir2_leaf_entry_t)); + xfs_dir2_leaf_log_ents(tp, lbp, 0, be16_to_cpu(leaf->hdr.count) - 1); needscan = 0; needlog = 1; /* @@ -133,7 +133,7 @@ xfs_dir2_block_to_leaf( /* * Fix up the block header, make it a data block. */ - INT_SET(block->hdr.magic, ARCH_CONVERT, XFS_DIR2_DATA_MAGIC); + block->hdr.magic = cpu_to_be32(XFS_DIR2_DATA_MAGIC); if (needscan) xfs_dir2_data_freescan(mp, (xfs_dir2_data_t *)block, &needlog, NULL); @@ -141,9 +141,9 @@ xfs_dir2_block_to_leaf( * Set up leaf tail and bests table. */ ltp = XFS_DIR2_LEAF_TAIL_P(mp, leaf); - INT_SET(ltp->bestcount, ARCH_CONVERT, 1); + ltp->bestcount = cpu_to_be32(1); bestsp = XFS_DIR2_LEAF_BESTS_P(ltp); - INT_COPY(bestsp[0], block->hdr.bestfree[0].length, ARCH_CONVERT); + bestsp[0] = block->hdr.bestfree[0].length; /* * Log the data header and leaf bests table. */ @@ -163,7 +163,7 @@ int /* error */ xfs_dir2_leaf_addname( xfs_da_args_t *args) /* operation arguments */ { - xfs_dir2_data_off_t *bestsp; /* freespace table in leaf */ + __be16 *bestsp; /* freespace table in leaf */ int compact; /* need to compact leaves */ xfs_dir2_data_t *data; /* data block structure */ xfs_dabuf_t *dbp; /* data block buffer */ @@ -187,7 +187,7 @@ xfs_dir2_leaf_addname( int needbytes; /* leaf block bytes needed */ int needlog; /* need to log data header */ int needscan; /* need to rescan data free */ - xfs_dir2_data_off_t *tagp; /* end of data entry */ + __be16 *tagp; /* end of data entry */ xfs_trans_t *tp; /* transaction pointer */ xfs_dir2_db_t use_block; /* data block number */ @@ -222,14 +222,14 @@ xfs_dir2_leaf_addname( * in a data block, improving the lookup of those entries. */ for (use_block = -1, lep = &leaf->ents[index]; - index < INT_GET(leaf->hdr.count, ARCH_CONVERT) && INT_GET(lep->hashval, ARCH_CONVERT) == args->hashval; + index < be16_to_cpu(leaf->hdr.count) && be32_to_cpu(lep->hashval) == args->hashval; index++, lep++) { - if (INT_GET(lep->address, ARCH_CONVERT) == XFS_DIR2_NULL_DATAPTR) + if (be32_to_cpu(lep->address) == XFS_DIR2_NULL_DATAPTR) continue; - i = XFS_DIR2_DATAPTR_TO_DB(mp, INT_GET(lep->address, ARCH_CONVERT)); - ASSERT(i < INT_GET(ltp->bestcount, ARCH_CONVERT)); - ASSERT(INT_GET(bestsp[i], ARCH_CONVERT) != NULLDATAOFF); - if (INT_GET(bestsp[i], ARCH_CONVERT) >= length) { + i = XFS_DIR2_DATAPTR_TO_DB(mp, be32_to_cpu(lep->address)); + ASSERT(i < be32_to_cpu(ltp->bestcount)); + ASSERT(be16_to_cpu(bestsp[i]) != NULLDATAOFF); + if (be16_to_cpu(bestsp[i]) >= length) { use_block = i; break; } @@ -238,13 +238,13 @@ xfs_dir2_leaf_addname( * Didn't find a block yet, linear search all the data blocks. */ if (use_block == -1) { - for (i = 0; i < INT_GET(ltp->bestcount, ARCH_CONVERT); i++) { + for (i = 0; i < be32_to_cpu(ltp->bestcount); i++) { /* * Remember a block we see that's missing. */ - if (INT_GET(bestsp[i], ARCH_CONVERT) == NULLDATAOFF && use_block == -1) + if (be16_to_cpu(bestsp[i]) == NULLDATAOFF && use_block == -1) use_block = i; - else if (INT_GET(bestsp[i], ARCH_CONVERT) >= length) { + else if (be16_to_cpu(bestsp[i]) >= length) { use_block = i; break; } @@ -260,21 +260,21 @@ xfs_dir2_leaf_addname( * Now kill use_block if it refers to a missing block, so we * can use it as an indication of allocation needed. 
*/ - if (use_block != -1 && INT_GET(bestsp[use_block], ARCH_CONVERT) == NULLDATAOFF) + if (use_block != -1 && be16_to_cpu(bestsp[use_block]) == NULLDATAOFF) use_block = -1; /* * If we don't have enough free bytes but we can make enough * by compacting out stale entries, we'll do that. */ - if ((char *)bestsp - (char *)&leaf->ents[INT_GET(leaf->hdr.count, ARCH_CONVERT)] < needbytes && - INT_GET(leaf->hdr.stale, ARCH_CONVERT) > 1) { + if ((char *)bestsp - (char *)&leaf->ents[be16_to_cpu(leaf->hdr.count)] < needbytes && + be16_to_cpu(leaf->hdr.stale) > 1) { compact = 1; } /* * Otherwise if we don't have enough free bytes we need to * convert to node form. */ - else if ((char *)bestsp - (char *)&leaf->ents[INT_GET(leaf->hdr.count, ARCH_CONVERT)] < + else if ((char *)bestsp - (char *)&leaf->ents[be16_to_cpu(leaf->hdr.count)] < needbytes) { /* * Just checking or no space reservation, give up. @@ -330,8 +330,8 @@ xfs_dir2_leaf_addname( * There are stale entries, so we'll need log-low and log-high * impossibly bad values later. */ - else if (INT_GET(leaf->hdr.stale, ARCH_CONVERT)) { - lfloglow = INT_GET(leaf->hdr.count, ARCH_CONVERT); + else if (be16_to_cpu(leaf->hdr.stale)) { + lfloglow = be16_to_cpu(leaf->hdr.count); lfloghigh = -1; } /* @@ -358,13 +358,13 @@ xfs_dir2_leaf_addname( * If we're adding a new data block on the end we need to * extend the bests table.  Copy it up one entry. */ - if (use_block >= INT_GET(ltp->bestcount, ARCH_CONVERT)) { + if (use_block >= be32_to_cpu(ltp->bestcount)) { bestsp--; memmove(&bestsp[0], &bestsp[1], - INT_GET(ltp->bestcount, ARCH_CONVERT) * sizeof(bestsp[0])); - INT_MOD(ltp->bestcount, ARCH_CONVERT, +1); + be32_to_cpu(ltp->bestcount) * sizeof(bestsp[0])); + be32_add(&ltp->bestcount, 1); xfs_dir2_leaf_log_tail(tp, lbp); - xfs_dir2_leaf_log_bests(tp, lbp, 0, INT_GET(ltp->bestcount, ARCH_CONVERT) - 1); + xfs_dir2_leaf_log_bests(tp, lbp, 0, be32_to_cpu(ltp->bestcount) - 1); } /* * If we're filling in a previously empty block just log it. @@ -372,7 +372,7 @@ xfs_dir2_leaf_addname( else xfs_dir2_leaf_log_bests(tp, lbp, use_block, use_block); data = dbp->data; - INT_COPY(bestsp[use_block], data->hdr.bestfree[0].length, ARCH_CONVERT); + bestsp[use_block] = data->hdr.bestfree[0].length; grown = 1; } /* @@ -394,8 +394,8 @@ xfs_dir2_leaf_addname( * Point to the biggest freespace in our data block. */ dup = (xfs_dir2_data_unused_t *) - ((char *)data + INT_GET(data->hdr.bestfree[0].offset, ARCH_CONVERT)); - ASSERT(INT_GET(dup->length, ARCH_CONVERT) >= length); + ((char *)data + be16_to_cpu(data->hdr.bestfree[0].offset)); + ASSERT(be16_to_cpu(dup->length) >= length); needscan = needlog = 0; /* * Mark the initial part of our freespace in use for the new entry. @@ -411,7 +411,7 @@ xfs_dir2_leaf_addname( dep->namelen = args->namelen; memcpy(dep->name, args->name, dep->namelen); tagp = XFS_DIR2_DATA_ENTRY_TAG_P(dep); - INT_SET(*tagp, ARCH_CONVERT, (xfs_dir2_data_off_t)((char *)dep - (char *)data)); + *tagp = cpu_to_be16((char *)dep - (char *)data); /* * Need to scan fix up the bestfree table. */ @@ -427,8 +427,8 @@ xfs_dir2_leaf_addname( * If the bests table needs to be changed, do it. * Log the change unless we've already done that.
*/ - if (INT_GET(bestsp[use_block], ARCH_CONVERT) != INT_GET(data->hdr.bestfree[0].length, ARCH_CONVERT)) { - INT_COPY(bestsp[use_block], data->hdr.bestfree[0].length, ARCH_CONVERT); + if (be16_to_cpu(bestsp[use_block]) != be16_to_cpu(data->hdr.bestfree[0].length)) { + bestsp[use_block] = data->hdr.bestfree[0].length; if (!grown) xfs_dir2_leaf_log_bests(tp, lbp, use_block, use_block); } @@ -440,15 +440,15 @@ xfs_dir2_leaf_addname( /* * lep is still good as the index leaf entry. */ - if (index < INT_GET(leaf->hdr.count, ARCH_CONVERT)) + if (index < be16_to_cpu(leaf->hdr.count)) memmove(lep + 1, lep, - (INT_GET(leaf->hdr.count, ARCH_CONVERT) - index) * sizeof(*lep)); + (be16_to_cpu(leaf->hdr.count) - index) * sizeof(*lep)); /* * Record low and high logging indices for the leaf. */ lfloglow = index; - lfloghigh = INT_GET(leaf->hdr.count, ARCH_CONVERT); - INT_MOD(leaf->hdr.count, ARCH_CONVERT, +1); + lfloghigh = be16_to_cpu(leaf->hdr.count); + be16_add(&leaf->hdr.count, 1); } /* * There are stale entries. @@ -468,7 +468,7 @@ xfs_dir2_leaf_addname( */ for (lowstale = index - 1; lowstale >= 0 && - INT_GET(leaf->ents[lowstale].address, ARCH_CONVERT) != + be32_to_cpu(leaf->ents[lowstale].address) != XFS_DIR2_NULL_DATAPTR; lowstale--) continue; @@ -478,8 +478,8 @@ xfs_dir2_leaf_addname( * lowstale entry would be better. */ for (highstale = index; - highstale < INT_GET(leaf->hdr.count, ARCH_CONVERT) && - INT_GET(leaf->ents[highstale].address, ARCH_CONVERT) != + highstale < be16_to_cpu(leaf->hdr.count) && + be32_to_cpu(leaf->ents[highstale].address) != XFS_DIR2_NULL_DATAPTR && (lowstale < 0 || index - lowstale - 1 >= highstale - index); @@ -490,10 +490,10 @@ xfs_dir2_leaf_addname( * If the low one is better, use it. */ if (lowstale >= 0 && - (highstale == INT_GET(leaf->hdr.count, ARCH_CONVERT) || + (highstale == be16_to_cpu(leaf->hdr.count) || index - lowstale - 1 < highstale - index)) { ASSERT(index - lowstale - 1 >= 0); - ASSERT(INT_GET(leaf->ents[lowstale].address, ARCH_CONVERT) == + ASSERT(be32_to_cpu(leaf->ents[lowstale].address) == XFS_DIR2_NULL_DATAPTR); /* * Copy entries up to cover the stale entry @@ -512,7 +512,7 @@ xfs_dir2_leaf_addname( */ else { ASSERT(highstale - index >= 0); - ASSERT(INT_GET(leaf->ents[highstale].address, ARCH_CONVERT) == + ASSERT(be32_to_cpu(leaf->ents[highstale].address) == XFS_DIR2_NULL_DATAPTR); /* * Copy entries down to copver the stale entry @@ -526,13 +526,14 @@ xfs_dir2_leaf_addname( lfloglow = MIN(index, lfloglow); lfloghigh = MAX(highstale, lfloghigh); } - INT_MOD(leaf->hdr.stale, ARCH_CONVERT, -1); + be16_add(&leaf->hdr.stale, -1); } /* * Fill in the new leaf entry. */ - INT_SET(lep->hashval, ARCH_CONVERT, args->hashval); - INT_SET(lep->address, ARCH_CONVERT, XFS_DIR2_DB_OFF_TO_DATAPTR(mp, use_block, INT_GET(*tagp, ARCH_CONVERT))); + lep->hashval = cpu_to_be32(args->hashval); + lep->address = cpu_to_be32(XFS_DIR2_DB_OFF_TO_DATAPTR(mp, use_block, + be16_to_cpu(*tagp))); /* * Log the leaf fields and give up the buffers. */ @@ -563,30 +564,30 @@ xfs_dir2_leaf_check( leaf = bp->data; mp = dp->i_mount; - ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) == XFS_DIR2_LEAF1_MAGIC); + ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_DIR2_LEAF1_MAGIC); /* * This value is not restrictive enough. * Should factor in the size of the bests table as well. * We can deduce a value for that from di_size. 
*/ - ASSERT(INT_GET(leaf->hdr.count, ARCH_CONVERT) <= XFS_DIR2_MAX_LEAF_ENTS(mp)); + ASSERT(be16_to_cpu(leaf->hdr.count) <= XFS_DIR2_MAX_LEAF_ENTS(mp)); ltp = XFS_DIR2_LEAF_TAIL_P(mp, leaf); /* * Leaves and bests don't overlap. */ - ASSERT((char *)&leaf->ents[INT_GET(leaf->hdr.count, ARCH_CONVERT)] <= + ASSERT((char *)&leaf->ents[be16_to_cpu(leaf->hdr.count)] <= (char *)XFS_DIR2_LEAF_BESTS_P(ltp)); /* * Check hash value order, count stale entries. */ - for (i = stale = 0; i < INT_GET(leaf->hdr.count, ARCH_CONVERT); i++) { - if (i + 1 < INT_GET(leaf->hdr.count, ARCH_CONVERT)) - ASSERT(INT_GET(leaf->ents[i].hashval, ARCH_CONVERT) <= - INT_GET(leaf->ents[i + 1].hashval, ARCH_CONVERT)); - if (INT_GET(leaf->ents[i].address, ARCH_CONVERT) == XFS_DIR2_NULL_DATAPTR) + for (i = stale = 0; i < be16_to_cpu(leaf->hdr.count); i++) { + if (i + 1 < be16_to_cpu(leaf->hdr.count)) + ASSERT(be32_to_cpu(leaf->ents[i].hashval) <= + be32_to_cpu(leaf->ents[i + 1].hashval)); + if (be32_to_cpu(leaf->ents[i].address) == XFS_DIR2_NULL_DATAPTR) stale++; } - ASSERT(INT_GET(leaf->hdr.stale, ARCH_CONVERT) == stale); + ASSERT(be16_to_cpu(leaf->hdr.stale) == stale); } #endif /* DEBUG */ @@ -611,8 +612,8 @@ xfs_dir2_leaf_compact( /* * Compress out the stale entries in place. */ - for (from = to = 0, loglow = -1; from < INT_GET(leaf->hdr.count, ARCH_CONVERT); from++) { - if (INT_GET(leaf->ents[from].address, ARCH_CONVERT) == XFS_DIR2_NULL_DATAPTR) + for (from = to = 0, loglow = -1; from < be16_to_cpu(leaf->hdr.count); from++) { + if (be32_to_cpu(leaf->ents[from].address) == XFS_DIR2_NULL_DATAPTR) continue; /* * Only actually copy the entries that are different. @@ -627,8 +628,8 @@ xfs_dir2_leaf_compact( /* * Update and log the header, log the leaf entries. */ - ASSERT(INT_GET(leaf->hdr.stale, ARCH_CONVERT) == from - to); - INT_MOD(leaf->hdr.count, ARCH_CONVERT, -(INT_GET(leaf->hdr.stale, ARCH_CONVERT))); + ASSERT(be16_to_cpu(leaf->hdr.stale) == from - to); + be16_add(&leaf->hdr.count, -(be16_to_cpu(leaf->hdr.stale))); leaf->hdr.stale = 0; xfs_dir2_leaf_log_header(args->trans, bp); if (loglow != -1) @@ -662,14 +663,14 @@ xfs_dir2_leaf_compact_x1( int to; /* destination copy index */ leaf = bp->data; - ASSERT(INT_GET(leaf->hdr.stale, ARCH_CONVERT) > 1); + ASSERT(be16_to_cpu(leaf->hdr.stale) > 1); index = *indexp; /* * Find the first stale entry before our index, if any. */ for (lowstale = index - 1; lowstale >= 0 && - INT_GET(leaf->ents[lowstale].address, ARCH_CONVERT) != XFS_DIR2_NULL_DATAPTR; + be32_to_cpu(leaf->ents[lowstale].address) != XFS_DIR2_NULL_DATAPTR; lowstale--) continue; /* @@ -677,8 +678,8 @@ xfs_dir2_leaf_compact_x1( * Stop if the answer would be worse than lowstale. */ for (highstale = index; - highstale < INT_GET(leaf->hdr.count, ARCH_CONVERT) && - INT_GET(leaf->ents[highstale].address, ARCH_CONVERT) != XFS_DIR2_NULL_DATAPTR && + highstale < be16_to_cpu(leaf->hdr.count) && + be32_to_cpu(leaf->ents[highstale].address) != XFS_DIR2_NULL_DATAPTR && (lowstale < 0 || index - lowstale > highstale - index); highstale++) continue; @@ -686,7 +687,7 @@ xfs_dir2_leaf_compact_x1( * Pick the better of lowstale and highstale. */ if (lowstale >= 0 && - (highstale == INT_GET(leaf->hdr.count, ARCH_CONVERT) || + (highstale == be16_to_cpu(leaf->hdr.count) || index - lowstale <= highstale - index)) keepstale = lowstale; else @@ -695,14 +696,14 @@ xfs_dir2_leaf_compact_x1( * Copy the entries in place, removing all the stale entries * except keepstale. 
*/ - for (from = to = 0; from < INT_GET(leaf->hdr.count, ARCH_CONVERT); from++) { + for (from = to = 0; from < be16_to_cpu(leaf->hdr.count); from++) { /* * Notice the new value of index. */ if (index == from) newindex = to; if (from != keepstale && - INT_GET(leaf->ents[from].address, ARCH_CONVERT) == XFS_DIR2_NULL_DATAPTR) { + be32_to_cpu(leaf->ents[from].address) == XFS_DIR2_NULL_DATAPTR) { if (from == to) *lowlogp = to; continue; @@ -730,8 +731,8 @@ xfs_dir2_leaf_compact_x1( /* * Adjust the leaf header values. */ - INT_MOD(leaf->hdr.count, ARCH_CONVERT, -(from - to)); - INT_SET(leaf->hdr.stale, ARCH_CONVERT, 1); + be16_add(&leaf->hdr.count, -(from - to)); + leaf->hdr.stale = cpu_to_be16(1); /* * Remember the low/high stale value only in the "right" * direction. @@ -739,8 +740,8 @@ xfs_dir2_leaf_compact_x1( if (lowstale >= newindex) lowstale = -1; else - highstale = INT_GET(leaf->hdr.count, ARCH_CONVERT); - *highlogp = INT_GET(leaf->hdr.count, ARCH_CONVERT) - 1; + highstale = be16_to_cpu(leaf->hdr.count); + *highlogp = be16_to_cpu(leaf->hdr.count) - 1; *lowstalep = lowstale; *highstalep = highstale; } @@ -766,7 +767,7 @@ xfs_dir2_leaf_getdents( xfs_dir2_data_entry_t *dep; /* data entry */ xfs_dir2_data_unused_t *dup; /* unused entry */ int eof; /* reached end of directory */ - int error=0; /* error return value */ + int error = 0; /* error return value */ int i; /* temporary loop index */ int j; /* temporary loop index */ int length; /* temporary length value */ @@ -778,8 +779,8 @@ xfs_dir2_leaf_getdents( xfs_mount_t *mp; /* filesystem mount point */ xfs_dir2_off_t newoff; /* new curoff after new blk */ int nmap; /* mappings to ask xfs_bmapi */ - xfs_dir2_put_args_t p; /* formatting arg bundle */ - char *ptr=NULL; /* pointer to current data */ + xfs_dir2_put_args_t *p; /* formatting arg bundle */ + char *ptr = NULL; /* pointer to current data */ int ra_current; /* number of read-ahead blks */ int ra_index; /* *map index for read-ahead */ int ra_offset; /* map entry offset for ra */ @@ -797,9 +798,10 @@ xfs_dir2_leaf_getdents( /* * Setup formatting arguments. */ - p.dbp = dbp; - p.put = put; - p.uio = uio; + p = kmem_alloc(sizeof(*p), KM_SLEEP); + p->dbp = dbp; + p->put = put; + p->uio = uio; /* * Set up to bmap a number of blocks based on the caller's * buffer size, the directory block size, and the filesystem @@ -1046,11 +1048,10 @@ xfs_dir2_leaf_getdents( while ((char *)ptr - (char *)data < byteoff) { dup = (xfs_dir2_data_unused_t *)ptr; - if (INT_GET(dup->freetag, ARCH_CONVERT) + if (be16_to_cpu(dup->freetag) == XFS_DIR2_DATA_FREE_TAG) { - length = INT_GET(dup->length, - ARCH_CONVERT); + length = be16_to_cpu(dup->length); ptr += length; continue; } @@ -1079,9 +1080,8 @@ xfs_dir2_leaf_getdents( /* * No, it's unused, skip over it. 
*/ - if (INT_GET(dup->freetag, ARCH_CONVERT) - == XFS_DIR2_DATA_FREE_TAG) { - length = INT_GET(dup->length, ARCH_CONVERT); + if (be16_to_cpu(dup->freetag) == XFS_DIR2_DATA_FREE_TAG) { + length = be16_to_cpu(dup->length); ptr += length; curoff += length; continue; @@ -1092,24 +1092,24 @@ xfs_dir2_leaf_getdents( */ dep = (xfs_dir2_data_entry_t *)ptr; - p.namelen = dep->namelen; + p->namelen = dep->namelen; - length = XFS_DIR2_DATA_ENTSIZE(p.namelen); + length = XFS_DIR2_DATA_ENTSIZE(p->namelen); - p.cook = XFS_DIR2_BYTE_TO_DATAPTR(mp, curoff + length); + p->cook = XFS_DIR2_BYTE_TO_DATAPTR(mp, curoff + length); - p.ino = INT_GET(dep->inumber, ARCH_CONVERT); + p->ino = INT_GET(dep->inumber, ARCH_CONVERT); #if XFS_BIG_INUMS - p.ino += mp->m_inoadd; + p->ino += mp->m_inoadd; #endif - p.name = (char *)dep->name; + p->name = (char *)dep->name; - error = p.put(&p); + error = p->put(p); /* * Won't fit. Return to caller. */ - if (!p.done) { + if (!p->done) { eof = 0; break; } @@ -1129,6 +1129,7 @@ xfs_dir2_leaf_getdents( else uio->uio_offset = XFS_DIR2_BYTE_TO_DATAPTR(mp, curoff); kmem_free(map, map_size * sizeof(*map)); + kmem_free(p, sizeof(*p)); if (bp) xfs_da_brelse(tp, bp); return error; @@ -1171,7 +1172,7 @@ xfs_dir2_leaf_init( /* * Initialize the header. */ - INT_SET(leaf->hdr.info.magic, ARCH_CONVERT, magic); + leaf->hdr.info.magic = cpu_to_be16(magic); leaf->hdr.info.forw = 0; leaf->hdr.info.back = 0; leaf->hdr.count = 0; @@ -1201,13 +1202,13 @@ xfs_dir2_leaf_log_bests( int first, /* first entry to log */ int last) /* last entry to log */ { - xfs_dir2_data_off_t *firstb; /* pointer to first entry */ - xfs_dir2_data_off_t *lastb; /* pointer to last entry */ + __be16 *firstb; /* pointer to first entry */ + __be16 *lastb; /* pointer to last entry */ xfs_dir2_leaf_t *leaf; /* leaf structure */ xfs_dir2_leaf_tail_t *ltp; /* leaf tail structure */ leaf = bp->data; - ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) == XFS_DIR2_LEAF1_MAGIC); + ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_DIR2_LEAF1_MAGIC); ltp = XFS_DIR2_LEAF_TAIL_P(tp->t_mountp, leaf); firstb = XFS_DIR2_LEAF_BESTS_P(ltp) + first; lastb = XFS_DIR2_LEAF_BESTS_P(ltp) + last; @@ -1230,8 +1231,8 @@ xfs_dir2_leaf_log_ents( xfs_dir2_leaf_t *leaf; /* leaf structure */ leaf = bp->data; - ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) == XFS_DIR2_LEAF1_MAGIC || - INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) == XFS_DIR2_LEAFN_MAGIC); + ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_DIR2_LEAF1_MAGIC || + be16_to_cpu(leaf->hdr.info.magic) == XFS_DIR2_LEAFN_MAGIC); firstlep = &leaf->ents[first]; lastlep = &leaf->ents[last]; xfs_da_log_buf(tp, bp, (uint)((char *)firstlep - (char *)leaf), @@ -1249,8 +1250,8 @@ xfs_dir2_leaf_log_header( xfs_dir2_leaf_t *leaf; /* leaf structure */ leaf = bp->data; - ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) == XFS_DIR2_LEAF1_MAGIC || - INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) == XFS_DIR2_LEAFN_MAGIC); + ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_DIR2_LEAF1_MAGIC || + be16_to_cpu(leaf->hdr.info.magic) == XFS_DIR2_LEAFN_MAGIC); xfs_da_log_buf(tp, bp, (uint)((char *)&leaf->hdr - (char *)leaf), (uint)(sizeof(leaf->hdr) - 1)); } @@ -1269,7 +1270,7 @@ xfs_dir2_leaf_log_tail( mp = tp->t_mountp; leaf = bp->data; - ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) == XFS_DIR2_LEAF1_MAGIC); + ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_DIR2_LEAF1_MAGIC); ltp = XFS_DIR2_LEAF_TAIL_P(mp, leaf); xfs_da_log_buf(tp, bp, (uint)((char *)ltp - (char *)leaf), (uint)(mp->m_dirblksize - 1)); @@ -1314,7 
+1315,7 @@ xfs_dir2_leaf_lookup( */ dep = (xfs_dir2_data_entry_t *) ((char *)dbp->data + - XFS_DIR2_DATAPTR_TO_OFF(dp->i_mount, INT_GET(lep->address, ARCH_CONVERT))); + XFS_DIR2_DATAPTR_TO_OFF(dp->i_mount, be32_to_cpu(lep->address))); /* * Return the found inode number. */ @@ -1373,17 +1374,17 @@ xfs_dir2_leaf_lookup_int( * looking to match the name. */ for (lep = &leaf->ents[index], dbp = NULL, curdb = -1; - index < INT_GET(leaf->hdr.count, ARCH_CONVERT) && INT_GET(lep->hashval, ARCH_CONVERT) == args->hashval; + index < be16_to_cpu(leaf->hdr.count) && be32_to_cpu(lep->hashval) == args->hashval; lep++, index++) { /* * Skip over stale leaf entries. */ - if (INT_GET(lep->address, ARCH_CONVERT) == XFS_DIR2_NULL_DATAPTR) + if (be32_to_cpu(lep->address) == XFS_DIR2_NULL_DATAPTR) continue; /* * Get the new data block number. */ - newdb = XFS_DIR2_DATAPTR_TO_DB(mp, INT_GET(lep->address, ARCH_CONVERT)); + newdb = XFS_DIR2_DATAPTR_TO_DB(mp, be32_to_cpu(lep->address)); /* * If it's not the same as the old data block number, * need to pitch the old one and read the new one. @@ -1406,7 +1407,7 @@ xfs_dir2_leaf_lookup_int( */ dep = (xfs_dir2_data_entry_t *) ((char *)dbp->data + - XFS_DIR2_DATAPTR_TO_OFF(mp, INT_GET(lep->address, ARCH_CONVERT))); + XFS_DIR2_DATAPTR_TO_OFF(mp, be32_to_cpu(lep->address))); /* * If it matches then return it. */ @@ -1435,7 +1436,7 @@ int /* error */ xfs_dir2_leaf_removename( xfs_da_args_t *args) /* operation arguments */ { - xfs_dir2_data_off_t *bestsp; /* leaf block best freespace */ + __be16 *bestsp; /* leaf block best freespace */ xfs_dir2_data_t *data; /* data block structure */ xfs_dir2_db_t db; /* data block number */ xfs_dabuf_t *dbp; /* data block buffer */ @@ -1471,14 +1472,14 @@ xfs_dir2_leaf_removename( * Point to the leaf entry, use that to point to the data entry. */ lep = &leaf->ents[index]; - db = XFS_DIR2_DATAPTR_TO_DB(mp, INT_GET(lep->address, ARCH_CONVERT)); + db = XFS_DIR2_DATAPTR_TO_DB(mp, be32_to_cpu(lep->address)); dep = (xfs_dir2_data_entry_t *) - ((char *)data + XFS_DIR2_DATAPTR_TO_OFF(mp, INT_GET(lep->address, ARCH_CONVERT))); + ((char *)data + XFS_DIR2_DATAPTR_TO_OFF(mp, be32_to_cpu(lep->address))); needscan = needlog = 0; - oldbest = INT_GET(data->hdr.bestfree[0].length, ARCH_CONVERT); + oldbest = be16_to_cpu(data->hdr.bestfree[0].length); ltp = XFS_DIR2_LEAF_TAIL_P(mp, leaf); bestsp = XFS_DIR2_LEAF_BESTS_P(ltp); - ASSERT(INT_GET(bestsp[db], ARCH_CONVERT) == oldbest); + ASSERT(be16_to_cpu(bestsp[db]) == oldbest); /* * Mark the former data entry unused. */ @@ -1488,9 +1489,9 @@ xfs_dir2_leaf_removename( /* * We just mark the leaf entry stale by putting a null in it. */ - INT_MOD(leaf->hdr.stale, ARCH_CONVERT, +1); + be16_add(&leaf->hdr.stale, 1); xfs_dir2_leaf_log_header(tp, lbp); - INT_SET(lep->address, ARCH_CONVERT, XFS_DIR2_NULL_DATAPTR); + lep->address = cpu_to_be32(XFS_DIR2_NULL_DATAPTR); xfs_dir2_leaf_log_ents(tp, lbp, index, index); /* * Scan the freespace in the data block again if necessary, @@ -1504,15 +1505,15 @@ xfs_dir2_leaf_removename( * If the longest freespace in the data block has changed, * put the new value in the bests table and log that. */ - if (INT_GET(data->hdr.bestfree[0].length, ARCH_CONVERT) != oldbest) { - INT_COPY(bestsp[db], data->hdr.bestfree[0].length, ARCH_CONVERT); + if (be16_to_cpu(data->hdr.bestfree[0].length) != oldbest) { + bestsp[db] = data->hdr.bestfree[0].length; xfs_dir2_leaf_log_bests(tp, lbp, db, db); } xfs_dir2_data_check(dp, dbp); /* * If the data block is now empty then get rid of the data block. 
*/ - if (INT_GET(data->hdr.bestfree[0].length, ARCH_CONVERT) == + if (be16_to_cpu(data->hdr.bestfree[0].length) == mp->m_dirblksize - (uint)sizeof(data->hdr)) { ASSERT(db != mp->m_dirdatablk); if ((error = xfs_dir2_shrink_inode(args, db, dbp))) { @@ -1535,12 +1536,12 @@ xfs_dir2_leaf_removename( * If this is the last data block then compact the * bests table by getting rid of entries. */ - if (db == INT_GET(ltp->bestcount, ARCH_CONVERT) - 1) { + if (db == be32_to_cpu(ltp->bestcount) - 1) { /* * Look for the last active entry (i). */ for (i = db - 1; i > 0; i--) { - if (INT_GET(bestsp[i], ARCH_CONVERT) != NULLDATAOFF) + if (be16_to_cpu(bestsp[i]) != NULLDATAOFF) break; } /* @@ -1548,12 +1549,12 @@ xfs_dir2_leaf_removename( * end are removed. */ memmove(&bestsp[db - i], bestsp, - (INT_GET(ltp->bestcount, ARCH_CONVERT) - (db - i)) * sizeof(*bestsp)); - INT_MOD(ltp->bestcount, ARCH_CONVERT, -(db - i)); + (be32_to_cpu(ltp->bestcount) - (db - i)) * sizeof(*bestsp)); + be32_add(&ltp->bestcount, -(db - i)); xfs_dir2_leaf_log_tail(tp, lbp); - xfs_dir2_leaf_log_bests(tp, lbp, 0, INT_GET(ltp->bestcount, ARCH_CONVERT) - 1); + xfs_dir2_leaf_log_bests(tp, lbp, 0, be32_to_cpu(ltp->bestcount) - 1); } else - INT_SET(bestsp[db], ARCH_CONVERT, NULLDATAOFF); + bestsp[db] = cpu_to_be16(NULLDATAOFF); } /* * If the data block was not the first one, drop it. @@ -1604,7 +1605,7 @@ xfs_dir2_leaf_replace( */ dep = (xfs_dir2_data_entry_t *) ((char *)dbp->data + - XFS_DIR2_DATAPTR_TO_OFF(dp->i_mount, INT_GET(lep->address, ARCH_CONVERT))); + XFS_DIR2_DATAPTR_TO_OFF(dp->i_mount, be32_to_cpu(lep->address))); ASSERT(args->inumber != INT_GET(dep->inumber, ARCH_CONVERT)); /* * Put the new inode number in, log it. @@ -1645,11 +1646,11 @@ xfs_dir2_leaf_search_hash( * Note, the table cannot be empty, so we have to go through the loop. * Binary search the leaf entries looking for our hash value. */ - for (lep = leaf->ents, low = 0, high = INT_GET(leaf->hdr.count, ARCH_CONVERT) - 1, + for (lep = leaf->ents, low = 0, high = be16_to_cpu(leaf->hdr.count) - 1, hashwant = args->hashval; low <= high; ) { mid = (low + high) >> 1; - if ((hash = INT_GET(lep[mid].hashval, ARCH_CONVERT)) == hashwant) + if ((hash = be32_to_cpu(lep[mid].hashval)) == hashwant) break; if (hash < hashwant) low = mid + 1; @@ -1660,7 +1661,7 @@ xfs_dir2_leaf_search_hash( * Found one, back up through all the equal hash values. */ if (hash == hashwant) { - while (mid > 0 && INT_GET(lep[mid - 1].hashval, ARCH_CONVERT) == hashwant) { + while (mid > 0 && be32_to_cpu(lep[mid - 1].hashval) == hashwant) { mid--; } } @@ -1682,7 +1683,7 @@ xfs_dir2_leaf_trim_data( xfs_dabuf_t *lbp, /* leaf buffer */ xfs_dir2_db_t db) /* data block number */ { - xfs_dir2_data_off_t *bestsp; /* leaf bests table */ + __be16 *bestsp; /* leaf bests table */ #ifdef DEBUG xfs_dir2_data_t *data; /* data block structure */ #endif @@ -1706,7 +1707,7 @@ xfs_dir2_leaf_trim_data( } #ifdef DEBUG data = dbp->data; - ASSERT(INT_GET(data->hdr.magic, ARCH_CONVERT) == XFS_DIR2_DATA_MAGIC); + ASSERT(be32_to_cpu(data->hdr.magic) == XFS_DIR2_DATA_MAGIC); #endif /* this seems to be an error * data is only valid if DEBUG is defined?
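For readers following these hunks, the conversion pattern is always the same: on-disk fields become __be16/__be32, every read goes through beXX_to_cpu(), every write through cpu_to_beXX(), and in-place arithmetic uses a beXX_add() helper. The stand-alone sketch below illustrates that pattern only; it is not part of the patch. The demo_hdr structure is a made-up stand-in for a leaf header, and the helpers are reimplemented on top of userspace endian.h rather than the kernel's byteorder headers.

/* Minimal userspace sketch of the big-endian accessor pattern used above. */
#include <stdint.h>
#include <stdio.h>
#include <endian.h>

typedef uint16_t __be16;   /* stand-ins for the kernel's bitwise types */
typedef uint32_t __be32;

static inline uint16_t be16_to_cpu(__be16 v) { return be16toh(v); }
static inline __be16   cpu_to_be16(uint16_t v) { return htobe16(v); }
static inline uint32_t be32_to_cpu(__be32 v) { return be32toh(v); }
static inline __be32   cpu_to_be32(uint32_t v) { return htobe32(v); }

/* In-place add on a big-endian field, mirroring the kernel's be16_add(). */
static inline void be16_add(__be16 *p, int delta)
{
	*p = cpu_to_be16(be16_to_cpu(*p) + delta);
}

struct demo_hdr {		/* simplified stand-in for a leaf header */
	__be16 count;
	__be16 stale;
};

int main(void)
{
	struct demo_hdr hdr = {
		.count = cpu_to_be16(5),
		.stale = cpu_to_be16(2),
	};

	be16_add(&hdr.count, 1);	/* one more live entry */
	be16_add(&hdr.stale, -1);	/* one less stale entry */

	printf("count=%u stale=%u\n",
	       be16_to_cpu(hdr.count), be16_to_cpu(hdr.stale));
	return 0;
}

Built with gcc on a little-endian host this prints count=6 stale=1: the arithmetic happens in CPU byte order while the stored bytes stay big-endian, which is exactly what the INT_GET/INT_MOD-to-beXX conversions in this patch preserve.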
@@ -1715,9 +1716,9 @@ xfs_dir2_leaf_trim_data( leaf = lbp->data; ltp = XFS_DIR2_LEAF_TAIL_P(mp, leaf); - ASSERT(INT_GET(data->hdr.bestfree[0].length, ARCH_CONVERT) == + ASSERT(be16_to_cpu(data->hdr.bestfree[0].length) == mp->m_dirblksize - (uint)sizeof(data->hdr)); - ASSERT(db == INT_GET(ltp->bestcount, ARCH_CONVERT) - 1); + ASSERT(db == be32_to_cpu(ltp->bestcount) - 1); /* * Get rid of the data block. */ @@ -1730,10 +1731,10 @@ xfs_dir2_leaf_trim_data( * Eliminate the last bests entry from the table. */ bestsp = XFS_DIR2_LEAF_BESTS_P(ltp); - INT_MOD(ltp->bestcount, ARCH_CONVERT, -1); - memmove(&bestsp[1], &bestsp[0], INT_GET(ltp->bestcount, ARCH_CONVERT) * sizeof(*bestsp)); + be32_add(&ltp->bestcount, -1); + memmove(&bestsp[1], &bestsp[0], be32_to_cpu(ltp->bestcount) * sizeof(*bestsp)); xfs_dir2_leaf_log_tail(tp, lbp); - xfs_dir2_leaf_log_bests(tp, lbp, 0, INT_GET(ltp->bestcount, ARCH_CONVERT) - 1); + xfs_dir2_leaf_log_bests(tp, lbp, 0, be32_to_cpu(ltp->bestcount) - 1); return 0; } @@ -1805,7 +1806,7 @@ xfs_dir2_node_to_leaf( return 0; lbp = state->path.blk[0].bp; leaf = lbp->data; - ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) == XFS_DIR2_LEAFN_MAGIC); + ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_DIR2_LEAFN_MAGIC); /* * Read the freespace block. */ @@ -1814,15 +1815,15 @@ xfs_dir2_node_to_leaf( return error; } free = fbp->data; - ASSERT(INT_GET(free->hdr.magic, ARCH_CONVERT) == XFS_DIR2_FREE_MAGIC); + ASSERT(be32_to_cpu(free->hdr.magic) == XFS_DIR2_FREE_MAGIC); ASSERT(!free->hdr.firstdb); /* * Now see if the leafn and free data will fit in a leaf1. * If not, release the buffer and give up. */ if ((uint)sizeof(leaf->hdr) + - (INT_GET(leaf->hdr.count, ARCH_CONVERT) - INT_GET(leaf->hdr.stale, ARCH_CONVERT)) * (uint)sizeof(leaf->ents[0]) + - INT_GET(free->hdr.nvalid, ARCH_CONVERT) * (uint)sizeof(leaf->bests[0]) + + (be16_to_cpu(leaf->hdr.count) - be16_to_cpu(leaf->hdr.stale)) * (uint)sizeof(leaf->ents[0]) + + be32_to_cpu(free->hdr.nvalid) * (uint)sizeof(leaf->bests[0]) + (uint)sizeof(leaf->tail) > mp->m_dirblksize) { xfs_da_brelse(tp, fbp); @@ -1832,22 +1833,22 @@ xfs_dir2_node_to_leaf( * If the leaf has any stale entries in it, compress them out. * The compact routine will log the header. */ - if (INT_GET(leaf->hdr.stale, ARCH_CONVERT)) + if (be16_to_cpu(leaf->hdr.stale)) xfs_dir2_leaf_compact(args, lbp); else xfs_dir2_leaf_log_header(tp, lbp); - INT_SET(leaf->hdr.info.magic, ARCH_CONVERT, XFS_DIR2_LEAF1_MAGIC); + leaf->hdr.info.magic = cpu_to_be16(XFS_DIR2_LEAF1_MAGIC); /* * Set up the leaf tail from the freespace block. */ ltp = XFS_DIR2_LEAF_TAIL_P(mp, leaf); - INT_COPY(ltp->bestcount, free->hdr.nvalid, ARCH_CONVERT); + ltp->bestcount = free->hdr.nvalid; /* * Set up the leaf bests table.
*/ memcpy(XFS_DIR2_LEAF_BESTS_P(ltp), free->bests, - INT_GET(ltp->bestcount, ARCH_CONVERT) * sizeof(leaf->bests[0])); - xfs_dir2_leaf_log_bests(tp, lbp, 0, INT_GET(ltp->bestcount, ARCH_CONVERT) - 1); + be32_to_cpu(ltp->bestcount) * sizeof(leaf->bests[0])); + xfs_dir2_leaf_log_bests(tp, lbp, 0, be32_to_cpu(ltp->bestcount) - 1); xfs_dir2_leaf_log_tail(tp, lbp); xfs_dir2_leaf_check(dp, lbp); /* diff --git a/fs/xfs/xfs_dir2_leaf.h b/fs/xfs/xfs_dir2_leaf.h index 1393993d61e..f57ca116241 100644 --- a/fs/xfs/xfs_dir2_leaf.h +++ b/fs/xfs/xfs_dir2_leaf.h @@ -46,23 +46,23 @@ typedef __uint32_t xfs_dir2_dataptr_t; */ typedef struct xfs_dir2_leaf_hdr { xfs_da_blkinfo_t info; /* header for da routines */ - __uint16_t count; /* count of entries */ - __uint16_t stale; /* count of stale entries */ + __be16 count; /* count of entries */ + __be16 stale; /* count of stale entries */ } xfs_dir2_leaf_hdr_t; /* * Leaf block entry. */ typedef struct xfs_dir2_leaf_entry { - xfs_dahash_t hashval; /* hash value of name */ - xfs_dir2_dataptr_t address; /* address of data entry */ + __be32 hashval; /* hash value of name */ + __be32 address; /* address of data entry */ } xfs_dir2_leaf_entry_t; /* * Leaf block tail. */ typedef struct xfs_dir2_leaf_tail { - __uint32_t bestcount; + __be32 bestcount; } xfs_dir2_leaf_tail_t; /* @@ -105,11 +105,10 @@ xfs_dir2_leaf_tail_p(struct xfs_mount *mp, xfs_dir2_leaf_t *lp) * Get address of the bests array in the single-leaf block. */ #define XFS_DIR2_LEAF_BESTS_P(ltp) xfs_dir2_leaf_bests_p(ltp) -static inline xfs_dir2_data_off_t * +static inline __be16 * xfs_dir2_leaf_bests_p(xfs_dir2_leaf_tail_t *ltp) { - return (xfs_dir2_data_off_t *) - (ltp) - INT_GET((ltp)->bestcount, ARCH_CONVERT); + return (__be16 *)ltp - be32_to_cpu(ltp->bestcount); } /* diff --git a/fs/xfs/xfs_dir2_node.c b/fs/xfs/xfs_dir2_node.c index 641f8633d25..af556f16a0c 100644 --- a/fs/xfs/xfs_dir2_node.c +++ b/fs/xfs/xfs_dir2_node.c @@ -76,7 +76,7 @@ xfs_dir2_free_log_bests( xfs_dir2_free_t *free; /* freespace structure */ free = bp->data; - ASSERT(INT_GET(free->hdr.magic, ARCH_CONVERT) == XFS_DIR2_FREE_MAGIC); + ASSERT(be32_to_cpu(free->hdr.magic) == XFS_DIR2_FREE_MAGIC); xfs_da_log_buf(tp, bp, (uint)((char *)&free->bests[first] - (char *)free), (uint)((char *)&free->bests[last] - (char *)free + @@ -94,7 +94,7 @@ xfs_dir2_free_log_header( xfs_dir2_free_t *free; /* freespace structure */ free = bp->data; - ASSERT(INT_GET(free->hdr.magic, ARCH_CONVERT) == XFS_DIR2_FREE_MAGIC); + ASSERT(be32_to_cpu(free->hdr.magic) == XFS_DIR2_FREE_MAGIC); xfs_da_log_buf(tp, bp, (uint)((char *)&free->hdr - (char *)free), (uint)(sizeof(xfs_dir2_free_hdr_t) - 1)); } @@ -114,14 +114,14 @@ xfs_dir2_leaf_to_node( xfs_dabuf_t *fbp; /* freespace buffer */ xfs_dir2_db_t fdb; /* freespace block number */ xfs_dir2_free_t *free; /* freespace structure */ - xfs_dir2_data_off_t *from; /* pointer to freespace entry */ + __be16 *from; /* pointer to freespace entry */ int i; /* leaf freespace index */ xfs_dir2_leaf_t *leaf; /* leaf structure */ xfs_dir2_leaf_tail_t *ltp; /* leaf tail structure */ xfs_mount_t *mp; /* filesystem mount point */ int n; /* count of live freespc ents */ xfs_dir2_data_off_t off; /* freespace entry value */ - xfs_dir2_data_off_t *to; /* pointer to freespace entry */ + __be16 *to; /* pointer to freespace entry */ xfs_trans_t *tp; /* transaction pointer */ xfs_dir2_trace_args_b("leaf_to_node", args, lbp); @@ -149,28 +149,28 @@ xfs_dir2_leaf_to_node( /* * Initialize the freespace block header. 
*/ - INT_SET(free->hdr.magic, ARCH_CONVERT, XFS_DIR2_FREE_MAGIC); + free->hdr.magic = cpu_to_be32(XFS_DIR2_FREE_MAGIC); free->hdr.firstdb = 0; - ASSERT(INT_GET(ltp->bestcount, ARCH_CONVERT) <= (uint)dp->i_d.di_size / mp->m_dirblksize); - INT_COPY(free->hdr.nvalid, ltp->bestcount, ARCH_CONVERT); + ASSERT(be32_to_cpu(ltp->bestcount) <= (uint)dp->i_d.di_size / mp->m_dirblksize); + free->hdr.nvalid = ltp->bestcount; /* * Copy freespace entries from the leaf block to the new block. * Count active entries. */ for (i = n = 0, from = XFS_DIR2_LEAF_BESTS_P(ltp), to = free->bests; - i < INT_GET(ltp->bestcount, ARCH_CONVERT); i++, from++, to++) { - if ((off = INT_GET(*from, ARCH_CONVERT)) != NULLDATAOFF) + i < be32_to_cpu(ltp->bestcount); i++, from++, to++) { + if ((off = be16_to_cpu(*from)) != NULLDATAOFF) n++; - INT_SET(*to, ARCH_CONVERT, off); + *to = cpu_to_be16(off); } - INT_SET(free->hdr.nused, ARCH_CONVERT, n); - INT_SET(leaf->hdr.info.magic, ARCH_CONVERT, XFS_DIR2_LEAFN_MAGIC); + free->hdr.nused = cpu_to_be32(n); + leaf->hdr.info.magic = cpu_to_be16(XFS_DIR2_LEAFN_MAGIC); /* * Log everything. */ xfs_dir2_leaf_log_header(tp, lbp); xfs_dir2_free_log_header(tp, fbp); - xfs_dir2_free_log_bests(tp, fbp, 0, INT_GET(free->hdr.nvalid, ARCH_CONVERT) - 1); + xfs_dir2_free_log_bests(tp, fbp, 0, be32_to_cpu(free->hdr.nvalid) - 1); xfs_da_buf_done(fbp); xfs_dir2_leafn_check(dp, lbp); return 0; @@ -217,15 +217,15 @@ xfs_dir2_leafn_add( * a compact. */ - if (INT_GET(leaf->hdr.count, ARCH_CONVERT) == XFS_DIR2_MAX_LEAF_ENTS(mp)) { + if (be16_to_cpu(leaf->hdr.count) == XFS_DIR2_MAX_LEAF_ENTS(mp)) { if (!leaf->hdr.stale) return XFS_ERROR(ENOSPC); - compact = INT_GET(leaf->hdr.stale, ARCH_CONVERT) > 1; + compact = be16_to_cpu(leaf->hdr.stale) > 1; } else compact = 0; - ASSERT(index == 0 || INT_GET(leaf->ents[index - 1].hashval, ARCH_CONVERT) <= args->hashval); - ASSERT(index == INT_GET(leaf->hdr.count, ARCH_CONVERT) || - INT_GET(leaf->ents[index].hashval, ARCH_CONVERT) >= args->hashval); + ASSERT(index == 0 || be32_to_cpu(leaf->ents[index - 1].hashval) <= args->hashval); + ASSERT(index == be16_to_cpu(leaf->hdr.count) || + be32_to_cpu(leaf->ents[index].hashval) >= args->hashval); if (args->justcheck) return 0; @@ -242,7 +242,7 @@ xfs_dir2_leafn_add( * Set impossible logging indices for this case. */ else if (leaf->hdr.stale) { - lfloglow = INT_GET(leaf->hdr.count, ARCH_CONVERT); + lfloglow = be16_to_cpu(leaf->hdr.count); lfloghigh = -1; } /* @@ -250,12 +250,12 @@ xfs_dir2_leafn_add( */ if (!leaf->hdr.stale) { lep = &leaf->ents[index]; - if (index < INT_GET(leaf->hdr.count, ARCH_CONVERT)) + if (index < be16_to_cpu(leaf->hdr.count)) memmove(lep + 1, lep, - (INT_GET(leaf->hdr.count, ARCH_CONVERT) - index) * sizeof(*lep)); + (be16_to_cpu(leaf->hdr.count) - index) * sizeof(*lep)); lfloglow = index; - lfloghigh = INT_GET(leaf->hdr.count, ARCH_CONVERT); - INT_MOD(leaf->hdr.count, ARCH_CONVERT, +1); + lfloghigh = be16_to_cpu(leaf->hdr.count); + be16_add(&leaf->hdr.count, 1); } /* * There are stale entries. We'll use one for the new entry. @@ -271,7 +271,7 @@ xfs_dir2_leafn_add( */ for (lowstale = index - 1; lowstale >= 0 && - INT_GET(leaf->ents[lowstale].address, ARCH_CONVERT) != + be32_to_cpu(leaf->ents[lowstale].address) != XFS_DIR2_NULL_DATAPTR; lowstale--) continue; @@ -281,8 +281,8 @@ xfs_dir2_leafn_add( * lowstale already found. 
*/ for (highstale = index; - highstale < INT_GET(leaf->hdr.count, ARCH_CONVERT) && - INT_GET(leaf->ents[highstale].address, ARCH_CONVERT) != + highstale < be16_to_cpu(leaf->hdr.count) && + be32_to_cpu(leaf->ents[highstale].address) != XFS_DIR2_NULL_DATAPTR && (lowstale < 0 || index - lowstale - 1 >= highstale - index); @@ -294,9 +294,9 @@ xfs_dir2_leafn_add( * Shift entries up toward the stale slot. */ if (lowstale >= 0 && - (highstale == INT_GET(leaf->hdr.count, ARCH_CONVERT) || + (highstale == be16_to_cpu(leaf->hdr.count) || index - lowstale - 1 < highstale - index)) { - ASSERT(INT_GET(leaf->ents[lowstale].address, ARCH_CONVERT) == + ASSERT(be32_to_cpu(leaf->ents[lowstale].address) == XFS_DIR2_NULL_DATAPTR); ASSERT(index - lowstale - 1 >= 0); if (index - lowstale - 1 > 0) @@ -312,7 +312,7 @@ xfs_dir2_leafn_add( * Shift entries down toward the stale slot. */ else { - ASSERT(INT_GET(leaf->ents[highstale].address, ARCH_CONVERT) == + ASSERT(be32_to_cpu(leaf->ents[highstale].address) == XFS_DIR2_NULL_DATAPTR); ASSERT(highstale - index >= 0); if (highstale - index > 0) @@ -323,13 +323,14 @@ xfs_dir2_leafn_add( lfloglow = MIN(index, lfloglow); lfloghigh = MAX(highstale, lfloghigh); } - INT_MOD(leaf->hdr.stale, ARCH_CONVERT, -1); + be16_add(&leaf->hdr.stale, -1); } /* * Insert the new entry, log everything. */ - INT_SET(lep->hashval, ARCH_CONVERT, args->hashval); - INT_SET(lep->address, ARCH_CONVERT, XFS_DIR2_DB_OFF_TO_DATAPTR(mp, args->blkno, args->index)); + lep->hashval = cpu_to_be32(args->hashval); + lep->address = cpu_to_be32(XFS_DIR2_DB_OFF_TO_DATAPTR(mp, + args->blkno, args->index)); xfs_dir2_leaf_log_header(tp, bp); xfs_dir2_leaf_log_ents(tp, bp, lfloglow, lfloghigh); xfs_dir2_leafn_check(dp, bp); @@ -352,17 +353,17 @@ xfs_dir2_leafn_check( leaf = bp->data; mp = dp->i_mount; - ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) == XFS_DIR2_LEAFN_MAGIC); - ASSERT(INT_GET(leaf->hdr.count, ARCH_CONVERT) <= XFS_DIR2_MAX_LEAF_ENTS(mp)); - for (i = stale = 0; i < INT_GET(leaf->hdr.count, ARCH_CONVERT); i++) { - if (i + 1 < INT_GET(leaf->hdr.count, ARCH_CONVERT)) { - ASSERT(INT_GET(leaf->ents[i].hashval, ARCH_CONVERT) <= - INT_GET(leaf->ents[i + 1].hashval, ARCH_CONVERT)); + ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_DIR2_LEAFN_MAGIC); + ASSERT(be16_to_cpu(leaf->hdr.count) <= XFS_DIR2_MAX_LEAF_ENTS(mp)); + for (i = stale = 0; i < be16_to_cpu(leaf->hdr.count); i++) { + if (i + 1 < be16_to_cpu(leaf->hdr.count)) { + ASSERT(be32_to_cpu(leaf->ents[i].hashval) <= + be32_to_cpu(leaf->ents[i + 1].hashval)); } - if (INT_GET(leaf->ents[i].address, ARCH_CONVERT) == XFS_DIR2_NULL_DATAPTR) + if (be32_to_cpu(leaf->ents[i].address) == XFS_DIR2_NULL_DATAPTR) stale++; } - ASSERT(INT_GET(leaf->hdr.stale, ARCH_CONVERT) == stale); + ASSERT(be16_to_cpu(leaf->hdr.stale) == stale); } #endif /* DEBUG */ @@ -378,12 +379,12 @@ xfs_dir2_leafn_lasthash( xfs_dir2_leaf_t *leaf; /* leaf structure */ leaf = bp->data; - ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) == XFS_DIR2_LEAFN_MAGIC); + ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_DIR2_LEAFN_MAGIC); if (count) - *count = INT_GET(leaf->hdr.count, ARCH_CONVERT); + *count = be16_to_cpu(leaf->hdr.count); if (!leaf->hdr.count) return 0; - return INT_GET(leaf->ents[INT_GET(leaf->hdr.count, ARCH_CONVERT) - 1].hashval, ARCH_CONVERT); + return be32_to_cpu(leaf->ents[be16_to_cpu(leaf->hdr.count) - 1].hashval); } /* @@ -419,9 +420,9 @@ xfs_dir2_leafn_lookup_int( tp = args->trans; mp = dp->i_mount; leaf = bp->data; - ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) == 
XFS_DIR2_LEAFN_MAGIC); + ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_DIR2_LEAFN_MAGIC); #ifdef __KERNEL__ - ASSERT(INT_GET(leaf->hdr.count, ARCH_CONVERT) > 0); + ASSERT(be16_to_cpu(leaf->hdr.count) > 0); #endif xfs_dir2_leafn_check(dp, bp); /* @@ -443,7 +444,7 @@ xfs_dir2_leafn_lookup_int( curdb = -1; length = XFS_DIR2_DATA_ENTSIZE(args->namelen); if ((free = (curbp ? curbp->data : NULL))) - ASSERT(INT_GET(free->hdr.magic, ARCH_CONVERT) == XFS_DIR2_FREE_MAGIC); + ASSERT(be32_to_cpu(free->hdr.magic) == XFS_DIR2_FREE_MAGIC); } /* * For others, it's a data block buffer, get the block number. @@ -456,17 +457,17 @@ xfs_dir2_leafn_lookup_int( * Loop over leaf entries with the right hash value. */ for (lep = &leaf->ents[index]; - index < INT_GET(leaf->hdr.count, ARCH_CONVERT) && INT_GET(lep->hashval, ARCH_CONVERT) == args->hashval; + index < be16_to_cpu(leaf->hdr.count) && be32_to_cpu(lep->hashval) == args->hashval; lep++, index++) { /* * Skip stale leaf entries. */ - if (INT_GET(lep->address, ARCH_CONVERT) == XFS_DIR2_NULL_DATAPTR) + if (be32_to_cpu(lep->address) == XFS_DIR2_NULL_DATAPTR) continue; /* * Pull the data block number from the entry. */ - newdb = XFS_DIR2_DATAPTR_TO_DB(mp, INT_GET(lep->address, ARCH_CONVERT)); + newdb = XFS_DIR2_DATAPTR_TO_DB(mp, be32_to_cpu(lep->address)); /* * For addname, we're looking for a place to put the new entry. * We want to use a data block with an entry of equal @@ -506,15 +507,15 @@ xfs_dir2_leafn_lookup_int( } curfdb = newfdb; free = curbp->data; - ASSERT(INT_GET(free->hdr.magic, ARCH_CONVERT) == + ASSERT(be32_to_cpu(free->hdr.magic) == XFS_DIR2_FREE_MAGIC); - ASSERT((INT_GET(free->hdr.firstdb, ARCH_CONVERT) % + ASSERT((be32_to_cpu(free->hdr.firstdb) % XFS_DIR2_MAX_FREE_BESTS(mp)) == 0); - ASSERT(INT_GET(free->hdr.firstdb, ARCH_CONVERT) <= curdb); + ASSERT(be32_to_cpu(free->hdr.firstdb) <= curdb); ASSERT(curdb < - INT_GET(free->hdr.firstdb, ARCH_CONVERT) + - INT_GET(free->hdr.nvalid, ARCH_CONVERT)); + be32_to_cpu(free->hdr.firstdb) + + be32_to_cpu(free->hdr.nvalid)); } /* * Get the index for our entry. @@ -523,12 +524,12 @@ xfs_dir2_leafn_lookup_int( /* * If it has room, return it. */ - if (unlikely(INT_GET(free->bests[fi], ARCH_CONVERT) == NULLDATAOFF)) { + if (unlikely(be16_to_cpu(free->bests[fi]) == NULLDATAOFF)) { XFS_ERROR_REPORT("xfs_dir2_leafn_lookup_int", XFS_ERRLEVEL_LOW, mp); return XFS_ERROR(EFSCORRUPTED); } - if (INT_GET(free->bests[fi], ARCH_CONVERT) >= length) { + if (be16_to_cpu(free->bests[fi]) >= length) { *indexp = index; state->extravalid = 1; state->extrablk.bp = curbp; @@ -572,7 +573,7 @@ xfs_dir2_leafn_lookup_int( */ dep = (xfs_dir2_data_entry_t *) ((char *)curbp->data + - XFS_DIR2_DATAPTR_TO_OFF(mp, INT_GET(lep->address, ARCH_CONVERT))); + XFS_DIR2_DATAPTR_TO_OFF(mp, be32_to_cpu(lep->address))); /* * Compare the entry, return it if it matches. */ @@ -619,7 +620,7 @@ xfs_dir2_leafn_lookup_int( * Return the final index, that will be the insertion point. */ *indexp = index; - ASSERT(index == INT_GET(leaf->hdr.count, ARCH_CONVERT) || args->oknoent); + ASSERT(index == be16_to_cpu(leaf->hdr.count) || args->oknoent); return XFS_ERROR(ENOENT); } @@ -657,12 +658,12 @@ xfs_dir2_leafn_moveents( * destination leaf entries, open up a hole in the destination * to hold the new entries. 
*/ - if (start_d < INT_GET(leaf_d->hdr.count, ARCH_CONVERT)) { + if (start_d < be16_to_cpu(leaf_d->hdr.count)) { memmove(&leaf_d->ents[start_d + count], &leaf_d->ents[start_d], - (INT_GET(leaf_d->hdr.count, ARCH_CONVERT) - start_d) * + (be16_to_cpu(leaf_d->hdr.count) - start_d) * sizeof(xfs_dir2_leaf_entry_t)); xfs_dir2_leaf_log_ents(tp, bp_d, start_d + count, - count + INT_GET(leaf_d->hdr.count, ARCH_CONVERT) - 1); + count + be16_to_cpu(leaf_d->hdr.count) - 1); } /* * If the source has stale leaves, count the ones in the copy range @@ -672,7 +673,7 @@ xfs_dir2_leafn_moveents( int i; /* temp leaf index */ for (i = start_s, stale = 0; i < start_s + count; i++) { - if (INT_GET(leaf_s->ents[i].address, ARCH_CONVERT) == XFS_DIR2_NULL_DATAPTR) + if (be32_to_cpu(leaf_s->ents[i].address) == XFS_DIR2_NULL_DATAPTR) stale++; } } else @@ -687,7 +688,7 @@ xfs_dir2_leafn_moveents( * If there are source entries after the ones we copied, * delete the ones we copied by sliding the next ones down. */ - if (start_s + count < INT_GET(leaf_s->hdr.count, ARCH_CONVERT)) { + if (start_s + count < be16_to_cpu(leaf_s->hdr.count)) { memmove(&leaf_s->ents[start_s], &leaf_s->ents[start_s + count], count * sizeof(xfs_dir2_leaf_entry_t)); xfs_dir2_leaf_log_ents(tp, bp_s, start_s, start_s + count - 1); @@ -695,10 +696,10 @@ xfs_dir2_leafn_moveents( /* * Update the headers and log them. */ - INT_MOD(leaf_s->hdr.count, ARCH_CONVERT, -(count)); - INT_MOD(leaf_s->hdr.stale, ARCH_CONVERT, -(stale)); - INT_MOD(leaf_d->hdr.count, ARCH_CONVERT, count); - INT_MOD(leaf_d->hdr.stale, ARCH_CONVERT, stale); + be16_add(&leaf_s->hdr.count, -(count)); + be16_add(&leaf_s->hdr.stale, -(stale)); + be16_add(&leaf_d->hdr.count, count); + be16_add(&leaf_d->hdr.stale, stale); xfs_dir2_leaf_log_header(tp, bp_s); xfs_dir2_leaf_log_header(tp, bp_d); xfs_dir2_leafn_check(args->dp, bp_s); @@ -719,13 +720,13 @@ xfs_dir2_leafn_order( leaf1 = leaf1_bp->data; leaf2 = leaf2_bp->data; - ASSERT(INT_GET(leaf1->hdr.info.magic, ARCH_CONVERT) == XFS_DIR2_LEAFN_MAGIC); - ASSERT(INT_GET(leaf2->hdr.info.magic, ARCH_CONVERT) == XFS_DIR2_LEAFN_MAGIC); - if (INT_GET(leaf1->hdr.count, ARCH_CONVERT) > 0 && - INT_GET(leaf2->hdr.count, ARCH_CONVERT) > 0 && - (INT_GET(leaf2->ents[0].hashval, ARCH_CONVERT) < INT_GET(leaf1->ents[0].hashval, ARCH_CONVERT) || - INT_GET(leaf2->ents[INT_GET(leaf2->hdr.count, ARCH_CONVERT) - 1].hashval, ARCH_CONVERT) < - INT_GET(leaf1->ents[INT_GET(leaf1->hdr.count, ARCH_CONVERT) - 1].hashval, ARCH_CONVERT))) + ASSERT(be16_to_cpu(leaf1->hdr.info.magic) == XFS_DIR2_LEAFN_MAGIC); + ASSERT(be16_to_cpu(leaf2->hdr.info.magic) == XFS_DIR2_LEAFN_MAGIC); + if (be16_to_cpu(leaf1->hdr.count) > 0 && + be16_to_cpu(leaf2->hdr.count) > 0 && + (be32_to_cpu(leaf2->ents[0].hashval) < be32_to_cpu(leaf1->ents[0].hashval) || + be32_to_cpu(leaf2->ents[be16_to_cpu(leaf2->hdr.count) - 1].hashval) < + be32_to_cpu(leaf1->ents[be16_to_cpu(leaf1->hdr.count) - 1].hashval))) return 1; return 0; } @@ -768,9 +769,9 @@ xfs_dir2_leafn_rebalance( } leaf1 = blk1->bp->data; leaf2 = blk2->bp->data; - oldsum = INT_GET(leaf1->hdr.count, ARCH_CONVERT) + INT_GET(leaf2->hdr.count, ARCH_CONVERT); + oldsum = be16_to_cpu(leaf1->hdr.count) + be16_to_cpu(leaf2->hdr.count); #ifdef DEBUG - oldstale = INT_GET(leaf1->hdr.stale, ARCH_CONVERT) + INT_GET(leaf2->hdr.stale, ARCH_CONVERT); + oldstale = be16_to_cpu(leaf1->hdr.stale) + be16_to_cpu(leaf2->hdr.stale); #endif mid = oldsum >> 1; /* @@ -780,10 +781,10 @@ xfs_dir2_leafn_rebalance( if (oldsum & 1) { xfs_dahash_t midhash; /* middle entry 
hash value */ - if (mid >= INT_GET(leaf1->hdr.count, ARCH_CONVERT)) - midhash = INT_GET(leaf2->ents[mid - INT_GET(leaf1->hdr.count, ARCH_CONVERT)].hashval, ARCH_CONVERT); + if (mid >= be16_to_cpu(leaf1->hdr.count)) + midhash = be32_to_cpu(leaf2->ents[mid - be16_to_cpu(leaf1->hdr.count)].hashval); else - midhash = INT_GET(leaf1->ents[mid].hashval, ARCH_CONVERT); + midhash = be32_to_cpu(leaf1->ents[mid].hashval); isleft = args->hashval <= midhash; } /* @@ -797,30 +798,30 @@ xfs_dir2_leafn_rebalance( * Calculate moved entry count. Positive means left-to-right, * negative means right-to-left. Then move the entries. */ - count = INT_GET(leaf1->hdr.count, ARCH_CONVERT) - mid + (isleft == 0); + count = be16_to_cpu(leaf1->hdr.count) - mid + (isleft == 0); if (count > 0) xfs_dir2_leafn_moveents(args, blk1->bp, - INT_GET(leaf1->hdr.count, ARCH_CONVERT) - count, blk2->bp, 0, count); + be16_to_cpu(leaf1->hdr.count) - count, blk2->bp, 0, count); else if (count < 0) xfs_dir2_leafn_moveents(args, blk2->bp, 0, blk1->bp, - INT_GET(leaf1->hdr.count, ARCH_CONVERT), count); - ASSERT(INT_GET(leaf1->hdr.count, ARCH_CONVERT) + INT_GET(leaf2->hdr.count, ARCH_CONVERT) == oldsum); - ASSERT(INT_GET(leaf1->hdr.stale, ARCH_CONVERT) + INT_GET(leaf2->hdr.stale, ARCH_CONVERT) == oldstale); + be16_to_cpu(leaf1->hdr.count), count); + ASSERT(be16_to_cpu(leaf1->hdr.count) + be16_to_cpu(leaf2->hdr.count) == oldsum); + ASSERT(be16_to_cpu(leaf1->hdr.stale) + be16_to_cpu(leaf2->hdr.stale) == oldstale); /* * Mark whether we're inserting into the old or new leaf. */ - if (INT_GET(leaf1->hdr.count, ARCH_CONVERT) < INT_GET(leaf2->hdr.count, ARCH_CONVERT)) + if (be16_to_cpu(leaf1->hdr.count) < be16_to_cpu(leaf2->hdr.count)) state->inleaf = swap; - else if (INT_GET(leaf1->hdr.count, ARCH_CONVERT) > INT_GET(leaf2->hdr.count, ARCH_CONVERT)) + else if (be16_to_cpu(leaf1->hdr.count) > be16_to_cpu(leaf2->hdr.count)) state->inleaf = !swap; else state->inleaf = - swap ^ (blk1->index <= INT_GET(leaf1->hdr.count, ARCH_CONVERT)); + swap ^ (blk1->index <= be16_to_cpu(leaf1->hdr.count)); /* * Adjust the expected index for insertion. */ if (!state->inleaf) - blk2->index = blk1->index - INT_GET(leaf1->hdr.count, ARCH_CONVERT); + blk2->index = blk1->index - be16_to_cpu(leaf1->hdr.count); /* * Finally sanity check just to make sure we are not returning a negative index @@ -867,7 +868,7 @@ xfs_dir2_leafn_remove( tp = args->trans; mp = dp->i_mount; leaf = bp->data; - ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) == XFS_DIR2_LEAFN_MAGIC); + ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_DIR2_LEAFN_MAGIC); /* * Point to the entry we're removing. */ @@ -875,17 +876,17 @@ xfs_dir2_leafn_remove( /* * Extract the data block and offset from the entry. */ - db = XFS_DIR2_DATAPTR_TO_DB(mp, INT_GET(lep->address, ARCH_CONVERT)); + db = XFS_DIR2_DATAPTR_TO_DB(mp, be32_to_cpu(lep->address)); ASSERT(dblk->blkno == db); - off = XFS_DIR2_DATAPTR_TO_OFF(mp, INT_GET(lep->address, ARCH_CONVERT)); + off = XFS_DIR2_DATAPTR_TO_OFF(mp, be32_to_cpu(lep->address)); ASSERT(dblk->index == off); /* * Kill the leaf entry by marking it stale. * Log the leaf block changes. */ - INT_MOD(leaf->hdr.stale, ARCH_CONVERT, +1); + be16_add(&leaf->hdr.stale, 1); xfs_dir2_leaf_log_header(tp, bp); - INT_SET(lep->address, ARCH_CONVERT, XFS_DIR2_NULL_DATAPTR); + lep->address = cpu_to_be32(XFS_DIR2_NULL_DATAPTR); xfs_dir2_leaf_log_ents(tp, bp, index, index); /* * Make the data entry free. 
Keep track of the longest freespace @@ -894,7 +895,7 @@ xfs_dir2_leafn_remove( dbp = dblk->bp; data = dbp->data; dep = (xfs_dir2_data_entry_t *)((char *)data + off); - longest = INT_GET(data->hdr.bestfree[0].length, ARCH_CONVERT); + longest = be16_to_cpu(data->hdr.bestfree[0].length); needlog = needscan = 0; xfs_dir2_data_make_free(tp, dbp, off, XFS_DIR2_DATA_ENTSIZE(dep->namelen), &needlog, &needscan); @@ -911,7 +912,7 @@ xfs_dir2_leafn_remove( * If the longest data block freespace changes, need to update * the corresponding freeblock entry. */ - if (longest < INT_GET(data->hdr.bestfree[0].length, ARCH_CONVERT)) { + if (longest < be16_to_cpu(data->hdr.bestfree[0].length)) { int error; /* error return value */ xfs_dabuf_t *fbp; /* freeblock buffer */ xfs_dir2_db_t fdb; /* freeblock block number */ @@ -929,15 +930,15 @@ xfs_dir2_leafn_remove( return error; } free = fbp->data; - ASSERT(INT_GET(free->hdr.magic, ARCH_CONVERT) == XFS_DIR2_FREE_MAGIC); - ASSERT(INT_GET(free->hdr.firstdb, ARCH_CONVERT) == + ASSERT(be32_to_cpu(free->hdr.magic) == XFS_DIR2_FREE_MAGIC); + ASSERT(be32_to_cpu(free->hdr.firstdb) == XFS_DIR2_MAX_FREE_BESTS(mp) * (fdb - XFS_DIR2_FREE_FIRSTDB(mp))); /* * Calculate which entry we need to fix. */ findex = XFS_DIR2_DB_TO_FDINDEX(mp, db); - longest = INT_GET(data->hdr.bestfree[0].length, ARCH_CONVERT); + longest = be16_to_cpu(data->hdr.bestfree[0].length); /* * If the data block is now empty we can get rid of it * (usually). @@ -969,7 +970,7 @@ xfs_dir2_leafn_remove( /* * One less used entry in the free table. */ - INT_MOD(free->hdr.nused, ARCH_CONVERT, -1); + free->hdr.nused = cpu_to_be32(-1); xfs_dir2_free_log_header(tp, fbp); /* * If this was the last entry in the table, we can @@ -977,21 +978,21 @@ xfs_dir2_leafn_remove( * entries at the end referring to non-existent * data blocks, get those too. */ - if (findex == INT_GET(free->hdr.nvalid, ARCH_CONVERT) - 1) { + if (findex == be32_to_cpu(free->hdr.nvalid) - 1) { int i; /* free entry index */ for (i = findex - 1; - i >= 0 && INT_GET(free->bests[i], ARCH_CONVERT) == NULLDATAOFF; + i >= 0 && be16_to_cpu(free->bests[i]) == NULLDATAOFF; i--) continue; - INT_SET(free->hdr.nvalid, ARCH_CONVERT, i + 1); + free->hdr.nvalid = cpu_to_be32(i + 1); logfree = 0; } /* * Not the last entry, just punch it out. */ else { - INT_SET(free->bests[findex], ARCH_CONVERT, NULLDATAOFF); + free->bests[findex] = cpu_to_be16(NULLDATAOFF); logfree = 1; } /* @@ -1017,7 +1018,7 @@ xfs_dir2_leafn_remove( * the new value. 
*/ else { - INT_SET(free->bests[findex], ARCH_CONVERT, longest); + free->bests[findex] = cpu_to_be16(longest); logfree = 1; } /* @@ -1039,7 +1040,7 @@ xfs_dir2_leafn_remove( *rval = ((uint)sizeof(leaf->hdr) + (uint)sizeof(leaf->ents[0]) * - (INT_GET(leaf->hdr.count, ARCH_CONVERT) - INT_GET(leaf->hdr.stale, ARCH_CONVERT))) < + (be16_to_cpu(leaf->hdr.count) - be16_to_cpu(leaf->hdr.stale))) < mp->m_dir_magicpct; return 0; } @@ -1138,9 +1139,9 @@ xfs_dir2_leafn_toosmall( */ blk = &state->path.blk[state->path.active - 1]; info = blk->bp->data; - ASSERT(INT_GET(info->magic, ARCH_CONVERT) == XFS_DIR2_LEAFN_MAGIC); + ASSERT(be16_to_cpu(info->magic) == XFS_DIR2_LEAFN_MAGIC); leaf = (xfs_dir2_leaf_t *)info; - count = INT_GET(leaf->hdr.count, ARCH_CONVERT) - INT_GET(leaf->hdr.stale, ARCH_CONVERT); + count = be16_to_cpu(leaf->hdr.count) - be16_to_cpu(leaf->hdr.stale); bytes = (uint)sizeof(leaf->hdr) + count * (uint)sizeof(leaf->ents[0]); if (bytes > (state->blocksize >> 1)) { /* @@ -1160,7 +1161,7 @@ xfs_dir2_leafn_toosmall( * Make altpath point to the block we want to keep and * path point to the block we want to drop (this one). */ - forward = info->forw; + forward = (info->forw != 0); memcpy(&state->altpath, &state->path, sizeof(state->path)); error = xfs_da_path_shift(state, &state->altpath, forward, 0, &rval); @@ -1176,9 +1177,9 @@ xfs_dir2_leafn_toosmall( * We prefer coalescing with the lower numbered sibling so as * to shrink a directory over time. */ - forward = INT_GET(info->forw, ARCH_CONVERT) < INT_GET(info->back, ARCH_CONVERT); + forward = be32_to_cpu(info->forw) < be32_to_cpu(info->back); for (i = 0, bp = NULL; i < 2; forward = !forward, i++) { - blkno = forward ?INT_GET( info->forw, ARCH_CONVERT) : INT_GET(info->back, ARCH_CONVERT); + blkno = forward ? be32_to_cpu(info->forw) : be32_to_cpu(info->back); if (blkno == 0) continue; /* @@ -1194,11 +1195,11 @@ xfs_dir2_leafn_toosmall( * Count bytes in the two blocks combined. */ leaf = (xfs_dir2_leaf_t *)info; - count = INT_GET(leaf->hdr.count, ARCH_CONVERT) - INT_GET(leaf->hdr.stale, ARCH_CONVERT); + count = be16_to_cpu(leaf->hdr.count) - be16_to_cpu(leaf->hdr.stale); bytes = state->blocksize - (state->blocksize >> 2); leaf = bp->data; - ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) == XFS_DIR2_LEAFN_MAGIC); - count += INT_GET(leaf->hdr.count, ARCH_CONVERT) - INT_GET(leaf->hdr.stale, ARCH_CONVERT); + ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_DIR2_LEAFN_MAGIC); + count += be16_to_cpu(leaf->hdr.count) - be16_to_cpu(leaf->hdr.stale); bytes -= count * (uint)sizeof(leaf->ents[0]); /* * Fits with at least 25% to spare. @@ -1256,27 +1257,27 @@ xfs_dir2_leafn_unbalance( ASSERT(save_blk->magic == XFS_DIR2_LEAFN_MAGIC); drop_leaf = drop_blk->bp->data; save_leaf = save_blk->bp->data; - ASSERT(INT_GET(drop_leaf->hdr.info.magic, ARCH_CONVERT) == XFS_DIR2_LEAFN_MAGIC); - ASSERT(INT_GET(save_leaf->hdr.info.magic, ARCH_CONVERT) == XFS_DIR2_LEAFN_MAGIC); + ASSERT(be16_to_cpu(drop_leaf->hdr.info.magic) == XFS_DIR2_LEAFN_MAGIC); + ASSERT(be16_to_cpu(save_leaf->hdr.info.magic) == XFS_DIR2_LEAFN_MAGIC); /* * If there are any stale leaf entries, take this opportunity * to purge them. */ - if (INT_GET(drop_leaf->hdr.stale, ARCH_CONVERT)) + if (drop_leaf->hdr.stale) xfs_dir2_leaf_compact(args, drop_blk->bp); - if (INT_GET(save_leaf->hdr.stale, ARCH_CONVERT)) + if (save_leaf->hdr.stale) xfs_dir2_leaf_compact(args, save_blk->bp); /* * Move the entries from drop to the appropriate end of save. 
*/ - drop_blk->hashval = INT_GET(drop_leaf->ents[INT_GET(drop_leaf->hdr.count, ARCH_CONVERT) - 1].hashval, ARCH_CONVERT); + drop_blk->hashval = be32_to_cpu(drop_leaf->ents[be16_to_cpu(drop_leaf->hdr.count) - 1].hashval); if (xfs_dir2_leafn_order(save_blk->bp, drop_blk->bp)) xfs_dir2_leafn_moveents(args, drop_blk->bp, 0, save_blk->bp, 0, - INT_GET(drop_leaf->hdr.count, ARCH_CONVERT)); + be16_to_cpu(drop_leaf->hdr.count)); else xfs_dir2_leafn_moveents(args, drop_blk->bp, 0, save_blk->bp, - INT_GET(save_leaf->hdr.count, ARCH_CONVERT), INT_GET(drop_leaf->hdr.count, ARCH_CONVERT)); - save_blk->hashval = INT_GET(save_leaf->ents[INT_GET(save_leaf->hdr.count, ARCH_CONVERT) - 1].hashval, ARCH_CONVERT); + be16_to_cpu(save_leaf->hdr.count), be16_to_cpu(drop_leaf->hdr.count)); + save_blk->hashval = be32_to_cpu(save_leaf->ents[be16_to_cpu(save_leaf->hdr.count) - 1].hashval); xfs_dir2_leafn_check(args->dp, save_blk->bp); } @@ -1378,7 +1379,7 @@ xfs_dir2_node_addname_int( xfs_mount_t *mp; /* filesystem mount point */ int needlog; /* need to log data header */ int needscan; /* need to rescan data frees */ - xfs_dir2_data_off_t *tagp; /* data entry tag pointer */ + __be16 *tagp; /* data entry tag pointer */ xfs_trans_t *tp; /* transaction pointer */ dp = args->dp; @@ -1397,7 +1398,7 @@ xfs_dir2_node_addname_int( */ ifbno = fblk->blkno; free = fbp->data; - ASSERT(INT_GET(free->hdr.magic, ARCH_CONVERT) == XFS_DIR2_FREE_MAGIC); + ASSERT(be32_to_cpu(free->hdr.magic) == XFS_DIR2_FREE_MAGIC); findex = fblk->index; /* * This means the free entry showed that the data block had @@ -1405,10 +1406,10 @@ xfs_dir2_node_addname_int( * Use that data block. */ if (findex >= 0) { - ASSERT(findex < INT_GET(free->hdr.nvalid, ARCH_CONVERT)); - ASSERT(INT_GET(free->bests[findex], ARCH_CONVERT) != NULLDATAOFF); - ASSERT(INT_GET(free->bests[findex], ARCH_CONVERT) >= length); - dbno = INT_GET(free->hdr.firstdb, ARCH_CONVERT) + findex; + ASSERT(findex < be32_to_cpu(free->hdr.nvalid)); + ASSERT(be16_to_cpu(free->bests[findex]) != NULLDATAOFF); + ASSERT(be16_to_cpu(free->bests[findex]) >= length); + dbno = be32_to_cpu(free->hdr.firstdb) + findex; } /* * The data block looked at didn't have enough room. @@ -1481,20 +1482,20 @@ xfs_dir2_node_addname_int( continue; } free = fbp->data; - ASSERT(INT_GET(free->hdr.magic, ARCH_CONVERT) == XFS_DIR2_FREE_MAGIC); + ASSERT(be32_to_cpu(free->hdr.magic) == XFS_DIR2_FREE_MAGIC); findex = 0; } /* * Look at the current free entry. Is it good enough? */ - if (INT_GET(free->bests[findex], ARCH_CONVERT) != NULLDATAOFF && - INT_GET(free->bests[findex], ARCH_CONVERT) >= length) - dbno = INT_GET(free->hdr.firstdb, ARCH_CONVERT) + findex; + if (be16_to_cpu(free->bests[findex]) != NULLDATAOFF && + be16_to_cpu(free->bests[findex]) >= length) + dbno = be32_to_cpu(free->hdr.firstdb) + findex; else { /* * Are we done with the freeblock? */ - if (++findex == INT_GET(free->hdr.nvalid, ARCH_CONVERT)) { + if (++findex == be32_to_cpu(free->hdr.nvalid)) { /* * Drop the block. */ @@ -1608,15 +1609,15 @@ xfs_dir2_node_addname_int( * its first slot as our empty slot. 
*/ free = fbp->data; - INT_SET(free->hdr.magic, ARCH_CONVERT, XFS_DIR2_FREE_MAGIC); - INT_SET(free->hdr.firstdb, ARCH_CONVERT, + free->hdr.magic = cpu_to_be32(XFS_DIR2_FREE_MAGIC); + free->hdr.firstdb = cpu_to_be32( (fbno - XFS_DIR2_FREE_FIRSTDB(mp)) * XFS_DIR2_MAX_FREE_BESTS(mp)); free->hdr.nvalid = 0; free->hdr.nused = 0; } else { free = fbp->data; - ASSERT(INT_GET(free->hdr.magic, ARCH_CONVERT) == XFS_DIR2_FREE_MAGIC); + ASSERT(be32_to_cpu(free->hdr.magic) == XFS_DIR2_FREE_MAGIC); } /* @@ -1627,20 +1628,20 @@ xfs_dir2_node_addname_int( * If it's after the end of the current entries in the * freespace block, extend that table. */ - if (findex >= INT_GET(free->hdr.nvalid, ARCH_CONVERT)) { + if (findex >= be32_to_cpu(free->hdr.nvalid)) { ASSERT(findex < XFS_DIR2_MAX_FREE_BESTS(mp)); - INT_SET(free->hdr.nvalid, ARCH_CONVERT, findex + 1); + free->hdr.nvalid = cpu_to_be32(findex + 1); /* * Tag new entry so nused will go up. */ - INT_SET(free->bests[findex], ARCH_CONVERT, NULLDATAOFF); + free->bests[findex] = cpu_to_be16(NULLDATAOFF); } /* * If this entry was for an empty data block * (this should always be true) then update the header. */ - if (INT_GET(free->bests[findex], ARCH_CONVERT) == NULLDATAOFF) { - INT_MOD(free->hdr.nused, ARCH_CONVERT, +1); + if (be16_to_cpu(free->bests[findex]) == NULLDATAOFF) { + be32_add(&free->hdr.nused, 1); xfs_dir2_free_log_header(tp, fbp); } /* @@ -1649,7 +1650,7 @@ xfs_dir2_node_addname_int( * change again. */ data = dbp->data; - INT_COPY(free->bests[findex], data->hdr.bestfree[0].length, ARCH_CONVERT); + free->bests[findex] = data->hdr.bestfree[0].length; logfree = 1; } /* @@ -1677,12 +1678,12 @@ xfs_dir2_node_addname_int( data = dbp->data; logfree = 0; } - ASSERT(INT_GET(data->hdr.bestfree[0].length, ARCH_CONVERT) >= length); + ASSERT(be16_to_cpu(data->hdr.bestfree[0].length) >= length); /* * Point to the existing unused space. */ dup = (xfs_dir2_data_unused_t *) - ((char *)data + INT_GET(data->hdr.bestfree[0].offset, ARCH_CONVERT)); + ((char *)data + be16_to_cpu(data->hdr.bestfree[0].offset)); needscan = needlog = 0; /* * Mark the first part of the unused space, inuse for us. @@ -1698,7 +1699,7 @@ xfs_dir2_node_addname_int( dep->namelen = args->namelen; memcpy(dep->name, args->name, dep->namelen); tagp = XFS_DIR2_DATA_ENTRY_TAG_P(dep); - INT_SET(*tagp, ARCH_CONVERT, (xfs_dir2_data_off_t)((char *)dep - (char *)data)); + *tagp = cpu_to_be16((char *)dep - (char *)data); xfs_dir2_data_log_entry(tp, dbp, dep); /* * Rescan the block for bestfree if needed. @@ -1713,8 +1714,8 @@ xfs_dir2_node_addname_int( /* * If the freespace entry is now wrong, update it. */ - if (INT_GET(free->bests[findex], ARCH_CONVERT) != INT_GET(data->hdr.bestfree[0].length, ARCH_CONVERT)) { - INT_COPY(free->bests[findex], data->hdr.bestfree[0].length, ARCH_CONVERT); + if (be16_to_cpu(free->bests[findex]) != be16_to_cpu(data->hdr.bestfree[0].length)) { + free->bests[findex] = data->hdr.bestfree[0].length; logfree = 1; } /* @@ -1731,7 +1732,7 @@ xfs_dir2_node_addname_int( * Return the data block and offset in args, then drop the data block. */ args->blkno = (xfs_dablk_t)dbno; - args->index = INT_GET(*tagp, ARCH_CONVERT); + args->index = be16_to_cpu(*tagp); xfs_da_buf_done(dbp); return 0; } @@ -1900,10 +1901,10 @@ xfs_dir2_node_replace( * Point to the data entry. 
*/ data = state->extrablk.bp->data; - ASSERT(INT_GET(data->hdr.magic, ARCH_CONVERT) == XFS_DIR2_DATA_MAGIC); + ASSERT(be32_to_cpu(data->hdr.magic) == XFS_DIR2_DATA_MAGIC); dep = (xfs_dir2_data_entry_t *) ((char *)data + - XFS_DIR2_DATAPTR_TO_OFF(state->mp, INT_GET(lep->address, ARCH_CONVERT))); + XFS_DIR2_DATAPTR_TO_OFF(state->mp, be32_to_cpu(lep->address))); ASSERT(inum != INT_GET(dep->inumber, ARCH_CONVERT)); /* * Fill in the new inode number and log the entry. @@ -1966,11 +1967,11 @@ xfs_dir2_node_trim_free( return 0; } free = bp->data; - ASSERT(INT_GET(free->hdr.magic, ARCH_CONVERT) == XFS_DIR2_FREE_MAGIC); + ASSERT(be32_to_cpu(free->hdr.magic) == XFS_DIR2_FREE_MAGIC); /* * If there are used entries, there's nothing to do. */ - if (INT_GET(free->hdr.nused, ARCH_CONVERT) > 0) { + if (be32_to_cpu(free->hdr.nused) > 0) { xfs_da_brelse(tp, bp); *rvalp = 0; return 0; diff --git a/fs/xfs/xfs_dir2_node.h b/fs/xfs/xfs_dir2_node.h index 0ab8fbd5951..c7c870ee785 100644 --- a/fs/xfs/xfs_dir2_node.h +++ b/fs/xfs/xfs_dir2_node.h @@ -41,15 +41,15 @@ struct xfs_trans; #define XFS_DIR2_FREE_MAGIC 0x58443246 /* XD2F */ typedef struct xfs_dir2_free_hdr { - __uint32_t magic; /* XFS_DIR2_FREE_MAGIC */ - __int32_t firstdb; /* db of first entry */ - __int32_t nvalid; /* count of valid entries */ - __int32_t nused; /* count of used entries */ + __be32 magic; /* XFS_DIR2_FREE_MAGIC */ + __be32 firstdb; /* db of first entry */ + __be32 nvalid; /* count of valid entries */ + __be32 nused; /* count of used entries */ } xfs_dir2_free_hdr_t; typedef struct xfs_dir2_free { xfs_dir2_free_hdr_t hdr; /* block header */ - xfs_dir2_data_off_t bests[1]; /* best free counts */ + __be16 bests[1]; /* best free counts */ /* unused entries are -1 */ } xfs_dir2_free_t; diff --git a/fs/xfs/xfs_dir2_sf.c b/fs/xfs/xfs_dir2_sf.c index ec8e7476c8b..d98a41d1fe6 100644 --- a/fs/xfs/xfs_dir2_sf.c +++ b/fs/xfs/xfs_dir2_sf.c @@ -98,8 +98,8 @@ xfs_dir2_block_sfsize( /* * Iterate over the block's data entries by using the leaf pointers. */ - for (i = 0; i < INT_GET(btp->count, ARCH_CONVERT); i++) { - if ((addr = INT_GET(blp[i].address, ARCH_CONVERT)) == XFS_DIR2_NULL_DATAPTR) + for (i = 0; i < be32_to_cpu(btp->count); i++) { + if ((addr = be32_to_cpu(blp[i].address)) == XFS_DIR2_NULL_DATAPTR) continue; /* * Calculate the pointer to the entry at hand. @@ -220,8 +220,8 @@ xfs_dir2_block_to_sf( * If it's unused, just skip over it. 
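
xfs_dir2_node.h now types the free-block header fields as __be32 and the bests[] table as __be16, so the on-disk byte order is visible in the declarations; bests[1] is the usual "fixed header followed by a variable-length table" trick. A plain C sketch of that layout pattern, using a C99 flexible array member and made-up field names rather than the real on-disk format:

#include <stdint.h>
#include <stdlib.h>
#include <stdio.h>

/* Illustrative header-plus-trailing-table layout, in the style of
 * xfs_dir2_free_t: a fixed header followed by a variable "bests" table. */
struct demo_free {
	uint32_t magic;
	int32_t  nvalid;	/* entries in bests[] */
	uint16_t bests[];	/* C99 flexible array member */
};

int main(void)
{
	int n = 8;
	struct demo_free *f = malloc(sizeof(*f) + n * sizeof(f->bests[0]));

	if (!f)
		return 1;
	f->magic = 0x58443246;	/* "XD2F" */
	f->nvalid = n;
	for (int i = 0; i < n; i++)
		f->bests[i] = 0xffff;	/* NULLDATAOFF-style "unused" marker */
	printf("%d best-free slots initialised\n", (int)f->nvalid);
	free(f);
	return 0;
}
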
*/ dup = (xfs_dir2_data_unused_t *)ptr; - if (INT_GET(dup->freetag, ARCH_CONVERT) == XFS_DIR2_DATA_FREE_TAG) { - ptr += INT_GET(dup->length, ARCH_CONVERT); + if (be16_to_cpu(dup->freetag) == XFS_DIR2_DATA_FREE_TAG) { + ptr += be16_to_cpu(dup->length); continue; } dep = (xfs_dir2_data_entry_t *)ptr; diff --git a/fs/xfs/xfs_dir_leaf.c b/fs/xfs/xfs_dir_leaf.c index e83074016ab..ee88751c3be 100644 --- a/fs/xfs/xfs_dir_leaf.c +++ b/fs/xfs/xfs_dir_leaf.c @@ -176,7 +176,7 @@ xfs_dir_shortform_addname(xfs_da_args_t *args) ASSERT(dp->i_df.if_u1.if_data != NULL); sf = (xfs_dir_shortform_t *)dp->i_df.if_u1.if_data; sfe = &sf->list[0]; - for (i = INT_GET(sf->hdr.count, ARCH_CONVERT)-1; i >= 0; i--) { + for (i = sf->hdr.count-1; i >= 0; i--) { if (sfe->namelen == args->namelen && args->name[0] == sfe->name[0] && memcmp(args->name, sfe->name, args->namelen) == 0) @@ -193,7 +193,7 @@ xfs_dir_shortform_addname(xfs_da_args_t *args) XFS_DIR_SF_PUT_DIRINO(&args->inumber, &sfe->inumber); sfe->namelen = args->namelen; memcpy(sfe->name, args->name, sfe->namelen); - INT_MOD(sf->hdr.count, ARCH_CONVERT, +1); + sf->hdr.count++; dp->i_d.di_size += size; xfs_trans_log_inode(args->trans, dp, XFS_ILOG_CORE | XFS_ILOG_DDATA); @@ -227,7 +227,7 @@ xfs_dir_shortform_removename(xfs_da_args_t *args) base = sizeof(xfs_dir_sf_hdr_t); sf = (xfs_dir_shortform_t *)dp->i_df.if_u1.if_data; sfe = &sf->list[0]; - for (i = INT_GET(sf->hdr.count, ARCH_CONVERT)-1; i >= 0; i--) { + for (i = sf->hdr.count-1; i >= 0; i--) { size = XFS_DIR_SF_ENTSIZE_BYENTRY(sfe); if (sfe->namelen == args->namelen && sfe->name[0] == args->name[0] && @@ -245,7 +245,7 @@ xfs_dir_shortform_removename(xfs_da_args_t *args) memmove(&((char *)sf)[base], &((char *)sf)[base+size], dp->i_d.di_size - (base+size)); } - INT_MOD(sf->hdr.count, ARCH_CONVERT, -1); + sf->hdr.count--; xfs_idata_realloc(dp, -size, XFS_DATA_FORK); dp->i_d.di_size -= size; @@ -288,7 +288,7 @@ xfs_dir_shortform_lookup(xfs_da_args_t *args) return(XFS_ERROR(EEXIST)); } sfe = &sf->list[0]; - for (i = INT_GET(sf->hdr.count, ARCH_CONVERT)-1; i >= 0; i--) { + for (i = sf->hdr.count-1; i >= 0; i--) { if (sfe->namelen == args->namelen && sfe->name[0] == args->name[0] && memcmp(args->name, sfe->name, args->namelen) == 0) { @@ -375,7 +375,7 @@ xfs_dir_shortform_to_leaf(xfs_da_args_t *iargs) goto out; sfe = &sf->list[0]; - for (i = 0; i < INT_GET(sf->hdr.count, ARCH_CONVERT); i++) { + for (i = 0; i < sf->hdr.count; i++) { args.name = (char *)(sfe->name); args.namelen = sfe->namelen; args.hashval = xfs_da_hashname((char *)(sfe->name), @@ -428,7 +428,7 @@ xfs_dir_shortform_getdents(xfs_inode_t *dp, uio_t *uio, int *eofp, sf = (xfs_dir_shortform_t *)dp->i_df.if_u1.if_data; cookhash = XFS_DA_COOKIE_HASH(mp, uio->uio_offset); want_entno = XFS_DA_COOKIE_ENTRY(mp, uio->uio_offset); - nsbuf = INT_GET(sf->hdr.count, ARCH_CONVERT) + 2; + nsbuf = sf->hdr.count + 2; sbsize = (nsbuf + 1) * sizeof(*sbuf); sbp = sbuf = kmem_alloc(sbsize, KM_SLEEP); @@ -460,8 +460,7 @@ xfs_dir_shortform_getdents(xfs_inode_t *dp, uio_t *uio, int *eofp, /* * Scan the directory data for the rest of the entries. 
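
The shortform remove path above now reads sf->hdr.count directly (it is a single byte) and closes the hole left by the removed name with one memmove over the packed entry buffer before shrinking the fork. The buffer step is just "slide everything after base+size down to base"; a small standalone sketch with an invented packed layout:

#include <string.h>
#include <assert.h>
#include <stddef.h>

/* Remove 'size' bytes at offset 'base' from a packed buffer of
 * 'total' bytes, in the style of xfs_dir_shortform_removename(). */
static size_t demo_remove_entry(char *buf, size_t total, size_t base, size_t size)
{
	assert(base + size <= total);
	memmove(&buf[base], &buf[base + size], total - (base + size));
	return total - size;	/* new logical length */
}

int main(void)
{
	char buf[] = "AAABBBBCC";		/* three packed "entries" */
	size_t len = sizeof(buf) - 1;

	len = demo_remove_entry(buf, len, 3, 4);	/* drop the "BBBB" entry */
	return (len == 5 && memcmp(buf, "AAACC", 5) == 0) ? 0 : 1;
}
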
*/ - for (i = 0, sfe = &sf->list[0]; - i < INT_GET(sf->hdr.count, ARCH_CONVERT); i++) { + for (i = 0, sfe = &sf->list[0]; i < sf->hdr.count; i++) { if (unlikely( ((char *)sfe < (char *)sf) || @@ -600,7 +599,7 @@ xfs_dir_shortform_replace(xfs_da_args_t *args) } ASSERT(args->namelen != 1 || args->name[0] != '.'); sfe = &sf->list[0]; - for (i = INT_GET(sf->hdr.count, ARCH_CONVERT)-1; i >= 0; i--) { + for (i = sf->hdr.count-1; i >= 0; i--) { if (sfe->namelen == args->namelen && sfe->name[0] == args->name[0] && memcmp(args->name, sfe->name, args->namelen) == 0) { @@ -644,7 +643,7 @@ xfs_dir_leaf_to_shortform(xfs_da_args_t *iargs) ASSERT(bp != NULL); memcpy(tmpbuffer, bp->data, XFS_LBSIZE(dp->i_mount)); leaf = (xfs_dir_leafblock_t *)tmpbuffer; - ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) == XFS_DIR_LEAF_MAGIC); + ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_DIR_LEAF_MAGIC); memset(bp->data, 0, XFS_LBSIZE(dp->i_mount)); /* @@ -742,11 +741,13 @@ xfs_dir_leaf_to_node(xfs_da_args_t *args) } node = bp1->data; leaf = bp2->data; - ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) == XFS_DIR_LEAF_MAGIC); - INT_SET(node->btree[0].hashval, ARCH_CONVERT, INT_GET(leaf->entries[ INT_GET(leaf->hdr.count, ARCH_CONVERT)-1 ].hashval, ARCH_CONVERT)); + ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_DIR_LEAF_MAGIC); + node->btree[0].hashval = cpu_to_be32( + INT_GET(leaf->entries[ + INT_GET(leaf->hdr.count, ARCH_CONVERT)-1].hashval, ARCH_CONVERT)); xfs_da_buf_done(bp2); - INT_SET(node->btree[0].before, ARCH_CONVERT, blkno); - INT_SET(node->hdr.count, ARCH_CONVERT, 1); + node->btree[0].before = cpu_to_be32(blkno); + node->hdr.count = cpu_to_be16(1); xfs_da_log_buf(args->trans, bp1, XFS_DA_LOGRANGE(node, &node->btree[0], sizeof(node->btree[0]))); xfs_da_buf_done(bp1); @@ -781,7 +782,7 @@ xfs_dir_leaf_create(xfs_da_args_t *args, xfs_dablk_t blkno, xfs_dabuf_t **bpp) leaf = bp->data; memset((char *)leaf, 0, XFS_LBSIZE(dp->i_mount)); hdr = &leaf->hdr; - INT_SET(hdr->info.magic, ARCH_CONVERT, XFS_DIR_LEAF_MAGIC); + hdr->info.magic = cpu_to_be16(XFS_DIR_LEAF_MAGIC); INT_SET(hdr->firstused, ARCH_CONVERT, XFS_LBSIZE(dp->i_mount)); if (!hdr->firstused) INT_SET(hdr->firstused, ARCH_CONVERT, XFS_LBSIZE(dp->i_mount) - 1); @@ -860,7 +861,7 @@ xfs_dir_leaf_add(xfs_dabuf_t *bp, xfs_da_args_t *args, int index) int tablesize, entsize, sum, i, tmp, error; leaf = bp->data; - ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) == XFS_DIR_LEAF_MAGIC); + ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_DIR_LEAF_MAGIC); ASSERT((index >= 0) && (index <= INT_GET(leaf->hdr.count, ARCH_CONVERT))); hdr = &leaf->hdr; entsize = XFS_DIR_LEAF_ENTSIZE_BYNAME(args->namelen); @@ -940,7 +941,7 @@ xfs_dir_leaf_add_work(xfs_dabuf_t *bp, xfs_da_args_t *args, int index, int tmp, i; leaf = bp->data; - ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) == XFS_DIR_LEAF_MAGIC); + ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_DIR_LEAF_MAGIC); hdr = &leaf->hdr; ASSERT((mapindex >= 0) && (mapindex < XFS_DIR_LEAF_MAPSIZE)); ASSERT((index >= 0) && (index <= INT_GET(hdr->count, ARCH_CONVERT))); @@ -1097,8 +1098,8 @@ xfs_dir_leaf_rebalance(xfs_da_state_t *state, xfs_da_state_blk_t *blk1, ASSERT(blk2->magic == XFS_DIR_LEAF_MAGIC); leaf1 = blk1->bp->data; leaf2 = blk2->bp->data; - ASSERT(INT_GET(leaf1->hdr.info.magic, ARCH_CONVERT) == XFS_DIR_LEAF_MAGIC); - ASSERT(INT_GET(leaf2->hdr.info.magic, ARCH_CONVERT) == XFS_DIR_LEAF_MAGIC); + ASSERT(be16_to_cpu(leaf1->hdr.info.magic) == XFS_DIR_LEAF_MAGIC); + ASSERT(be16_to_cpu(leaf2->hdr.info.magic) == 
XFS_DIR_LEAF_MAGIC); /* * Check ordering of blocks, reverse if it makes things simpler. @@ -1325,7 +1326,7 @@ xfs_dir_leaf_toosmall(xfs_da_state_t *state, int *action) */ blk = &state->path.blk[ state->path.active-1 ]; info = blk->bp->data; - ASSERT(INT_GET(info->magic, ARCH_CONVERT) == XFS_DIR_LEAF_MAGIC); + ASSERT(be16_to_cpu(info->magic) == XFS_DIR_LEAF_MAGIC); leaf = (xfs_dir_leafblock_t *)info; count = INT_GET(leaf->hdr.count, ARCH_CONVERT); bytes = (uint)sizeof(xfs_dir_leaf_hdr_t) + @@ -1348,7 +1349,7 @@ xfs_dir_leaf_toosmall(xfs_da_state_t *state, int *action) * Make altpath point to the block we want to keep and * path point to the block we want to drop (this one). */ - forward = info->forw; + forward = (info->forw != 0); memcpy(&state->altpath, &state->path, sizeof(state->path)); error = xfs_da_path_shift(state, &state->altpath, forward, 0, &retval); @@ -1369,12 +1370,12 @@ xfs_dir_leaf_toosmall(xfs_da_state_t *state, int *action) * We prefer coalescing with the lower numbered sibling so as * to shrink a directory over time. */ - forward = (INT_GET(info->forw, ARCH_CONVERT) < INT_GET(info->back, ARCH_CONVERT)); /* start with smaller blk num */ + forward = (be32_to_cpu(info->forw) < be32_to_cpu(info->back)); /* start with smaller blk num */ for (i = 0; i < 2; forward = !forward, i++) { if (forward) - blkno = INT_GET(info->forw, ARCH_CONVERT); + blkno = be32_to_cpu(info->forw); else - blkno = INT_GET(info->back, ARCH_CONVERT); + blkno = be32_to_cpu(info->back); if (blkno == 0) continue; error = xfs_da_read_buf(state->args->trans, state->args->dp, @@ -1389,7 +1390,7 @@ xfs_dir_leaf_toosmall(xfs_da_state_t *state, int *action) bytes = state->blocksize - (state->blocksize>>2); bytes -= INT_GET(leaf->hdr.namebytes, ARCH_CONVERT); leaf = bp->data; - ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) == XFS_DIR_LEAF_MAGIC); + ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_DIR_LEAF_MAGIC); count += INT_GET(leaf->hdr.count, ARCH_CONVERT); bytes -= INT_GET(leaf->hdr.namebytes, ARCH_CONVERT); bytes -= count * ((uint)sizeof(xfs_dir_leaf_name_t) - 1); @@ -1447,7 +1448,7 @@ xfs_dir_leaf_remove(xfs_trans_t *trans, xfs_dabuf_t *bp, int index) xfs_mount_t *mp; leaf = bp->data; - ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) == XFS_DIR_LEAF_MAGIC); + ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_DIR_LEAF_MAGIC); hdr = &leaf->hdr; mp = trans->t_mountp; ASSERT((INT_GET(hdr->count, ARCH_CONVERT) > 0) && (INT_GET(hdr->count, ARCH_CONVERT) < (XFS_LBSIZE(mp)/8))); @@ -1599,8 +1600,8 @@ xfs_dir_leaf_unbalance(xfs_da_state_t *state, xfs_da_state_blk_t *drop_blk, ASSERT(save_blk->magic == XFS_DIR_LEAF_MAGIC); drop_leaf = drop_blk->bp->data; save_leaf = save_blk->bp->data; - ASSERT(INT_GET(drop_leaf->hdr.info.magic, ARCH_CONVERT) == XFS_DIR_LEAF_MAGIC); - ASSERT(INT_GET(save_leaf->hdr.info.magic, ARCH_CONVERT) == XFS_DIR_LEAF_MAGIC); + ASSERT(be16_to_cpu(drop_leaf->hdr.info.magic) == XFS_DIR_LEAF_MAGIC); + ASSERT(be16_to_cpu(save_leaf->hdr.info.magic) == XFS_DIR_LEAF_MAGIC); drop_hdr = &drop_leaf->hdr; save_hdr = &save_leaf->hdr; @@ -1695,7 +1696,7 @@ xfs_dir_leaf_lookup_int(xfs_dabuf_t *bp, xfs_da_args_t *args, int *index) xfs_dahash_t hashval; leaf = bp->data; - ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) == XFS_DIR_LEAF_MAGIC); + ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_DIR_LEAF_MAGIC); ASSERT(INT_GET(leaf->hdr.count, ARCH_CONVERT) < (XFS_LBSIZE(args->dp->i_mount)/8)); /* @@ -1782,8 +1783,8 @@ xfs_dir_leaf_moveents(xfs_dir_leafblock_t *leaf_s, int start_s, /* * Set up environment. 
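
xfs_dir_leaf_toosmall() above gets two fixes: "forward = info->forw" becomes a proper boolean test, and the sibling block numbers are read with be32_to_cpu() before being compared. The surrounding policy is to try the lower-numbered sibling first (so directories shrink toward the front) and fall back to the other side; the control-flow shape, reduced to a sketch with invented block numbers and a toy fits() predicate:

#include <stdio.h>
#include <stdbool.h>

/* Try to coalesce with a neighbour, preferring the lower-numbered block
 * (same "for (i = 0; i < 2; forward = !forward, i++)" shape as the patch). */
static long pick_merge_candidate(long forw, long back, bool (*fits)(long))
{
	bool forward = (forw < back);	/* start with the smaller block number */

	for (int i = 0; i < 2; forward = !forward, i++) {
		long blkno = forward ? forw : back;

		if (blkno == 0)
			continue;	/* no sibling on this side */
		if (fits(blkno))
			return blkno;
	}
	return 0;	/* no suitable neighbour, caller keeps the block */
}

static bool demo_fits(long blkno)
{
	return blkno == 12;	/* pretend only block 12 has enough room */
}

int main(void)
{
	printf("merge with block %ld\n", pick_merge_candidate(12, 7, demo_fits));
	return 0;
}
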
*/ - ASSERT(INT_GET(leaf_s->hdr.info.magic, ARCH_CONVERT) == XFS_DIR_LEAF_MAGIC); - ASSERT(INT_GET(leaf_d->hdr.info.magic, ARCH_CONVERT) == XFS_DIR_LEAF_MAGIC); + ASSERT(be16_to_cpu(leaf_s->hdr.info.magic) == XFS_DIR_LEAF_MAGIC); + ASSERT(be16_to_cpu(leaf_d->hdr.info.magic) == XFS_DIR_LEAF_MAGIC); hdr_s = &leaf_s->hdr; hdr_d = &leaf_d->hdr; ASSERT((INT_GET(hdr_s->count, ARCH_CONVERT) > 0) && (INT_GET(hdr_s->count, ARCH_CONVERT) < (XFS_LBSIZE(mp)/8))); @@ -1883,8 +1884,8 @@ xfs_dir_leaf_order(xfs_dabuf_t *leaf1_bp, xfs_dabuf_t *leaf2_bp) leaf1 = leaf1_bp->data; leaf2 = leaf2_bp->data; - ASSERT((INT_GET(leaf1->hdr.info.magic, ARCH_CONVERT) == XFS_DIR_LEAF_MAGIC) && - (INT_GET(leaf2->hdr.info.magic, ARCH_CONVERT) == XFS_DIR_LEAF_MAGIC)); + ASSERT((be16_to_cpu(leaf1->hdr.info.magic) == XFS_DIR_LEAF_MAGIC) && + (be16_to_cpu(leaf2->hdr.info.magic) == XFS_DIR_LEAF_MAGIC)); if ((INT_GET(leaf1->hdr.count, ARCH_CONVERT) > 0) && (INT_GET(leaf2->hdr.count, ARCH_CONVERT) > 0) && ((INT_GET(leaf2->entries[ 0 ].hashval, ARCH_CONVERT) < INT_GET(leaf1->entries[ 0 ].hashval, ARCH_CONVERT)) || @@ -1904,7 +1905,7 @@ xfs_dir_leaf_lasthash(xfs_dabuf_t *bp, int *count) xfs_dir_leafblock_t *leaf; leaf = bp->data; - ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) == XFS_DIR_LEAF_MAGIC); + ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_DIR_LEAF_MAGIC); if (count) *count = INT_GET(leaf->hdr.count, ARCH_CONVERT); if (!leaf->hdr.count) @@ -1940,7 +1941,7 @@ xfs_dir_leaf_getdents_int( mp = dp->i_mount; leaf = bp->data; - if (INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) != XFS_DIR_LEAF_MAGIC) { + if (be16_to_cpu(leaf->hdr.info.magic) != XFS_DIR_LEAF_MAGIC) { *eobp = 1; return XFS_ERROR(ENOENT); /* XXX wrong code */ } @@ -1992,7 +1993,7 @@ xfs_dir_leaf_getdents_int( if (i == INT_GET(leaf->hdr.count, ARCH_CONVERT)) { xfs_dir_trace_g_du("leaf: hash not found", dp, uio); - if (!INT_GET(leaf->hdr.info.forw, ARCH_CONVERT)) + if (!leaf->hdr.info.forw) uio->uio_offset = XFS_DA_MAKE_COOKIE(mp, 0, 0, XFS_DA_MAXHASH); /* @@ -2047,8 +2048,7 @@ xfs_dir_leaf_getdents_int( xfs_dir_trace_g_duc("leaf: middle cookie ", dp, uio, p.cook.o); - } else if ((thishash = INT_GET(leaf->hdr.info.forw, - ARCH_CONVERT))) { + } else if ((thishash = be32_to_cpu(leaf->hdr.info.forw))) { xfs_dabuf_t *bp2; xfs_dir_leafblock_t *leaf2; @@ -2064,9 +2064,9 @@ xfs_dir_leaf_getdents_int( leaf2 = bp2->data; if (unlikely( - (INT_GET(leaf2->hdr.info.magic, ARCH_CONVERT) + (be16_to_cpu(leaf2->hdr.info.magic) != XFS_DIR_LEAF_MAGIC) - || (INT_GET(leaf2->hdr.info.back, ARCH_CONVERT) + || (be32_to_cpu(leaf2->hdr.info.back) != bno))) { /* GROT */ XFS_CORRUPTION_ERROR("xfs_dir_leaf_getdents_int(3)", XFS_ERRLEVEL_LOW, mp, diff --git a/fs/xfs/xfs_dir_sf.h b/fs/xfs/xfs_dir_sf.h index fe44c6f4d56..5b20b4d3f57 100644 --- a/fs/xfs/xfs_dir_sf.h +++ b/fs/xfs/xfs_dir_sf.h @@ -35,19 +35,21 @@ typedef struct { __uint8_t i[sizeof(xfs_ino_t)]; } xfs_dir_ino_t; * and the elements much be memcpy'd out into a work area to get correct * alignment for the inode number fields. 
*/ +typedef struct xfs_dir_sf_hdr { /* constant-structure header block */ + xfs_dir_ino_t parent; /* parent dir inode number */ + __uint8_t count; /* count of active entries */ +} xfs_dir_sf_hdr_t; + +typedef struct xfs_dir_sf_entry { + xfs_dir_ino_t inumber; /* referenced inode number */ + __uint8_t namelen; /* actual length of name (no NULL) */ + __uint8_t name[1]; /* name */ +} xfs_dir_sf_entry_t; + typedef struct xfs_dir_shortform { - struct xfs_dir_sf_hdr { /* constant-structure header block */ - xfs_dir_ino_t parent; /* parent dir inode number */ - __uint8_t count; /* count of active entries */ - } hdr; - struct xfs_dir_sf_entry { - xfs_dir_ino_t inumber; /* referenced inode number */ - __uint8_t namelen; /* actual length of name (no NULL) */ - __uint8_t name[1]; /* name */ - } list[1]; /* variable sized array */ + xfs_dir_sf_hdr_t hdr; + xfs_dir_sf_entry_t list[1]; /* variable sized array */ } xfs_dir_shortform_t; -typedef struct xfs_dir_sf_hdr xfs_dir_sf_hdr_t; -typedef struct xfs_dir_sf_entry xfs_dir_sf_entry_t; /* * We generate this then sort it, so that readdirs are returned in diff --git a/fs/xfs/xfs_dmapi.h b/fs/xfs/xfs_dmapi.h index b4c7f2bc55a..00b1540f810 100644 --- a/fs/xfs/xfs_dmapi.h +++ b/fs/xfs/xfs_dmapi.h @@ -191,14 +191,4 @@ typedef enum { extern struct bhv_vfsops xfs_dmops; -#ifdef CONFIG_XFS_DMAPI -void xfs_dm_init(struct file_system_type *); -void xfs_dm_exit(struct file_system_type *); -#define XFS_DM_INIT(fstype) xfs_dm_init(fstype) -#define XFS_DM_EXIT(fstype) xfs_dm_exit(fstype) -#else -#define XFS_DM_INIT(fstype) -#define XFS_DM_EXIT(fstype) -#endif - #endif /* __XFS_DMAPI_H__ */ diff --git a/fs/xfs/xfs_fsops.c b/fs/xfs/xfs_fsops.c index b4d971b0158..56caa88713a 100644 --- a/fs/xfs/xfs_fsops.c +++ b/fs/xfs/xfs_fsops.c @@ -462,6 +462,7 @@ xfs_fs_counts( { unsigned long s; + xfs_icsb_sync_counters_lazy(mp); s = XFS_SB_LOCK(mp); cnt->freedata = mp->m_sb.sb_fdblocks; cnt->freertx = mp->m_sb.sb_frextents; diff --git a/fs/xfs/xfs_ialloc.c b/fs/xfs/xfs_ialloc.c index 8f3fae1aa98..0024892841a 100644 --- a/fs/xfs/xfs_ialloc.c +++ b/fs/xfs/xfs_ialloc.c @@ -138,8 +138,6 @@ xfs_ialloc_ag_alloc( int version; /* inode version number to use */ int isaligned; /* inode allocation at stripe unit */ /* boundary */ - xfs_dinode_core_t dic; /* a dinode_core to copy to new */ - /* inodes */ args.tp = tp; args.mp = tp->t_mountp; @@ -250,10 +248,6 @@ xfs_ialloc_ag_alloc( else version = XFS_DINODE_VERSION_1; - memset(&dic, 0, sizeof(xfs_dinode_core_t)); - INT_SET(dic.di_magic, ARCH_CONVERT, XFS_DINODE_MAGIC); - INT_SET(dic.di_version, ARCH_CONVERT, version); - for (j = 0; j < nbufs; j++) { /* * Get the block. @@ -266,12 +260,13 @@ xfs_ialloc_ag_alloc( ASSERT(fbuf); ASSERT(!XFS_BUF_GETERROR(fbuf)); /* - * Loop over the inodes in this buffer. + * Set initial values for the inodes in this buffer. 
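
xfs_fs_counts() above gains a call to xfs_icsb_sync_counters_lazy(mp) before sampling sb_fdblocks and friends, so the per-CPU in-core superblock counters are folded back into the global values before they are reported. The general pattern (fold per-CPU deltas, then read under the lock) looks roughly like the sketch below; the structure and names are invented for illustration and are not the real xfs_icsb code:

#include <pthread.h>
#include <stdio.h>

#define DEMO_NCPUS 4

/* Toy "per-CPU" counter: each slot accumulates a delta locally and a
 * sync step folds the deltas into the global value before it is read. */
struct demo_counters {
	pthread_mutex_t lock;
	long		global_free;
	long		percpu_delta[DEMO_NCPUS];
};

static void demo_sync_counters(struct demo_counters *c)
{
	pthread_mutex_lock(&c->lock);
	for (int i = 0; i < DEMO_NCPUS; i++) {
		c->global_free += c->percpu_delta[i];
		c->percpu_delta[i] = 0;
	}
	pthread_mutex_unlock(&c->lock);
}

static long demo_report_free(struct demo_counters *c)
{
	long val;

	demo_sync_counters(c);		/* like the sync-before-report step */
	pthread_mutex_lock(&c->lock);	/* like taking the superblock lock */
	val = c->global_free;
	pthread_mutex_unlock(&c->lock);
	return val;
}

int main(void)
{
	struct demo_counters c = { PTHREAD_MUTEX_INITIALIZER, 100, { 5, -2, 0, 7 } };

	printf("free blocks = %ld\n", demo_report_free(&c));	/* prints 110 */
	return 0;
}
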
*/ - + xfs_biozero(fbuf, 0, ninodes << args.mp->m_sb.sb_inodelog); for (i = 0; i < ninodes; i++) { free = XFS_MAKE_IPTR(args.mp, fbuf, i); - memcpy(&(free->di_core), &dic, sizeof(xfs_dinode_core_t)); + INT_SET(free->di_core.di_magic, ARCH_CONVERT, XFS_DINODE_MAGIC); + INT_SET(free->di_core.di_version, ARCH_CONVERT, version); INT_SET(free->di_next_unlinked, ARCH_CONVERT, NULLAGINO); xfs_ialloc_log_di(tp, fbuf, i, XFS_DI_CORE_BITS | XFS_DI_NEXT_UNLINKED); diff --git a/fs/xfs/xfs_iget.c b/fs/xfs/xfs_iget.c index 8e380a1fb79..3ce35a6f700 100644 --- a/fs/xfs/xfs_iget.c +++ b/fs/xfs/xfs_iget.c @@ -258,7 +258,7 @@ again: goto finish_inode; } else if (vp != inode_vp) { - struct inode *inode = LINVFS_GET_IP(inode_vp); + struct inode *inode = vn_to_inode(inode_vp); /* The inode is being torn down, pause and * try again. @@ -495,7 +495,7 @@ retry: if ((inode = iget_locked(XFS_MTOVFS(mp)->vfs_super, ino))) { xfs_inode_t *ip; - vp = LINVFS_GET_VP(inode); + vp = vn_from_inode(inode); if (inode->i_state & I_NEW) { vn_initialize(inode); error = xfs_iget_core(vp, mp, tp, ino, flags, @@ -617,7 +617,7 @@ xfs_iput_new(xfs_inode_t *ip, uint lock_flags) { vnode_t *vp = XFS_ITOV(ip); - struct inode *inode = LINVFS_GET_IP(vp); + struct inode *inode = vn_to_inode(vp); vn_trace_entry(vp, "xfs_iput_new", (inst_t *)__return_address); diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c index 1d7f5a7e063..88a517fad07 100644 --- a/fs/xfs/xfs_inode.c +++ b/fs/xfs/xfs_inode.c @@ -76,16 +76,18 @@ STATIC int xfs_iformat_btree(xfs_inode_t *, xfs_dinode_t *, int); */ STATIC void xfs_validate_extents( - xfs_bmbt_rec_t *ep, + xfs_ifork_t *ifp, int nrecs, int disk, xfs_exntfmt_t fmt) { + xfs_bmbt_rec_t *ep; xfs_bmbt_irec_t irec; xfs_bmbt_rec_t rec; int i; for (i = 0; i < nrecs; i++) { + ep = xfs_iext_get_ext(ifp, i); rec.l0 = get_unaligned((__uint64_t*)&ep->l0); rec.l1 = get_unaligned((__uint64_t*)&ep->l1); if (disk) @@ -94,11 +96,10 @@ xfs_validate_extents( xfs_bmbt_get_all(&rec, &irec); if (fmt == XFS_EXTFMT_NOSTATE) ASSERT(irec.br_state == XFS_EXT_NORM); - ep++; } } #else /* DEBUG */ -#define xfs_validate_extents(ep, nrecs, disk, fmt) +#define xfs_validate_extents(ifp, nrecs, disk, fmt) #endif /* DEBUG */ /* @@ -252,7 +253,8 @@ xfs_itobp( xfs_inode_t *ip, xfs_dinode_t **dipp, xfs_buf_t **bpp, - xfs_daddr_t bno) + xfs_daddr_t bno, + uint imap_flags) { xfs_buf_t *bp; int error; @@ -268,10 +270,9 @@ xfs_itobp( * inode on disk. */ imap.im_blkno = bno; - error = xfs_imap(mp, tp, ip->i_ino, &imap, XFS_IMAP_LOOKUP); - if (error != 0) { + if ((error = xfs_imap(mp, tp, ip->i_ino, &imap, + XFS_IMAP_LOOKUP | imap_flags))) return error; - } /* * If the inode number maps to a block outside the bounds @@ -335,9 +336,10 @@ xfs_itobp( * (if DEBUG kernel) or the first inode in the buffer, otherwise. */ #ifdef DEBUG - ni = BBTOB(imap.im_len) >> mp->m_sb.sb_inodelog; + ni = (imap_flags & XFS_IMAP_BULKSTAT) ? 0 : + (BBTOB(imap.im_len) >> mp->m_sb.sb_inodelog); #else - ni = 1; + ni = (imap_flags & XFS_IMAP_BULKSTAT) ? 
0 : 1; #endif for (i = 0; i < ni; i++) { int di_ok; @@ -504,7 +506,7 @@ xfs_iformat( switch (INT_GET(dip->di_core.di_aformat, ARCH_CONVERT)) { case XFS_DINODE_FMT_LOCAL: atp = (xfs_attr_shortform_t *)XFS_DFORK_APTR(dip); - size = (int)INT_GET(atp->hdr.totsize, ARCH_CONVERT); + size = be16_to_cpu(atp->hdr.totsize); error = xfs_iformat_local(ip, dip, XFS_ATTR_FORK, size); break; case XFS_DINODE_FMT_EXTENTS: @@ -597,7 +599,6 @@ xfs_iformat_extents( xfs_bmbt_rec_t *ep, *dp; xfs_ifork_t *ifp; int nex; - int real_size; int size; int i; @@ -619,23 +620,20 @@ xfs_iformat_extents( return XFS_ERROR(EFSCORRUPTED); } - real_size = 0; + ifp->if_real_bytes = 0; if (nex == 0) ifp->if_u1.if_extents = NULL; else if (nex <= XFS_INLINE_EXTS) ifp->if_u1.if_extents = ifp->if_u2.if_inline_ext; - else { - ifp->if_u1.if_extents = kmem_alloc(size, KM_SLEEP); - ASSERT(ifp->if_u1.if_extents != NULL); - real_size = size; - } + else + xfs_iext_add(ifp, 0, nex); + ifp->if_bytes = size; - ifp->if_real_bytes = real_size; if (size) { dp = (xfs_bmbt_rec_t *) XFS_DFORK_PTR(dip, whichfork); - xfs_validate_extents(dp, nex, 1, XFS_EXTFMT_INODE(ip)); - ep = ifp->if_u1.if_extents; - for (i = 0; i < nex; i++, ep++, dp++) { + xfs_validate_extents(ifp, nex, 1, XFS_EXTFMT_INODE(ip)); + for (i = 0; i < nex; i++, dp++) { + ep = xfs_iext_get_ext(ifp, i); ep->l0 = INT_GET(get_unaligned((__uint64_t*)&dp->l0), ARCH_CONVERT); ep->l1 = INT_GET(get_unaligned((__uint64_t*)&dp->l1), @@ -646,7 +644,7 @@ xfs_iformat_extents( if (whichfork != XFS_DATA_FORK || XFS_EXTFMT_INODE(ip) == XFS_EXTFMT_NOSTATE) if (unlikely(xfs_check_nostate_extents( - ifp->if_u1.if_extents, nex))) { + ifp, 0, nex))) { XFS_ERROR_REPORT("xfs_iformat_extents(2)", XFS_ERRLEVEL_LOW, ip->i_mount); @@ -871,9 +869,8 @@ xfs_iread( * return NULL as well. Set i_blkno to 0 so that xfs_itobp() will * know that this is a new incore inode. 
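
In the xfs_ialloc_ag_alloc() hunk a little further up, the per-allocation dinode-core template (built on the stack and memcpy'd into every new inode) is gone: the whole buffer is zeroed once with xfs_biozero() and only the magic, version and next-unlinked fields are stamped per inode. The shape of that change, as a standalone sketch over an invented record layout:

#include <stdint.h>
#include <string.h>
#include <stdio.h>

/* Invented miniature "on-disk inode": only the fields the sketch stamps. */
struct demo_dinode {
	uint32_t magic;
	uint8_t  version;
	uint8_t  pad[11];
	uint32_t next_unlinked;
};

#define DEMO_MAGIC	0x494e		/* stand-in for XFS_DINODE_MAGIC */
#define DEMO_NULLAGINO	0xffffffffu	/* stand-in for NULLAGINO */

/* Zero the buffer once, then set only the fields that must differ from
 * zero (the same idea as xfs_biozero() plus the per-inode INT_SETs above). */
static void demo_init_inode_buf(struct demo_dinode *buf, int ninodes, uint8_t version)
{
	memset(buf, 0, ninodes * sizeof(*buf));	/* the "biozero" step */
	for (int i = 0; i < ninodes; i++) {
		buf[i].magic = DEMO_MAGIC;
		buf[i].version = version;
		buf[i].next_unlinked = DEMO_NULLAGINO;
	}
}

int main(void)
{
	struct demo_dinode inodes[4];

	demo_init_inode_buf(inodes, 4, 1);
	printf("inode 3: magic=0x%x version=%u\n",
	       (unsigned)inodes[3].magic, (unsigned)inodes[3].version);
	return 0;
}
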
*/ - error = xfs_itobp(mp, tp, ip, &dip, &bp, bno); - - if (error != 0) { + error = xfs_itobp(mp, tp, ip, &dip, &bp, bno, 0); + if (error) { kmem_zone_free(xfs_inode_zone, ip); return error; } @@ -1015,6 +1012,7 @@ xfs_iread_extents( { int error; xfs_ifork_t *ifp; + xfs_extnum_t nextents; size_t size; if (unlikely(XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE)) { @@ -1022,26 +1020,24 @@ xfs_iread_extents( ip->i_mount); return XFS_ERROR(EFSCORRUPTED); } - size = XFS_IFORK_NEXTENTS(ip, whichfork) * (uint)sizeof(xfs_bmbt_rec_t); + nextents = XFS_IFORK_NEXTENTS(ip, whichfork); + size = nextents * sizeof(xfs_bmbt_rec_t); ifp = XFS_IFORK_PTR(ip, whichfork); + /* * We know that the size is valid (it's checked in iformat_btree) */ - ifp->if_u1.if_extents = kmem_alloc(size, KM_SLEEP); - ASSERT(ifp->if_u1.if_extents != NULL); ifp->if_lastex = NULLEXTNUM; - ifp->if_bytes = ifp->if_real_bytes = (int)size; + ifp->if_bytes = ifp->if_real_bytes = 0; ifp->if_flags |= XFS_IFEXTENTS; + xfs_iext_add(ifp, 0, nextents); error = xfs_bmap_read_extents(tp, ip, whichfork); if (error) { - kmem_free(ifp->if_u1.if_extents, size); - ifp->if_u1.if_extents = NULL; - ifp->if_bytes = ifp->if_real_bytes = 0; + xfs_iext_destroy(ifp); ifp->if_flags &= ~XFS_IFEXTENTS; return error; } - xfs_validate_extents((xfs_bmbt_rec_t *)ifp->if_u1.if_extents, - XFS_IFORK_NEXTENTS(ip, whichfork), 0, XFS_EXTFMT_INODE(ip)); + xfs_validate_extents(ifp, nextents, 0, XFS_EXTFMT_INODE(ip)); return 0; } @@ -1376,10 +1372,10 @@ xfs_itrunc_trace( (void*)(unsigned long)((toss_finish >> 32) & 0xffffffff), (void*)(unsigned long)(toss_finish & 0xffffffff), (void*)(unsigned long)current_cpu(), - (void*)0, - (void*)0, - (void*)0, - (void*)0); + (void*)(unsigned long)current_pid(), + (void*)NULL, + (void*)NULL, + (void*)NULL); } #else #define xfs_itrunc_trace(tag, ip, flag, new_size, toss_start, toss_finish) @@ -1397,6 +1393,16 @@ xfs_itrunc_trace( * calling into the buffer/page cache code and we can't hold the * inode lock when we do so. * + * We need to wait for any direct I/Os in flight to complete before we + * proceed with the truncate. This is needed to prevent the extents + * being read or written by the direct I/Os from being removed while the + * I/O is in flight as there is no other method of synchronising + * direct I/O with the truncate operation. Also, because we hold + * the IOLOCK in exclusive mode, we prevent new direct I/Os from being + * started until the truncate completes and drops the lock. Essentially, + * the vn_iowait() call forms an I/O barrier that provides strict ordering + * between direct I/Os and the truncate operation. + * * The flags parameter can have either the value XFS_ITRUNC_DEFINITE * or XFS_ITRUNC_MAYBE. The XFS_ITRUNC_MAYBE value should be used * in the case that the caller is locking things out of order and @@ -1424,6 +1430,9 @@ xfs_itruncate_start( mp = ip->i_mount; vp = XFS_ITOV(ip); + + vn_iowait(vp); /* wait for the completion of any pending DIOs */ + /* * Call VOP_TOSS_PAGES() or VOP_FLUSHINVAL_PAGES() to get rid of pages and buffers * overlapping the region being removed. We have to use @@ -1899,7 +1908,7 @@ xfs_iunlink( * Here we put the head pointer into our next pointer, * and then we fall through to point the head at us. */ - error = xfs_itobp(mp, tp, ip, &dip, &ibp, 0); + error = xfs_itobp(mp, tp, ip, &dip, &ibp, 0, 0); if (error) { return error; } @@ -2008,7 +2017,7 @@ xfs_iunlink_remove( * of dealing with the buffer when there is no need to * change it. 
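
The new comment block above xfs_itruncate_start() explains why vn_iowait() is now called before truncating: with the IOLOCK held exclusively, waiting for the in-flight direct I/O count to drain to zero forms an I/O barrier between DIO and the truncate. A toy userspace version of such a "wait until nothing is in flight" barrier, using pthreads (this shows only the synchronisation shape, not the kernel's vn_iowait implementation):

#include <pthread.h>

/* Minimal in-flight I/O counter: submitters bracket each I/O with
 * start/end, and a truncating thread waits until the count drains. */
struct demo_iocount {
	pthread_mutex_t lock;
	pthread_cond_t  drained;
	int		inflight;
};

static void demo_io_start(struct demo_iocount *c)
{
	pthread_mutex_lock(&c->lock);
	c->inflight++;
	pthread_mutex_unlock(&c->lock);
}

static void demo_io_end(struct demo_iocount *c)
{
	pthread_mutex_lock(&c->lock);
	if (--c->inflight == 0)
		pthread_cond_broadcast(&c->drained);
	pthread_mutex_unlock(&c->lock);
}

/* Analogous in spirit to vn_iowait(): block until no I/O is in flight. */
static void demo_iowait(struct demo_iocount *c)
{
	pthread_mutex_lock(&c->lock);
	while (c->inflight > 0)
		pthread_cond_wait(&c->drained, &c->lock);
	pthread_mutex_unlock(&c->lock);
}

int main(void)
{
	struct demo_iocount c = {
		PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, 0
	};

	demo_io_start(&c);	/* one I/O in flight */
	demo_io_end(&c);	/* ... and it completes */
	demo_iowait(&c);	/* returns immediately: nothing in flight */
	return 0;
}
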
*/ - error = xfs_itobp(mp, tp, ip, &dip, &ibp, 0); + error = xfs_itobp(mp, tp, ip, &dip, &ibp, 0, 0); if (error) { cmn_err(CE_WARN, "xfs_iunlink_remove: xfs_itobp() returned an error %d on %s. Returning error.", @@ -2070,7 +2079,7 @@ xfs_iunlink_remove( * Now last_ibp points to the buffer previous to us on * the unlinked list. Pull us from the list. */ - error = xfs_itobp(mp, tp, ip, &dip, &ibp, 0); + error = xfs_itobp(mp, tp, ip, &dip, &ibp, 0, 0); if (error) { cmn_err(CE_WARN, "xfs_iunlink_remove: xfs_itobp() returned an error %d on %s. Returning error.", @@ -2476,92 +2485,6 @@ xfs_iroot_realloc( /* - * This is called when the amount of space needed for if_extents - * is increased or decreased. The change in size is indicated by - * the number of extents that need to be added or deleted in the - * ext_diff parameter. - * - * If the amount of space needed has decreased below the size of the - * inline buffer, then switch to using the inline buffer. Otherwise, - * use kmem_realloc() or kmem_alloc() to adjust the size of the buffer - * to what is needed. - * - * ip -- the inode whose if_extents area is changing - * ext_diff -- the change in the number of extents, positive or negative, - * requested for the if_extents array. - */ -void -xfs_iext_realloc( - xfs_inode_t *ip, - int ext_diff, - int whichfork) -{ - int byte_diff; - xfs_ifork_t *ifp; - int new_size; - uint rnew_size; - - if (ext_diff == 0) { - return; - } - - ifp = XFS_IFORK_PTR(ip, whichfork); - byte_diff = ext_diff * (uint)sizeof(xfs_bmbt_rec_t); - new_size = (int)ifp->if_bytes + byte_diff; - ASSERT(new_size >= 0); - - if (new_size == 0) { - if (ifp->if_u1.if_extents != ifp->if_u2.if_inline_ext) { - ASSERT(ifp->if_real_bytes != 0); - kmem_free(ifp->if_u1.if_extents, ifp->if_real_bytes); - } - ifp->if_u1.if_extents = NULL; - rnew_size = 0; - } else if (new_size <= sizeof(ifp->if_u2.if_inline_ext)) { - /* - * If the valid extents can fit in if_inline_ext, - * copy them from the malloc'd vector and free it. - */ - if (ifp->if_u1.if_extents != ifp->if_u2.if_inline_ext) { - /* - * For now, empty files are format EXTENTS, - * so the if_extents pointer is null. - */ - if (ifp->if_u1.if_extents) { - memcpy(ifp->if_u2.if_inline_ext, - ifp->if_u1.if_extents, new_size); - kmem_free(ifp->if_u1.if_extents, - ifp->if_real_bytes); - } - ifp->if_u1.if_extents = ifp->if_u2.if_inline_ext; - } - rnew_size = 0; - } else { - rnew_size = new_size; - if ((rnew_size & (rnew_size - 1)) != 0) - rnew_size = xfs_iroundup(rnew_size); - /* - * Stuck with malloc/realloc. - */ - if (ifp->if_u1.if_extents == ifp->if_u2.if_inline_ext) { - ifp->if_u1.if_extents = (xfs_bmbt_rec_t *) - kmem_alloc(rnew_size, KM_SLEEP); - memcpy(ifp->if_u1.if_extents, ifp->if_u2.if_inline_ext, - sizeof(ifp->if_u2.if_inline_ext)); - } else if (rnew_size != ifp->if_real_bytes) { - ifp->if_u1.if_extents = (xfs_bmbt_rec_t *) - kmem_realloc(ifp->if_u1.if_extents, - rnew_size, - ifp->if_real_bytes, - KM_NOFS); - } - } - ifp->if_real_bytes = rnew_size; - ifp->if_bytes = new_size; -} - - -/* * This is called when the amount of space needed for if_data * is increased or decreased. 
The change in size is indicated by * the number of bytes that need to be added or deleted in the @@ -2720,12 +2643,11 @@ xfs_idestroy_fork( ifp->if_real_bytes = 0; } } else if ((ifp->if_flags & XFS_IFEXTENTS) && - (ifp->if_u1.if_extents != NULL) && - (ifp->if_u1.if_extents != ifp->if_u2.if_inline_ext)) { + ((ifp->if_flags & XFS_IFEXTIREC) || + ((ifp->if_u1.if_extents != NULL) && + (ifp->if_u1.if_extents != ifp->if_u2.if_inline_ext)))) { ASSERT(ifp->if_real_bytes != 0); - kmem_free(ifp->if_u1.if_extents, ifp->if_real_bytes); - ifp->if_u1.if_extents = NULL; - ifp->if_real_bytes = 0; + xfs_iext_destroy(ifp); } ASSERT(ifp->if_u1.if_extents == NULL || ifp->if_u1.if_extents == ifp->if_u2.if_inline_ext); @@ -2814,7 +2736,7 @@ xfs_iunpin( /* make sync come back and flush this inode */ if (vp) { - struct inode *inode = LINVFS_GET_IP(vp); + struct inode *inode = vn_to_inode(vp); if (!(inode->i_state & I_NEW)) mark_inode_dirty_sync(inode); @@ -2902,16 +2824,15 @@ xfs_iextents_copy( * the delayed ones. There must be at least one * non-delayed extent. */ - ep = ifp->if_u1.if_extents; dest_ep = buffer; copied = 0; for (i = 0; i < nrecs; i++) { + ep = xfs_iext_get_ext(ifp, i); start_block = xfs_bmbt_get_startblock(ep); if (ISNULLSTARTBLOCK(start_block)) { /* * It's a delayed allocation extent, so skip it. */ - ep++; continue; } @@ -2921,11 +2842,10 @@ xfs_iextents_copy( put_unaligned(INT_GET(ep->l1, ARCH_CONVERT), (__uint64_t*)&dest_ep->l1); dest_ep++; - ep++; copied++; } ASSERT(copied != 0); - xfs_validate_extents(buffer, copied, 1, XFS_EXTFMT_INODE(ip)); + xfs_validate_extents(ifp, copied, 1, XFS_EXTFMT_INODE(ip)); return (copied * (uint)sizeof(xfs_bmbt_rec_t)); } @@ -2995,8 +2915,10 @@ xfs_iflush_fork( case XFS_DINODE_FMT_EXTENTS: ASSERT((ifp->if_flags & XFS_IFEXTENTS) || !(iip->ili_format.ilf_fields & extflag[whichfork])); - ASSERT((ifp->if_u1.if_extents != NULL) || (ifp->if_bytes == 0)); - ASSERT((ifp->if_u1.if_extents == NULL) || (ifp->if_bytes > 0)); + ASSERT((xfs_iext_get_ext(ifp, 0) != NULL) || + (ifp->if_bytes == 0)); + ASSERT((xfs_iext_get_ext(ifp, 0) == NULL) || + (ifp->if_bytes > 0)); if ((iip->ili_format.ilf_fields & extflag[whichfork]) && (ifp->if_bytes > 0)) { ASSERT(XFS_IFORK_NEXTENTS(ip, whichfork) > 0); @@ -3114,8 +3036,8 @@ xfs_iflush( /* * Get the buffer containing the on-disk inode. */ - error = xfs_itobp(mp, NULL, ip, &dip, &bp, 0); - if (error != 0) { + error = xfs_itobp(mp, NULL, ip, &dip, &bp, 0, 0); + if (error) { xfs_ifunlock(ip); return error; } @@ -3610,7 +3532,7 @@ xfs_iaccess( { int error; mode_t orgmode = mode; - struct inode *inode = LINVFS_GET_IP(XFS_ITOV(ip)); + struct inode *inode = vn_to_inode(XFS_ITOV(ip)); if (mode & S_IWUSR) { umode_t imode = inode->i_mode; @@ -3704,3 +3626,1100 @@ xfs_ilock_trace(xfs_inode_t *ip, int lock, unsigned int lockflags, inst_t *ra) NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL); } #endif + +/* + * Return a pointer to the extent record at file index idx. 
+ */ +xfs_bmbt_rec_t * +xfs_iext_get_ext( + xfs_ifork_t *ifp, /* inode fork pointer */ + xfs_extnum_t idx) /* index of target extent */ +{ + ASSERT(idx >= 0); + if ((ifp->if_flags & XFS_IFEXTIREC) && (idx == 0)) { + return ifp->if_u1.if_ext_irec->er_extbuf; + } else if (ifp->if_flags & XFS_IFEXTIREC) { + xfs_ext_irec_t *erp; /* irec pointer */ + int erp_idx = 0; /* irec index */ + xfs_extnum_t page_idx = idx; /* ext index in target list */ + + erp = xfs_iext_idx_to_irec(ifp, &page_idx, &erp_idx, 0); + return &erp->er_extbuf[page_idx]; + } else if (ifp->if_bytes) { + return &ifp->if_u1.if_extents[idx]; + } else { + return NULL; + } +} + +/* + * Insert new item(s) into the extent records for incore inode + * fork 'ifp'. 'count' new items are inserted at index 'idx'. + */ +void +xfs_iext_insert( + xfs_ifork_t *ifp, /* inode fork pointer */ + xfs_extnum_t idx, /* starting index of new items */ + xfs_extnum_t count, /* number of inserted items */ + xfs_bmbt_irec_t *new) /* items to insert */ +{ + xfs_bmbt_rec_t *ep; /* extent record pointer */ + xfs_extnum_t i; /* extent record index */ + + ASSERT(ifp->if_flags & XFS_IFEXTENTS); + xfs_iext_add(ifp, idx, count); + for (i = idx; i < idx + count; i++, new++) { + ep = xfs_iext_get_ext(ifp, i); + xfs_bmbt_set_all(ep, new); + } +} + +/* + * This is called when the amount of space required for incore file + * extents needs to be increased. The ext_diff parameter stores the + * number of new extents being added and the idx parameter contains + * the extent index where the new extents will be added. If the new + * extents are being appended, then we just need to (re)allocate and + * initialize the space. Otherwise, if the new extents are being + * inserted into the middle of the existing entries, a bit more work + * is required to make room for the new extents to be inserted. The + * caller is responsible for filling in the new extent entries upon + * return. + */ +void +xfs_iext_add( + xfs_ifork_t *ifp, /* inode fork pointer */ + xfs_extnum_t idx, /* index to begin adding exts */ + int ext_diff) /* nubmer of extents to add */ +{ + int byte_diff; /* new bytes being added */ + int new_size; /* size of extents after adding */ + xfs_extnum_t nextents; /* number of extents in file */ + + nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t); + ASSERT((idx >= 0) && (idx <= nextents)); + byte_diff = ext_diff * sizeof(xfs_bmbt_rec_t); + new_size = ifp->if_bytes + byte_diff; + /* + * If the new number of extents (nextents + ext_diff) + * fits inside the inode, then continue to use the inline + * extent buffer. + */ + if (nextents + ext_diff <= XFS_INLINE_EXTS) { + if (idx < nextents) { + memmove(&ifp->if_u2.if_inline_ext[idx + ext_diff], + &ifp->if_u2.if_inline_ext[idx], + (nextents - idx) * sizeof(xfs_bmbt_rec_t)); + memset(&ifp->if_u2.if_inline_ext[idx], 0, byte_diff); + } + ifp->if_u1.if_extents = ifp->if_u2.if_inline_ext; + ifp->if_real_bytes = 0; + ifp->if_lastex = nextents + ext_diff; + } + /* + * Otherwise use a linear (direct) extent list. + * If the extents are currently inside the inode, + * xfs_iext_realloc_direct will switch us from + * inline to direct extent allocation mode. 
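
xfs_iext_get_ext() and xfs_iext_add() above introduce the new tiered in-core extent storage: a small inline buffer in the fork, a directly allocated array once that overflows, and (further down) an indirection array of extent pages for very large files. The first of those transitions, inline buffer until a fixed limit and a heap allocation afterwards, looks roughly like this reduced sketch (two tiers only, with invented types and limits):

#include <stdlib.h>
#include <string.h>
#include <assert.h>

#define DEMO_INLINE_RECS 2	/* stand-in for XFS_INLINE_EXTS */

struct demo_rec { long startoff, blockcount; };

/* Two-tier record list: records live in 'inline_buf' until they no
 * longer fit, then in a heap array pointed to by 'recs'. */
struct demo_fork {
	struct demo_rec	*recs;		/* points at inline_buf or heap */
	struct demo_rec	 inline_buf[DEMO_INLINE_RECS];
	int		 nrecs;
	int		 heap;		/* 0 = inline, 1 = heap allocation */
};

static void demo_fork_init(struct demo_fork *f)
{
	memset(f, 0, sizeof(*f));
	f->recs = f->inline_buf;
}

static void demo_fork_append(struct demo_fork *f, struct demo_rec r)
{
	if (!f->heap && f->nrecs + 1 > DEMO_INLINE_RECS) {
		/* Overflowed the inline buffer: switch to a heap array,
		 * copying the inline records across (cf. inline -> direct). */
		struct demo_rec *p = malloc((f->nrecs + 1) * sizeof(*p));

		assert(p);
		memcpy(p, f->inline_buf, f->nrecs * sizeof(*p));
		f->recs = p;
		f->heap = 1;
	} else if (f->heap) {
		f->recs = realloc(f->recs, (f->nrecs + 1) * sizeof(*f->recs));
		assert(f->recs);
	}
	f->recs[f->nrecs++] = r;
}

int main(void)
{
	struct demo_fork f;

	demo_fork_init(&f);
	for (long i = 0; i < 5; i++)
		demo_fork_append(&f, (struct demo_rec){ i * 10, 10 });
	assert(f.heap && f.nrecs == 5 && f.recs[4].startoff == 40);
	if (f.heap)
		free(f.recs);
	return 0;
}
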
+ */ + else if (nextents + ext_diff <= XFS_LINEAR_EXTS) { + xfs_iext_realloc_direct(ifp, new_size); + if (idx < nextents) { + memmove(&ifp->if_u1.if_extents[idx + ext_diff], + &ifp->if_u1.if_extents[idx], + (nextents - idx) * sizeof(xfs_bmbt_rec_t)); + memset(&ifp->if_u1.if_extents[idx], 0, byte_diff); + } + } + /* Indirection array */ + else { + xfs_ext_irec_t *erp; + int erp_idx = 0; + int page_idx = idx; + + ASSERT(nextents + ext_diff > XFS_LINEAR_EXTS); + if (ifp->if_flags & XFS_IFEXTIREC) { + erp = xfs_iext_idx_to_irec(ifp, &page_idx, &erp_idx, 1); + } else { + xfs_iext_irec_init(ifp); + ASSERT(ifp->if_flags & XFS_IFEXTIREC); + erp = ifp->if_u1.if_ext_irec; + } + /* Extents fit in target extent page */ + if (erp && erp->er_extcount + ext_diff <= XFS_LINEAR_EXTS) { + if (page_idx < erp->er_extcount) { + memmove(&erp->er_extbuf[page_idx + ext_diff], + &erp->er_extbuf[page_idx], + (erp->er_extcount - page_idx) * + sizeof(xfs_bmbt_rec_t)); + memset(&erp->er_extbuf[page_idx], 0, byte_diff); + } + erp->er_extcount += ext_diff; + xfs_iext_irec_update_extoffs(ifp, erp_idx + 1, ext_diff); + } + /* Insert a new extent page */ + else if (erp) { + xfs_iext_add_indirect_multi(ifp, + erp_idx, page_idx, ext_diff); + } + /* + * If extent(s) are being appended to the last page in + * the indirection array and the new extent(s) don't fit + * in the page, then erp is NULL and erp_idx is set to + * the next index needed in the indirection array. + */ + else { + int count = ext_diff; + + while (count) { + erp = xfs_iext_irec_new(ifp, erp_idx); + erp->er_extcount = count; + count -= MIN(count, (int)XFS_LINEAR_EXTS); + if (count) { + erp_idx++; + } + } + } + } + ifp->if_bytes = new_size; +} + +/* + * This is called when incore extents are being added to the indirection + * array and the new extents do not fit in the target extent list. The + * erp_idx parameter contains the irec index for the target extent list + * in the indirection array, and the idx parameter contains the extent + * index within the list. The number of extents being added is stored + * in the count parameter. 
+ * + * |-------| |-------| + * | | | | idx - number of extents before idx + * | idx | | count | + * | | | | count - number of extents being inserted at idx + * |-------| |-------| + * | count | | nex2 | nex2 - number of extents after idx + count + * |-------| |-------| + */ +void +xfs_iext_add_indirect_multi( + xfs_ifork_t *ifp, /* inode fork pointer */ + int erp_idx, /* target extent irec index */ + xfs_extnum_t idx, /* index within target list */ + int count) /* new extents being added */ +{ + int byte_diff; /* new bytes being added */ + xfs_ext_irec_t *erp; /* pointer to irec entry */ + xfs_extnum_t ext_diff; /* number of extents to add */ + xfs_extnum_t ext_cnt; /* new extents still needed */ + xfs_extnum_t nex2; /* extents after idx + count */ + xfs_bmbt_rec_t *nex2_ep = NULL; /* temp list for nex2 extents */ + int nlists; /* number of irec's (lists) */ + + ASSERT(ifp->if_flags & XFS_IFEXTIREC); + erp = &ifp->if_u1.if_ext_irec[erp_idx]; + nex2 = erp->er_extcount - idx; + nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ; + + /* + * Save second part of target extent list + * (all extents past */ + if (nex2) { + byte_diff = nex2 * sizeof(xfs_bmbt_rec_t); + nex2_ep = (xfs_bmbt_rec_t *) kmem_alloc(byte_diff, KM_SLEEP); + memmove(nex2_ep, &erp->er_extbuf[idx], byte_diff); + erp->er_extcount -= nex2; + xfs_iext_irec_update_extoffs(ifp, erp_idx + 1, -nex2); + memset(&erp->er_extbuf[idx], 0, byte_diff); + } + + /* + * Add the new extents to the end of the target + * list, then allocate new irec record(s) and + * extent buffer(s) as needed to store the rest + * of the new extents. + */ + ext_cnt = count; + ext_diff = MIN(ext_cnt, (int)XFS_LINEAR_EXTS - erp->er_extcount); + if (ext_diff) { + erp->er_extcount += ext_diff; + xfs_iext_irec_update_extoffs(ifp, erp_idx + 1, ext_diff); + ext_cnt -= ext_diff; + } + while (ext_cnt) { + erp_idx++; + erp = xfs_iext_irec_new(ifp, erp_idx); + ext_diff = MIN(ext_cnt, (int)XFS_LINEAR_EXTS); + erp->er_extcount = ext_diff; + xfs_iext_irec_update_extoffs(ifp, erp_idx + 1, ext_diff); + ext_cnt -= ext_diff; + } + + /* Add nex2 extents back to indirection array */ + if (nex2) { + xfs_extnum_t ext_avail; + int i; + + byte_diff = nex2 * sizeof(xfs_bmbt_rec_t); + ext_avail = XFS_LINEAR_EXTS - erp->er_extcount; + i = 0; + /* + * If nex2 extents fit in the current page, append + * nex2_ep after the new extents. + */ + if (nex2 <= ext_avail) { + i = erp->er_extcount; + } + /* + * Otherwise, check if space is available in the + * next page. + */ + else if ((erp_idx < nlists - 1) && + (nex2 <= (ext_avail = XFS_LINEAR_EXTS - + ifp->if_u1.if_ext_irec[erp_idx+1].er_extcount))) { + erp_idx++; + erp++; + /* Create a hole for nex2 extents */ + memmove(&erp->er_extbuf[nex2], erp->er_extbuf, + erp->er_extcount * sizeof(xfs_bmbt_rec_t)); + } + /* + * Final choice, create a new extent page for + * nex2 extents. + */ + else { + erp_idx++; + erp = xfs_iext_irec_new(ifp, erp_idx); + } + memmove(&erp->er_extbuf[i], nex2_ep, byte_diff); + kmem_free(nex2_ep, byte_diff); + erp->er_extcount += nex2; + xfs_iext_irec_update_extoffs(ifp, erp_idx + 1, nex2); + } +} + +/* + * This is called when the amount of space required for incore file + * extents needs to be decreased. The ext_diff parameter stores the + * number of extents to be removed and the idx parameter contains + * the extent index where the extents will be removed from. + * + * If the amount of space needed has decreased below the linear + * limit, XFS_IEXT_BUFSZ, then switch to using the contiguous + * extent array. 
Otherwise, use kmem_realloc() to adjust the + * size to what is needed. + */ +void +xfs_iext_remove( + xfs_ifork_t *ifp, /* inode fork pointer */ + xfs_extnum_t idx, /* index to begin removing exts */ + int ext_diff) /* number of extents to remove */ +{ + xfs_extnum_t nextents; /* number of extents in file */ + int new_size; /* size of extents after removal */ + + ASSERT(ext_diff > 0); + nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t); + new_size = (nextents - ext_diff) * sizeof(xfs_bmbt_rec_t); + + if (new_size == 0) { + xfs_iext_destroy(ifp); + } else if (ifp->if_flags & XFS_IFEXTIREC) { + xfs_iext_remove_indirect(ifp, idx, ext_diff); + } else if (ifp->if_real_bytes) { + xfs_iext_remove_direct(ifp, idx, ext_diff); + } else { + xfs_iext_remove_inline(ifp, idx, ext_diff); + } + ifp->if_bytes = new_size; +} + +/* + * This removes ext_diff extents from the inline buffer, beginning + * at extent index idx. + */ +void +xfs_iext_remove_inline( + xfs_ifork_t *ifp, /* inode fork pointer */ + xfs_extnum_t idx, /* index to begin removing exts */ + int ext_diff) /* number of extents to remove */ +{ + int nextents; /* number of extents in file */ + + ASSERT(!(ifp->if_flags & XFS_IFEXTIREC)); + ASSERT(idx < XFS_INLINE_EXTS); + nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t); + ASSERT(((nextents - ext_diff) > 0) && + (nextents - ext_diff) < XFS_INLINE_EXTS); + + if (idx + ext_diff < nextents) { + memmove(&ifp->if_u2.if_inline_ext[idx], + &ifp->if_u2.if_inline_ext[idx + ext_diff], + (nextents - (idx + ext_diff)) * + sizeof(xfs_bmbt_rec_t)); + memset(&ifp->if_u2.if_inline_ext[nextents - ext_diff], + 0, ext_diff * sizeof(xfs_bmbt_rec_t)); + } else { + memset(&ifp->if_u2.if_inline_ext[idx], 0, + ext_diff * sizeof(xfs_bmbt_rec_t)); + } +} + +/* + * This removes ext_diff extents from a linear (direct) extent list, + * beginning at extent index idx. If the extents are being removed + * from the end of the list (ie. truncate) then we just need to re- + * allocate the list to remove the extra space. Otherwise, if the + * extents are being removed from the middle of the existing extent + * entries, then we first need to move the extent records beginning + * at idx + ext_diff up in the list to overwrite the records being + * removed, then remove the extra space via kmem_realloc. + */ +void +xfs_iext_remove_direct( + xfs_ifork_t *ifp, /* inode fork pointer */ + xfs_extnum_t idx, /* index to begin removing exts */ + int ext_diff) /* number of extents to remove */ +{ + xfs_extnum_t nextents; /* number of extents in file */ + int new_size; /* size of extents after removal */ + + ASSERT(!(ifp->if_flags & XFS_IFEXTIREC)); + new_size = ifp->if_bytes - + (ext_diff * sizeof(xfs_bmbt_rec_t)); + nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t); + + if (new_size == 0) { + xfs_iext_destroy(ifp); + return; + } + /* Move extents up in the list (if needed) */ + if (idx + ext_diff < nextents) { + memmove(&ifp->if_u1.if_extents[idx], + &ifp->if_u1.if_extents[idx + ext_diff], + (nextents - (idx + ext_diff)) * + sizeof(xfs_bmbt_rec_t)); + } + memset(&ifp->if_u1.if_extents[nextents - ext_diff], + 0, ext_diff * sizeof(xfs_bmbt_rec_t)); + /* + * Reallocate the direct extent list. If the extents + * will fit inside the inode then xfs_iext_realloc_direct + * will switch from direct to inline extent allocation + * mode for us. 
+ */ + xfs_iext_realloc_direct(ifp, new_size); + ifp->if_bytes = new_size; +} + +/* + * This is called when incore extents are being removed from the + * indirection array and the extents being removed span multiple extent + * buffers. The idx parameter contains the file extent index where we + * want to begin removing extents, and the count parameter contains + * how many extents need to be removed. + * + * |-------| |-------| + * | nex1 | | | nex1 - number of extents before idx + * |-------| | count | + * | | | | count - number of extents being removed at idx + * | count | |-------| + * | | | nex2 | nex2 - number of extents after idx + count + * |-------| |-------| + */ +void +xfs_iext_remove_indirect( + xfs_ifork_t *ifp, /* inode fork pointer */ + xfs_extnum_t idx, /* index to begin removing extents */ + int count) /* number of extents to remove */ +{ + xfs_ext_irec_t *erp; /* indirection array pointer */ + int erp_idx = 0; /* indirection array index */ + xfs_extnum_t ext_cnt; /* extents left to remove */ + xfs_extnum_t ext_diff; /* extents to remove in current list */ + xfs_extnum_t nex1; /* number of extents before idx */ + xfs_extnum_t nex2; /* extents after idx + count */ + int nlists; /* entries in indirecton array */ + int page_idx = idx; /* index in target extent list */ + + ASSERT(ifp->if_flags & XFS_IFEXTIREC); + erp = xfs_iext_idx_to_irec(ifp, &page_idx, &erp_idx, 0); + ASSERT(erp != NULL); + nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ; + nex1 = page_idx; + ext_cnt = count; + while (ext_cnt) { + nex2 = MAX((erp->er_extcount - (nex1 + ext_cnt)), 0); + ext_diff = MIN(ext_cnt, (erp->er_extcount - nex1)); + /* + * Check for deletion of entire list; + * xfs_iext_irec_remove() updates extent offsets. + */ + if (ext_diff == erp->er_extcount) { + xfs_iext_irec_remove(ifp, erp_idx); + ext_cnt -= ext_diff; + nex1 = 0; + if (ext_cnt) { + ASSERT(erp_idx < ifp->if_real_bytes / + XFS_IEXT_BUFSZ); + erp = &ifp->if_u1.if_ext_irec[erp_idx]; + nex1 = 0; + continue; + } else { + break; + } + } + /* Move extents up (if needed) */ + if (nex2) { + memmove(&erp->er_extbuf[nex1], + &erp->er_extbuf[nex1 + ext_diff], + nex2 * sizeof(xfs_bmbt_rec_t)); + } + /* Zero out rest of page */ + memset(&erp->er_extbuf[nex1 + nex2], 0, (XFS_IEXT_BUFSZ - + ((nex1 + nex2) * sizeof(xfs_bmbt_rec_t)))); + /* Update remaining counters */ + erp->er_extcount -= ext_diff; + xfs_iext_irec_update_extoffs(ifp, erp_idx + 1, -ext_diff); + ext_cnt -= ext_diff; + nex1 = 0; + erp_idx++; + erp++; + } + ifp->if_bytes -= count * sizeof(xfs_bmbt_rec_t); + xfs_iext_irec_compact(ifp); +} + +/* + * Create, destroy, or resize a linear (direct) block of extents. 
+ */ +void +xfs_iext_realloc_direct( + xfs_ifork_t *ifp, /* inode fork pointer */ + int new_size) /* new size of extents */ +{ + int rnew_size; /* real new size of extents */ + + rnew_size = new_size; + + ASSERT(!(ifp->if_flags & XFS_IFEXTIREC) || + ((new_size >= 0) && (new_size <= XFS_IEXT_BUFSZ) && + (new_size != ifp->if_real_bytes))); + + /* Free extent records */ + if (new_size == 0) { + xfs_iext_destroy(ifp); + } + /* Resize direct extent list and zero any new bytes */ + else if (ifp->if_real_bytes) { + /* Check if extents will fit inside the inode */ + if (new_size <= XFS_INLINE_EXTS * sizeof(xfs_bmbt_rec_t)) { + xfs_iext_direct_to_inline(ifp, new_size / + (uint)sizeof(xfs_bmbt_rec_t)); + ifp->if_bytes = new_size; + return; + } + if ((new_size & (new_size - 1)) != 0) { + rnew_size = xfs_iroundup(new_size); + } + if (rnew_size != ifp->if_real_bytes) { + ifp->if_u1.if_extents = (xfs_bmbt_rec_t *) + kmem_realloc(ifp->if_u1.if_extents, + rnew_size, + ifp->if_real_bytes, + KM_SLEEP); + } + if (rnew_size > ifp->if_real_bytes) { + memset(&ifp->if_u1.if_extents[ifp->if_bytes / + (uint)sizeof(xfs_bmbt_rec_t)], 0, + rnew_size - ifp->if_real_bytes); + } + } + /* + * Switch from the inline extent buffer to a direct + * extent list. Be sure to include the inline extent + * bytes in new_size. + */ + else { + new_size += ifp->if_bytes; + if ((new_size & (new_size - 1)) != 0) { + rnew_size = xfs_iroundup(new_size); + } + xfs_iext_inline_to_direct(ifp, rnew_size); + } + ifp->if_real_bytes = rnew_size; + ifp->if_bytes = new_size; +} + +/* + * Switch from linear (direct) extent records to inline buffer. + */ +void +xfs_iext_direct_to_inline( + xfs_ifork_t *ifp, /* inode fork pointer */ + xfs_extnum_t nextents) /* number of extents in file */ +{ + ASSERT(ifp->if_flags & XFS_IFEXTENTS); + ASSERT(nextents <= XFS_INLINE_EXTS); + /* + * The inline buffer was zeroed when we switched + * from inline to direct extent allocation mode, + * so we don't need to clear it here. + */ + memcpy(ifp->if_u2.if_inline_ext, ifp->if_u1.if_extents, + nextents * sizeof(xfs_bmbt_rec_t)); + kmem_free(ifp->if_u1.if_extents, KM_SLEEP); + ifp->if_u1.if_extents = ifp->if_u2.if_inline_ext; + ifp->if_real_bytes = 0; +} + +/* + * Switch from inline buffer to linear (direct) extent records. + * new_size should already be rounded up to the next power of 2 + * by the caller (when appropriate), so use new_size as it is. + * However, since new_size may be rounded up, we can't update + * if_bytes here. It is the caller's responsibility to update + * if_bytes upon return. + */ +void +xfs_iext_inline_to_direct( + xfs_ifork_t *ifp, /* inode fork pointer */ + int new_size) /* number of extents in file */ +{ + ifp->if_u1.if_extents = (xfs_bmbt_rec_t *) + kmem_alloc(new_size, KM_SLEEP); + memset(ifp->if_u1.if_extents, 0, new_size); + if (ifp->if_bytes) { + memcpy(ifp->if_u1.if_extents, ifp->if_u2.if_inline_ext, + ifp->if_bytes); + memset(ifp->if_u2.if_inline_ext, 0, XFS_INLINE_EXTS * + sizeof(xfs_bmbt_rec_t)); + } + ifp->if_real_bytes = new_size; +} + +/* + * Resize an extent indirection array to new_size bytes. 
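
xfs_iext_realloc_direct() above keeps the direct extent buffer sized to a power of two: if new_size is not already a power of two it is rounded up with xfs_iroundup() before the kmem_realloc(). The "(x & (x - 1)) != 0" test and the round-up are standard bit tricks; a standalone sketch (demo_roundup_pow2 is written for this note, not the kernel helper):

#include <stdint.h>
#include <assert.h>

/* True if x is a power of two (x != 0 and only one bit set). */
static int demo_is_pow2(uint32_t x)
{
	return x != 0 && (x & (x - 1)) == 0;
}

/* Round x up to the next power of two (x > 0, result must fit in 32 bits). */
static uint32_t demo_roundup_pow2(uint32_t x)
{
	uint32_t p = 1;

	while (p < x)
		p <<= 1;
	return p;
}

int main(void)
{
	/* Only reallocate to power-of-two sizes, as the hunk above does. */
	uint32_t new_size = 1712;	/* e.g. 107 records of 16 bytes */
	uint32_t rnew_size = new_size;

	if (!demo_is_pow2(new_size))
		rnew_size = demo_roundup_pow2(new_size);
	assert(rnew_size == 2048);
	return 0;
}
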
+ */ +void +xfs_iext_realloc_indirect( + xfs_ifork_t *ifp, /* inode fork pointer */ + int new_size) /* new indirection array size */ +{ + int nlists; /* number of irec's (ex lists) */ + int size; /* current indirection array size */ + + ASSERT(ifp->if_flags & XFS_IFEXTIREC); + nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ; + size = nlists * sizeof(xfs_ext_irec_t); + ASSERT(ifp->if_real_bytes); + ASSERT((new_size >= 0) && (new_size != size)); + if (new_size == 0) { + xfs_iext_destroy(ifp); + } else { + ifp->if_u1.if_ext_irec = (xfs_ext_irec_t *) + kmem_realloc(ifp->if_u1.if_ext_irec, + new_size, size, KM_SLEEP); + } +} + +/* + * Switch from indirection array to linear (direct) extent allocations. + */ +void +xfs_iext_indirect_to_direct( + xfs_ifork_t *ifp) /* inode fork pointer */ +{ + xfs_bmbt_rec_t *ep; /* extent record pointer */ + xfs_extnum_t nextents; /* number of extents in file */ + int size; /* size of file extents */ + + ASSERT(ifp->if_flags & XFS_IFEXTIREC); + nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t); + ASSERT(nextents <= XFS_LINEAR_EXTS); + size = nextents * sizeof(xfs_bmbt_rec_t); + + xfs_iext_irec_compact_full(ifp); + ASSERT(ifp->if_real_bytes == XFS_IEXT_BUFSZ); + + ep = ifp->if_u1.if_ext_irec->er_extbuf; + kmem_free(ifp->if_u1.if_ext_irec, sizeof(xfs_ext_irec_t)); + ifp->if_flags &= ~XFS_IFEXTIREC; + ifp->if_u1.if_extents = ep; + ifp->if_bytes = size; + if (nextents < XFS_LINEAR_EXTS) { + xfs_iext_realloc_direct(ifp, size); + } +} + +/* + * Free incore file extents. + */ +void +xfs_iext_destroy( + xfs_ifork_t *ifp) /* inode fork pointer */ +{ + if (ifp->if_flags & XFS_IFEXTIREC) { + int erp_idx; + int nlists; + + nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ; + for (erp_idx = nlists - 1; erp_idx >= 0 ; erp_idx--) { + xfs_iext_irec_remove(ifp, erp_idx); + } + ifp->if_flags &= ~XFS_IFEXTIREC; + } else if (ifp->if_real_bytes) { + kmem_free(ifp->if_u1.if_extents, ifp->if_real_bytes); + } else if (ifp->if_bytes) { + memset(ifp->if_u2.if_inline_ext, 0, XFS_INLINE_EXTS * + sizeof(xfs_bmbt_rec_t)); + } + ifp->if_u1.if_extents = NULL; + ifp->if_real_bytes = 0; + ifp->if_bytes = 0; +} + +/* + * Return a pointer to the extent record for file system block bno. 
+ */ +xfs_bmbt_rec_t * /* pointer to found extent record */ +xfs_iext_bno_to_ext( + xfs_ifork_t *ifp, /* inode fork pointer */ + xfs_fileoff_t bno, /* block number to search for */ + xfs_extnum_t *idxp) /* index of target extent */ +{ + xfs_bmbt_rec_t *base; /* pointer to first extent */ + xfs_filblks_t blockcount = 0; /* number of blocks in extent */ + xfs_bmbt_rec_t *ep = NULL; /* pointer to target extent */ + xfs_ext_irec_t *erp = NULL; /* indirection array pointer */ + int high; /* upper boundry in search */ + xfs_extnum_t idx = 0; /* index of target extent */ + int low; /* lower boundry in search */ + xfs_extnum_t nextents; /* number of file extents */ + xfs_fileoff_t startoff = 0; /* start offset of extent */ + + nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t); + if (nextents == 0) { + *idxp = 0; + return NULL; + } + low = 0; + if (ifp->if_flags & XFS_IFEXTIREC) { + /* Find target extent list */ + int erp_idx = 0; + erp = xfs_iext_bno_to_irec(ifp, bno, &erp_idx); + base = erp->er_extbuf; + high = erp->er_extcount - 1; + } else { + base = ifp->if_u1.if_extents; + high = nextents - 1; + } + /* Binary search extent records */ + while (low <= high) { + idx = (low + high) >> 1; + ep = base + idx; + startoff = xfs_bmbt_get_startoff(ep); + blockcount = xfs_bmbt_get_blockcount(ep); + if (bno < startoff) { + high = idx - 1; + } else if (bno >= startoff + blockcount) { + low = idx + 1; + } else { + /* Convert back to file-based extent index */ + if (ifp->if_flags & XFS_IFEXTIREC) { + idx += erp->er_extoff; + } + *idxp = idx; + return ep; + } + } + /* Convert back to file-based extent index */ + if (ifp->if_flags & XFS_IFEXTIREC) { + idx += erp->er_extoff; + } + if (bno >= startoff + blockcount) { + if (++idx == nextents) { + ep = NULL; + } else { + ep = xfs_iext_get_ext(ifp, idx); + } + } + *idxp = idx; + return ep; +} + +/* + * Return a pointer to the indirection array entry containing the + * extent record for filesystem block bno. Store the index of the + * target irec in *erp_idxp. + */ +xfs_ext_irec_t * /* pointer to found extent record */ +xfs_iext_bno_to_irec( + xfs_ifork_t *ifp, /* inode fork pointer */ + xfs_fileoff_t bno, /* block number to search for */ + int *erp_idxp) /* irec index of target ext list */ +{ + xfs_ext_irec_t *erp = NULL; /* indirection array pointer */ + xfs_ext_irec_t *erp_next; /* next indirection array entry */ + int erp_idx; /* indirection array index */ + int nlists; /* number of extent irec's (lists) */ + int high; /* binary search upper limit */ + int low; /* binary search lower limit */ + + ASSERT(ifp->if_flags & XFS_IFEXTIREC); + nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ; + erp_idx = 0; + low = 0; + high = nlists - 1; + while (low <= high) { + erp_idx = (low + high) >> 1; + erp = &ifp->if_u1.if_ext_irec[erp_idx]; + erp_next = erp_idx < nlists - 1 ? erp + 1 : NULL; + if (bno < xfs_bmbt_get_startoff(erp->er_extbuf)) { + high = erp_idx - 1; + } else if (erp_next && bno >= + xfs_bmbt_get_startoff(erp_next->er_extbuf)) { + low = erp_idx + 1; + } else { + break; + } + } + *erp_idxp = erp_idx; + return erp; +} + +/* + * Return a pointer to the indirection array entry containing the + * extent record at file extent index *idxp. Store the index of the + * target irec in *erp_idxp and store the page index of the target + * extent record in *idxp. 
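
xfs_iext_bno_to_ext() above binary-searches the extent records for the one whose [startoff, startoff + blockcount) range contains the requested block, and xfs_iext_bno_to_irec() does the same over the indirection array first. The core search over a sorted set of non-overlapping ranges looks like this sketch (invented record type, same low/high/mid structure; hole handling in the real code differs):

#include <stdio.h>

struct demo_ext { long startoff, blockcount; };

/* Binary search sorted, non-overlapping extents for the one containing
 * 'bno'; returns its index, or -1 if bno falls in a hole or past the end. */
static int demo_bno_to_ext(const struct demo_ext *ext, int nextents, long bno)
{
	int low = 0, high = nextents - 1;

	while (low <= high) {
		int mid = (low + high) / 2;
		long start = ext[mid].startoff;
		long end = start + ext[mid].blockcount;

		if (bno < start)
			high = mid - 1;
		else if (bno >= end)
			low = mid + 1;
		else
			return mid;	/* start <= bno < end */
	}
	return -1;
}

int main(void)
{
	const struct demo_ext map[] = {
		{ 0, 8 }, { 16, 4 }, { 100, 50 }
	};

	printf("block 17 -> extent %d\n", demo_bno_to_ext(map, 3, 17));	/* 1 */
	printf("block 60 -> extent %d\n", demo_bno_to_ext(map, 3, 60));	/* -1 */
	return 0;
}
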
+ */ +xfs_ext_irec_t * +xfs_iext_idx_to_irec( + xfs_ifork_t *ifp, /* inode fork pointer */ + xfs_extnum_t *idxp, /* extent index (file -> page) */ + int *erp_idxp, /* pointer to target irec */ + int realloc) /* new bytes were just added */ +{ + xfs_ext_irec_t *prev; /* pointer to previous irec */ + xfs_ext_irec_t *erp = NULL; /* pointer to current irec */ + int erp_idx; /* indirection array index */ + int nlists; /* number of irec's (ex lists) */ + int high; /* binary search upper limit */ + int low; /* binary search lower limit */ + xfs_extnum_t page_idx = *idxp; /* extent index in target list */ + + ASSERT(ifp->if_flags & XFS_IFEXTIREC); + ASSERT(page_idx >= 0 && page_idx <= + ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t)); + nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ; + erp_idx = 0; + low = 0; + high = nlists - 1; + + /* Binary search extent irec's */ + while (low <= high) { + erp_idx = (low + high) >> 1; + erp = &ifp->if_u1.if_ext_irec[erp_idx]; + prev = erp_idx > 0 ? erp - 1 : NULL; + if (page_idx < erp->er_extoff || (page_idx == erp->er_extoff && + realloc && prev && prev->er_extcount < XFS_LINEAR_EXTS)) { + high = erp_idx - 1; + } else if (page_idx > erp->er_extoff + erp->er_extcount || + (page_idx == erp->er_extoff + erp->er_extcount && + !realloc)) { + low = erp_idx + 1; + } else if (page_idx == erp->er_extoff + erp->er_extcount && + erp->er_extcount == XFS_LINEAR_EXTS) { + ASSERT(realloc); + page_idx = 0; + erp_idx++; + erp = erp_idx < nlists ? erp + 1 : NULL; + break; + } else { + page_idx -= erp->er_extoff; + break; + } + } + *idxp = page_idx; + *erp_idxp = erp_idx; + return(erp); +} + +/* + * Allocate and initialize an indirection array once the space needed + * for incore extents increases above XFS_IEXT_BUFSZ. + */ +void +xfs_iext_irec_init( + xfs_ifork_t *ifp) /* inode fork pointer */ +{ + xfs_ext_irec_t *erp; /* indirection array pointer */ + xfs_extnum_t nextents; /* number of extents in file */ + + ASSERT(!(ifp->if_flags & XFS_IFEXTIREC)); + nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t); + ASSERT(nextents <= XFS_LINEAR_EXTS); + + erp = (xfs_ext_irec_t *) + kmem_alloc(sizeof(xfs_ext_irec_t), KM_SLEEP); + + if (nextents == 0) { + ifp->if_u1.if_extents = (xfs_bmbt_rec_t *) + kmem_alloc(XFS_IEXT_BUFSZ, KM_SLEEP); + } else if (!ifp->if_real_bytes) { + xfs_iext_inline_to_direct(ifp, XFS_IEXT_BUFSZ); + } else if (ifp->if_real_bytes < XFS_IEXT_BUFSZ) { + xfs_iext_realloc_direct(ifp, XFS_IEXT_BUFSZ); + } + erp->er_extbuf = ifp->if_u1.if_extents; + erp->er_extcount = nextents; + erp->er_extoff = 0; + + ifp->if_flags |= XFS_IFEXTIREC; + ifp->if_real_bytes = XFS_IEXT_BUFSZ; + ifp->if_bytes = nextents * sizeof(xfs_bmbt_rec_t); + ifp->if_u1.if_ext_irec = erp; + + return; +} + +/* + * Allocate and initialize a new entry in the indirection array. + */ +xfs_ext_irec_t * +xfs_iext_irec_new( + xfs_ifork_t *ifp, /* inode fork pointer */ + int erp_idx) /* index for new irec */ +{ + xfs_ext_irec_t *erp; /* indirection array pointer */ + int i; /* loop counter */ + int nlists; /* number of irec's (ex lists) */ + + ASSERT(ifp->if_flags & XFS_IFEXTIREC); + nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ; + + /* Resize indirection array */ + xfs_iext_realloc_indirect(ifp, ++nlists * + sizeof(xfs_ext_irec_t)); + /* + * Move records down in the array so the + * new page can use erp_idx. 
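
xfs_iext_idx_to_irec() above maps a file-wide extent index to the indirection-array entry holding it, using each entry's er_extoff (the cumulative index of its first extent) and er_extcount. Ignoring the realloc-time edge cases in the real code, the lookup reduces to finding the page whose [er_extoff, er_extoff + er_extcount) range covers the index and returning the offset within it; a simple linear-scan sketch of that mapping:

#include <stdio.h>

struct demo_page {
	int extoff;	/* index of the page's first extent, file-wide */
	int extcount;	/* extents currently stored in this page */
};

/* Map a file-wide extent index to (page, index-within-page).
 * Returns the page number, or -1 if idx is out of range. */
static int demo_idx_to_page(const struct demo_page *pages, int npages,
			    int idx, int *page_idx)
{
	for (int i = 0; i < npages; i++) {
		if (idx >= pages[i].extoff &&
		    idx < pages[i].extoff + pages[i].extcount) {
			*page_idx = idx - pages[i].extoff;
			return i;
		}
	}
	return -1;
}

int main(void)
{
	/* Three pages holding 4, 2 and 5 extents respectively. */
	const struct demo_page pages[] = { { 0, 4 }, { 4, 2 }, { 6, 5 } };
	int off, pg;

	pg = demo_idx_to_page(pages, 3, 7, &off);
	printf("extent 7 lives in page %d at offset %d\n", pg, off);	/* 2, 1 */
	return 0;
}
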
+ */ + erp = ifp->if_u1.if_ext_irec; + for (i = nlists - 1; i > erp_idx; i--) { + memmove(&erp[i], &erp[i-1], sizeof(xfs_ext_irec_t)); + } + ASSERT(i == erp_idx); + + /* Initialize new extent record */ + erp = ifp->if_u1.if_ext_irec; + erp[erp_idx].er_extbuf = (xfs_bmbt_rec_t *) + kmem_alloc(XFS_IEXT_BUFSZ, KM_SLEEP); + ifp->if_real_bytes = nlists * XFS_IEXT_BUFSZ; + memset(erp[erp_idx].er_extbuf, 0, XFS_IEXT_BUFSZ); + erp[erp_idx].er_extcount = 0; + erp[erp_idx].er_extoff = erp_idx > 0 ? + erp[erp_idx-1].er_extoff + erp[erp_idx-1].er_extcount : 0; + return (&erp[erp_idx]); +} + +/* + * Remove a record from the indirection array. + */ +void +xfs_iext_irec_remove( + xfs_ifork_t *ifp, /* inode fork pointer */ + int erp_idx) /* irec index to remove */ +{ + xfs_ext_irec_t *erp; /* indirection array pointer */ + int i; /* loop counter */ + int nlists; /* number of irec's (ex lists) */ + + ASSERT(ifp->if_flags & XFS_IFEXTIREC); + nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ; + erp = &ifp->if_u1.if_ext_irec[erp_idx]; + if (erp->er_extbuf) { + xfs_iext_irec_update_extoffs(ifp, erp_idx + 1, + -erp->er_extcount); + kmem_free(erp->er_extbuf, XFS_IEXT_BUFSZ); + } + /* Compact extent records */ + erp = ifp->if_u1.if_ext_irec; + for (i = erp_idx; i < nlists - 1; i++) { + memmove(&erp[i], &erp[i+1], sizeof(xfs_ext_irec_t)); + } + /* + * Manually free the last extent record from the indirection + * array. A call to xfs_iext_realloc_indirect() with a size + * of zero would result in a call to xfs_iext_destroy() which + * would in turn call this function again, creating a nasty + * infinite loop. + */ + if (--nlists) { + xfs_iext_realloc_indirect(ifp, + nlists * sizeof(xfs_ext_irec_t)); + } else { + kmem_free(ifp->if_u1.if_ext_irec, + sizeof(xfs_ext_irec_t)); + } + ifp->if_real_bytes = nlists * XFS_IEXT_BUFSZ; +} + +/* + * This is called to clean up large amounts of unused memory allocated + * by the indirection array. Before compacting anything though, verify + * that the indirection array is still needed and switch back to the + * linear extent list (or even the inline buffer) if possible. The + * compaction policy is as follows: + * + * Full Compaction: Extents fit into a single page (or inline buffer) + * Full Compaction: Extents occupy less than 10% of allocated space + * Partial Compaction: Extents occupy > 10% and < 50% of allocated space + * No Compaction: Extents occupy at least 50% of allocated space + */ +void +xfs_iext_irec_compact( + xfs_ifork_t *ifp) /* inode fork pointer */ +{ + xfs_extnum_t nextents; /* number of extents in file */ + int nlists; /* number of irec's (ex lists) */ + + ASSERT(ifp->if_flags & XFS_IFEXTIREC); + nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ; + nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t); + + if (nextents == 0) { + xfs_iext_destroy(ifp); + } else if (nextents <= XFS_INLINE_EXTS) { + xfs_iext_indirect_to_direct(ifp); + xfs_iext_direct_to_inline(ifp, nextents); + } else if (nextents <= XFS_LINEAR_EXTS) { + xfs_iext_indirect_to_direct(ifp); + } else if (nextents < (nlists * XFS_LINEAR_EXTS) >> 3) { + xfs_iext_irec_compact_full(ifp); + } else if (nextents < (nlists * XFS_LINEAR_EXTS) >> 1) { + xfs_iext_irec_compact_pages(ifp); + } +} + +/* + * Combine extents from neighboring extent pages. 
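For concreteness, the compaction thresholds above can be worked through numerically. Assuming a 16-byte xfs_bmbt_rec_t (two 64-bit words), XFS_LINEAR_EXTS works out to 4096 / 16 = 256 extents per buffer; note that the >> 3 test in xfs_iext_irec_compact() corresponds to 12.5% occupancy, slightly looser than the 10% quoted in the comment. A minimal standalone sketch of the policy decision, using illustrative constants rather than the kernel definitions:

#include <stdio.h>

/* Illustrative stand-ins for XFS_IEXT_BUFSZ and XFS_LINEAR_EXTS,
 * assuming a 16-byte extent record. */
#define IEXT_BUFSZ	4096
#define REC_SIZE	16
#define LINEAR_EXTS	(IEXT_BUFSZ / REC_SIZE)		/* 256 */

static const char *compact_policy(int nextents, int nlists)
{
	if (nextents <= LINEAR_EXTS)
		return "full: back to a single buffer (or inline)";
	if (nextents < (nlists * LINEAR_EXTS) >> 3)	/* under 12.5% used */
		return "full compaction";
	if (nextents < (nlists * LINEAR_EXTS) >> 1)	/* under 50% used */
		return "partial: merge neighboring pages";
	return "no compaction";
}

int main(void)
{
	/* Four buffers hold up to 1024 extents: full below 128, partial below 512. */
	printf("%s\n", compact_policy(100, 4));
	printf("%s\n", compact_policy(300, 4));
	printf("%s\n", compact_policy(700, 4));
	return 0;
}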
+ */ +void +xfs_iext_irec_compact_pages( + xfs_ifork_t *ifp) /* inode fork pointer */ +{ + xfs_ext_irec_t *erp, *erp_next;/* pointers to irec entries */ + int erp_idx = 0; /* indirection array index */ + int nlists; /* number of irec's (ex lists) */ + + ASSERT(ifp->if_flags & XFS_IFEXTIREC); + nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ; + while (erp_idx < nlists - 1) { + erp = &ifp->if_u1.if_ext_irec[erp_idx]; + erp_next = erp + 1; + if (erp_next->er_extcount <= + (XFS_LINEAR_EXTS - erp->er_extcount)) { + memmove(&erp->er_extbuf[erp->er_extcount], + erp_next->er_extbuf, erp_next->er_extcount * + sizeof(xfs_bmbt_rec_t)); + erp->er_extcount += erp_next->er_extcount; + /* + * Free page before removing extent record + * so er_extoffs don't get modified in + * xfs_iext_irec_remove. + */ + kmem_free(erp_next->er_extbuf, XFS_IEXT_BUFSZ); + erp_next->er_extbuf = NULL; + xfs_iext_irec_remove(ifp, erp_idx + 1); + nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ; + } else { + erp_idx++; + } + } +} + +/* + * Fully compact the extent records managed by the indirection array. + */ +void +xfs_iext_irec_compact_full( + xfs_ifork_t *ifp) /* inode fork pointer */ +{ + xfs_bmbt_rec_t *ep, *ep_next; /* extent record pointers */ + xfs_ext_irec_t *erp, *erp_next; /* extent irec pointers */ + int erp_idx = 0; /* extent irec index */ + int ext_avail; /* empty entries in ex list */ + int ext_diff; /* number of exts to add */ + int nlists; /* number of irec's (ex lists) */ + + ASSERT(ifp->if_flags & XFS_IFEXTIREC); + nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ; + erp = ifp->if_u1.if_ext_irec; + ep = &erp->er_extbuf[erp->er_extcount]; + erp_next = erp + 1; + ep_next = erp_next->er_extbuf; + while (erp_idx < nlists - 1) { + ext_avail = XFS_LINEAR_EXTS - erp->er_extcount; + ext_diff = MIN(ext_avail, erp_next->er_extcount); + memcpy(ep, ep_next, ext_diff * sizeof(xfs_bmbt_rec_t)); + erp->er_extcount += ext_diff; + erp_next->er_extcount -= ext_diff; + /* Remove next page */ + if (erp_next->er_extcount == 0) { + /* + * Free page before removing extent record + * so er_extoffs don't get modified in + * xfs_iext_irec_remove. + */ + kmem_free(erp_next->er_extbuf, + erp_next->er_extcount * sizeof(xfs_bmbt_rec_t)); + erp_next->er_extbuf = NULL; + xfs_iext_irec_remove(ifp, erp_idx + 1); + erp = &ifp->if_u1.if_ext_irec[erp_idx]; + nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ; + /* Update next page */ + } else { + /* Move rest of page up to become next new page */ + memmove(erp_next->er_extbuf, ep_next, + erp_next->er_extcount * sizeof(xfs_bmbt_rec_t)); + ep_next = erp_next->er_extbuf; + memset(&ep_next[erp_next->er_extcount], 0, + (XFS_LINEAR_EXTS - erp_next->er_extcount) * + sizeof(xfs_bmbt_rec_t)); + } + if (erp->er_extcount == XFS_LINEAR_EXTS) { + erp_idx++; + if (erp_idx < nlists) + erp = &ifp->if_u1.if_ext_irec[erp_idx]; + else + break; + } + ep = &erp->er_extbuf[erp->er_extcount]; + erp_next = erp + 1; + ep_next = erp_next->er_extbuf; + } +} + +/* + * This is called to update the er_extoff field in the indirection + * array when extents have been added or removed from one of the + * extent lists. erp_idx contains the irec index to begin updating + * at and ext_diff contains the number of extents that were added + * or removed. 
+ */ +void +xfs_iext_irec_update_extoffs( + xfs_ifork_t *ifp, /* inode fork pointer */ + int erp_idx, /* irec index to update */ + int ext_diff) /* number of new extents */ +{ + int i; /* loop counter */ + int nlists; /* number of irec's (ex lists */ + + ASSERT(ifp->if_flags & XFS_IFEXTIREC); + nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ; + for (i = erp_idx; i < nlists; i++) { + ifp->if_u1.if_ext_irec[i].er_extoff += ext_diff; + } +} diff --git a/fs/xfs/xfs_inode.h b/fs/xfs/xfs_inode.h index 1cfbcf18ce8..39ef9c36ea5 100644 --- a/fs/xfs/xfs_inode.h +++ b/fs/xfs/xfs_inode.h @@ -25,10 +25,37 @@ #define XFS_ATTR_FORK 1 /* + * The following xfs_ext_irec_t struct introduces a second (top) level + * to the in-core extent allocation scheme. These structs are allocated + * in a contiguous block, creating an indirection array where each entry + * (irec) contains a pointer to a buffer of in-core extent records which + * it manages. Each extent buffer is 4k in size, since 4k is the system + * page size on Linux i386 and systems with larger page sizes don't seem + * to gain much, if anything, by using their native page size as the + * extent buffer size. Also, using 4k extent buffers everywhere provides + * a consistent interface for CXFS across different platforms. + * + * There is currently no limit on the number of irec's (extent lists) + * allowed, so heavily fragmented files may require an indirection array + * which spans multiple system pages of memory. The number of extents + * which would require this amount of contiguous memory is very large + * and should not cause problems in the foreseeable future. However, + * if the memory needed for the contiguous array ever becomes a problem, + * it is possible that a third level of indirection may be required. + */ +typedef struct xfs_ext_irec { + xfs_bmbt_rec_t *er_extbuf; /* block of extent records */ + xfs_extnum_t er_extoff; /* extent offset in file */ + xfs_extnum_t er_extcount; /* number of extents in page/block */ +} xfs_ext_irec_t; + +/* * File incore extent information, present for each of data & attr forks. */ -#define XFS_INLINE_EXTS 2 -#define XFS_INLINE_DATA 32 +#define XFS_IEXT_BUFSZ 4096 +#define XFS_LINEAR_EXTS (XFS_IEXT_BUFSZ / (uint)sizeof(xfs_bmbt_rec_t)) +#define XFS_INLINE_EXTS 2 +#define XFS_INLINE_DATA 32 typedef struct xfs_ifork { int if_bytes; /* bytes in if_u1 */ int if_real_bytes; /* bytes allocated in if_u1 */ @@ -39,6 +66,7 @@ typedef struct xfs_ifork { xfs_extnum_t if_lastex; /* last if_extents used */ union { xfs_bmbt_rec_t *if_extents; /* linear map file exts */ + xfs_ext_irec_t *if_ext_irec; /* irec map file exts */ char *if_data; /* inline file data */ } if_u1; union { @@ -61,20 +89,16 @@ typedef struct xfs_ifork { /* * Per-fork incore inode flags. */ -#define XFS_IFINLINE 0x0001 /* Inline data is read in */ -#define XFS_IFEXTENTS 0x0002 /* All extent pointers are read in */ -#define XFS_IFBROOT 0x0004 /* i_broot points to the bmap b-tree root */ +#define XFS_IFINLINE 0x01 /* Inline data is read in */ +#define XFS_IFEXTENTS 0x02 /* All extent pointers are read in */ +#define XFS_IFBROOT 0x04 /* i_broot points to the bmap b-tree root */ +#define XFS_IFEXTIREC 0x08 /* Indirection array of extent blocks */ /* - * Flags for xfs_imap() and xfs_dilocate(). + * Flags for xfs_itobp(), xfs_imap() and xfs_dilocate(). */ -#define XFS_IMAP_LOOKUP 0x1 - -/* - * Maximum number of extent pointers in if_u1.if_extents. 
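To make the er_extoff / er_extcount bookkeeping in the new xfs_ext_irec_t concrete, here is a minimal self-contained sketch of how a file-wide extent index resolves to an indirection entry plus an index within that entry's 4k buffer. The types and the linear scan are simplifications for illustration; the kernel code (xfs_iext_idx_to_irec) binary-searches the same information and additionally handles the realloc boundary case.

#include <stddef.h>

/* Simplified stand-in for xfs_ext_irec_t; illustration only. */
struct irec {
	void	*extbuf;	/* block of extent records (er_extbuf) */
	int	extoff;		/* file-wide index of first extent held here */
	int	extcount;	/* number of extents currently held here */
};

/*
 * Resolve file-wide extent index 'idx' to the irec that holds it and
 * the index within that irec's buffer; NULL if idx is out of range.
 */
static struct irec *
idx_to_irec(struct irec *irecs, int nlists, int idx, int *buf_idx)
{
	int i;

	for (i = 0; i < nlists; i++) {
		if (idx >= irecs[i].extoff &&
		    idx <  irecs[i].extoff + irecs[i].extcount) {
			*buf_idx = idx - irecs[i].extoff;
			return &irecs[i];
		}
	}
	return NULL;
}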
- */ -#define XFS_MAX_INCORE_EXTENTS 32768 - +#define XFS_IMAP_LOOKUP 0x1 +#define XFS_IMAP_BULKSTAT 0x2 #ifdef __KERNEL__ struct bhv_desc; @@ -398,7 +422,7 @@ int xfs_finish_reclaim_all(struct xfs_mount *, int); */ int xfs_itobp(struct xfs_mount *, struct xfs_trans *, xfs_inode_t *, xfs_dinode_t **, struct xfs_buf **, - xfs_daddr_t); + xfs_daddr_t, uint); int xfs_iread(struct xfs_mount *, struct xfs_trans *, xfs_ino_t, xfs_inode_t **, xfs_daddr_t); int xfs_iread_extents(struct xfs_trans *, xfs_inode_t *, int); @@ -440,6 +464,32 @@ xfs_inode_t *xfs_vtoi(struct vnode *vp); void xfs_synchronize_atime(xfs_inode_t *); +xfs_bmbt_rec_t *xfs_iext_get_ext(xfs_ifork_t *, xfs_extnum_t); +void xfs_iext_insert(xfs_ifork_t *, xfs_extnum_t, xfs_extnum_t, + xfs_bmbt_irec_t *); +void xfs_iext_add(xfs_ifork_t *, xfs_extnum_t, int); +void xfs_iext_add_indirect_multi(xfs_ifork_t *, int, xfs_extnum_t, int); +void xfs_iext_remove(xfs_ifork_t *, xfs_extnum_t, int); +void xfs_iext_remove_inline(xfs_ifork_t *, xfs_extnum_t, int); +void xfs_iext_remove_direct(xfs_ifork_t *, xfs_extnum_t, int); +void xfs_iext_remove_indirect(xfs_ifork_t *, xfs_extnum_t, int); +void xfs_iext_realloc_direct(xfs_ifork_t *, int); +void xfs_iext_realloc_indirect(xfs_ifork_t *, int); +void xfs_iext_indirect_to_direct(xfs_ifork_t *); +void xfs_iext_direct_to_inline(xfs_ifork_t *, xfs_extnum_t); +void xfs_iext_inline_to_direct(xfs_ifork_t *, int); +void xfs_iext_destroy(xfs_ifork_t *); +xfs_bmbt_rec_t *xfs_iext_bno_to_ext(xfs_ifork_t *, xfs_fileoff_t, int *); +xfs_ext_irec_t *xfs_iext_bno_to_irec(xfs_ifork_t *, xfs_fileoff_t, int *); +xfs_ext_irec_t *xfs_iext_idx_to_irec(xfs_ifork_t *, xfs_extnum_t *, int *, int); +void xfs_iext_irec_init(xfs_ifork_t *); +xfs_ext_irec_t *xfs_iext_irec_new(xfs_ifork_t *, int); +void xfs_iext_irec_remove(xfs_ifork_t *, int); +void xfs_iext_irec_compact(xfs_ifork_t *); +void xfs_iext_irec_compact_pages(xfs_ifork_t *); +void xfs_iext_irec_compact_full(xfs_ifork_t *); +void xfs_iext_irec_update_extoffs(xfs_ifork_t *, int, int); + #define xfs_ipincount(ip) ((unsigned int) atomic_read(&ip->i_pincount)) #ifdef DEBUG diff --git a/fs/xfs/xfs_iomap.c b/fs/xfs/xfs_iomap.c index 788917f355c..d5dfedcb892 100644 --- a/fs/xfs/xfs_iomap.c +++ b/fs/xfs/xfs_iomap.c @@ -76,7 +76,7 @@ xfs_iomap_enter_trace( (void *)((unsigned long)count), (void *)((unsigned long)((io->io_new_size >> 32) & 0xffffffff)), (void *)((unsigned long)(io->io_new_size & 0xffffffff)), - (void *)NULL, + (void *)((unsigned long)current_pid()), (void *)NULL, (void *)NULL, (void *)NULL, diff --git a/fs/xfs/xfs_itable.c b/fs/xfs/xfs_itable.c index c59450e1be4..32247b6bfee 100644 --- a/fs/xfs/xfs_itable.c +++ b/fs/xfs/xfs_itable.c @@ -562,7 +562,8 @@ xfs_bulkstat( if (bp) xfs_buf_relse(bp); error = xfs_itobp(mp, NULL, ip, - &dip, &bp, bno); + &dip, &bp, bno, + XFS_IMAP_BULKSTAT); if (!error) clustidx = ip->i_boffset / mp->m_sb.sb_inodesize; kmem_zone_free(xfs_inode_zone, ip); @@ -570,6 +571,8 @@ xfs_bulkstat( mp, XFS_ERRTAG_BULKSTAT_READ_CHUNK, XFS_RANDOM_BULKSTAT_READ_CHUNK)) { bp = NULL; + ubleft = 0; + rval = error; break; } } diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c index 7d46cbd6a07..add13f507ed 100644 --- a/fs/xfs/xfs_log_recover.c +++ b/fs/xfs/xfs_log_recover.c @@ -3249,7 +3249,7 @@ xlog_recover_process_iunlinks( * next inode in the bucket. 
*/ error = xfs_itobp(mp, NULL, ip, &dip, - &ibp, 0); + &ibp, 0, 0); ASSERT(error || (dip != NULL)); } diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c index 62188ea392c..20e8abc16d1 100644 --- a/fs/xfs/xfs_mount.c +++ b/fs/xfs/xfs_mount.c @@ -51,11 +51,32 @@ STATIC int xfs_uuid_mount(xfs_mount_t *); STATIC void xfs_uuid_unmount(xfs_mount_t *mp); STATIC void xfs_unmountfs_wait(xfs_mount_t *); + +#ifdef HAVE_PERCPU_SB +STATIC void xfs_icsb_destroy_counters(xfs_mount_t *); +STATIC void xfs_icsb_balance_counter(xfs_mount_t *, xfs_sb_field_t, int); +STATIC void xfs_icsb_sync_counters(xfs_mount_t *); +STATIC int xfs_icsb_modify_counters(xfs_mount_t *, xfs_sb_field_t, + int, int); +STATIC int xfs_icsb_modify_counters_locked(xfs_mount_t *, xfs_sb_field_t, + int, int); +STATIC int xfs_icsb_disable_counter(xfs_mount_t *, xfs_sb_field_t); + +#else + +#define xfs_icsb_destroy_counters(mp) do { } while (0) +#define xfs_icsb_balance_counter(mp, a, b) do { } while (0) +#define xfs_icsb_sync_counters(mp) do { } while (0) +#define xfs_icsb_modify_counters(mp, a, b, c) do { } while (0) +#define xfs_icsb_modify_counters_locked(mp, a, b, c) do { } while (0) + +#endif + static const struct { - short offset; - short type; /* 0 = integer - * 1 = binary / string (no translation) - */ + short offset; + short type; /* 0 = integer + * 1 = binary / string (no translation) + */ } xfs_sb_info[] = { { offsetof(xfs_sb_t, sb_magicnum), 0 }, { offsetof(xfs_sb_t, sb_blocksize), 0 }, @@ -113,7 +134,11 @@ xfs_mount_init(void) { xfs_mount_t *mp; - mp = kmem_zalloc(sizeof(*mp), KM_SLEEP); + mp = kmem_zalloc(sizeof(xfs_mount_t), KM_SLEEP); + + if (xfs_icsb_init_counters(mp)) { + mp->m_flags |= XFS_MOUNT_NO_PERCPU_SB; + } AIL_LOCKINIT(&mp->m_ail_lock, "xfs_ail"); spinlock_init(&mp->m_sb_lock, "xfs_sb"); @@ -136,8 +161,8 @@ xfs_mount_init(void) */ void xfs_mount_free( - xfs_mount_t *mp, - int remove_bhv) + xfs_mount_t *mp, + int remove_bhv) { if (mp->m_ihash) xfs_ihash_free(mp); @@ -177,6 +202,7 @@ xfs_mount_free( VFS_REMOVEBHV(vfsp, &mp->m_bhv); } + xfs_icsb_destroy_counters(mp); kmem_free(mp, sizeof(xfs_mount_t)); } @@ -242,9 +268,12 @@ xfs_mount_validate_sb( sbp->sb_blocklog > XFS_MAX_BLOCKSIZE_LOG || sbp->sb_inodesize < XFS_DINODE_MIN_SIZE || sbp->sb_inodesize > XFS_DINODE_MAX_SIZE || + sbp->sb_inodelog < XFS_DINODE_MIN_LOG || + sbp->sb_inodelog > XFS_DINODE_MAX_LOG || + (sbp->sb_blocklog - sbp->sb_inodelog != sbp->sb_inopblog) || (sbp->sb_rextsize * sbp->sb_blocksize > XFS_MAX_RTEXTSIZE) || (sbp->sb_rextsize * sbp->sb_blocksize < XFS_MIN_RTEXTSIZE) || - sbp->sb_imax_pct > 100)) { + (sbp->sb_imax_pct > 100 || sbp->sb_imax_pct < 1))) { cmn_err(CE_WARN, "XFS: SB sanity check 1 failed"); XFS_CORRUPTION_ERROR("xfs_mount_validate_sb(3)", XFS_ERRLEVEL_LOW, mp, sbp); @@ -527,6 +556,10 @@ xfs_readsb(xfs_mount_t *mp) ASSERT(XFS_BUF_VALUSEMA(bp) <= 0); } + xfs_icsb_balance_counter(mp, XFS_SBS_ICOUNT, 0); + xfs_icsb_balance_counter(mp, XFS_SBS_IFREE, 0); + xfs_icsb_balance_counter(mp, XFS_SBS_FDBLOCKS, 0); + mp->m_sb_bp = bp; xfs_buf_relse(bp); ASSERT(XFS_BUF_VALUSEMA(bp) > 0); @@ -1154,6 +1187,9 @@ xfs_unmountfs_writesb(xfs_mount_t *mp) sbp = xfs_getsb(mp, 0); if (!(XFS_MTOVFS(mp)->vfs_flag & VFS_RDONLY || XFS_FORCED_SHUTDOWN(mp))) { + + xfs_icsb_sync_counters(mp); + /* * mark shared-readonly if desired */ @@ -1227,7 +1263,6 @@ xfs_mod_sb(xfs_trans_t *tp, __int64_t fields) xfs_trans_log_buf(tp, bp, first, last); } - /* * xfs_mod_incore_sb_unlocked() is a utility routine common used to apply * a delta to a specified field in the 
in-core superblock. Simply @@ -1237,7 +1272,7 @@ xfs_mod_sb(xfs_trans_t *tp, __int64_t fields) * * The SB_LOCK must be held when this routine is called. */ -STATIC int +int xfs_mod_incore_sb_unlocked(xfs_mount_t *mp, xfs_sb_field_t field, int delta, int rsvd) { @@ -1406,9 +1441,26 @@ xfs_mod_incore_sb(xfs_mount_t *mp, xfs_sb_field_t field, int delta, int rsvd) unsigned long s; int status; - s = XFS_SB_LOCK(mp); - status = xfs_mod_incore_sb_unlocked(mp, field, delta, rsvd); - XFS_SB_UNLOCK(mp, s); + /* check for per-cpu counters */ + switch (field) { +#ifdef HAVE_PERCPU_SB + case XFS_SBS_ICOUNT: + case XFS_SBS_IFREE: + case XFS_SBS_FDBLOCKS: + if (!(mp->m_flags & XFS_MOUNT_NO_PERCPU_SB)) { + status = xfs_icsb_modify_counters(mp, field, + delta, rsvd); + break; + } + /* FALLTHROUGH */ +#endif + default: + s = XFS_SB_LOCK(mp); + status = xfs_mod_incore_sb_unlocked(mp, field, delta, rsvd); + XFS_SB_UNLOCK(mp, s); + break; + } + return status; } @@ -1445,8 +1497,26 @@ xfs_mod_incore_sb_batch(xfs_mount_t *mp, xfs_mod_sb_t *msb, uint nmsb, int rsvd) * from the loop so we'll fall into the undo loop * below. */ - status = xfs_mod_incore_sb_unlocked(mp, msbp->msb_field, - msbp->msb_delta, rsvd); + switch (msbp->msb_field) { +#ifdef HAVE_PERCPU_SB + case XFS_SBS_ICOUNT: + case XFS_SBS_IFREE: + case XFS_SBS_FDBLOCKS: + if (!(mp->m_flags & XFS_MOUNT_NO_PERCPU_SB)) { + status = xfs_icsb_modify_counters_locked(mp, + msbp->msb_field, + msbp->msb_delta, rsvd); + break; + } + /* FALLTHROUGH */ +#endif + default: + status = xfs_mod_incore_sb_unlocked(mp, + msbp->msb_field, + msbp->msb_delta, rsvd); + break; + } + if (status != 0) { break; } @@ -1463,8 +1533,28 @@ xfs_mod_incore_sb_batch(xfs_mount_t *mp, xfs_mod_sb_t *msb, uint nmsb, int rsvd) if (status != 0) { msbp--; while (msbp >= msb) { - status = xfs_mod_incore_sb_unlocked(mp, - msbp->msb_field, -(msbp->msb_delta), rsvd); + switch (msbp->msb_field) { +#ifdef HAVE_PERCPU_SB + case XFS_SBS_ICOUNT: + case XFS_SBS_IFREE: + case XFS_SBS_FDBLOCKS: + if (!(mp->m_flags & XFS_MOUNT_NO_PERCPU_SB)) { + status = + xfs_icsb_modify_counters_locked(mp, + msbp->msb_field, + -(msbp->msb_delta), + rsvd); + break; + } + /* FALLTHROUGH */ +#endif + default: + status = xfs_mod_incore_sb_unlocked(mp, + msbp->msb_field, + -(msbp->msb_delta), + rsvd); + break; + } ASSERT(status == 0); msbp--; } @@ -1577,3 +1667,525 @@ xfs_mount_log_sbunit( xfs_mod_sb(tp, fields); xfs_trans_commit(tp, 0, NULL); } + + +#ifdef HAVE_PERCPU_SB +/* + * Per-cpu incore superblock counters + * + * Simple concept, difficult implementation + * + * Basically, replace the incore superblock counters with a distributed per cpu + * counter for contended fields (e.g. free block count). + * + * Difficulties arise in that the incore sb is used for ENOSPC checking, and + * hence needs to be accurately read when we are running low on space. Hence + * there is a method to enable and disable the per-cpu counters based on how + * much "stuff" is available in them. + * + * Basically, a counter is enabled if there is enough free resource to justify + * running a per-cpu fast-path. If the per-cpu counter runs out (i.e. a local + * ENOSPC), then we disable the counters to synchronise all callers and + * re-distribute the available resources. + * + * If, once we redistributed the available resources, we still get a failure, + * we disable the per-cpu counter and go through the slow path. + * + * The slow path is the current xfs_mod_incore_sb() function. 
This means that + * when we disable a per-cpu counter, we need to drain it's resources back to + * the global superblock. We do this after disabling the counter to prevent + * more threads from queueing up on the counter. + * + * Essentially, this means that we still need a lock in the fast path to enable + * synchronisation between the global counters and the per-cpu counters. This + * is not a problem because the lock will be local to a CPU almost all the time + * and have little contention except when we get to ENOSPC conditions. + * + * Basically, this lock becomes a barrier that enables us to lock out the fast + * path while we do things like enabling and disabling counters and + * synchronising the counters. + * + * Locking rules: + * + * 1. XFS_SB_LOCK() before picking up per-cpu locks + * 2. per-cpu locks always picked up via for_each_online_cpu() order + * 3. accurate counter sync requires XFS_SB_LOCK + per cpu locks + * 4. modifying per-cpu counters requires holding per-cpu lock + * 5. modifying global counters requires holding XFS_SB_LOCK + * 6. enabling or disabling a counter requires holding the XFS_SB_LOCK + * and _none_ of the per-cpu locks. + * + * Disabled counters are only ever re-enabled by a balance operation + * that results in more free resources per CPU than a given threshold. + * To ensure counters don't remain disabled, they are rebalanced when + * the global resource goes above a higher threshold (i.e. some hysteresis + * is present to prevent thrashing). + */ + +/* + * hot-plug CPU notifier support. + * + * We cannot use the hotcpu_register() function because it does + * not allow notifier instances. We need a notifier per filesystem + * as we need to be able to identify the filesystem to balance + * the counters out. This is acheived by having a notifier block + * embedded in the xfs_mount_t and doing pointer magic to get the + * mount pointer from the notifier block address. + */ +STATIC int +xfs_icsb_cpu_notify( + struct notifier_block *nfb, + unsigned long action, + void *hcpu) +{ + xfs_icsb_cnts_t *cntp; + xfs_mount_t *mp; + int s; + + mp = (xfs_mount_t *)container_of(nfb, xfs_mount_t, m_icsb_notifier); + cntp = (xfs_icsb_cnts_t *) + per_cpu_ptr(mp->m_sb_cnts, (unsigned long)hcpu); + switch (action) { + case CPU_UP_PREPARE: + /* Easy Case - initialize the area and locks, and + * then rebalance when online does everything else for us. */ + memset(cntp, 0, sizeof(xfs_icsb_cnts_t)); + break; + case CPU_ONLINE: + xfs_icsb_balance_counter(mp, XFS_SBS_ICOUNT, 0); + xfs_icsb_balance_counter(mp, XFS_SBS_IFREE, 0); + xfs_icsb_balance_counter(mp, XFS_SBS_FDBLOCKS, 0); + break; + case CPU_DEAD: + /* Disable all the counters, then fold the dead cpu's + * count into the total on the global superblock and + * re-enable the counters. 
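The enable/disable scheme in the block comment above boils down to a common pattern: modify a per-CPU shard when it has headroom, and fall back to a single globally locked counter when it does not. The following user-space sketch shows only that shape; the names, the mutexes and the initial split are invented for illustration and leave out the rebalancing and counter-disable steps the real code performs.

#include <pthread.h>

#define NCPUS	4

struct shard {
	long		count;
	pthread_mutex_t	lock;
};

static struct shard	percpu[NCPUS];
static long		global_count;
static pthread_mutex_t	global_lock = PTHREAD_MUTEX_INITIALIZER;

static void counters_init(long total)
{
	int i;

	for (i = 0; i < NCPUS; i++) {
		pthread_mutex_init(&percpu[i].lock, NULL);
		percpu[i].count = total / NCPUS;	/* even split */
	}
	global_count = total % NCPUS;			/* remainder stays global */
}

/* Apply 'delta' on this CPU if the local shard has headroom, else take
 * the global lock and apply it there (the "slow path"). */
static int mod_counter(int cpu, long delta)
{
	int ret = 0;

	pthread_mutex_lock(&percpu[cpu].lock);
	if (percpu[cpu].count + delta >= 0) {		/* fast path */
		percpu[cpu].count += delta;
		pthread_mutex_unlock(&percpu[cpu].lock);
		return 0;
	}
	pthread_mutex_unlock(&percpu[cpu].lock);

	pthread_mutex_lock(&global_lock);		/* slow path */
	if (global_count + delta >= 0)
		global_count += delta;
	else
		ret = -1;				/* ENOSPC analogue */
	pthread_mutex_unlock(&global_lock);
	return ret;
}

int main(void)
{
	counters_init(1000);
	return mod_counter(0, -10);	/* consume 10 units on CPU 0 */
}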
*/ + s = XFS_SB_LOCK(mp); + xfs_icsb_disable_counter(mp, XFS_SBS_ICOUNT); + xfs_icsb_disable_counter(mp, XFS_SBS_IFREE); + xfs_icsb_disable_counter(mp, XFS_SBS_FDBLOCKS); + + mp->m_sb.sb_icount += cntp->icsb_icount; + mp->m_sb.sb_ifree += cntp->icsb_ifree; + mp->m_sb.sb_fdblocks += cntp->icsb_fdblocks; + + memset(cntp, 0, sizeof(xfs_icsb_cnts_t)); + + xfs_icsb_balance_counter(mp, XFS_SBS_ICOUNT, XFS_ICSB_SB_LOCKED); + xfs_icsb_balance_counter(mp, XFS_SBS_IFREE, XFS_ICSB_SB_LOCKED); + xfs_icsb_balance_counter(mp, XFS_SBS_FDBLOCKS, XFS_ICSB_SB_LOCKED); + XFS_SB_UNLOCK(mp, s); + break; + } + + return NOTIFY_OK; +} + +int +xfs_icsb_init_counters( + xfs_mount_t *mp) +{ + xfs_icsb_cnts_t *cntp; + int i; + + mp->m_sb_cnts = alloc_percpu(xfs_icsb_cnts_t); + if (mp->m_sb_cnts == NULL) + return -ENOMEM; + + mp->m_icsb_notifier.notifier_call = xfs_icsb_cpu_notify; + mp->m_icsb_notifier.priority = 0; + register_cpu_notifier(&mp->m_icsb_notifier); + + for_each_online_cpu(i) { + cntp = (xfs_icsb_cnts_t *)per_cpu_ptr(mp->m_sb_cnts, i); + memset(cntp, 0, sizeof(xfs_icsb_cnts_t)); + } + /* + * start with all counters disabled so that the + * initial balance kicks us off correctly + */ + mp->m_icsb_counters = -1; + return 0; +} + +STATIC void +xfs_icsb_destroy_counters( + xfs_mount_t *mp) +{ + if (mp->m_sb_cnts) { + unregister_cpu_notifier(&mp->m_icsb_notifier); + free_percpu(mp->m_sb_cnts); + } +} + +STATIC inline void +xfs_icsb_lock_cntr( + xfs_icsb_cnts_t *icsbp) +{ + while (test_and_set_bit(XFS_ICSB_FLAG_LOCK, &icsbp->icsb_flags)) { + ndelay(1000); + } +} + +STATIC inline void +xfs_icsb_unlock_cntr( + xfs_icsb_cnts_t *icsbp) +{ + clear_bit(XFS_ICSB_FLAG_LOCK, &icsbp->icsb_flags); +} + + +STATIC inline void +xfs_icsb_lock_all_counters( + xfs_mount_t *mp) +{ + xfs_icsb_cnts_t *cntp; + int i; + + for_each_online_cpu(i) { + cntp = (xfs_icsb_cnts_t *)per_cpu_ptr(mp->m_sb_cnts, i); + xfs_icsb_lock_cntr(cntp); + } +} + +STATIC inline void +xfs_icsb_unlock_all_counters( + xfs_mount_t *mp) +{ + xfs_icsb_cnts_t *cntp; + int i; + + for_each_online_cpu(i) { + cntp = (xfs_icsb_cnts_t *)per_cpu_ptr(mp->m_sb_cnts, i); + xfs_icsb_unlock_cntr(cntp); + } +} + +STATIC void +xfs_icsb_count( + xfs_mount_t *mp, + xfs_icsb_cnts_t *cnt, + int flags) +{ + xfs_icsb_cnts_t *cntp; + int i; + + memset(cnt, 0, sizeof(xfs_icsb_cnts_t)); + + if (!(flags & XFS_ICSB_LAZY_COUNT)) + xfs_icsb_lock_all_counters(mp); + + for_each_online_cpu(i) { + cntp = (xfs_icsb_cnts_t *)per_cpu_ptr(mp->m_sb_cnts, i); + cnt->icsb_icount += cntp->icsb_icount; + cnt->icsb_ifree += cntp->icsb_ifree; + cnt->icsb_fdblocks += cntp->icsb_fdblocks; + } + + if (!(flags & XFS_ICSB_LAZY_COUNT)) + xfs_icsb_unlock_all_counters(mp); +} + +STATIC int +xfs_icsb_counter_disabled( + xfs_mount_t *mp, + xfs_sb_field_t field) +{ + ASSERT((field >= XFS_SBS_ICOUNT) && (field <= XFS_SBS_FDBLOCKS)); + return test_bit(field, &mp->m_icsb_counters); +} + +STATIC int +xfs_icsb_disable_counter( + xfs_mount_t *mp, + xfs_sb_field_t field) +{ + xfs_icsb_cnts_t cnt; + + ASSERT((field >= XFS_SBS_ICOUNT) && (field <= XFS_SBS_FDBLOCKS)); + + xfs_icsb_lock_all_counters(mp); + if (!test_and_set_bit(field, &mp->m_icsb_counters)) { + /* drain back to superblock */ + + xfs_icsb_count(mp, &cnt, XFS_ICSB_SB_LOCKED|XFS_ICSB_LAZY_COUNT); + switch(field) { + case XFS_SBS_ICOUNT: + mp->m_sb.sb_icount = cnt.icsb_icount; + break; + case XFS_SBS_IFREE: + mp->m_sb.sb_ifree = cnt.icsb_ifree; + break; + case XFS_SBS_FDBLOCKS: + mp->m_sb.sb_fdblocks = cnt.icsb_fdblocks; + break; + default: + BUG(); + } + } + + 
xfs_icsb_unlock_all_counters(mp); + + return 0; +} + +STATIC void +xfs_icsb_enable_counter( + xfs_mount_t *mp, + xfs_sb_field_t field, + uint64_t count, + uint64_t resid) +{ + xfs_icsb_cnts_t *cntp; + int i; + + ASSERT((field >= XFS_SBS_ICOUNT) && (field <= XFS_SBS_FDBLOCKS)); + + xfs_icsb_lock_all_counters(mp); + for_each_online_cpu(i) { + cntp = per_cpu_ptr(mp->m_sb_cnts, i); + switch (field) { + case XFS_SBS_ICOUNT: + cntp->icsb_icount = count + resid; + break; + case XFS_SBS_IFREE: + cntp->icsb_ifree = count + resid; + break; + case XFS_SBS_FDBLOCKS: + cntp->icsb_fdblocks = count + resid; + break; + default: + BUG(); + break; + } + resid = 0; + } + clear_bit(field, &mp->m_icsb_counters); + xfs_icsb_unlock_all_counters(mp); +} + +STATIC void +xfs_icsb_sync_counters_int( + xfs_mount_t *mp, + int flags) +{ + xfs_icsb_cnts_t cnt; + int s; + + /* Pass 1: lock all counters */ + if ((flags & XFS_ICSB_SB_LOCKED) == 0) + s = XFS_SB_LOCK(mp); + + xfs_icsb_count(mp, &cnt, flags); + + /* Step 3: update mp->m_sb fields */ + if (!xfs_icsb_counter_disabled(mp, XFS_SBS_ICOUNT)) + mp->m_sb.sb_icount = cnt.icsb_icount; + if (!xfs_icsb_counter_disabled(mp, XFS_SBS_IFREE)) + mp->m_sb.sb_ifree = cnt.icsb_ifree; + if (!xfs_icsb_counter_disabled(mp, XFS_SBS_FDBLOCKS)) + mp->m_sb.sb_fdblocks = cnt.icsb_fdblocks; + + if ((flags & XFS_ICSB_SB_LOCKED) == 0) + XFS_SB_UNLOCK(mp, s); +} + +/* + * Accurate update of per-cpu counters to incore superblock + */ +STATIC void +xfs_icsb_sync_counters( + xfs_mount_t *mp) +{ + xfs_icsb_sync_counters_int(mp, 0); +} + +/* + * lazy addition used for things like df, background sb syncs, etc + */ +void +xfs_icsb_sync_counters_lazy( + xfs_mount_t *mp) +{ + xfs_icsb_sync_counters_int(mp, XFS_ICSB_LAZY_COUNT); +} + +/* + * Balance and enable/disable counters as necessary. + * + * Thresholds for re-enabling counters are somewhat magic. + * inode counts are chosen to be the same number as single + * on disk allocation chunk per CPU, and free blocks is + * something far enough zero that we aren't going thrash + * when we get near ENOSPC. 
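The rebalance step behind these thresholds divides the global value evenly across the online CPUs, hands the remainder to the first CPU, and only re-enables the counter when each CPU's share clears the threshold (the hysteresis mentioned above). A small standalone sketch of that split; plain C division stands in for do_div(), which returns the remainder and leaves the quotient in its argument:

#include <stdio.h>

#define INO_CNTR_REENABLE	64	/* per-CPU share needed to re-enable */

/* Split 'total' across 'ncpus', cpu 0 absorbing the remainder.
 * Returns 1 if the counter may be re-enabled, 0 if it stays disabled. */
static int balance(unsigned long long total, int ncpus,
		   unsigned long long *share)
{
	unsigned long long count = total / ncpus;
	unsigned long long resid = total % ncpus;
	int i;

	if (count < INO_CNTR_REENABLE)
		return 0;	/* too little headroom per CPU, stay disabled */

	for (i = 0; i < ncpus; i++)
		share[i] = count + (i == 0 ? resid : 0);
	return 1;
}

int main(void)
{
	unsigned long long share[4];

	/* e.g. 1027 free inodes over 4 CPUs: shares 259, 256, 256, 256 */
	if (balance(1027, 4, share))
		printf("%llu %llu %llu %llu\n",
		       share[0], share[1], share[2], share[3]);
	return 0;
}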
+ */ +#define XFS_ICSB_INO_CNTR_REENABLE 64 +#define XFS_ICSB_FDBLK_CNTR_REENABLE 512 +STATIC void +xfs_icsb_balance_counter( + xfs_mount_t *mp, + xfs_sb_field_t field, + int flags) +{ + uint64_t count, resid = 0; + int weight = num_online_cpus(); + int s; + + if (!(flags & XFS_ICSB_SB_LOCKED)) + s = XFS_SB_LOCK(mp); + + /* disable counter and sync counter */ + xfs_icsb_disable_counter(mp, field); + + /* update counters - first CPU gets residual*/ + switch (field) { + case XFS_SBS_ICOUNT: + count = mp->m_sb.sb_icount; + resid = do_div(count, weight); + if (count < XFS_ICSB_INO_CNTR_REENABLE) + goto out; + break; + case XFS_SBS_IFREE: + count = mp->m_sb.sb_ifree; + resid = do_div(count, weight); + if (count < XFS_ICSB_INO_CNTR_REENABLE) + goto out; + break; + case XFS_SBS_FDBLOCKS: + count = mp->m_sb.sb_fdblocks; + resid = do_div(count, weight); + if (count < XFS_ICSB_FDBLK_CNTR_REENABLE) + goto out; + break; + default: + BUG(); + break; + } + + xfs_icsb_enable_counter(mp, field, count, resid); +out: + if (!(flags & XFS_ICSB_SB_LOCKED)) + XFS_SB_UNLOCK(mp, s); +} + +STATIC int +xfs_icsb_modify_counters_int( + xfs_mount_t *mp, + xfs_sb_field_t field, + int delta, + int rsvd, + int flags) +{ + xfs_icsb_cnts_t *icsbp; + long long lcounter; /* long counter for 64 bit fields */ + int cpu, s, locked = 0; + int ret = 0, balance_done = 0; + +again: + cpu = get_cpu(); + icsbp = (xfs_icsb_cnts_t *)per_cpu_ptr(mp->m_sb_cnts, cpu), + xfs_icsb_lock_cntr(icsbp); + if (unlikely(xfs_icsb_counter_disabled(mp, field))) + goto slow_path; + + switch (field) { + case XFS_SBS_ICOUNT: + lcounter = icsbp->icsb_icount; + lcounter += delta; + if (unlikely(lcounter < 0)) + goto slow_path; + icsbp->icsb_icount = lcounter; + break; + + case XFS_SBS_IFREE: + lcounter = icsbp->icsb_ifree; + lcounter += delta; + if (unlikely(lcounter < 0)) + goto slow_path; + icsbp->icsb_ifree = lcounter; + break; + + case XFS_SBS_FDBLOCKS: + BUG_ON((mp->m_resblks - mp->m_resblks_avail) != 0); + + lcounter = icsbp->icsb_fdblocks; + lcounter += delta; + if (unlikely(lcounter < 0)) + goto slow_path; + icsbp->icsb_fdblocks = lcounter; + break; + default: + BUG(); + break; + } + xfs_icsb_unlock_cntr(icsbp); + put_cpu(); + if (locked) + XFS_SB_UNLOCK(mp, s); + return 0; + + /* + * The slow path needs to be run with the SBLOCK + * held so that we prevent other threads from + * attempting to run this path at the same time. + * this provides exclusion for the balancing code, + * and exclusive fallback if the balance does not + * provide enough resources to continue in an unlocked + * manner. + */ +slow_path: + xfs_icsb_unlock_cntr(icsbp); + put_cpu(); + + /* need to hold superblock incase we need + * to disable a counter */ + if (!(flags & XFS_ICSB_SB_LOCKED)) { + s = XFS_SB_LOCK(mp); + locked = 1; + flags |= XFS_ICSB_SB_LOCKED; + } + if (!balance_done) { + xfs_icsb_balance_counter(mp, field, flags); + balance_done = 1; + goto again; + } else { + /* + * we might not have enough on this local + * cpu to allocate for a bulk request. 
+ * We need to drain this field from all CPUs + * and disable the counter fastpath + */ + xfs_icsb_disable_counter(mp, field); + } + + ret = xfs_mod_incore_sb_unlocked(mp, field, delta, rsvd); + + if (locked) + XFS_SB_UNLOCK(mp, s); + return ret; +} + +STATIC int +xfs_icsb_modify_counters( + xfs_mount_t *mp, + xfs_sb_field_t field, + int delta, + int rsvd) +{ + return xfs_icsb_modify_counters_int(mp, field, delta, rsvd, 0); +} + +/* + * Called when superblock is already locked + */ +STATIC int +xfs_icsb_modify_counters_locked( + xfs_mount_t *mp, + xfs_sb_field_t field, + int delta, + int rsvd) +{ + return xfs_icsb_modify_counters_int(mp, field, delta, + rsvd, XFS_ICSB_SB_LOCKED); +} +#endif diff --git a/fs/xfs/xfs_mount.h b/fs/xfs/xfs_mount.h index cd3cf9613a0..ebd73960e9d 100644 --- a/fs/xfs/xfs_mount.h +++ b/fs/xfs/xfs_mount.h @@ -267,6 +267,34 @@ typedef struct xfs_ioops { #define XFS_IODONE(vfsp) \ (*(mp)->m_io_ops.xfs_iodone)(vfsp) +#ifdef HAVE_PERCPU_SB + +/* + * Valid per-cpu incore superblock counters. Note that if you add new counters, + * you may need to define new counter disabled bit field descriptors as there + * are more possible fields in the superblock that can fit in a bitfield on a + * 32 bit platform. The XFS_SBS_* values for the current current counters just + * fit. + */ +typedef struct xfs_icsb_cnts { + uint64_t icsb_fdblocks; + uint64_t icsb_ifree; + uint64_t icsb_icount; + unsigned long icsb_flags; +} xfs_icsb_cnts_t; + +#define XFS_ICSB_FLAG_LOCK (1 << 0) /* counter lock bit */ + +#define XFS_ICSB_SB_LOCKED (1 << 0) /* sb already locked */ +#define XFS_ICSB_LAZY_COUNT (1 << 1) /* accuracy not needed */ + +extern int xfs_icsb_init_counters(struct xfs_mount *); +extern void xfs_icsb_sync_counters_lazy(struct xfs_mount *); + +#else +#define xfs_icsb_init_counters(mp) (0) +#define xfs_icsb_sync_counters_lazy(mp) do { } while (0) +#endif typedef struct xfs_mount { bhv_desc_t m_bhv; /* vfs xfs behavior */ @@ -372,6 +400,11 @@ typedef struct xfs_mount { struct xfs_qmops m_qm_ops; /* vector of XQM ops */ struct xfs_ioops m_io_ops; /* vector of I/O ops */ atomic_t m_active_trans; /* number trans frozen */ +#ifdef HAVE_PERCPU_SB + xfs_icsb_cnts_t *m_sb_cnts; /* per-cpu superblock counters */ + unsigned long m_icsb_counters; /* disabled per-cpu counters */ + struct notifier_block m_icsb_notifier; /* hotplug cpu notifier */ +#endif } xfs_mount_t; /* @@ -386,8 +419,6 @@ typedef struct xfs_mount { #define XFS_MOUNT_FS_SHUTDOWN (1ULL << 4) /* atomic stop of all filesystem operations, typically for disk errors in metadata */ -#define XFS_MOUNT_NOATIME (1ULL << 5) /* don't modify inode access - times on reads */ #define XFS_MOUNT_RETERR (1ULL << 6) /* return alignment errors to user */ #define XFS_MOUNT_NOALIGN (1ULL << 7) /* turn off stripe alignment @@ -411,6 +442,8 @@ typedef struct xfs_mount { #define XFS_MOUNT_DIRSYNC (1ULL << 21) /* synchronous directory ops */ #define XFS_MOUNT_COMPAT_IOSIZE (1ULL << 22) /* don't report large preferred * I/O size in stat() */ +#define XFS_MOUNT_NO_PERCPU_SB (1ULL << 23) /* don't use per-cpu superblock + counters */ /* @@ -473,11 +506,6 @@ xfs_preferred_iosize(xfs_mount_t *mp) #define XFS_SHUTDOWN_REMOTE_REQ 0x10 /* Shutdown came from remote cell */ /* - * xflags for xfs_syncsub - */ -#define XFS_XSYNC_RELOC 0x01 - -/* * Flags for xfs_mountfs */ #define XFS_MFSI_SECOND 0x01 /* Secondary mount -- skip stuff */ @@ -548,6 +576,8 @@ extern void xfs_unmountfs_close(xfs_mount_t *, struct cred *); extern int xfs_unmountfs_writesb(xfs_mount_t *); 
extern int xfs_unmount_flush(xfs_mount_t *, int); extern int xfs_mod_incore_sb(xfs_mount_t *, xfs_sb_field_t, int, int); +extern int xfs_mod_incore_sb_unlocked(xfs_mount_t *, xfs_sb_field_t, + int, int); extern int xfs_mod_incore_sb_batch(xfs_mount_t *, xfs_mod_sb_t *, uint, int); extern struct xfs_buf *xfs_getsb(xfs_mount_t *, int); diff --git a/fs/xfs/xfs_rw.h b/fs/xfs/xfs_rw.h index de85eefb796..e6379564447 100644 --- a/fs/xfs/xfs_rw.h +++ b/fs/xfs/xfs_rw.h @@ -89,6 +89,7 @@ extern void xfs_ioerror_alert(char *func, struct xfs_mount *mp, */ extern int xfs_rwlock(bhv_desc_t *bdp, vrwlock_t write_lock); extern void xfs_rwunlock(bhv_desc_t *bdp, vrwlock_t write_lock); +extern int xfs_setattr(bhv_desc_t *bdp, vattr_t *vap, int flags, cred_t *credp); extern int xfs_change_file_space(bhv_desc_t *bdp, int cmd, xfs_flock64_t *bf, xfs_off_t offset, cred_t *credp, int flags); extern int xfs_set_dmattrs(bhv_desc_t *bdp, u_int evmask, u_int16_t state, diff --git a/fs/xfs/xfs_trans.c b/fs/xfs/xfs_trans.c index d3d714e6b32..2918956553a 100644 --- a/fs/xfs/xfs_trans.c +++ b/fs/xfs/xfs_trans.c @@ -55,10 +55,141 @@ STATIC void xfs_trans_committed(xfs_trans_t *, int); STATIC void xfs_trans_chunk_committed(xfs_log_item_chunk_t *, xfs_lsn_t, int); STATIC void xfs_trans_free(xfs_trans_t *); -kmem_zone_t *xfs_trans_zone; +kmem_zone_t *xfs_trans_zone; /* + * Reservation functions here avoid a huge stack in xfs_trans_init + * due to register overflow from temporaries in the calculations. + */ + +STATIC uint +xfs_calc_write_reservation(xfs_mount_t *mp) +{ + return XFS_CALC_WRITE_LOG_RES(mp) + XFS_DQUOT_LOGRES(mp); +} + +STATIC uint +xfs_calc_itruncate_reservation(xfs_mount_t *mp) +{ + return XFS_CALC_ITRUNCATE_LOG_RES(mp) + XFS_DQUOT_LOGRES(mp); +} + +STATIC uint +xfs_calc_rename_reservation(xfs_mount_t *mp) +{ + return XFS_CALC_RENAME_LOG_RES(mp) + XFS_DQUOT_LOGRES(mp); +} + +STATIC uint +xfs_calc_link_reservation(xfs_mount_t *mp) +{ + return XFS_CALC_LINK_LOG_RES(mp) + XFS_DQUOT_LOGRES(mp); +} + +STATIC uint +xfs_calc_remove_reservation(xfs_mount_t *mp) +{ + return XFS_CALC_REMOVE_LOG_RES(mp) + XFS_DQUOT_LOGRES(mp); +} + +STATIC uint +xfs_calc_symlink_reservation(xfs_mount_t *mp) +{ + return XFS_CALC_SYMLINK_LOG_RES(mp) + XFS_DQUOT_LOGRES(mp); +} + +STATIC uint +xfs_calc_create_reservation(xfs_mount_t *mp) +{ + return XFS_CALC_CREATE_LOG_RES(mp) + XFS_DQUOT_LOGRES(mp); +} + +STATIC uint +xfs_calc_mkdir_reservation(xfs_mount_t *mp) +{ + return XFS_CALC_MKDIR_LOG_RES(mp) + XFS_DQUOT_LOGRES(mp); +} + +STATIC uint +xfs_calc_ifree_reservation(xfs_mount_t *mp) +{ + return XFS_CALC_IFREE_LOG_RES(mp) + XFS_DQUOT_LOGRES(mp); +} + +STATIC uint +xfs_calc_ichange_reservation(xfs_mount_t *mp) +{ + return XFS_CALC_ICHANGE_LOG_RES(mp) + XFS_DQUOT_LOGRES(mp); +} + +STATIC uint +xfs_calc_growdata_reservation(xfs_mount_t *mp) +{ + return XFS_CALC_GROWDATA_LOG_RES(mp); +} + +STATIC uint +xfs_calc_growrtalloc_reservation(xfs_mount_t *mp) +{ + return XFS_CALC_GROWRTALLOC_LOG_RES(mp); +} + +STATIC uint +xfs_calc_growrtzero_reservation(xfs_mount_t *mp) +{ + return XFS_CALC_GROWRTZERO_LOG_RES(mp); +} + +STATIC uint +xfs_calc_growrtfree_reservation(xfs_mount_t *mp) +{ + return XFS_CALC_GROWRTFREE_LOG_RES(mp); +} + +STATIC uint +xfs_calc_swrite_reservation(xfs_mount_t *mp) +{ + return XFS_CALC_SWRITE_LOG_RES(mp); +} + +STATIC uint +xfs_calc_writeid_reservation(xfs_mount_t *mp) +{ + return XFS_CALC_WRITEID_LOG_RES(mp); +} + +STATIC uint +xfs_calc_addafork_reservation(xfs_mount_t *mp) +{ + return XFS_CALC_ADDAFORK_LOG_RES(mp) + 
XFS_DQUOT_LOGRES(mp); +} + +STATIC uint +xfs_calc_attrinval_reservation(xfs_mount_t *mp) +{ + return XFS_CALC_ATTRINVAL_LOG_RES(mp); +} + +STATIC uint +xfs_calc_attrset_reservation(xfs_mount_t *mp) +{ + return XFS_CALC_ATTRSET_LOG_RES(mp) + XFS_DQUOT_LOGRES(mp); +} + +STATIC uint +xfs_calc_attrrm_reservation(xfs_mount_t *mp) +{ + return XFS_CALC_ATTRRM_LOG_RES(mp) + XFS_DQUOT_LOGRES(mp); +} + +STATIC uint +xfs_calc_clear_agi_bucket_reservation(xfs_mount_t *mp) +{ + return XFS_CALC_CLEAR_AGI_BUCKET_LOG_RES(mp); +} + +/* * Initialize the precomputed transaction reservation values * in the mount structure. */ @@ -69,39 +200,27 @@ xfs_trans_init( xfs_trans_reservations_t *resp; resp = &(mp->m_reservations); - resp->tr_write = - (uint)(XFS_CALC_WRITE_LOG_RES(mp) + XFS_DQUOT_LOGRES(mp)); - resp->tr_itruncate = - (uint)(XFS_CALC_ITRUNCATE_LOG_RES(mp) + XFS_DQUOT_LOGRES(mp)); - resp->tr_rename = - (uint)(XFS_CALC_RENAME_LOG_RES(mp) + XFS_DQUOT_LOGRES(mp)); - resp->tr_link = (uint)XFS_CALC_LINK_LOG_RES(mp); - resp->tr_remove = - (uint)(XFS_CALC_REMOVE_LOG_RES(mp) + XFS_DQUOT_LOGRES(mp)); - resp->tr_symlink = - (uint)(XFS_CALC_SYMLINK_LOG_RES(mp) + XFS_DQUOT_LOGRES(mp)); - resp->tr_create = - (uint)(XFS_CALC_CREATE_LOG_RES(mp) + XFS_DQUOT_LOGRES(mp)); - resp->tr_mkdir = - (uint)(XFS_CALC_MKDIR_LOG_RES(mp) + XFS_DQUOT_LOGRES(mp)); - resp->tr_ifree = - (uint)(XFS_CALC_IFREE_LOG_RES(mp) + XFS_DQUOT_LOGRES(mp)); - resp->tr_ichange = - (uint)(XFS_CALC_ICHANGE_LOG_RES(mp) + XFS_DQUOT_LOGRES(mp)); - resp->tr_growdata = (uint)XFS_CALC_GROWDATA_LOG_RES(mp); - resp->tr_swrite = (uint)XFS_CALC_SWRITE_LOG_RES(mp); - resp->tr_writeid = (uint)XFS_CALC_WRITEID_LOG_RES(mp); - resp->tr_addafork = - (uint)(XFS_CALC_ADDAFORK_LOG_RES(mp) + XFS_DQUOT_LOGRES(mp)); - resp->tr_attrinval = (uint)XFS_CALC_ATTRINVAL_LOG_RES(mp); - resp->tr_attrset = - (uint)(XFS_CALC_ATTRSET_LOG_RES(mp) + XFS_DQUOT_LOGRES(mp)); - resp->tr_attrrm = - (uint)(XFS_CALC_ATTRRM_LOG_RES(mp) + XFS_DQUOT_LOGRES(mp)); - resp->tr_clearagi = (uint)XFS_CALC_CLEAR_AGI_BUCKET_LOG_RES(mp); - resp->tr_growrtalloc = (uint)XFS_CALC_GROWRTALLOC_LOG_RES(mp); - resp->tr_growrtzero = (uint)XFS_CALC_GROWRTZERO_LOG_RES(mp); - resp->tr_growrtfree = (uint)XFS_CALC_GROWRTFREE_LOG_RES(mp); + resp->tr_write = xfs_calc_write_reservation(mp); + resp->tr_itruncate = xfs_calc_itruncate_reservation(mp); + resp->tr_rename = xfs_calc_rename_reservation(mp); + resp->tr_link = xfs_calc_link_reservation(mp); + resp->tr_remove = xfs_calc_remove_reservation(mp); + resp->tr_symlink = xfs_calc_symlink_reservation(mp); + resp->tr_create = xfs_calc_create_reservation(mp); + resp->tr_mkdir = xfs_calc_mkdir_reservation(mp); + resp->tr_ifree = xfs_calc_ifree_reservation(mp); + resp->tr_ichange = xfs_calc_ichange_reservation(mp); + resp->tr_growdata = xfs_calc_growdata_reservation(mp); + resp->tr_swrite = xfs_calc_swrite_reservation(mp); + resp->tr_writeid = xfs_calc_writeid_reservation(mp); + resp->tr_addafork = xfs_calc_addafork_reservation(mp); + resp->tr_attrinval = xfs_calc_attrinval_reservation(mp); + resp->tr_attrset = xfs_calc_attrset_reservation(mp); + resp->tr_attrrm = xfs_calc_attrrm_reservation(mp); + resp->tr_clearagi = xfs_calc_clear_agi_bucket_reservation(mp); + resp->tr_growrtalloc = xfs_calc_growrtalloc_reservation(mp); + resp->tr_growrtzero = xfs_calc_growrtzero_reservation(mp); + resp->tr_growrtfree = xfs_calc_growrtfree_reservation(mp); } /* diff --git a/fs/xfs/xfs_trans.h b/fs/xfs/xfs_trans.h index d77901c07f6..e48befa4e33 100644 --- a/fs/xfs/xfs_trans.h +++ 
b/fs/xfs/xfs_trans.h @@ -380,7 +380,7 @@ typedef struct xfs_trans { xfs_trans_header_t t_header; /* header for in-log trans */ unsigned int t_busy_free; /* busy descs free */ xfs_log_busy_chunk_t t_busy; /* busy/async free blocks */ - xfs_pflags_t t_pflags; /* saved pflags state */ + unsigned long t_pflags; /* saved process flags state */ } xfs_trans_t; #endif /* __KERNEL__ */ diff --git a/fs/xfs/xfs_vfsops.c b/fs/xfs/xfs_vfsops.c index b6ad370fab3..d4ec4dfaf19 100644 --- a/fs/xfs/xfs_vfsops.c +++ b/fs/xfs/xfs_vfsops.c @@ -55,7 +55,7 @@ #include "xfs_clnt.h" #include "xfs_fsops.h" -STATIC int xfs_sync(bhv_desc_t *, int, cred_t *); +STATIC int xfs_sync(bhv_desc_t *, int, cred_t *); int xfs_init(void) @@ -77,11 +77,12 @@ xfs_init(void) "xfs_bmap_free_item"); xfs_btree_cur_zone = kmem_zone_init(sizeof(xfs_btree_cur_t), "xfs_btree_cur"); - xfs_inode_zone = kmem_zone_init(sizeof(xfs_inode_t), "xfs_inode"); xfs_trans_zone = kmem_zone_init(sizeof(xfs_trans_t), "xfs_trans"); xfs_da_state_zone = kmem_zone_init(sizeof(xfs_da_state_t), "xfs_da_state"); xfs_dabuf_zone = kmem_zone_init(sizeof(xfs_dabuf_t), "xfs_dabuf"); + xfs_ifork_zone = kmem_zone_init(sizeof(xfs_ifork_t), "xfs_ifork"); + xfs_acl_zone_init(xfs_acl_zone, "xfs_acl"); /* * The size of the zone allocated buf log item is the maximum @@ -93,17 +94,30 @@ xfs_init(void) (((XFS_MAX_BLOCKSIZE / XFS_BLI_CHUNK) / NBWORD) * sizeof(int))), "xfs_buf_item"); - xfs_efd_zone = kmem_zone_init((sizeof(xfs_efd_log_item_t) + - ((XFS_EFD_MAX_FAST_EXTENTS - 1) * sizeof(xfs_extent_t))), + xfs_efd_zone = + kmem_zone_init((sizeof(xfs_efd_log_item_t) + + ((XFS_EFD_MAX_FAST_EXTENTS - 1) * + sizeof(xfs_extent_t))), "xfs_efd_item"); - xfs_efi_zone = kmem_zone_init((sizeof(xfs_efi_log_item_t) + - ((XFS_EFI_MAX_FAST_EXTENTS - 1) * sizeof(xfs_extent_t))), + xfs_efi_zone = + kmem_zone_init((sizeof(xfs_efi_log_item_t) + + ((XFS_EFI_MAX_FAST_EXTENTS - 1) * + sizeof(xfs_extent_t))), "xfs_efi_item"); - xfs_ifork_zone = kmem_zone_init(sizeof(xfs_ifork_t), "xfs_ifork"); - xfs_ili_zone = kmem_zone_init(sizeof(xfs_inode_log_item_t), "xfs_ili"); - xfs_chashlist_zone = kmem_zone_init(sizeof(xfs_chashlist_t), - "xfs_chashlist"); - xfs_acl_zone_init(xfs_acl_zone, "xfs_acl"); + + /* + * These zones warrant special memory allocator hints + */ + xfs_inode_zone = + kmem_zone_init_flags(sizeof(xfs_inode_t), "xfs_inode", + KM_ZONE_HWALIGN | KM_ZONE_RECLAIM | + KM_ZONE_SPREAD, NULL); + xfs_ili_zone = + kmem_zone_init_flags(sizeof(xfs_inode_log_item_t), "xfs_ili", + KM_ZONE_SPREAD, NULL); + xfs_chashlist_zone = + kmem_zone_init_flags(sizeof(xfs_chashlist_t), "xfs_chashlist", + KM_ZONE_SPREAD, NULL); /* * Allocate global trace buffers. 
@@ -176,18 +190,18 @@ xfs_cleanup(void) ktrace_free(xfs_alloc_trace_buf); #endif - kmem_cache_destroy(xfs_bmap_free_item_zone); - kmem_cache_destroy(xfs_btree_cur_zone); - kmem_cache_destroy(xfs_inode_zone); - kmem_cache_destroy(xfs_trans_zone); - kmem_cache_destroy(xfs_da_state_zone); - kmem_cache_destroy(xfs_dabuf_zone); - kmem_cache_destroy(xfs_buf_item_zone); - kmem_cache_destroy(xfs_efd_zone); - kmem_cache_destroy(xfs_efi_zone); - kmem_cache_destroy(xfs_ifork_zone); - kmem_cache_destroy(xfs_ili_zone); - kmem_cache_destroy(xfs_chashlist_zone); + kmem_zone_destroy(xfs_bmap_free_item_zone); + kmem_zone_destroy(xfs_btree_cur_zone); + kmem_zone_destroy(xfs_inode_zone); + kmem_zone_destroy(xfs_trans_zone); + kmem_zone_destroy(xfs_da_state_zone); + kmem_zone_destroy(xfs_dabuf_zone); + kmem_zone_destroy(xfs_buf_item_zone); + kmem_zone_destroy(xfs_efd_zone); + kmem_zone_destroy(xfs_efi_zone); + kmem_zone_destroy(xfs_ifork_zone); + kmem_zone_destroy(xfs_ili_zone); + kmem_zone_destroy(xfs_chashlist_zone); } /* @@ -258,8 +272,6 @@ xfs_start_flags( mp->m_inoadd = XFS_INO64_OFFSET; } #endif - if (ap->flags & XFSMNT_NOATIME) - mp->m_flags |= XFS_MOUNT_NOATIME; if (ap->flags & XFSMNT_RETERR) mp->m_flags |= XFS_MOUNT_RETERR; if (ap->flags & XFSMNT_NOALIGN) @@ -620,7 +632,7 @@ xfs_quiesce_fs( xfs_mount_t *mp) { int count = 0, pincount; - + xfs_refcache_purge_mp(mp); xfs_flush_buftarg(mp->m_ddev_targp, 0); xfs_finish_reclaim_all(mp, 0); @@ -631,7 +643,7 @@ xfs_quiesce_fs( * meta data (typically directory updates). * Which then must be flushed and logged before * we can write the unmount record. - */ + */ do { xfs_syncsub(mp, SYNC_REMOUNT|SYNC_ATTR|SYNC_WAIT, 0, NULL); pincount = xfs_flush_buftarg(mp->m_ddev_targp, 1); @@ -654,11 +666,6 @@ xfs_mntupdate( xfs_mount_t *mp = XFS_BHVTOM(bdp); int error; - if (args->flags & XFSMNT_NOATIME) - mp->m_flags |= XFS_MOUNT_NOATIME; - else - mp->m_flags &= ~XFS_MOUNT_NOATIME; - if (args->flags & XFSMNT_BARRIER) mp->m_flags |= XFS_MOUNT_BARRIER; else @@ -814,6 +821,7 @@ xfs_statvfs( statp->f_type = XFS_SB_MAGIC; + xfs_icsb_sync_counters_lazy(mp); s = XFS_SB_LOCK(mp); statp->f_bsize = sbp->sb_blocksize; lsize = sbp->sb_logstart ? 
sbp->sb_logblocks : 0; @@ -1221,7 +1229,7 @@ xfs_sync_inodes( xfs_iunlock(ip, XFS_ILOCK_SHARED); error = xfs_itobp(mp, NULL, ip, - &dip, &bp, 0); + &dip, &bp, 0, 0); if (!error) { xfs_buf_relse(bp); } else { @@ -1690,10 +1698,7 @@ xfs_parseargs( int iosize; args->flags2 |= XFSMNT2_COMPAT_IOSIZE; - -#if 0 /* XXX: off by default, until some remaining issues ironed out */ - args->flags |= XFSMNT_IDELETE; /* default to on */ -#endif + args->flags |= XFSMNT_IDELETE; if (!options) goto done; @@ -1903,7 +1908,6 @@ xfs_showargs( { XFS_MOUNT_NOUUID, "," MNTOPT_NOUUID }, { XFS_MOUNT_NORECOVERY, "," MNTOPT_NORECOVERY }, { XFS_MOUNT_OSYNCISOSYNC, "," MNTOPT_OSYNCISOSYNC }, - { XFS_MOUNT_IDELETE, "," MNTOPT_NOIKEEP }, { 0, NULL } }; struct proc_xfs_info *xfs_infop; @@ -1939,6 +1943,8 @@ xfs_showargs( seq_printf(m, "," MNTOPT_SWIDTH "=%d", (int)XFS_FSB_TO_BB(mp, mp->m_swidth)); + if (!(mp->m_flags & XFS_MOUNT_IDELETE)) + seq_printf(m, "," MNTOPT_IKEEP); if (!(mp->m_flags & XFS_MOUNT_COMPAT_IOSIZE)) seq_printf(m, "," MNTOPT_LARGEIO); if (mp->m_flags & XFS_MOUNT_BARRIER) diff --git a/fs/xfs/xfs_vnodeops.c b/fs/xfs/xfs_vnodeops.c index eaab355f5a8..a478f42e63f 100644 --- a/fs/xfs/xfs_vnodeops.c +++ b/fs/xfs/xfs_vnodeops.c @@ -615,6 +615,7 @@ xfs_setattr( code = xfs_igrow_start(ip, vap->va_size, credp); } xfs_iunlock(ip, XFS_ILOCK_EXCL); + vn_iowait(vp); /* wait for the completion of any pending DIOs */ if (!code) code = xfs_itruncate_data(ip, vap->va_size); if (code) { @@ -1556,7 +1557,7 @@ xfs_release( if ((error = xfs_inactive_free_eofblocks(mp, ip))) return error; /* Update linux inode block count after free above */ - LINVFS_GET_IP(vp)->i_blocks = XFS_FSB_TO_BB(mp, + vn_to_inode(vp)->i_blocks = XFS_FSB_TO_BB(mp, ip->i_d.di_nblocks + ip->i_delayed_blks); } } @@ -1637,7 +1638,7 @@ xfs_inactive( if ((error = xfs_inactive_free_eofblocks(mp, ip))) return VN_INACTIVE_CACHE; /* Update linux inode block count after free above */ - LINVFS_GET_IP(vp)->i_blocks = XFS_FSB_TO_BB(mp, + vn_to_inode(vp)->i_blocks = XFS_FSB_TO_BB(mp, ip->i_d.di_nblocks + ip->i_delayed_blks); } goto out; @@ -3186,7 +3187,7 @@ xfs_rmdir( /* Fall through to std_return with error = 0 or the errno * from xfs_trans_commit. */ -std_return: + std_return: if (DM_EVENT_ENABLED(dir_vp->v_vfsp, dp, DM_EVENT_POSTREMOVE)) { (void) XFS_SEND_NAMESP(mp, DM_EVENT_POSTREMOVE, dir_vp, DM_RIGHT_NULL, @@ -3196,12 +3197,12 @@ std_return: } return error; -error1: + error1: xfs_bmap_cancel(&free_list); cancel_flags |= XFS_TRANS_ABORT; /* FALLTHROUGH */ -error_return: + error_return: xfs_trans_cancel(tp, cancel_flags); goto std_return; } @@ -4310,8 +4311,10 @@ xfs_free_file_space( ASSERT(attr_flags & ATTR_NOLOCK ? 
attr_flags & ATTR_DMI : 1); if (attr_flags & ATTR_NOLOCK) need_iolock = 0; - if (need_iolock) + if (need_iolock) { xfs_ilock(ip, XFS_IOLOCK_EXCL); + vn_iowait(vp); /* wait for the completion of any pending DIOs */ + } rounding = MAX((__uint8_t)(1 << mp->m_sb.sb_blocklog), (__uint8_t)NBPP); diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h index 0b54e9a4a8a..e496fac860a 100644 --- a/include/acpi/acpi_bus.h +++ b/include/acpi/acpi_bus.h @@ -330,6 +330,7 @@ int acpi_bus_register_driver(struct acpi_driver *driver); int acpi_bus_unregister_driver(struct acpi_driver *driver); int acpi_bus_add(struct acpi_device **child, struct acpi_device *parent, acpi_handle handle, int type); +int acpi_bus_trim(struct acpi_device *start, int rmdevice); int acpi_bus_start(struct acpi_device *device); int acpi_match_ids(struct acpi_device *device, char *ids); diff --git a/include/asm-alpha/mmu_context.h b/include/asm-alpha/mmu_context.h index 6f92482cc96..0c017fc181c 100644 --- a/include/asm-alpha/mmu_context.h +++ b/include/asm-alpha/mmu_context.h @@ -231,9 +231,8 @@ init_new_context(struct task_struct *tsk, struct mm_struct *mm) { int i; - for (i = 0; i < NR_CPUS; i++) - if (cpu_online(i)) - mm->context[i] = 0; + for_each_online_cpu(i) + mm->context[i] = 0; if (tsk != current) task_thread_info(tsk)->pcb.ptbr = ((unsigned long)mm->pgd - IDENT_ADDR) >> PAGE_SHIFT; diff --git a/include/asm-alpha/topology.h b/include/asm-alpha/topology.h index eb740e280d9..420ccde6b91 100644 --- a/include/asm-alpha/topology.h +++ b/include/asm-alpha/topology.h @@ -27,8 +27,8 @@ static inline cpumask_t node_to_cpumask(int node) cpumask_t node_cpu_mask = CPU_MASK_NONE; int cpu; - for(cpu = 0; cpu < NR_CPUS; cpu++) { - if (cpu_online(cpu) && (cpu_to_node(cpu) == node)) + for_each_online_cpu(cpu) { + if (cpu_to_node(cpu) == node) cpu_set(cpu, node_cpu_mask); } diff --git a/include/asm-generic/bug.h b/include/asm-generic/bug.h index 400c2b41896..1a565a9d2fa 100644 --- a/include/asm-generic/bug.h +++ b/include/asm-generic/bug.h @@ -7,7 +7,7 @@ #ifdef CONFIG_BUG #ifndef HAVE_ARCH_BUG #define BUG() do { \ - printk("kernel BUG at %s:%d!\n", __FILE__, __LINE__); \ + printk("BUG: failure at %s:%d/%s()!\n", __FILE__, __LINE__, __FUNCTION__); \ panic("BUG!"); \ } while (0) #endif @@ -19,7 +19,7 @@ #ifndef HAVE_ARCH_WARN_ON #define WARN_ON(condition) do { \ if (unlikely((condition)!=0)) { \ - printk("Badness in %s at %s:%d\n", __FUNCTION__, __FILE__, __LINE__); \ + printk("BUG: warning at %s:%d/%s()\n", __FILE__, __LINE__, __FUNCTION__); \ dump_stack(); \ } \ } while (0) diff --git a/include/asm-generic/percpu.h b/include/asm-generic/percpu.h index 9044aeb3782..78cf45547e3 100644 --- a/include/asm-generic/percpu.h +++ b/include/asm-generic/percpu.h @@ -19,10 +19,9 @@ extern unsigned long __per_cpu_offset[NR_CPUS]; #define percpu_modcopy(pcpudst, src, size) \ do { \ unsigned int __i; \ - for (__i = 0; __i < NR_CPUS; __i++) \ - if (cpu_possible(__i)) \ - memcpy((pcpudst)+__per_cpu_offset[__i], \ - (src), (size)); \ + for_each_cpu(__i) \ + memcpy((pcpudst)+__per_cpu_offset[__i], \ + (src), (size)); \ } while (0) #else /* ! 
SMP */ diff --git a/include/asm-i386/alternative.h b/include/asm-i386/alternative.h new file mode 100644 index 00000000000..e201decea0c --- /dev/null +++ b/include/asm-i386/alternative.h @@ -0,0 +1,129 @@ +#ifndef _I386_ALTERNATIVE_H +#define _I386_ALTERNATIVE_H + +#ifdef __KERNEL__ + +struct alt_instr { + u8 *instr; /* original instruction */ + u8 *replacement; + u8 cpuid; /* cpuid bit set for replacement */ + u8 instrlen; /* length of original instruction */ + u8 replacementlen; /* length of new instruction, <= instrlen */ + u8 pad; +}; + +extern void apply_alternatives(struct alt_instr *start, struct alt_instr *end); + +struct module; +extern void alternatives_smp_module_add(struct module *mod, char *name, + void *locks, void *locks_end, + void *text, void *text_end); +extern void alternatives_smp_module_del(struct module *mod); +extern void alternatives_smp_switch(int smp); + +#endif + +/* + * Alternative instructions for different CPU types or capabilities. + * + * This allows to use optimized instructions even on generic binary + * kernels. + * + * length of oldinstr must be longer or equal the length of newinstr + * It can be padded with nops as needed. + * + * For non barrier like inlines please define new variants + * without volatile and memory clobber. + */ +#define alternative(oldinstr, newinstr, feature) \ + asm volatile ("661:\n\t" oldinstr "\n662:\n" \ + ".section .altinstructions,\"a\"\n" \ + " .align 4\n" \ + " .long 661b\n" /* label */ \ + " .long 663f\n" /* new instruction */ \ + " .byte %c0\n" /* feature bit */ \ + " .byte 662b-661b\n" /* sourcelen */ \ + " .byte 664f-663f\n" /* replacementlen */ \ + ".previous\n" \ + ".section .altinstr_replacement,\"ax\"\n" \ + "663:\n\t" newinstr "\n664:\n" /* replacement */\ + ".previous" :: "i" (feature) : "memory") + +/* + * Alternative inline assembly with input. + * + * Pecularities: + * No memory clobber here. + * Argument numbers start with 1. + * Best is to use constraints that are fixed size (like (%1) ... "r") + * If you use variable sized constraints like "m" or "g" in the + * replacement maake sure to pad to the worst case length. + */ +#define alternative_input(oldinstr, newinstr, feature, input...) \ + asm volatile ("661:\n\t" oldinstr "\n662:\n" \ + ".section .altinstructions,\"a\"\n" \ + " .align 4\n" \ + " .long 661b\n" /* label */ \ + " .long 663f\n" /* new instruction */ \ + " .byte %c0\n" /* feature bit */ \ + " .byte 662b-661b\n" /* sourcelen */ \ + " .byte 664f-663f\n" /* replacementlen */ \ + ".previous\n" \ + ".section .altinstr_replacement,\"ax\"\n" \ + "663:\n\t" newinstr "\n664:\n" /* replacement */\ + ".previous" :: "i" (feature), ##input) + +/* + * Alternative inline assembly for SMP. + * + * alternative_smp() takes two versions (SMP first, UP second) and is + * for more complex stuff such as spinlocks. + * + * The LOCK_PREFIX macro defined here replaces the LOCK and + * LOCK_PREFIX macros used everywhere in the source tree. + * + * SMP alternatives use the same data structures as the other + * alternatives and the X86_FEATURE_UP flag to indicate the case of a + * UP system running a SMP kernel. The existing apply_alternatives() + * works fine for patching a SMP kernel for UP. + * + * The SMP alternative tables can be kept after boot and contain both + * UP and SMP versions of the instructions to allow switching back to + * SMP at runtime, when hotplugging in a new CPU, which is especially + * useful in virtualized environments. 
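For orientation, one hedged usage sketch of the alternative() macro defined above: a write barrier written against the baseline instruction sequence gets patched to a single sfence at boot on SSE-capable CPUs. The names example_wmb and publish_data are invented for illustration, and X86_FEATURE_XMM is assumed to be the SSE feature bit from asm-i386/cpufeature.h; the tree's real barrier definitions may differ in detail.

#include <asm/alternative.h>
#include <asm/cpufeature.h>	/* assumed home of X86_FEATURE_XMM */

/* Baseline: a locked add to the stack orders stores on every CPU;
 * on CPUs advertising SSE the sequence is patched to "sfence". */
#define example_wmb() \
	alternative("lock; addl $0,0(%%esp)", "sfence", X86_FEATURE_XMM)

static inline void publish_data(void)
{
	example_wmb();	/* ordering point for prior stores */
}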
+ * + * The very common lock prefix is handled as special case in a + * separate table which is a pure address list without replacement ptr + * and size information. That keeps the table sizes small. + */ + +#ifdef CONFIG_SMP +#define alternative_smp(smpinstr, upinstr, args...) \ + asm volatile ("661:\n\t" smpinstr "\n662:\n" \ + ".section .smp_altinstructions,\"a\"\n" \ + " .align 4\n" \ + " .long 661b\n" /* label */ \ + " .long 663f\n" /* new instruction */ \ + " .byte 0x68\n" /* X86_FEATURE_UP */ \ + " .byte 662b-661b\n" /* sourcelen */ \ + " .byte 664f-663f\n" /* replacementlen */ \ + ".previous\n" \ + ".section .smp_altinstr_replacement,\"awx\"\n" \ + "663:\n\t" upinstr "\n" /* replacement */ \ + "664:\n\t.fill 662b-661b,1,0x42\n" /* space for original */ \ + ".previous" : args) + +#define LOCK_PREFIX \ + ".section .smp_locks,\"a\"\n" \ + " .align 4\n" \ + " .long 661f\n" /* address */ \ + ".previous\n" \ + "661:\n\tlock; " + +#else /* ! CONFIG_SMP */ +#define alternative_smp(smpinstr, upinstr, args...) \ + asm volatile (upinstr : args) +#define LOCK_PREFIX "" +#endif + +#endif /* _I386_ALTERNATIVE_H */ diff --git a/include/asm-i386/arch_hooks.h b/include/asm-i386/arch_hooks.h index 28b96a6fb9f..238cf4275b9 100644 --- a/include/asm-i386/arch_hooks.h +++ b/include/asm-i386/arch_hooks.h @@ -24,4 +24,7 @@ extern void trap_init_hook(void); extern void time_init_hook(void); extern void mca_nmi_hook(void); +extern int setup_early_printk(char *); +extern void early_printk(const char *fmt, ...) __attribute__((format(printf,1,2))); + #endif diff --git a/include/asm-i386/atomic.h b/include/asm-i386/atomic.h index de649d3aa2d..22d80ece95c 100644 --- a/include/asm-i386/atomic.h +++ b/include/asm-i386/atomic.h @@ -10,12 +10,6 @@ * resource counting etc.. */ -#ifdef CONFIG_SMP -#define LOCK "lock ; " -#else -#define LOCK "" -#endif - /* * Make sure gcc doesn't try to be clever and move things around * on us. 
We need to use _exactly_ the address the user gave us, @@ -52,7 +46,7 @@ typedef struct { volatile int counter; } atomic_t; static __inline__ void atomic_add(int i, atomic_t *v) { __asm__ __volatile__( - LOCK "addl %1,%0" + LOCK_PREFIX "addl %1,%0" :"=m" (v->counter) :"ir" (i), "m" (v->counter)); } @@ -67,7 +61,7 @@ static __inline__ void atomic_add(int i, atomic_t *v) static __inline__ void atomic_sub(int i, atomic_t *v) { __asm__ __volatile__( - LOCK "subl %1,%0" + LOCK_PREFIX "subl %1,%0" :"=m" (v->counter) :"ir" (i), "m" (v->counter)); } @@ -86,7 +80,7 @@ static __inline__ int atomic_sub_and_test(int i, atomic_t *v) unsigned char c; __asm__ __volatile__( - LOCK "subl %2,%0; sete %1" + LOCK_PREFIX "subl %2,%0; sete %1" :"=m" (v->counter), "=qm" (c) :"ir" (i), "m" (v->counter) : "memory"); return c; @@ -101,7 +95,7 @@ static __inline__ int atomic_sub_and_test(int i, atomic_t *v) static __inline__ void atomic_inc(atomic_t *v) { __asm__ __volatile__( - LOCK "incl %0" + LOCK_PREFIX "incl %0" :"=m" (v->counter) :"m" (v->counter)); } @@ -115,7 +109,7 @@ static __inline__ void atomic_inc(atomic_t *v) static __inline__ void atomic_dec(atomic_t *v) { __asm__ __volatile__( - LOCK "decl %0" + LOCK_PREFIX "decl %0" :"=m" (v->counter) :"m" (v->counter)); } @@ -133,7 +127,7 @@ static __inline__ int atomic_dec_and_test(atomic_t *v) unsigned char c; __asm__ __volatile__( - LOCK "decl %0; sete %1" + LOCK_PREFIX "decl %0; sete %1" :"=m" (v->counter), "=qm" (c) :"m" (v->counter) : "memory"); return c != 0; @@ -152,7 +146,7 @@ static __inline__ int atomic_inc_and_test(atomic_t *v) unsigned char c; __asm__ __volatile__( - LOCK "incl %0; sete %1" + LOCK_PREFIX "incl %0; sete %1" :"=m" (v->counter), "=qm" (c) :"m" (v->counter) : "memory"); return c != 0; @@ -172,7 +166,7 @@ static __inline__ int atomic_add_negative(int i, atomic_t *v) unsigned char c; __asm__ __volatile__( - LOCK "addl %2,%0; sets %1" + LOCK_PREFIX "addl %2,%0; sets %1" :"=m" (v->counter), "=qm" (c) :"ir" (i), "m" (v->counter) : "memory"); return c; @@ -195,7 +189,7 @@ static __inline__ int atomic_add_return(int i, atomic_t *v) /* Modern 486+ processor */ __i = i; __asm__ __volatile__( - LOCK "xaddl %0, %1;" + LOCK_PREFIX "xaddl %0, %1;" :"=r"(i) :"m"(v->counter), "0"(i)); return i + __i; @@ -231,8 +225,14 @@ static __inline__ int atomic_sub_return(int i, atomic_t *v) ({ \ int c, old; \ c = atomic_read(v); \ - while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \ + for (;;) { \ + if (unlikely(c == (u))) \ + break; \ + old = atomic_cmpxchg((v), c, c + (a)); \ + if (likely(old == c)) \ + break; \ c = old; \ + } \ c != (u); \ }) #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0) @@ -242,11 +242,11 @@ static __inline__ int atomic_sub_return(int i, atomic_t *v) /* These are x86-specific, used by some header files */ #define atomic_clear_mask(mask, addr) \ -__asm__ __volatile__(LOCK "andl %0,%1" \ +__asm__ __volatile__(LOCK_PREFIX "andl %0,%1" \ : : "r" (~(mask)),"m" (*addr) : "memory") #define atomic_set_mask(mask, addr) \ -__asm__ __volatile__(LOCK "orl %0,%1" \ +__asm__ __volatile__(LOCK_PREFIX "orl %0,%1" \ : : "r" (mask),"m" (*(addr)) : "memory") /* Atomic operations are already serializing on x86 */ diff --git a/include/asm-i386/bitops.h b/include/asm-i386/bitops.h index 88e6ca248cd..7d20b95edb3 100644 --- a/include/asm-i386/bitops.h +++ b/include/asm-i386/bitops.h @@ -7,6 +7,7 @@ #include <linux/config.h> #include <linux/compiler.h> +#include <asm/alternative.h> /* * These have to be done with inline assembly: that way 
the bit-setting @@ -16,12 +17,6 @@ * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1). */ -#ifdef CONFIG_SMP -#define LOCK_PREFIX "lock ; " -#else -#define LOCK_PREFIX "" -#endif - #define ADDR (*(volatile long *) addr) /** diff --git a/include/asm-i386/cache.h b/include/asm-i386/cache.h index 615911e5bd2..ca15c9c665c 100644 --- a/include/asm-i386/cache.h +++ b/include/asm-i386/cache.h @@ -10,4 +10,6 @@ #define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT) #define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT) +#define __read_mostly __attribute__((__section__(".data.read_mostly"))) + #endif diff --git a/include/asm-i386/cpufeature.h b/include/asm-i386/cpufeature.h index c4ec2a4d8fd..5c0b5876b93 100644 --- a/include/asm-i386/cpufeature.h +++ b/include/asm-i386/cpufeature.h @@ -70,6 +70,7 @@ #define X86_FEATURE_P3 (3*32+ 6) /* P3 */ #define X86_FEATURE_P4 (3*32+ 7) /* P4 */ #define X86_FEATURE_CONSTANT_TSC (3*32+ 8) /* TSC ticks at a constant rate */ +#define X86_FEATURE_UP (3*32+ 9) /* smp kernel running on up */ /* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */ #define X86_FEATURE_XMM3 (4*32+ 0) /* Streaming SIMD Extensions-3 */ diff --git a/include/asm-i386/mach-default/do_timer.h b/include/asm-i386/mach-default/do_timer.h index 56211414fc9..6312c3e7981 100644 --- a/include/asm-i386/mach-default/do_timer.h +++ b/include/asm-i386/mach-default/do_timer.h @@ -18,7 +18,7 @@ static inline void do_timer_interrupt_hook(struct pt_regs *regs) { do_timer(regs); #ifndef CONFIG_SMP - update_process_times(user_mode(regs)); + update_process_times(user_mode_vm(regs)); #endif /* * In the SMP case we use the local APIC timer interrupt to do the diff --git a/include/asm-i386/mach-es7000/mach_mpparse.h b/include/asm-i386/mach-es7000/mach_mpparse.h index 4a0637a3e20..99f66be240b 100644 --- a/include/asm-i386/mach-es7000/mach_mpparse.h +++ b/include/asm-i386/mach-es7000/mach_mpparse.h @@ -30,7 +30,8 @@ static inline int mps_oem_check(struct mp_config_table *mpc, char *oem, return 0; } -static inline int es7000_check_dsdt() +#ifdef CONFIG_ACPI +static inline int es7000_check_dsdt(void) { struct acpi_table_header *header = NULL; if(!acpi_get_table_header_early(ACPI_DSDT, &header)) @@ -54,6 +55,11 @@ static inline int acpi_madt_oem_check(char *oem_id, char *oem_table_id) } return 0; } - +#else +static inline int acpi_madt_oem_check(char *oem_id, char *oem_table_id) +{ + return 0; +} +#endif #endif /* __ASM_MACH_MPPARSE_H */ diff --git a/include/asm-i386/mach-visws/do_timer.h b/include/asm-i386/mach-visws/do_timer.h index 92d638fc8b1..95568e6ca91 100644 --- a/include/asm-i386/mach-visws/do_timer.h +++ b/include/asm-i386/mach-visws/do_timer.h @@ -11,7 +11,7 @@ static inline void do_timer_interrupt_hook(struct pt_regs *regs) do_timer(regs); #ifndef CONFIG_SMP - update_process_times(user_mode(regs)); + update_process_times(user_mode_vm(regs)); #endif /* * In the SMP case we use the local APIC timer interrupt to do the diff --git a/include/asm-i386/mach-voyager/do_timer.h b/include/asm-i386/mach-voyager/do_timer.h index ae510e5d0d7..eaf51809898 100644 --- a/include/asm-i386/mach-voyager/do_timer.h +++ b/include/asm-i386/mach-voyager/do_timer.h @@ -5,7 +5,7 @@ static inline void do_timer_interrupt_hook(struct pt_regs *regs) { do_timer(regs); #ifndef CONFIG_SMP - update_process_times(user_mode(regs)); + update_process_times(user_mode_vm(regs)); #endif voyager_timer_interrupt(regs); diff --git a/include/asm-i386/mpspec.h b/include/asm-i386/mpspec.h index 64a0b8e6afe..62113d3bfdc 100644 --- 
a/include/asm-i386/mpspec.h +++ b/include/asm-i386/mpspec.h @@ -22,7 +22,6 @@ extern int mp_bus_id_to_type [MAX_MP_BUSSES]; extern int mp_irq_entries; extern struct mpc_config_intsrc mp_irqs [MAX_IRQ_SOURCES]; extern int mpc_default_type; -extern int mp_bus_id_to_pci_bus [MAX_MP_BUSSES]; extern unsigned long mp_lapic_addr; extern int pic_mode; extern int using_apic_timer; diff --git a/include/asm-i386/mtrr.h b/include/asm-i386/mtrr.h index 5b6ceda68c5..64cf937c7e3 100644 --- a/include/asm-i386/mtrr.h +++ b/include/asm-i386/mtrr.h @@ -25,6 +25,7 @@ #include <linux/config.h> #include <linux/ioctl.h> +#include <linux/errno.h> #define MTRR_IOCTL_BASE 'M' diff --git a/include/asm-i386/mutex.h b/include/asm-i386/mutex.h index 9b2199e829f..05a53853122 100644 --- a/include/asm-i386/mutex.h +++ b/include/asm-i386/mutex.h @@ -9,6 +9,8 @@ #ifndef _ASM_MUTEX_H #define _ASM_MUTEX_H +#include "asm/alternative.h" + /** * __mutex_fastpath_lock - try to take the lock by moving the count * from 1 to a 0 value @@ -27,7 +29,7 @@ do { \ typecheck_fn(fastcall void (*)(atomic_t *), fail_fn); \ \ __asm__ __volatile__( \ - LOCK " decl (%%eax) \n" \ + LOCK_PREFIX " decl (%%eax) \n" \ " js 2f \n" \ "1: \n" \ \ @@ -83,7 +85,7 @@ do { \ typecheck_fn(fastcall void (*)(atomic_t *), fail_fn); \ \ __asm__ __volatile__( \ - LOCK " incl (%%eax) \n" \ + LOCK_PREFIX " incl (%%eax) \n" \ " jle 2f \n" \ "1: \n" \ \ diff --git a/include/asm-i386/pgtable-2level.h b/include/asm-i386/pgtable-2level.h index 74ef721b534..27bde973abc 100644 --- a/include/asm-i386/pgtable-2level.h +++ b/include/asm-i386/pgtable-2level.h @@ -61,4 +61,6 @@ static inline int pte_exec_kernel(pte_t pte) #define __pte_to_swp_entry(pte) ((swp_entry_t) { (pte).pte_low }) #define __swp_entry_to_pte(x) ((pte_t) { (x).val }) +void vmalloc_sync_all(void); + #endif /* _I386_PGTABLE_2LEVEL_H */ diff --git a/include/asm-i386/pgtable-3level.h b/include/asm-i386/pgtable-3level.h index f1a8b454920..36a5aa63cbb 100644 --- a/include/asm-i386/pgtable-3level.h +++ b/include/asm-i386/pgtable-3level.h @@ -152,4 +152,6 @@ static inline pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot) #define __pmd_free_tlb(tlb, x) do { } while (0) +#define vmalloc_sync_all() ((void)0) + #endif /* _I386_PGTABLE_3LEVEL_H */ diff --git a/include/asm-i386/rwlock.h b/include/asm-i386/rwlock.h index b57cc7afdf7..94f00195d54 100644 --- a/include/asm-i386/rwlock.h +++ b/include/asm-i386/rwlock.h @@ -21,21 +21,23 @@ #define RW_LOCK_BIAS_STR "0x01000000" #define __build_read_lock_ptr(rw, helper) \ - asm volatile(LOCK "subl $1,(%0)\n\t" \ - "jns 1f\n" \ - "call " helper "\n\t" \ - "1:\n" \ - ::"a" (rw) : "memory") + alternative_smp("lock; subl $1,(%0)\n\t" \ + "jns 1f\n" \ + "call " helper "\n\t" \ + "1:\n", \ + "subl $1,(%0)\n\t", \ + :"a" (rw) : "memory") #define __build_read_lock_const(rw, helper) \ - asm volatile(LOCK "subl $1,%0\n\t" \ - "jns 1f\n" \ - "pushl %%eax\n\t" \ - "leal %0,%%eax\n\t" \ - "call " helper "\n\t" \ - "popl %%eax\n\t" \ - "1:\n" \ - :"=m" (*(volatile int *)rw) : : "memory") + alternative_smp("lock; subl $1,%0\n\t" \ + "jns 1f\n" \ + "pushl %%eax\n\t" \ + "leal %0,%%eax\n\t" \ + "call " helper "\n\t" \ + "popl %%eax\n\t" \ + "1:\n", \ + "subl $1,%0\n\t", \ + "=m" (*(volatile int *)rw) : : "memory") #define __build_read_lock(rw, helper) do { \ if (__builtin_constant_p(rw)) \ @@ -45,21 +47,23 @@ } while (0) #define __build_write_lock_ptr(rw, helper) \ - asm volatile(LOCK "subl $" RW_LOCK_BIAS_STR ",(%0)\n\t" \ - "jz 1f\n" \ - "call " helper "\n\t" \ - "1:\n" \ - ::"a" (rw) : 
"memory") + alternative_smp("lock; subl $" RW_LOCK_BIAS_STR ",(%0)\n\t" \ + "jz 1f\n" \ + "call " helper "\n\t" \ + "1:\n", \ + "subl $" RW_LOCK_BIAS_STR ",(%0)\n\t", \ + :"a" (rw) : "memory") #define __build_write_lock_const(rw, helper) \ - asm volatile(LOCK "subl $" RW_LOCK_BIAS_STR ",%0\n\t" \ - "jz 1f\n" \ - "pushl %%eax\n\t" \ - "leal %0,%%eax\n\t" \ - "call " helper "\n\t" \ - "popl %%eax\n\t" \ - "1:\n" \ - :"=m" (*(volatile int *)rw) : : "memory") + alternative_smp("lock; subl $" RW_LOCK_BIAS_STR ",%0\n\t" \ + "jz 1f\n" \ + "pushl %%eax\n\t" \ + "leal %0,%%eax\n\t" \ + "call " helper "\n\t" \ + "popl %%eax\n\t" \ + "1:\n", \ + "subl $" RW_LOCK_BIAS_STR ",%0\n\t", \ + "=m" (*(volatile int *)rw) : : "memory") #define __build_write_lock(rw, helper) do { \ if (__builtin_constant_p(rw)) \ diff --git a/include/asm-i386/semaphore.h b/include/asm-i386/semaphore.h index 6a42b2142fd..f7a0f310c52 100644 --- a/include/asm-i386/semaphore.h +++ b/include/asm-i386/semaphore.h @@ -99,7 +99,7 @@ static inline void down(struct semaphore * sem) might_sleep(); __asm__ __volatile__( "# atomic down operation\n\t" - LOCK "decl %0\n\t" /* --sem->count */ + LOCK_PREFIX "decl %0\n\t" /* --sem->count */ "js 2f\n" "1:\n" LOCK_SECTION_START("") @@ -123,7 +123,7 @@ static inline int down_interruptible(struct semaphore * sem) might_sleep(); __asm__ __volatile__( "# atomic interruptible down operation\n\t" - LOCK "decl %1\n\t" /* --sem->count */ + LOCK_PREFIX "decl %1\n\t" /* --sem->count */ "js 2f\n\t" "xorl %0,%0\n" "1:\n" @@ -148,7 +148,7 @@ static inline int down_trylock(struct semaphore * sem) __asm__ __volatile__( "# atomic interruptible down operation\n\t" - LOCK "decl %1\n\t" /* --sem->count */ + LOCK_PREFIX "decl %1\n\t" /* --sem->count */ "js 2f\n\t" "xorl %0,%0\n" "1:\n" @@ -173,7 +173,7 @@ static inline void up(struct semaphore * sem) { __asm__ __volatile__( "# atomic up operation\n\t" - LOCK "incl %0\n\t" /* ++sem->count */ + LOCK_PREFIX "incl %0\n\t" /* ++sem->count */ "jle 2f\n" "1:\n" LOCK_SECTION_START("") diff --git a/include/asm-i386/spinlock.h b/include/asm-i386/spinlock.h index 23604350cdf..d76b7693cf1 100644 --- a/include/asm-i386/spinlock.h +++ b/include/asm-i386/spinlock.h @@ -35,31 +35,41 @@ #define __raw_spin_lock_string_flags \ "\n1:\t" \ "lock ; decb %0\n\t" \ - "jns 4f\n\t" \ + "jns 5f\n" \ "2:\t" \ "testl $0x200, %1\n\t" \ - "jz 3f\n\t" \ - "sti\n\t" \ + "jz 4f\n\t" \ + "sti\n" \ "3:\t" \ "rep;nop\n\t" \ "cmpb $0, %0\n\t" \ "jle 3b\n\t" \ "cli\n\t" \ "jmp 1b\n" \ - "4:\n\t" + "4:\t" \ + "rep;nop\n\t" \ + "cmpb $0, %0\n\t" \ + "jg 1b\n\t" \ + "jmp 4b\n" \ + "5:\n\t" + +#define __raw_spin_lock_string_up \ + "\n\tdecb %0" static inline void __raw_spin_lock(raw_spinlock_t *lock) { - __asm__ __volatile__( - __raw_spin_lock_string - :"=m" (lock->slock) : : "memory"); + alternative_smp( + __raw_spin_lock_string, + __raw_spin_lock_string_up, + "=m" (lock->slock) : : "memory"); } static inline void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags) { - __asm__ __volatile__( - __raw_spin_lock_string_flags - :"=m" (lock->slock) : "r" (flags) : "memory"); + alternative_smp( + __raw_spin_lock_string_flags, + __raw_spin_lock_string_up, + "=m" (lock->slock) : "r" (flags) : "memory"); } static inline int __raw_spin_trylock(raw_spinlock_t *lock) @@ -178,12 +188,12 @@ static inline int __raw_write_trylock(raw_rwlock_t *lock) static inline void __raw_read_unlock(raw_rwlock_t *rw) { - asm volatile("lock ; incl %0" :"=m" (rw->lock) : : "memory"); + asm volatile(LOCK_PREFIX "incl %0" :"=m" 
(rw->lock) : : "memory"); } static inline void __raw_write_unlock(raw_rwlock_t *rw) { - asm volatile("lock ; addl $" RW_LOCK_BIAS_STR ", %0" + asm volatile(LOCK_PREFIX "addl $" RW_LOCK_BIAS_STR ", %0" : "=m" (rw->lock) : : "memory"); } diff --git a/include/asm-i386/system.h b/include/asm-i386/system.h index 399145a247f..d0d8d7448d8 100644 --- a/include/asm-i386/system.h +++ b/include/asm-i386/system.h @@ -352,67 +352,6 @@ static inline unsigned long long __cmpxchg64(volatile void *ptr, unsigned long l #endif -#ifdef __KERNEL__ -struct alt_instr { - __u8 *instr; /* original instruction */ - __u8 *replacement; - __u8 cpuid; /* cpuid bit set for replacement */ - __u8 instrlen; /* length of original instruction */ - __u8 replacementlen; /* length of new instruction, <= instrlen */ - __u8 pad; -}; -#endif - -/* - * Alternative instructions for different CPU types or capabilities. - * - * This allows to use optimized instructions even on generic binary - * kernels. - * - * length of oldinstr must be longer or equal the length of newinstr - * It can be padded with nops as needed. - * - * For non barrier like inlines please define new variants - * without volatile and memory clobber. - */ -#define alternative(oldinstr, newinstr, feature) \ - asm volatile ("661:\n\t" oldinstr "\n662:\n" \ - ".section .altinstructions,\"a\"\n" \ - " .align 4\n" \ - " .long 661b\n" /* label */ \ - " .long 663f\n" /* new instruction */ \ - " .byte %c0\n" /* feature bit */ \ - " .byte 662b-661b\n" /* sourcelen */ \ - " .byte 664f-663f\n" /* replacementlen */ \ - ".previous\n" \ - ".section .altinstr_replacement,\"ax\"\n" \ - "663:\n\t" newinstr "\n664:\n" /* replacement */ \ - ".previous" :: "i" (feature) : "memory") - -/* - * Alternative inline assembly with input. - * - * Pecularities: - * No memory clobber here. - * Argument numbers start with 1. - * Best is to use constraints that are fixed size (like (%1) ... "r") - * If you use variable sized constraints like "m" or "g" in the - * replacement maake sure to pad to the worst case length. - */ -#define alternative_input(oldinstr, newinstr, feature, input...) \ - asm volatile ("661:\n\t" oldinstr "\n662:\n" \ - ".section .altinstructions,\"a\"\n" \ - " .align 4\n" \ - " .long 661b\n" /* label */ \ - " .long 663f\n" /* new instruction */ \ - " .byte %c0\n" /* feature bit */ \ - " .byte 662b-661b\n" /* sourcelen */ \ - " .byte 664f-663f\n" /* replacementlen */ \ - ".previous\n" \ - ".section .altinstr_replacement,\"ax\"\n" \ - "663:\n\t" newinstr "\n664:\n" /* replacement */ \ - ".previous" :: "i" (feature), ##input) - /* * Force strict CPU ordering. 
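[Editor's note] Several atomic.h hunks in this series (i386 above; ia64, m68k, s390, sparc64 and x86_64 further down) replace the one-line atomic_add_unless() while-loop with an explicit cmpxchg retry loop that exits early on either outcome. A minimal userspace rendering of that loop shape, using C11 atomics rather than the kernel's atomic_cmpxchg() — the function name here is illustrative, not kernel API:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Add 'a' to *v unless *v == u; return true if the add happened.
 * Same retry structure as the rewritten kernel macro: bail out as soon as
 * the excluded value is seen, retry only after losing a cmpxchg race. */
static bool add_unless(atomic_int *v, int a, int u)
{
        int c = atomic_load(v);

        for (;;) {
                if (c == u)
                        return false;            /* hit the excluded value: give up */
                /* On failure this updates c with the current value, so the
                 * loop re-checks against u before trying again. */
                if (atomic_compare_exchange_strong(v, &c, c + a))
                        return true;             /* our cmpxchg won */
        }
}

int main(void)
{
        atomic_int v = 1;

        printf("%d\n", add_unless(&v, 1, 0));    /* 1: incremented 1 -> 2 */
        printf("%d\n", add_unless(&v, 1, 2));    /* 0: value was the excluded 2 */
        printf("%d\n", atomic_load(&v));         /* 2 */
        return 0;
}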
* And yes, this is required on UP too when we're talking @@ -558,5 +497,6 @@ static inline void sched_cacheflush(void) } extern unsigned long arch_align_stack(unsigned long sp); +extern void free_init_pages(char *what, unsigned long begin, unsigned long end); #endif diff --git a/include/asm-i386/uaccess.h b/include/asm-i386/uaccess.h index 3f1337c3420..371457b1ceb 100644 --- a/include/asm-i386/uaccess.h +++ b/include/asm-i386/uaccess.h @@ -197,13 +197,15 @@ extern void __put_user_8(void); #define put_user(x,ptr) \ ({ int __ret_pu; \ + __typeof__(*(ptr)) __pu_val; \ __chk_user_ptr(ptr); \ + __pu_val = x; \ switch(sizeof(*(ptr))) { \ - case 1: __put_user_1(x, ptr); break; \ - case 2: __put_user_2(x, ptr); break; \ - case 4: __put_user_4(x, ptr); break; \ - case 8: __put_user_8(x, ptr); break; \ - default:__put_user_X(x, ptr); break; \ + case 1: __put_user_1(__pu_val, ptr); break; \ + case 2: __put_user_2(__pu_val, ptr); break; \ + case 4: __put_user_4(__pu_val, ptr); break; \ + case 8: __put_user_8(__pu_val, ptr); break; \ + default:__put_user_X(__pu_val, ptr); break; \ } \ __ret_pu; \ }) diff --git a/include/asm-i386/unistd.h b/include/asm-i386/unistd.h index dc81a55dd94..d8afd0e3b81 100644 --- a/include/asm-i386/unistd.h +++ b/include/asm-i386/unistd.h @@ -347,9 +347,9 @@ __syscall_return(type,__res); \ type name(type1 arg1) \ { \ long __res; \ -__asm__ volatile ("int $0x80" \ +__asm__ volatile ("push %%ebx ; movl %2,%%ebx ; int $0x80 ; pop %%ebx" \ : "=a" (__res) \ - : "0" (__NR_##name),"b" ((long)(arg1)) : "memory"); \ + : "0" (__NR_##name),"ri" ((long)(arg1)) : "memory"); \ __syscall_return(type,__res); \ } @@ -357,9 +357,10 @@ __syscall_return(type,__res); \ type name(type1 arg1,type2 arg2) \ { \ long __res; \ -__asm__ volatile ("int $0x80" \ +__asm__ volatile ("push %%ebx ; movl %2,%%ebx ; int $0x80 ; pop %%ebx" \ : "=a" (__res) \ - : "0" (__NR_##name),"b" ((long)(arg1)),"c" ((long)(arg2)) : "memory"); \ + : "0" (__NR_##name),"ri" ((long)(arg1)),"c" ((long)(arg2)) \ + : "memory"); \ __syscall_return(type,__res); \ } @@ -367,9 +368,9 @@ __syscall_return(type,__res); \ type name(type1 arg1,type2 arg2,type3 arg3) \ { \ long __res; \ -__asm__ volatile ("int $0x80" \ +__asm__ volatile ("push %%ebx ; movl %2,%%ebx ; int $0x80 ; pop %%ebx" \ : "=a" (__res) \ - : "0" (__NR_##name),"b" ((long)(arg1)),"c" ((long)(arg2)), \ + : "0" (__NR_##name),"ri" ((long)(arg1)),"c" ((long)(arg2)), \ "d" ((long)(arg3)) : "memory"); \ __syscall_return(type,__res); \ } @@ -378,9 +379,9 @@ __syscall_return(type,__res); \ type name (type1 arg1, type2 arg2, type3 arg3, type4 arg4) \ { \ long __res; \ -__asm__ volatile ("int $0x80" \ +__asm__ volatile ("push %%ebx ; movl %2,%%ebx ; int $0x80 ; pop %%ebx" \ : "=a" (__res) \ - : "0" (__NR_##name),"b" ((long)(arg1)),"c" ((long)(arg2)), \ + : "0" (__NR_##name),"ri" ((long)(arg1)),"c" ((long)(arg2)), \ "d" ((long)(arg3)),"S" ((long)(arg4)) : "memory"); \ __syscall_return(type,__res); \ } @@ -390,10 +391,12 @@ __syscall_return(type,__res); \ type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \ { \ long __res; \ -__asm__ volatile ("int $0x80" \ +__asm__ volatile ("push %%ebx ; movl %2,%%ebx ; movl %1,%%eax ; " \ + "int $0x80 ; pop %%ebx" \ : "=a" (__res) \ - : "0" (__NR_##name),"b" ((long)(arg1)),"c" ((long)(arg2)), \ - "d" ((long)(arg3)),"S" ((long)(arg4)),"D" ((long)(arg5)) : "memory"); \ + : "i" (__NR_##name),"ri" ((long)(arg1)),"c" ((long)(arg2)), \ + "d" ((long)(arg3)),"S" ((long)(arg4)),"D" ((long)(arg5)) \ + : "memory"); \ 
__syscall_return(type,__res); \ } @@ -402,11 +405,14 @@ __syscall_return(type,__res); \ type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5,type6 arg6) \ { \ long __res; \ -__asm__ volatile ("push %%ebp ; movl %%eax,%%ebp ; movl %1,%%eax ; int $0x80 ; pop %%ebp" \ + struct { long __a1; long __a6; } __s = { (long)arg1, (long)arg6 }; \ +__asm__ volatile ("push %%ebp ; push %%ebx ; movl 4(%2),%%ebp ; " \ + "movl 0(%2),%%ebx ; movl %1,%%eax ; int $0x80 ; " \ + "pop %%ebx ; pop %%ebp" \ : "=a" (__res) \ - : "i" (__NR_##name),"b" ((long)(arg1)),"c" ((long)(arg2)), \ - "d" ((long)(arg3)),"S" ((long)(arg4)),"D" ((long)(arg5)), \ - "0" ((long)(arg6)) : "memory"); \ + : "i" (__NR_##name),"0" ((long)(&__s)),"c" ((long)(arg2)), \ + "d" ((long)(arg3)),"S" ((long)(arg4)),"D" ((long)(arg5)) \ + : "memory"); \ __syscall_return(type,__res); \ } diff --git a/include/asm-ia64/atomic.h b/include/asm-ia64/atomic.h index d3e0dfa99e1..569ec7574ba 100644 --- a/include/asm-ia64/atomic.h +++ b/include/asm-ia64/atomic.h @@ -95,8 +95,14 @@ ia64_atomic64_sub (__s64 i, atomic64_t *v) ({ \ int c, old; \ c = atomic_read(v); \ - while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \ + for (;;) { \ + if (unlikely(c == (u))) \ + break; \ + old = atomic_cmpxchg((v), c, c + (a)); \ + if (likely(old == c)) \ + break; \ c = old; \ + } \ c != (u); \ }) #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0) diff --git a/include/asm-ia64/cache.h b/include/asm-ia64/cache.h index 40dd25195d6..f0a104db8f2 100644 --- a/include/asm-ia64/cache.h +++ b/include/asm-ia64/cache.h @@ -25,4 +25,6 @@ # define SMP_CACHE_BYTES (1 << 3) #endif +#define __read_mostly __attribute__((__section__(".data.read_mostly"))) + #endif /* _ASM_IA64_CACHE_H */ diff --git a/include/asm-m68k/atomic.h b/include/asm-m68k/atomic.h index 862e497c264..732d696d31a 100644 --- a/include/asm-m68k/atomic.h +++ b/include/asm-m68k/atomic.h @@ -175,8 +175,14 @@ static inline void atomic_set_mask(unsigned long mask, unsigned long *v) ({ \ int c, old; \ c = atomic_read(v); \ - while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \ + for (;;) { \ + if (unlikely(c == (u))) \ + break; \ + old = atomic_cmpxchg((v), c, c + (a)); \ + if (likely(old == c)) \ + break; \ c = old; \ + } \ c != (u); \ }) #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0) diff --git a/include/asm-parisc/cache.h b/include/asm-parisc/cache.h index 93f179f13ce..ae50f8e12ee 100644 --- a/include/asm-parisc/cache.h +++ b/include/asm-parisc/cache.h @@ -29,6 +29,8 @@ #define SMP_CACHE_BYTES L1_CACHE_BYTES +#define __read_mostly __attribute__((__section__(".data.read_mostly"))) + extern void flush_data_cache_local(void *); /* flushes local data-cache only */ extern void flush_instruction_cache_local(void *); /* flushes local code-cache only */ #ifdef CONFIG_SMP diff --git a/include/asm-powerpc/percpu.h b/include/asm-powerpc/percpu.h index e31922c50e5..464301cd0d0 100644 --- a/include/asm-powerpc/percpu.h +++ b/include/asm-powerpc/percpu.h @@ -27,10 +27,9 @@ #define percpu_modcopy(pcpudst, src, size) \ do { \ unsigned int __i; \ - for (__i = 0; __i < NR_CPUS; __i++) \ - if (cpu_possible(__i)) \ - memcpy((pcpudst)+__per_cpu_offset(__i), \ - (src), (size)); \ + for_each_cpu(__i) \ + memcpy((pcpudst)+__per_cpu_offset(__i), \ + (src), (size)); \ } while (0) extern void setup_per_cpu_areas(void); diff --git a/include/asm-s390/atomic.h b/include/asm-s390/atomic.h index be6fefe223d..de1d9926aa6 100644 --- a/include/asm-s390/atomic.h +++ b/include/asm-s390/atomic.h @@ 
-89,10 +89,15 @@ static __inline__ int atomic_cmpxchg(atomic_t *v, int old, int new) static __inline__ int atomic_add_unless(atomic_t *v, int a, int u) { int c, old; - c = atomic_read(v); - while (c != u && (old = atomic_cmpxchg(v, c, c + a)) != c) + for (;;) { + if (unlikely(c == u)) + break; + old = atomic_cmpxchg(v, c, c + a); + if (likely(old == c)) + break; c = old; + } return c != u; } @@ -167,10 +172,15 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long long a, long long u) { long long c, old; - c = atomic64_read(v); - while (c != u && (old = atomic64_cmpxchg(v, c, c + a)) != c) + for (;;) { + if (unlikely(c == u)) + break; + old = atomic64_cmpxchg(v, c, c + a); + if (likely(old == c)) + break; c = old; + } return c != u; } diff --git a/include/asm-s390/percpu.h b/include/asm-s390/percpu.h index 123fcaca295..e10ed87094f 100644 --- a/include/asm-s390/percpu.h +++ b/include/asm-s390/percpu.h @@ -46,10 +46,9 @@ extern unsigned long __per_cpu_offset[NR_CPUS]; #define percpu_modcopy(pcpudst, src, size) \ do { \ unsigned int __i; \ - for (__i = 0; __i < NR_CPUS; __i++) \ - if (cpu_possible(__i)) \ - memcpy((pcpudst)+__per_cpu_offset[__i], \ - (src), (size)); \ + for_each_cpu(__i) \ + memcpy((pcpudst)+__per_cpu_offset[__i], \ + (src), (size)); \ } while (0) #else /* ! SMP */ diff --git a/include/asm-sparc64/atomic.h b/include/asm-sparc64/atomic.h index 25256bdc8aa..468eb48d814 100644 --- a/include/asm-sparc64/atomic.h +++ b/include/asm-sparc64/atomic.h @@ -78,9 +78,15 @@ extern int atomic64_sub_ret(int, atomic64_t *); ({ \ int c, old; \ c = atomic_read(v); \ - while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \ + for (;;) { \ + if (unlikely(c == (u))) \ + break; \ + old = atomic_cmpxchg((v), c, c + (a)); \ + if (likely(old == c)) \ + break; \ c = old; \ - c != (u); \ + } \ + likely(c != (u)); \ }) #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0) diff --git a/include/asm-sparc64/cache.h b/include/asm-sparc64/cache.h index f7d35a2ae9b..e9df17acedd 100644 --- a/include/asm-sparc64/cache.h +++ b/include/asm-sparc64/cache.h @@ -13,4 +13,6 @@ #define SMP_CACHE_BYTES_SHIFT 6 #define SMP_CACHE_BYTES (1 << SMP_CACHE_BYTES_SHIFT) /* L2 cache line size. */ +#define __read_mostly __attribute__((__section__(".data.read_mostly"))) + #endif diff --git a/include/asm-sparc64/percpu.h b/include/asm-sparc64/percpu.h index aea4e51e7cd..82032e159a7 100644 --- a/include/asm-sparc64/percpu.h +++ b/include/asm-sparc64/percpu.h @@ -26,10 +26,9 @@ register unsigned long __local_per_cpu_offset asm("g5"); #define percpu_modcopy(pcpudst, src, size) \ do { \ unsigned int __i; \ - for (__i = 0; __i < NR_CPUS; __i++) \ - if (cpu_possible(__i)) \ - memcpy((pcpudst)+__per_cpu_offset(__i), \ - (src), (size)); \ + for_each_cpu(__i) \ + memcpy((pcpudst)+__per_cpu_offset(__i), \ + (src), (size)); \ } while (0) #else /* ! 
SMP */ diff --git a/include/asm-um/alternative.h b/include/asm-um/alternative.h new file mode 100644 index 00000000000..b6434396bd4 --- /dev/null +++ b/include/asm-um/alternative.h @@ -0,0 +1,6 @@ +#ifndef __UM_ALTERNATIVE_H +#define __UM_ALTERNATIVE_H + +#include "asm/arch/alternative.h" + +#endif diff --git a/include/asm-x86_64/atomic.h b/include/asm-x86_64/atomic.h index 4b5cd553e77..cecbf7baa6a 100644 --- a/include/asm-x86_64/atomic.h +++ b/include/asm-x86_64/atomic.h @@ -405,8 +405,14 @@ static __inline__ long atomic64_sub_return(long i, atomic64_t *v) ({ \ int c, old; \ c = atomic_read(v); \ - while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \ + for (;;) { \ + if (unlikely(c == (u))) \ + break; \ + old = atomic_cmpxchg((v), c, c + (a)); \ + if (likely(old == c)) \ + break; \ c = old; \ + } \ c != (u); \ }) #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0) diff --git a/include/asm-x86_64/cache.h b/include/asm-x86_64/cache.h index 263f0a211ed..c8043a16152 100644 --- a/include/asm-x86_64/cache.h +++ b/include/asm-x86_64/cache.h @@ -20,6 +20,8 @@ __attribute__((__section__(".data.page_aligned"))) #endif +#define __read_mostly __attribute__((__section__(".data.read_mostly"))) + #endif #endif diff --git a/include/asm-x86_64/percpu.h b/include/asm-x86_64/percpu.h index 29a6b0408f7..4405b4adeab 100644 --- a/include/asm-x86_64/percpu.h +++ b/include/asm-x86_64/percpu.h @@ -26,10 +26,9 @@ #define percpu_modcopy(pcpudst, src, size) \ do { \ unsigned int __i; \ - for (__i = 0; __i < NR_CPUS; __i++) \ - if (cpu_possible(__i)) \ - memcpy((pcpudst)+__per_cpu_offset(__i), \ - (src), (size)); \ + for_each_cpu(__i) \ + memcpy((pcpudst)+__per_cpu_offset(__i), \ + (src), (size)); \ } while (0) extern void setup_per_cpu_areas(void); diff --git a/include/linux/cache.h b/include/linux/cache.h index d22e632f41f..cc4b3aafad9 100644 --- a/include/linux/cache.h +++ b/include/linux/cache.h @@ -13,9 +13,7 @@ #define SMP_CACHE_BYTES L1_CACHE_BYTES #endif -#if defined(CONFIG_X86) || defined(CONFIG_SPARC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC) -#define __read_mostly __attribute__((__section__(".data.read_mostly"))) -#else +#ifndef __read_mostly #define __read_mostly #endif diff --git a/include/linux/cdrom.h b/include/linux/cdrom.h index b68fdf1f315..3c9b0bc0512 100644 --- a/include/linux/cdrom.h +++ b/include/linux/cdrom.h @@ -378,7 +378,6 @@ struct cdrom_generic_command #define CDC_MEDIA_CHANGED 0x80 /* media changed */ #define CDC_PLAY_AUDIO 0x100 /* audio functions */ #define CDC_RESET 0x200 /* hard reset device */ -#define CDC_IOCTLS 0x400 /* driver has non-standard ioctls */ #define CDC_DRIVE_STATUS 0x800 /* driver implements drive status */ #define CDC_GENERIC_PACKET 0x1000 /* driver implements generic packets */ #define CDC_CD_R 0x2000 /* drive is a CD-R */ @@ -974,9 +973,7 @@ struct cdrom_device_ops { int (*reset) (struct cdrom_device_info *); /* play stuff */ int (*audio_ioctl) (struct cdrom_device_info *,unsigned int, void *); - /* dev-specific */ - int (*dev_ioctl) (struct cdrom_device_info *, - unsigned int, unsigned long); + /* driver specifications */ const int capability; /* capability flags */ int n_minors; /* number of active minor devices */ diff --git a/include/linux/eventpoll.h b/include/linux/eventpoll.h index 1289f0ec4c0..1e4bdfcf83a 100644 --- a/include/linux/eventpoll.h +++ b/include/linux/eventpoll.h @@ -52,7 +52,12 @@ struct file; #ifdef CONFIG_EPOLL /* Used to initialize the epoll bits inside the "struct file" */ -void eventpoll_init_file(struct file 
*file); +static inline void eventpoll_init_file(struct file *file) +{ + INIT_LIST_HEAD(&file->f_ep_links); + spin_lock_init(&file->f_ep_lock); +} + /* Used to release the epoll bits inside the "struct file" */ void eventpoll_release_file(struct file *file); @@ -85,7 +90,6 @@ static inline void eventpoll_release(struct file *file) eventpoll_release_file(file); } - #else static inline void eventpoll_init_file(struct file *file) {} diff --git a/include/linux/ext3_fs.h b/include/linux/ext3_fs.h index c0272d73ab2..e7239f2f97a 100644 --- a/include/linux/ext3_fs.h +++ b/include/linux/ext3_fs.h @@ -772,9 +772,12 @@ extern unsigned long ext3_count_free (struct buffer_head *, unsigned); /* inode.c */ -extern int ext3_forget(handle_t *, int, struct inode *, struct buffer_head *, int); -extern struct buffer_head * ext3_getblk (handle_t *, struct inode *, long, int, int *); -extern struct buffer_head * ext3_bread (handle_t *, struct inode *, int, int, int *); +int ext3_forget(handle_t *, int, struct inode *, struct buffer_head *, int); +struct buffer_head * ext3_getblk (handle_t *, struct inode *, long, int, int *); +struct buffer_head * ext3_bread (handle_t *, struct inode *, int, int, int *); +int ext3_get_block_handle(handle_t *handle, struct inode *inode, + sector_t iblock, struct buffer_head *bh_result, int create, + int extend_disksize); extern void ext3_read_inode (struct inode *); extern int ext3_write_inode (struct inode *, int); diff --git a/include/linux/ext3_fs_i.h b/include/linux/ext3_fs_i.h index e71dd98dbca..7abf9014718 100644 --- a/include/linux/ext3_fs_i.h +++ b/include/linux/ext3_fs_i.h @@ -19,6 +19,7 @@ #include <linux/rwsem.h> #include <linux/rbtree.h> #include <linux/seqlock.h> +#include <linux/mutex.h> struct ext3_reserve_window { __u32 _rsv_start; /* First byte reserved */ @@ -122,16 +123,16 @@ struct ext3_inode_info { __u16 i_extra_isize; /* - * truncate_sem is for serialising ext3_truncate() against + * truncate_mutex is for serialising ext3_truncate() against * ext3_getblock(). In the 2.4 ext2 design, great chunks of inode's * data tree are chopped off during truncate. We can't do that in * ext3 because whenever we perform intermediate commits during * truncate, the inode and all the metadata blocks *must* be in a * consistent state which allows truncation of the orphans to restart * during recovery. Hence we must fix the get_block-vs-truncate race - * by other means, so we have truncate_sem. + * by other means, so we have truncate_mutex. 
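[Editor's note] A large share of this patch is mechanical conversion of binary semaphores to the then-new struct mutex: the ext3 truncate_mutex here, then bd_mutex, inotify_mutex, s_vfs_rename_mutex, the i2c bus_lock, jbd, quota, tty and more below. A userspace analogy — pthreads, not kernel API — of why a dedicated mutex type is preferred over a semaphore initialised to 1:

#include <pthread.h>
#include <semaphore.h>
#include <stdio.h>

static sem_t old_style_lock;             /* counting semaphore used as a lock */
static pthread_mutex_t new_style_lock = PTHREAD_MUTEX_INITIALIZER;
static int counter_old, counter_new;     /* one counter per locking style */

static void *worker(void *arg)
{
        (void)arg;

        /* Before: down()/up() on a semaphore initialised to 1. Nothing ties
         * the up() to the task that did the down(), so debugging aids cannot
         * tell a lock apart from a completion-style semaphore. */
        sem_wait(&old_style_lock);
        counter_old++;
        sem_post(&old_style_lock);

        /* After: a real mutex with owner semantics (only the locker may
         * unlock), which is all these converted code paths ever needed. */
        pthread_mutex_lock(&new_style_lock);
        counter_new++;
        pthread_mutex_unlock(&new_style_lock);
        return NULL;
}

int main(void)
{
        pthread_t t[4];

        sem_init(&old_style_lock, 0, 1);     /* count of 1 == binary lock */
        for (int i = 0; i < 4; i++)
                pthread_create(&t[i], NULL, worker, NULL);
        for (int i = 0; i < 4; i++)
                pthread_join(t[i], NULL);
        printf("%d %d\n", counter_old, counter_new);   /* 4 4 */
        return 0;
}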
*/ - struct semaphore truncate_sem; + struct mutex truncate_mutex; struct inode vfs_inode; }; diff --git a/include/linux/file.h b/include/linux/file.h index 9901b850f2e..9f7c2513866 100644 --- a/include/linux/file.h +++ b/include/linux/file.h @@ -10,6 +10,7 @@ #include <linux/compiler.h> #include <linux/spinlock.h> #include <linux/rcupdate.h> +#include <linux/types.h> /* * The default fd array needs to be at least BITS_PER_LONG, @@ -17,10 +18,22 @@ */ #define NR_OPEN_DEFAULT BITS_PER_LONG +/* + * The embedded_fd_set is a small fd_set, + * suitable for most tasks (which open <= BITS_PER_LONG files) + */ +struct embedded_fd_set { + unsigned long fds_bits[1]; +}; + +/* + * More than this number of fds: we use a separately allocated fd_set + */ +#define EMBEDDED_FD_SET_SIZE (BITS_PER_BYTE * sizeof(struct embedded_fd_set)) + struct fdtable { unsigned int max_fds; int max_fdset; - int next_fd; struct file ** fd; /* current fd array */ fd_set *close_on_exec; fd_set *open_fds; @@ -33,13 +46,20 @@ struct fdtable { * Open file table structure */ struct files_struct { + /* + * read mostly part + */ atomic_t count; struct fdtable *fdt; struct fdtable fdtab; - fd_set close_on_exec_init; - fd_set open_fds_init; + /* + * written part on a separate cache line in SMP + */ + spinlock_t file_lock ____cacheline_aligned_in_smp; + int next_fd; + struct embedded_fd_set close_on_exec_init; + struct embedded_fd_set open_fds_init; struct file * fd_array[NR_OPEN_DEFAULT]; - spinlock_t file_lock; /* Protects concurrent writers. Nests inside tsk->alloc_lock */ }; #define files_fdtable(files) (rcu_dereference((files)->fdt)) diff --git a/include/linux/fs.h b/include/linux/fs.h index 128d0082522..f9c9dea636d 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -397,8 +397,8 @@ struct block_device { dev_t bd_dev; /* not a kdev_t - it's a search key */ struct inode * bd_inode; /* will die */ int bd_openers; - struct semaphore bd_sem; /* open/close mutex */ - struct semaphore bd_mount_sem; /* mount mutex */ + struct mutex bd_mutex; /* open/close mutex */ + struct mutex bd_mount_mutex; /* mount mutex */ struct list_head bd_inodes; void * bd_holder; int bd_holders; @@ -509,7 +509,7 @@ struct inode { #ifdef CONFIG_INOTIFY struct list_head inotify_watches; /* watches on this inode */ - struct semaphore inotify_sem; /* protects the watches list */ + struct mutex inotify_mutex; /* protects the watches list */ #endif unsigned long i_state; @@ -847,7 +847,7 @@ struct super_block { * The next field is for VFS *only*. No filesystems have any business * even looking at it. You had been warned. */ - struct semaphore s_vfs_rename_sem; /* Kludge */ + struct mutex s_vfs_rename_mutex; /* Kludge */ /* Granuality of c/m/atime in ns. 
Cannot be worse than a second */ @@ -1115,6 +1115,18 @@ static inline void mark_inode_dirty_sync(struct inode *inode) __mark_inode_dirty(inode, I_DIRTY_SYNC); } +static inline void inode_inc_link_count(struct inode *inode) +{ + inode->i_nlink++; + mark_inode_dirty(inode); +} + +static inline void inode_dec_link_count(struct inode *inode) +{ + inode->i_nlink--; + mark_inode_dirty(inode); +} + extern void touch_atime(struct vfsmount *mnt, struct dentry *dentry); static inline void file_accessed(struct file *file) { @@ -1534,7 +1546,7 @@ extern void destroy_inode(struct inode *); extern struct inode *new_inode(struct super_block *); extern int remove_suid(struct dentry *); extern void remove_dquot_ref(struct super_block *, int, struct list_head *); -extern struct semaphore iprune_sem; +extern struct mutex iprune_mutex; extern void __insert_inode_hash(struct inode *, unsigned long hashval); extern void remove_inode_hash(struct inode *); diff --git a/include/linux/generic_serial.h b/include/linux/generic_serial.h index 0abe9d9a006..652611a4bdc 100644 --- a/include/linux/generic_serial.h +++ b/include/linux/generic_serial.h @@ -12,6 +12,8 @@ #ifndef GENERIC_SERIAL_H #define GENERIC_SERIAL_H +#include <linux/mutex.h> + struct real_driver { void (*disable_tx_interrupts) (void *); void (*enable_tx_interrupts) (void *); @@ -34,7 +36,7 @@ struct gs_port { int xmit_head; int xmit_tail; int xmit_cnt; - struct semaphore port_write_sem; + struct mutex port_write_mutex; int flags; wait_queue_head_t open_wait; wait_queue_head_t close_wait; diff --git a/include/linux/genhd.h b/include/linux/genhd.h index eef5ccdcd73..fd647fde5ec 100644 --- a/include/linux/genhd.h +++ b/include/linux/genhd.h @@ -149,22 +149,16 @@ struct disk_attribute { ({ \ typeof(gendiskp->dkstats->field) res = 0; \ int i; \ - for (i=0; i < NR_CPUS; i++) { \ - if (!cpu_possible(i)) \ - continue; \ + for_each_cpu(i) \ res += per_cpu_ptr(gendiskp->dkstats, i)->field; \ - } \ res; \ }) static inline void disk_stat_set_all(struct gendisk *gendiskp, int value) { int i; - for (i=0; i < NR_CPUS; i++) { - if (cpu_possible(i)) { - memset(per_cpu_ptr(gendiskp->dkstats, i), value, - sizeof (struct disk_stats)); - } - } + for_each_cpu(i) + memset(per_cpu_ptr(gendiskp->dkstats, i), value, + sizeof (struct disk_stats)); } #else diff --git a/include/linux/hwmon-sysfs.h b/include/linux/hwmon-sysfs.h index 7eb4004b360..a90c09d331c 100644 --- a/include/linux/hwmon-sysfs.h +++ b/include/linux/hwmon-sysfs.h @@ -27,11 +27,13 @@ struct sensor_device_attribute{ #define to_sensor_dev_attr(_dev_attr) \ container_of(_dev_attr, struct sensor_device_attribute, dev_attr) -#define SENSOR_DEVICE_ATTR(_name,_mode,_show,_store,_index) \ -struct sensor_device_attribute sensor_dev_attr_##_name = { \ - .dev_attr = __ATTR(_name,_mode,_show,_store), \ - .index = _index, \ -} +#define SENSOR_ATTR(_name, _mode, _show, _store, _index) \ + { .dev_attr = __ATTR(_name, _mode, _show, _store), \ + .index = _index } + +#define SENSOR_DEVICE_ATTR(_name, _mode, _show, _store, _index) \ +struct sensor_device_attribute sensor_dev_attr_##_name \ + = SENSOR_ATTR(_name, _mode, _show, _store, _index) struct sensor_device_attribute_2 { struct device_attribute dev_attr; @@ -41,11 +43,13 @@ struct sensor_device_attribute_2 { #define to_sensor_dev_attr_2(_dev_attr) \ container_of(_dev_attr, struct sensor_device_attribute_2, dev_attr) +#define SENSOR_ATTR_2(_name, _mode, _show, _store, _nr, _index) \ + { .dev_attr = __ATTR(_name, _mode, _show, _store), \ + .index = _index, \ + .nr = _nr } + #define 
SENSOR_DEVICE_ATTR_2(_name,_mode,_show,_store,_nr,_index) \ -struct sensor_device_attribute_2 sensor_dev_attr_##_name = { \ - .dev_attr = __ATTR(_name,_mode,_show,_store), \ - .index = _index, \ - .nr = _nr, \ -} +struct sensor_device_attribute_2 sensor_dev_attr_##_name \ + = SENSOR_ATTR_2(_name, _mode, _show, _store, _nr, _index) #endif /* _LINUX_HWMON_SYSFS_H */ diff --git a/include/linux/i2c-id.h b/include/linux/i2c-id.h index 474c8f4f5d4..ec311bc8943 100644 --- a/include/linux/i2c-id.h +++ b/include/linux/i2c-id.h @@ -172,7 +172,6 @@ #define I2C_HW_B_RIVA 0x010010 /* Riva based graphics cards */ #define I2C_HW_B_IOC 0x010011 /* IOC bit-wiggling */ #define I2C_HW_B_TSUNA 0x010012 /* DEC Tsunami chipset */ -#define I2C_HW_B_FRODO 0x010013 /* 2d3D SA-1110 Development Board */ #define I2C_HW_B_OMAHA 0x010014 /* Omaha I2C interface (ARM) */ #define I2C_HW_B_GUIDE 0x010015 /* Guide bit-basher */ #define I2C_HW_B_IXP2000 0x010016 /* GPIO on IXP2000 systems */ diff --git a/include/linux/i2c.h b/include/linux/i2c.h index 63f1d63cc1d..1635ee25918 100644 --- a/include/linux/i2c.h +++ b/include/linux/i2c.h @@ -32,7 +32,7 @@ #include <linux/mod_devicetable.h> #include <linux/device.h> /* for struct device */ #include <linux/sched.h> /* for completion */ -#include <asm/semaphore.h> +#include <linux/mutex.h> /* --- For i2c-isa ---------------------------------------------------- */ @@ -225,8 +225,8 @@ struct i2c_adapter { int (*client_unregister)(struct i2c_client *); /* data fields that are valid for all devices */ - struct semaphore bus_lock; - struct semaphore clist_lock; + struct mutex bus_lock; + struct mutex clist_lock; int timeout; int retries; diff --git a/include/linux/init_task.h b/include/linux/init_task.h index dcfd2ecccb5..92146f3b742 100644 --- a/include/linux/init_task.h +++ b/include/linux/init_task.h @@ -7,11 +7,10 @@ #define INIT_FDTABLE \ { \ .max_fds = NR_OPEN_DEFAULT, \ - .max_fdset = __FD_SETSIZE, \ - .next_fd = 0, \ + .max_fdset = EMBEDDED_FD_SET_SIZE, \ .fd = &init_files.fd_array[0], \ - .close_on_exec = &init_files.close_on_exec_init, \ - .open_fds = &init_files.open_fds_init, \ + .close_on_exec = (fd_set *)&init_files.close_on_exec_init, \ + .open_fds = (fd_set *)&init_files.open_fds_init, \ .rcu = RCU_HEAD_INIT, \ .free_files = NULL, \ .next = NULL, \ @@ -20,9 +19,10 @@ #define INIT_FILES \ { \ .count = ATOMIC_INIT(1), \ - .file_lock = SPIN_LOCK_UNLOCKED, \ .fdt = &init_files.fdtab, \ .fdtab = INIT_FDTABLE, \ + .file_lock = SPIN_LOCK_UNLOCKED, \ + .next_fd = 0, \ .close_on_exec_init = { { 0, } }, \ .open_fds_init = { { 0, } }, \ .fd_array = { NULL, } \ diff --git a/include/linux/jbd.h b/include/linux/jbd.h index 41ee79962bb..2ccbfb6340b 100644 --- a/include/linux/jbd.h +++ b/include/linux/jbd.h @@ -28,6 +28,7 @@ #include <linux/journal-head.h> #include <linux/stddef.h> #include <linux/bit_spinlock.h> +#include <linux/mutex.h> #include <asm/semaphore.h> #endif @@ -575,7 +576,7 @@ struct transaction_s * @j_wait_checkpoint: Wait queue to trigger checkpointing * @j_wait_commit: Wait queue to trigger commit * @j_wait_updates: Wait queue to wait for updates to complete - * @j_checkpoint_sem: Semaphore for locking against concurrent checkpoints + * @j_checkpoint_mutex: Mutex for locking against concurrent checkpoints * @j_head: Journal head - identifies the first unused block in the journal * @j_tail: Journal tail - identifies the oldest still-used block in the * journal. 
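[Editor's note] The files_struct rework a little above (linux/file.h, plus the INIT_FDTABLE initialiser here using EMBEDDED_FD_SET_SIZE) keeps a single-word embedded_fd_set inline — enough for tasks with at most BITS_PER_LONG open files — and only falls back to a separately allocated set beyond that. A userspace sketch of that embed-then-spill bitmap, with invented names and no kernel types:

#include <limits.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define EMBEDDED_BITS (sizeof(unsigned long) * CHAR_BIT)

struct fd_bits {
        unsigned long embedded;     /* covers fds 0..EMBEDDED_BITS-1 inline */
        unsigned long *spill;       /* allocated only when we outgrow it */
        size_t nwords;              /* words in 'spill' (0 while embedded) */
};

static void set_fd(struct fd_bits *b, unsigned int fd)
{
        if (b->spill == NULL && fd < EMBEDDED_BITS) {
                b->embedded |= 1ul << fd;        /* common case: no allocation */
                return;
        }
        if (fd / EMBEDDED_BITS >= b->nwords) {
                size_t want = fd / EMBEDDED_BITS + 1;
                unsigned long *n = calloc(want, sizeof(*n));

                if (!n)
                        abort();                 /* keep the sketch simple */
                if (b->spill) {
                        memcpy(n, b->spill, b->nwords * sizeof(*n));
                        free(b->spill);
                } else {
                        n[0] = b->embedded;      /* carry the inline word across */
                }
                b->spill = n;
                b->nwords = want;
        }
        b->spill[fd / EMBEDDED_BITS] |= 1ul << (fd % EMBEDDED_BITS);
}

int main(void)
{
        struct fd_bits b = { 0, NULL, 0 };

        set_fd(&b, 3);                  /* stays in the embedded word */
        set_fd(&b, 100);                /* forces the spill allocation */
        printf("%zu words allocated\n", b.nwords);
        free(b.spill);
        return 0;
}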
@@ -645,7 +646,7 @@ struct journal_s int j_barrier_count; /* The barrier lock itself */ - struct semaphore j_barrier; + struct mutex j_barrier; /* * Transactions: The current running transaction... @@ -687,7 +688,7 @@ struct journal_s wait_queue_head_t j_wait_updates; /* Semaphore for locking against concurrent checkpoints */ - struct semaphore j_checkpoint_sem; + struct mutex j_checkpoint_mutex; /* * Journal head: identifies the first unused block in the journal. diff --git a/include/linux/kernel.h b/include/linux/kernel.h index 3b507bf05d0..bb6e7ddee2f 100644 --- a/include/linux/kernel.h +++ b/include/linux/kernel.h @@ -91,6 +91,9 @@ extern struct notifier_block *panic_notifier_list; extern long (*panic_blink)(long time); NORET_TYPE void panic(const char * fmt, ...) __attribute__ ((NORET_AND format (printf, 1, 2))); +extern void oops_enter(void); +extern void oops_exit(void); +extern int oops_may_print(void); fastcall NORET_TYPE void do_exit(long error_code) ATTRIB_NORET; NORET_TYPE void complete_and_exit(struct completion *, long) diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h index 669756bc20a..778adc0fa64 100644 --- a/include/linux/kprobes.h +++ b/include/linux/kprobes.h @@ -36,6 +36,7 @@ #include <linux/percpu.h> #include <linux/spinlock.h> #include <linux/rcupdate.h> +#include <linux/mutex.h> #ifdef CONFIG_KPROBES #include <asm/kprobes.h> @@ -152,7 +153,7 @@ struct kretprobe_instance { }; extern spinlock_t kretprobe_lock; -extern struct semaphore kprobe_mutex; +extern struct mutex kprobe_mutex; extern int arch_prepare_kprobe(struct kprobe *p); extern void arch_arm_kprobe(struct kprobe *p); extern void arch_disarm_kprobe(struct kprobe *p); diff --git a/include/linux/loop.h b/include/linux/loop.h index f96506782eb..e76c7611d6c 100644 --- a/include/linux/loop.h +++ b/include/linux/loop.h @@ -17,6 +17,7 @@ #include <linux/bio.h> #include <linux/blkdev.h> #include <linux/spinlock.h> +#include <linux/mutex.h> /* Possible states of device */ enum { @@ -60,7 +61,7 @@ struct loop_device { int lo_state; struct completion lo_done; struct completion lo_bh_done; - struct semaphore lo_ctl_mutex; + struct mutex lo_ctl_mutex; int lo_pending; request_queue_t *lo_queue; diff --git a/include/linux/msdos_fs.h b/include/linux/msdos_fs.h index 8bcd9450d92..779e6a5744c 100644 --- a/include/linux/msdos_fs.h +++ b/include/linux/msdos_fs.h @@ -184,6 +184,7 @@ struct fat_slot_info { #include <linux/string.h> #include <linux/nls.h> #include <linux/fs.h> +#include <linux/mutex.h> struct fat_mount_options { uid_t fs_uid; @@ -226,7 +227,7 @@ struct msdos_sb_info { unsigned long max_cluster; /* maximum cluster number */ unsigned long root_cluster; /* first cluster of the root directory */ unsigned long fsinfo_sector; /* sector number of FAT32 fsinfo */ - struct semaphore fat_lock; + struct mutex fat_lock; unsigned int prev_free; /* previously allocated cluster number */ unsigned int free_clusters; /* -1 if undefined */ struct fat_mount_options options; diff --git a/include/linux/nbd.h b/include/linux/nbd.h index f95d51fae73..a6ce409ec6f 100644 --- a/include/linux/nbd.h +++ b/include/linux/nbd.h @@ -38,6 +38,7 @@ enum { #ifdef __KERNEL__ #include <linux/wait.h> +#include <linux/mutex.h> /* values for flags field */ #define NBD_READ_ONLY 0x0001 @@ -57,7 +58,7 @@ struct nbd_device { struct request *active_req; wait_queue_head_t active_wq; - struct semaphore tx_lock; + struct mutex tx_lock; struct gendisk *disk; int blksize; u64 bytesize; diff --git a/include/linux/ncp_fs_i.h b/include/linux/ncp_fs_i.h 
index 415be1ec6f9..bdb4c8ae692 100644 --- a/include/linux/ncp_fs_i.h +++ b/include/linux/ncp_fs_i.h @@ -19,7 +19,7 @@ struct ncp_inode_info { __le32 DosDirNum; __u8 volNumber; __le32 nwattr; - struct semaphore open_sem; + struct mutex open_mutex; atomic_t opened; int access; int flags; diff --git a/include/linux/ncp_fs_sb.h b/include/linux/ncp_fs_sb.h index cf858eb80f0..b089d950628 100644 --- a/include/linux/ncp_fs_sb.h +++ b/include/linux/ncp_fs_sb.h @@ -11,6 +11,7 @@ #include <linux/types.h> #include <linux/ncp_mount.h> #include <linux/net.h> +#include <linux/mutex.h> #ifdef __KERNEL__ @@ -51,7 +52,7 @@ struct ncp_server { receive replies */ int lock; /* To prevent mismatch in protocols. */ - struct semaphore sem; + struct mutex mutex; int current_size; /* for packet preparation */ int has_subfunction; @@ -96,7 +97,7 @@ struct ncp_server { struct { struct work_struct tq; /* STREAM/DGRAM: data/error ready */ struct ncp_request_reply* creq; /* STREAM/DGRAM: awaiting reply from this request */ - struct semaphore creq_sem; /* DGRAM only: lock accesses to rcv.creq */ + struct mutex creq_mutex; /* DGRAM only: lock accesses to rcv.creq */ unsigned int state; /* STREAM only: receiver state */ struct { diff --git a/include/linux/pci.h b/include/linux/pci.h index fe1a2b02fc5..0aad5a378e9 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h @@ -95,6 +95,11 @@ enum pci_channel_state { pci_channel_io_perm_failure = (__force pci_channel_state_t) 3, }; +typedef unsigned short __bitwise pci_bus_flags_t; +enum pci_bus_flags { + PCI_BUS_FLAGS_NO_MSI = (pci_bus_flags_t) 1, +}; + /* * The pci_dev structure is used to describe PCI devices. */ @@ -203,7 +208,7 @@ struct pci_bus { char name[48]; unsigned short bridge_ctl; /* manage NO_ISA/FBB/et al behaviors */ - unsigned short pad2; + pci_bus_flags_t bus_flags; /* Inherited by child busses */ struct device *bridge; struct class_device class_dev; struct bin_attribute *legacy_io; /* legacy I/O for this bus */ @@ -485,9 +490,9 @@ void pdev_sort_resources(struct pci_dev *, struct resource_list *); void pci_fixup_irqs(u8 (*)(struct pci_dev *, u8 *), int (*)(struct pci_dev *, u8, u8)); #define HAVE_PCI_REQ_REGIONS 2 -int pci_request_regions(struct pci_dev *, char *); +int pci_request_regions(struct pci_dev *, const char *); void pci_release_regions(struct pci_dev *); -int pci_request_region(struct pci_dev *, int, char *); +int pci_request_region(struct pci_dev *, int, const char *); void pci_release_region(struct pci_dev *, int); /* drivers/pci/bus.c */ @@ -516,6 +521,7 @@ int pci_scan_bridge(struct pci_bus *bus, struct pci_dev * dev, int max, int pass void pci_walk_bus(struct pci_bus *top, void (*cb)(struct pci_dev *, void *), void *userdata); int pci_cfg_space_size(struct pci_dev *dev); +unsigned char pci_bus_max_busnr(struct pci_bus* bus); /* kmem_cache style wrapper around pci_alloc_consistent() */ diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index ec3c3293262..f3dcf89d523 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h @@ -1371,6 +1371,7 @@ #define PCI_DEVICE_ID_SERVERWORKS_OSB4 0x0200 #define PCI_DEVICE_ID_SERVERWORKS_CSB5 0x0201 #define PCI_DEVICE_ID_SERVERWORKS_CSB6 0x0203 +#define PCI_DEVICE_ID_SERVERWORKS_HT1000SB 0x0205 #define PCI_DEVICE_ID_SERVERWORKS_OSB4IDE 0x0211 #define PCI_DEVICE_ID_SERVERWORKS_CSB5IDE 0x0212 #define PCI_DEVICE_ID_SERVERWORKS_CSB6IDE 0x0213 @@ -1864,11 +1865,13 @@ #define PCI_DEVICE_ID_TIGON3_5780S 0x166b #define PCI_DEVICE_ID_TIGON3_5705F 0x166e #define PCI_DEVICE_ID_TIGON3_5754M 0x1672 
+#define PCI_DEVICE_ID_TIGON3_5755M 0x1673 #define PCI_DEVICE_ID_TIGON3_5750 0x1676 #define PCI_DEVICE_ID_TIGON3_5751 0x1677 #define PCI_DEVICE_ID_TIGON3_5715 0x1678 #define PCI_DEVICE_ID_TIGON3_5715S 0x1679 #define PCI_DEVICE_ID_TIGON3_5754 0x167a +#define PCI_DEVICE_ID_TIGON3_5755 0x167b #define PCI_DEVICE_ID_TIGON3_5750M 0x167c #define PCI_DEVICE_ID_TIGON3_5751M 0x167d #define PCI_DEVICE_ID_TIGON3_5751F 0x167e diff --git a/include/linux/pm.h b/include/linux/pm.h index 5be87ba3b7a..6df2585c016 100644 --- a/include/linux/pm.h +++ b/include/linux/pm.h @@ -188,6 +188,8 @@ extern void device_power_up(void); extern void device_resume(void); #ifdef CONFIG_PM +extern suspend_disk_method_t pm_disk_mode; + extern int device_suspend(pm_message_t state); #define device_set_wakeup_enable(dev,val) \ @@ -215,7 +217,6 @@ static inline int dpm_runtime_suspend(struct device * dev, pm_message_t state) static inline void dpm_runtime_resume(struct device * dev) { - } #endif diff --git a/include/linux/profile.h b/include/linux/profile.h index 026969a5595..1f2fea6640a 100644 --- a/include/linux/profile.h +++ b/include/linux/profile.h @@ -14,6 +14,7 @@ struct proc_dir_entry; struct pt_regs; +struct notifier_block; /* init basic kernel profiler */ void __init profile_init(void); @@ -32,7 +33,6 @@ enum profile_type { #ifdef CONFIG_PROFILING -struct notifier_block; struct task_struct; struct mm_struct; diff --git a/include/linux/quota.h b/include/linux/quota.h index f33aeb22c26..8dc2d04a103 100644 --- a/include/linux/quota.h +++ b/include/linux/quota.h @@ -38,6 +38,7 @@ #include <linux/errno.h> #include <linux/types.h> #include <linux/spinlock.h> +#include <linux/mutex.h> #define __DQUOT_VERSION__ "dquot_6.5.1" #define __DQUOT_NUM_VERSION__ 6*10000+5*100+1 @@ -215,7 +216,7 @@ struct dquot { struct list_head dq_inuse; /* List of all quotas */ struct list_head dq_free; /* Free list element */ struct list_head dq_dirty; /* List of dirty dquots */ - struct semaphore dq_lock; /* dquot IO lock */ + struct mutex dq_lock; /* dquot IO lock */ atomic_t dq_count; /* Use count */ wait_queue_head_t dq_wait_unused; /* Wait queue for dquot to become unused */ struct super_block *dq_sb; /* superblock this applies to */ @@ -285,8 +286,8 @@ struct quota_format_type { struct quota_info { unsigned int flags; /* Flags for diskquotas on this device */ - struct semaphore dqio_sem; /* lock device while I/O in progress */ - struct semaphore dqonoff_sem; /* Serialize quotaon & quotaoff */ + struct mutex dqio_mutex; /* lock device while I/O in progress */ + struct mutex dqonoff_mutex; /* Serialize quotaon & quotaoff */ struct rw_semaphore dqptr_sem; /* serialize ops using quota_info struct, pointers from inode to dquots */ struct inode *files[MAXQUOTAS]; /* inodes of quotafiles */ struct mem_dqinfo info[MAXQUOTAS]; /* Information for each quota type */ diff --git a/include/linux/raid/raid1.h b/include/linux/raid/raid1.h index 9d5494aaac0..3009c813d83 100644 --- a/include/linux/raid/raid1.h +++ b/include/linux/raid/raid1.h @@ -130,6 +130,6 @@ struct r1bio_s { * with failure when last write completes (and all failed). * Record that bi_end_io was called with this flag... 
*/ -#define R1BIO_Returned 4 +#define R1BIO_Returned 6 #endif diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h index c2ec6c77874..5673008b61e 100644 --- a/include/linux/rcupdate.h +++ b/include/linux/rcupdate.h @@ -113,8 +113,6 @@ struct rcu_data { DECLARE_PER_CPU(struct rcu_data, rcu_data); DECLARE_PER_CPU(struct rcu_data, rcu_bh_data); -extern struct rcu_ctrlblk rcu_ctrlblk; -extern struct rcu_ctrlblk rcu_bh_ctrlblk; /* * Increment the quiescent state counter. diff --git a/include/linux/rtnetlink.h b/include/linux/rtnetlink.h index d572d537631..df0cdd41085 100644 --- a/include/linux/rtnetlink.h +++ b/include/linux/rtnetlink.h @@ -839,6 +839,7 @@ enum #define RTMGRP_IPV4_IFADDR 0x10 #define RTMGRP_IPV4_MROUTE 0x20 #define RTMGRP_IPV4_ROUTE 0x40 +#define RTMGRP_IPV4_RULE 0x80 #define RTMGRP_IPV6_IFADDR 0x100 #define RTMGRP_IPV6_MROUTE 0x200 @@ -869,7 +870,8 @@ enum rtnetlink_groups { #define RTNLGRP_IPV4_MROUTE RTNLGRP_IPV4_MROUTE RTNLGRP_IPV4_ROUTE, #define RTNLGRP_IPV4_ROUTE RTNLGRP_IPV4_ROUTE - RTNLGRP_NOP1, + RTNLGRP_IPV4_RULE, +#define RTNLGRP_IPV4_RULE RTNLGRP_IPV4_RULE RTNLGRP_IPV6_IFADDR, #define RTNLGRP_IPV6_IFADDR RTNLGRP_IPV6_IFADDR RTNLGRP_IPV6_MROUTE, diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h index 850a974ee50..b95f6eb7254 100644 --- a/include/linux/seq_file.h +++ b/include/linux/seq_file.h @@ -4,7 +4,7 @@ #include <linux/types.h> #include <linux/string.h> -#include <asm/semaphore.h> +#include <linux/mutex.h> struct seq_operations; struct file; @@ -19,7 +19,7 @@ struct seq_file { size_t count; loff_t index; loff_t version; - struct semaphore sem; + struct mutex lock; struct seq_operations *op; void *private; }; diff --git a/include/linux/swap.h b/include/linux/swap.h index 12415dd9445..54eac8a39a4 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h @@ -234,14 +234,15 @@ extern struct page * read_swap_cache_async(swp_entry_t, struct vm_area_struct *v /* linux/mm/swapfile.c */ extern long total_swap_pages; extern unsigned int nr_swapfiles; -extern struct swap_info_struct swap_info[]; extern void si_swapinfo(struct sysinfo *); extern swp_entry_t get_swap_page(void); -extern swp_entry_t get_swap_page_of_type(int type); +extern swp_entry_t get_swap_page_of_type(int); extern int swap_duplicate(swp_entry_t); extern int valid_swaphandles(swp_entry_t, unsigned long *); extern void swap_free(swp_entry_t); extern void free_swap_and_cache(swp_entry_t); +extern int swap_type_of(dev_t); +extern unsigned int count_swap_pages(int, int); extern sector_t map_swap_page(struct swap_info_struct *, pgoff_t); extern struct swap_info_struct *get_swap_info_struct(unsigned); extern int can_share_swap_page(struct page *); diff --git a/include/linux/tty.h b/include/linux/tty.h index f45cd74e6f2..f13f49afe19 100644 --- a/include/linux/tty.h +++ b/include/linux/tty.h @@ -24,6 +24,7 @@ #include <linux/tty_driver.h> #include <linux/tty_ldisc.h> #include <linux/screen_info.h> +#include <linux/mutex.h> #include <asm/system.h> @@ -231,8 +232,8 @@ struct tty_struct { int canon_data; unsigned long canon_head; unsigned int canon_column; - struct semaphore atomic_read; - struct semaphore atomic_write; + struct mutex atomic_read_lock; + struct mutex atomic_write_lock; unsigned char *write_buf; int write_cnt; spinlock_t read_lock; @@ -319,8 +320,7 @@ extern void tty_ldisc_put(int); extern void tty_wakeup(struct tty_struct *tty); extern void tty_ldisc_flush(struct tty_struct *tty); -struct semaphore; -extern struct semaphore tty_sem; +extern struct mutex tty_mutex; /* 
n_tty.c */ extern struct tty_ldisc tty_ldisc_N_TTY; diff --git a/include/linux/tty_flip.h b/include/linux/tty_flip.h index 222faf97d5f..0c6169fff36 100644 --- a/include/linux/tty_flip.h +++ b/include/linux/tty_flip.h @@ -7,14 +7,8 @@ extern int tty_insert_flip_string_flags(struct tty_struct *tty, unsigned char *c extern int tty_prepare_flip_string(struct tty_struct *tty, unsigned char **chars, size_t size); extern int tty_prepare_flip_string_flags(struct tty_struct *tty, unsigned char **chars, char **flags, size_t size); -#ifdef INCLUDE_INLINE_FUNCS -#define _INLINE_ extern -#else -#define _INLINE_ static __inline__ -#endif - -_INLINE_ int tty_insert_flip_char(struct tty_struct *tty, - unsigned char ch, char flag) +static inline int tty_insert_flip_char(struct tty_struct *tty, + unsigned char ch, char flag) { struct tty_buffer *tb = tty->buf.tail; if (tb && tb->active && tb->used < tb->size) { @@ -25,7 +19,7 @@ _INLINE_ int tty_insert_flip_char(struct tty_struct *tty, return tty_insert_flip_string_flags(tty, &ch, &flag, 1); } -_INLINE_ void tty_schedule_flip(struct tty_struct *tty) +static inline void tty_schedule_flip(struct tty_struct *tty) { unsigned long flags; spin_lock_irqsave(&tty->buf.lock, flags); diff --git a/include/linux/udf_fs_sb.h b/include/linux/udf_fs_sb.h index b15ff2e99c9..80ae9ef940d 100644 --- a/include/linux/udf_fs_sb.h +++ b/include/linux/udf_fs_sb.h @@ -13,7 +13,7 @@ #ifndef _UDF_FS_SB_H #define _UDF_FS_SB_H 1 -#include <asm/semaphore.h> +#include <linux/mutex.h> #pragma pack(1) @@ -111,7 +111,7 @@ struct udf_sb_info /* VAT inode */ struct inode *s_vat; - struct semaphore s_alloc_sem; + struct mutex s_alloc_mutex; }; #endif /* _UDF_FS_SB_H */ diff --git a/include/linux/vt_kern.h b/include/linux/vt_kern.h index fab5aed8ca3..530ae3f4248 100644 --- a/include/linux/vt_kern.h +++ b/include/linux/vt_kern.h @@ -73,6 +73,11 @@ int con_copy_unimap(struct vc_data *dst_vc, struct vc_data *src_vc); int vt_waitactive(int vt); void change_console(struct vc_data *new_vc); void reset_vc(struct vc_data *vc); +#ifdef CONFIG_VT +int is_console_suspend_safe(void); +#else +static inline int is_console_suspend_safe(void) { return 1; } +#endif /* * vc_screen.c shares this temporary buffer with the console write code so that diff --git a/init/do_mounts_initrd.c b/init/do_mounts_initrd.c index a05cabd0fd1..405f9031af8 100644 --- a/init/do_mounts_initrd.c +++ b/init/do_mounts_initrd.c @@ -56,6 +56,7 @@ static void __init handle_initrd(void) sys_chroot("."); mount_devfs_fs (); + current->flags |= PF_NOFREEZE; pid = kernel_thread(do_linuxrc, "/linuxrc", SIGCHLD); if (pid > 0) { while (pid != sys_wait4(-1, NULL, 0, NULL)) diff --git a/init/main.c b/init/main.c index 4c194c47395..2714e0e7cfe 100644 --- a/init/main.c +++ b/init/main.c @@ -325,7 +325,7 @@ static inline void smp_prepare_cpus(unsigned int maxcpus) { } #else #ifdef __GENERIC_PER_CPU -unsigned long __per_cpu_offset[NR_CPUS]; +unsigned long __per_cpu_offset[NR_CPUS] __read_mostly; EXPORT_SYMBOL(__per_cpu_offset); @@ -333,6 +333,7 @@ static void __init setup_per_cpu_areas(void) { unsigned long size, i; char *ptr; + unsigned long nr_possible_cpus = num_possible_cpus(); /* Copy section for each CPU (we discard the original) */ size = ALIGN(__per_cpu_end - __per_cpu_start, SMP_CACHE_BYTES); @@ -340,12 +341,12 @@ static void __init setup_per_cpu_areas(void) if (size < PERCPU_ENOUGH_ROOM) size = PERCPU_ENOUGH_ROOM; #endif + ptr = alloc_bootmem(size * nr_possible_cpus); - ptr = alloc_bootmem(size * NR_CPUS); - - for (i = 0; i < NR_CPUS; i++, 
ptr += size) { + for_each_cpu(i) { __per_cpu_offset[i] = ptr - __per_cpu_start; memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start); + ptr += size; } } #endif /* !__GENERIC_PER_CPU */ @@ -438,6 +439,15 @@ void __init parse_early_param(void) * Activate the first processor. */ +static void __init boot_cpu_init(void) +{ + int cpu = smp_processor_id(); + /* Mark the boot cpu "present", "online" etc for SMP and UP case */ + cpu_set(cpu, cpu_online_map); + cpu_set(cpu, cpu_present_map); + cpu_set(cpu, cpu_possible_map); +} + asmlinkage void __init start_kernel(void) { char * command_line; @@ -447,17 +457,13 @@ asmlinkage void __init start_kernel(void) * enable them */ lock_kernel(); + boot_cpu_init(); page_address_init(); printk(KERN_NOTICE); printk(linux_banner); setup_arch(&command_line); setup_per_cpu_areas(); - - /* - * Mark the boot cpu "online" so that it can call console drivers in - * printk() and can access its per-cpu storage. - */ - smp_prepare_boot_cpu(); + smp_prepare_boot_cpu(); /* arch-specific boot-cpu hooks */ /* * Set up the scheduler prior starting any interrupts (such as the diff --git a/kernel/cpuset.c b/kernel/cpuset.c index 12815d3f1a0..c86ee051b73 100644 --- a/kernel/cpuset.c +++ b/kernel/cpuset.c @@ -53,7 +53,7 @@ #include <asm/uaccess.h> #include <asm/atomic.h> -#include <asm/semaphore.h> +#include <linux/mutex.h> #define CPUSET_SUPER_MAGIC 0x27e0eb @@ -168,63 +168,57 @@ static struct vfsmount *cpuset_mount; static struct super_block *cpuset_sb; /* - * We have two global cpuset semaphores below. They can nest. - * It is ok to first take manage_sem, then nest callback_sem. We also + * We have two global cpuset mutexes below. They can nest. + * It is ok to first take manage_mutex, then nest callback_mutex. We also * require taking task_lock() when dereferencing a tasks cpuset pointer. * See "The task_lock() exception", at the end of this comment. * - * A task must hold both semaphores to modify cpusets. If a task - * holds manage_sem, then it blocks others wanting that semaphore, - * ensuring that it is the only task able to also acquire callback_sem + * A task must hold both mutexes to modify cpusets. If a task + * holds manage_mutex, then it blocks others wanting that mutex, + * ensuring that it is the only task able to also acquire callback_mutex * and be able to modify cpusets. It can perform various checks on * the cpuset structure first, knowing nothing will change. It can - * also allocate memory while just holding manage_sem. While it is + * also allocate memory while just holding manage_mutex. While it is * performing these checks, various callback routines can briefly - * acquire callback_sem to query cpusets. Once it is ready to make - * the changes, it takes callback_sem, blocking everyone else. + * acquire callback_mutex to query cpusets. Once it is ready to make + * the changes, it takes callback_mutex, blocking everyone else. * * Calls to the kernel memory allocator can not be made while holding - * callback_sem, as that would risk double tripping on callback_sem + * callback_mutex, as that would risk double tripping on callback_mutex * from one of the callbacks into the cpuset code from within * __alloc_pages(). * - * If a task is only holding callback_sem, then it has read-only + * If a task is only holding callback_mutex, then it has read-only * access to cpusets. * * The task_struct fields mems_allowed and mems_generation may only * be accessed in the context of that task, so require no locks. 
* * Any task can increment and decrement the count field without lock. - * So in general, code holding manage_sem or callback_sem can't rely + * So in general, code holding manage_mutex or callback_mutex can't rely * on the count field not changing. However, if the count goes to - * zero, then only attach_task(), which holds both semaphores, can + * zero, then only attach_task(), which holds both mutexes, can * increment it again. Because a count of zero means that no tasks * are currently attached, therefore there is no way a task attached * to that cpuset can fork (the other way to increment the count). - * So code holding manage_sem or callback_sem can safely assume that + * So code holding manage_mutex or callback_mutex can safely assume that * if the count is zero, it will stay zero. Similarly, if a task - * holds manage_sem or callback_sem on a cpuset with zero count, it + * holds manage_mutex or callback_mutex on a cpuset with zero count, it * knows that the cpuset won't be removed, as cpuset_rmdir() needs - * both of those semaphores. - * - * A possible optimization to improve parallelism would be to make - * callback_sem a R/W semaphore (rwsem), allowing the callback routines - * to proceed in parallel, with read access, until the holder of - * manage_sem needed to take this rwsem for exclusive write access - * and modify some cpusets. + * both of those mutexes. * * The cpuset_common_file_write handler for operations that modify - * the cpuset hierarchy holds manage_sem across the entire operation, + * the cpuset hierarchy holds manage_mutex across the entire operation, * single threading all such cpuset modifications across the system. * - * The cpuset_common_file_read() handlers only hold callback_sem across + * The cpuset_common_file_read() handlers only hold callback_mutex across * small pieces of code, such as when reading out possibly multi-word * cpumasks and nodemasks. * * The fork and exit callbacks cpuset_fork() and cpuset_exit(), don't - * (usually) take either semaphore. These are the two most performance + * (usually) take either mutex. These are the two most performance * critical pieces of code here. The exception occurs on cpuset_exit(), - * when a task in a notify_on_release cpuset exits. Then manage_sem + * when a task in a notify_on_release cpuset exits. Then manage_mutex * is taken, and if the cpuset count is zero, a usermode call made * to /sbin/cpuset_release_agent with the name of the cpuset (path * relative to the root of cpuset file system) as the argument. @@ -242,9 +236,9 @@ static struct super_block *cpuset_sb; * * The need for this exception arises from the action of attach_task(), * which overwrites one tasks cpuset pointer with another. It does - * so using both semaphores, however there are several performance + * so using both mutexes, however there are several performance * critical places that need to reference task->cpuset without the - * expense of grabbing a system global semaphore. Therefore except as + * expense of grabbing a system global mutex. Therefore except as * noted below, when dereferencing or, as in attach_task(), modifying * a tasks cpuset pointer we use task_lock(), which acts on a spinlock * (task->alloc_lock) already in the task_struct routinely used for @@ -256,8 +250,8 @@ static struct super_block *cpuset_sb; * the routine cpuset_update_task_memory_state(). 
*/ -static DECLARE_MUTEX(manage_sem); -static DECLARE_MUTEX(callback_sem); +static DEFINE_MUTEX(manage_mutex); +static DEFINE_MUTEX(callback_mutex); /* * A couple of forward declarations required, due to cyclic reference loop: @@ -432,7 +426,7 @@ static inline struct cftype *__d_cft(struct dentry *dentry) } /* - * Call with manage_sem held. Writes path of cpuset into buf. + * Call with manage_mutex held. Writes path of cpuset into buf. * Returns 0 on success, -errno on error. */ @@ -484,11 +478,11 @@ static int cpuset_path(const struct cpuset *cs, char *buf, int buflen) * status of the /sbin/cpuset_release_agent task, so no sense holding * our caller up for that. * - * When we had only one cpuset semaphore, we had to call this + * When we had only one cpuset mutex, we had to call this * without holding it, to avoid deadlock when call_usermodehelper() * allocated memory. With two locks, we could now call this while - * holding manage_sem, but we still don't, so as to minimize - * the time manage_sem is held. + * holding manage_mutex, but we still don't, so as to minimize + * the time manage_mutex is held. */ static void cpuset_release_agent(const char *pathbuf) @@ -520,15 +514,15 @@ static void cpuset_release_agent(const char *pathbuf) * cs is notify_on_release() and now both the user count is zero and * the list of children is empty, prepare cpuset path in a kmalloc'd * buffer, to be returned via ppathbuf, so that the caller can invoke - * cpuset_release_agent() with it later on, once manage_sem is dropped. - * Call here with manage_sem held. + * cpuset_release_agent() with it later on, once manage_mutex is dropped. + * Call here with manage_mutex held. * * This check_for_release() routine is responsible for kmalloc'ing * pathbuf. The above cpuset_release_agent() is responsible for * kfree'ing pathbuf. The caller of these routines is responsible * for providing a pathbuf pointer, initialized to NULL, then - * calling check_for_release() with manage_sem held and the address - * of the pathbuf pointer, then dropping manage_sem, then calling + * calling check_for_release() with manage_mutex held and the address + * of the pathbuf pointer, then dropping manage_mutex, then calling * cpuset_release_agent() with pathbuf, as set by check_for_release(). */ @@ -559,7 +553,7 @@ static void check_for_release(struct cpuset *cs, char **ppathbuf) * One way or another, we guarantee to return some non-empty subset * of cpu_online_map. * - * Call with callback_sem held. + * Call with callback_mutex held. */ static void guarantee_online_cpus(const struct cpuset *cs, cpumask_t *pmask) @@ -583,7 +577,7 @@ static void guarantee_online_cpus(const struct cpuset *cs, cpumask_t *pmask) * One way or another, we guarantee to return some non-empty subset * of node_online_map. * - * Call with callback_sem held. + * Call with callback_mutex held. */ static void guarantee_online_mems(const struct cpuset *cs, nodemask_t *pmask) @@ -608,12 +602,12 @@ static void guarantee_online_mems(const struct cpuset *cs, nodemask_t *pmask) * current->cpuset if a task has its memory placement changed. * Do not call this routine if in_interrupt(). * - * Call without callback_sem or task_lock() held. May be called - * with or without manage_sem held. Doesn't need task_lock to guard + * Call without callback_mutex or task_lock() held. May be called + * with or without manage_mutex held. 
Doesn't need task_lock to guard * against another task changing a non-NULL cpuset pointer to NULL, * as that is only done by a task on itself, and if the current task * is here, it is not simultaneously in the exit code NULL'ing its - * cpuset pointer. This routine also might acquire callback_sem and + * cpuset pointer. This routine also might acquire callback_mutex and * current->mm->mmap_sem during call. * * Reading current->cpuset->mems_generation doesn't need task_lock @@ -658,13 +652,13 @@ void cpuset_update_task_memory_state(void) } if (my_cpusets_mem_gen != tsk->cpuset_mems_generation) { - down(&callback_sem); + mutex_lock(&callback_mutex); task_lock(tsk); cs = tsk->cpuset; /* Maybe changed when task not locked */ guarantee_online_mems(cs, &tsk->mems_allowed); tsk->cpuset_mems_generation = cs->mems_generation; task_unlock(tsk); - up(&callback_sem); + mutex_unlock(&callback_mutex); mpol_rebind_task(tsk, &tsk->mems_allowed); } } @@ -674,7 +668,7 @@ void cpuset_update_task_memory_state(void) * * One cpuset is a subset of another if all its allowed CPUs and * Memory Nodes are a subset of the other, and its exclusive flags - * are only set if the other's are set. Call holding manage_sem. + * are only set if the other's are set. Call holding manage_mutex. */ static int is_cpuset_subset(const struct cpuset *p, const struct cpuset *q) @@ -692,7 +686,7 @@ static int is_cpuset_subset(const struct cpuset *p, const struct cpuset *q) * If we replaced the flag and mask values of the current cpuset * (cur) with those values in the trial cpuset (trial), would * our various subset and exclusive rules still be valid? Presumes - * manage_sem held. + * manage_mutex held. * * 'cur' is the address of an actual, in-use cpuset. Operations * such as list traversal that depend on the actual address of the @@ -746,7 +740,7 @@ static int validate_change(const struct cpuset *cur, const struct cpuset *trial) * exclusive child cpusets * Build these two partitions by calling partition_sched_domains * - * Call with manage_sem held. May nest a call to the + * Call with manage_mutex held. May nest a call to the * lock_cpu_hotplug()/unlock_cpu_hotplug() pair. */ @@ -792,7 +786,7 @@ static void update_cpu_domains(struct cpuset *cur) } /* - * Call with manage_sem held. May take callback_sem during call. + * Call with manage_mutex held. May take callback_mutex during call. */ static int update_cpumask(struct cpuset *cs, char *buf) @@ -811,9 +805,9 @@ static int update_cpumask(struct cpuset *cs, char *buf) if (retval < 0) return retval; cpus_unchanged = cpus_equal(cs->cpus_allowed, trialcs.cpus_allowed); - down(&callback_sem); + mutex_lock(&callback_mutex); cs->cpus_allowed = trialcs.cpus_allowed; - up(&callback_sem); + mutex_unlock(&callback_mutex); if (is_cpu_exclusive(cs) && !cpus_unchanged) update_cpu_domains(cs); return 0; @@ -827,7 +821,7 @@ static int update_cpumask(struct cpuset *cs, char *buf) * the cpuset is marked 'memory_migrate', migrate the tasks * pages to the new memory. * - * Call with manage_sem held. May take callback_sem during call. + * Call with manage_mutex held. May take callback_mutex during call. * Will take tasklist_lock, scan tasklist for tasks in cpuset cs, * lock each such tasks mm->mmap_sem, scan its vma's and rebind * their mempolicies to the cpusets new mems_allowed. 
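The update helpers in this file all follow the two-level pattern that the locking comment above spells out: manage_mutex is held across the whole operation, callback_mutex only around the actual update of the cpuset's fields. A minimal sketch of that pattern (illustration only, not part of this patch; the helper name is invented):

static int example_update_cpus(struct cpuset *cs, cpumask_t new_cpus)
{
	mutex_lock(&manage_mutex);	/* single-threads all cpuset writers */
	/* validation and memory allocation may happen here ... */
	mutex_lock(&callback_mutex);	/* ... but never under this lock */
	cs->cpus_allowed = new_cpus;
	mutex_unlock(&callback_mutex);
	mutex_unlock(&manage_mutex);
	return 0;
}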
@@ -862,11 +856,11 @@ static int update_nodemask(struct cpuset *cs, char *buf) if (retval < 0) goto done; - down(&callback_sem); + mutex_lock(&callback_mutex); cs->mems_allowed = trialcs.mems_allowed; atomic_inc(&cpuset_mems_generation); cs->mems_generation = atomic_read(&cpuset_mems_generation); - up(&callback_sem); + mutex_unlock(&callback_mutex); set_cpuset_being_rebound(cs); /* causes mpol_copy() rebind */ @@ -922,7 +916,7 @@ static int update_nodemask(struct cpuset *cs, char *buf) * tasklist_lock. Forks can happen again now - the mpol_copy() * cpuset_being_rebound check will catch such forks, and rebind * their vma mempolicies too. Because we still hold the global - * cpuset manage_sem, we know that no other rebind effort will + * cpuset manage_mutex, we know that no other rebind effort will * be contending for the global variable cpuset_being_rebound. * It's ok if we rebind the same mm twice; mpol_rebind_mm() * is idempotent. Also migrate pages in each mm to new nodes. @@ -948,7 +942,7 @@ done: } /* - * Call with manage_sem held. + * Call with manage_mutex held. */ static int update_memory_pressure_enabled(struct cpuset *cs, char *buf) @@ -967,7 +961,7 @@ static int update_memory_pressure_enabled(struct cpuset *cs, char *buf) * cs: the cpuset to update * buf: the buffer where we read the 0 or 1 * - * Call with manage_sem held. + * Call with manage_mutex held. */ static int update_flag(cpuset_flagbits_t bit, struct cpuset *cs, char *buf) @@ -989,12 +983,12 @@ static int update_flag(cpuset_flagbits_t bit, struct cpuset *cs, char *buf) return err; cpu_exclusive_changed = (is_cpu_exclusive(cs) != is_cpu_exclusive(&trialcs)); - down(&callback_sem); + mutex_lock(&callback_mutex); if (turning_on) set_bit(bit, &cs->flags); else clear_bit(bit, &cs->flags); - up(&callback_sem); + mutex_unlock(&callback_mutex); if (cpu_exclusive_changed) update_cpu_domains(cs); @@ -1104,7 +1098,7 @@ static int fmeter_getrate(struct fmeter *fmp) * writing the path of the old cpuset in 'ppathbuf' if it needs to be * notified on release. * - * Call holding manage_sem. May take callback_sem and task_lock of + * Call holding manage_mutex. May take callback_mutex and task_lock of * the task 'pid' during call. 
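 *
 * The resulting lock nesting, outermost first, looks like this
 * (sketch, not part of this patch):
 *
 *	manage_mutex		held by the caller, cpuset_common_file_write()
 *	  callback_mutex	taken here, around the cpuset pointer switch
 *	    task_lock(tsk)	spinlock guarding tsk->cpuset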
*/ @@ -1144,13 +1138,13 @@ static int attach_task(struct cpuset *cs, char *pidbuf, char **ppathbuf) get_task_struct(tsk); } - down(&callback_sem); + mutex_lock(&callback_mutex); task_lock(tsk); oldcs = tsk->cpuset; if (!oldcs) { task_unlock(tsk); - up(&callback_sem); + mutex_unlock(&callback_mutex); put_task_struct(tsk); return -ESRCH; } @@ -1164,7 +1158,7 @@ static int attach_task(struct cpuset *cs, char *pidbuf, char **ppathbuf) from = oldcs->mems_allowed; to = cs->mems_allowed; - up(&callback_sem); + mutex_unlock(&callback_mutex); mm = get_task_mm(tsk); if (mm) { @@ -1221,7 +1215,7 @@ static ssize_t cpuset_common_file_write(struct file *file, const char __user *us } buffer[nbytes] = 0; /* nul-terminate */ - down(&manage_sem); + mutex_lock(&manage_mutex); if (is_removed(cs)) { retval = -ENODEV; @@ -1264,7 +1258,7 @@ static ssize_t cpuset_common_file_write(struct file *file, const char __user *us if (retval == 0) retval = nbytes; out2: - up(&manage_sem); + mutex_unlock(&manage_mutex); cpuset_release_agent(pathbuf); out1: kfree(buffer); @@ -1304,9 +1298,9 @@ static int cpuset_sprintf_cpulist(char *page, struct cpuset *cs) { cpumask_t mask; - down(&callback_sem); + mutex_lock(&callback_mutex); mask = cs->cpus_allowed; - up(&callback_sem); + mutex_unlock(&callback_mutex); return cpulist_scnprintf(page, PAGE_SIZE, mask); } @@ -1315,9 +1309,9 @@ static int cpuset_sprintf_memlist(char *page, struct cpuset *cs) { nodemask_t mask; - down(&callback_sem); + mutex_lock(&callback_mutex); mask = cs->mems_allowed; - up(&callback_sem); + mutex_unlock(&callback_mutex); return nodelist_scnprintf(page, PAGE_SIZE, mask); } @@ -1598,7 +1592,7 @@ static int pid_array_to_buf(char *buf, int sz, pid_t *a, int npids) * Handle an open on 'tasks' file. Prepare a buffer listing the * process id's of tasks currently attached to the cpuset being opened. * - * Does not require any specific cpuset semaphores, and does not take any. + * Does not require any specific cpuset mutexes, and does not take any. */ static int cpuset_tasks_open(struct inode *unused, struct file *file) { @@ -1754,7 +1748,7 @@ static int cpuset_populate_dir(struct dentry *cs_dentry) * name: name of the new cpuset. Will be strcpy'ed. * mode: mode to set on new inode * - * Must be called with the semaphore on the parent inode held + * Must be called with the mutex on the parent inode held */ static long cpuset_create(struct cpuset *parent, const char *name, int mode) @@ -1766,7 +1760,7 @@ static long cpuset_create(struct cpuset *parent, const char *name, int mode) if (!cs) return -ENOMEM; - down(&manage_sem); + mutex_lock(&manage_mutex); cpuset_update_task_memory_state(); cs->flags = 0; if (notify_on_release(parent)) @@ -1782,28 +1776,28 @@ static long cpuset_create(struct cpuset *parent, const char *name, int mode) cs->parent = parent; - down(&callback_sem); + mutex_lock(&callback_mutex); list_add(&cs->sibling, &cs->parent->children); number_of_cpusets++; - up(&callback_sem); + mutex_unlock(&callback_mutex); err = cpuset_create_dir(cs, name, mode); if (err < 0) goto err; /* - * Release manage_sem before cpuset_populate_dir() because it + * Release manage_mutex before cpuset_populate_dir() because it * will down() this new directory's i_mutex and if we race with * another mkdir, we might deadlock. 
*/ - up(&manage_sem); + mutex_unlock(&manage_mutex); err = cpuset_populate_dir(cs->dentry); /* If err < 0, we have a half-filled directory - oh well ;) */ return 0; err: list_del(&cs->sibling); - up(&manage_sem); + mutex_unlock(&manage_mutex); kfree(cs); return err; } @@ -1825,18 +1819,18 @@ static int cpuset_rmdir(struct inode *unused_dir, struct dentry *dentry) /* the vfs holds both inode->i_mutex already */ - down(&manage_sem); + mutex_lock(&manage_mutex); cpuset_update_task_memory_state(); if (atomic_read(&cs->count) > 0) { - up(&manage_sem); + mutex_unlock(&manage_mutex); return -EBUSY; } if (!list_empty(&cs->children)) { - up(&manage_sem); + mutex_unlock(&manage_mutex); return -EBUSY; } parent = cs->parent; - down(&callback_sem); + mutex_lock(&callback_mutex); set_bit(CS_REMOVED, &cs->flags); if (is_cpu_exclusive(cs)) update_cpu_domains(cs); @@ -1848,10 +1842,10 @@ static int cpuset_rmdir(struct inode *unused_dir, struct dentry *dentry) cpuset_d_remove_dir(d); dput(d); number_of_cpusets--; - up(&callback_sem); + mutex_unlock(&callback_mutex); if (list_empty(&parent->children)) check_for_release(parent, &pathbuf); - up(&manage_sem); + mutex_unlock(&manage_mutex); cpuset_release_agent(pathbuf); return 0; } @@ -1960,19 +1954,19 @@ void cpuset_fork(struct task_struct *child) * Description: Detach cpuset from @tsk and release it. * * Note that cpusets marked notify_on_release force every task in - * them to take the global manage_sem semaphore when exiting. + * them to take the global manage_mutex mutex when exiting. * This could impact scaling on very large systems. Be reluctant to * use notify_on_release cpusets where very high task exit scaling * is required on large systems. * * Don't even think about derefencing 'cs' after the cpuset use count - * goes to zero, except inside a critical section guarded by manage_sem - * or callback_sem. Otherwise a zero cpuset use count is a license to + * goes to zero, except inside a critical section guarded by manage_mutex + * or callback_mutex. Otherwise a zero cpuset use count is a license to * any other task to nuke the cpuset immediately, via cpuset_rmdir(). * - * This routine has to take manage_sem, not callback_sem, because - * it is holding that semaphore while calling check_for_release(), - * which calls kmalloc(), so can't be called holding callback__sem(). + * This routine has to take manage_mutex, not callback_mutex, because + * it is holding that mutex while calling check_for_release(), + * which calls kmalloc(), so can't be called holding callback_mutex(). 
* * We don't need to task_lock() this reference to tsk->cpuset, * because tsk is already marked PF_EXITING, so attach_task() won't @@ -2022,10 +2016,10 @@ void cpuset_exit(struct task_struct *tsk) if (notify_on_release(cs)) { char *pathbuf = NULL; - down(&manage_sem); + mutex_lock(&manage_mutex); if (atomic_dec_and_test(&cs->count)) check_for_release(cs, &pathbuf); - up(&manage_sem); + mutex_unlock(&manage_mutex); cpuset_release_agent(pathbuf); } else { atomic_dec(&cs->count); @@ -2046,11 +2040,11 @@ cpumask_t cpuset_cpus_allowed(struct task_struct *tsk) { cpumask_t mask; - down(&callback_sem); + mutex_lock(&callback_mutex); task_lock(tsk); guarantee_online_cpus(tsk->cpuset, &mask); task_unlock(tsk); - up(&callback_sem); + mutex_unlock(&callback_mutex); return mask; } @@ -2074,11 +2068,11 @@ nodemask_t cpuset_mems_allowed(struct task_struct *tsk) { nodemask_t mask; - down(&callback_sem); + mutex_lock(&callback_mutex); task_lock(tsk); guarantee_online_mems(tsk->cpuset, &mask); task_unlock(tsk); - up(&callback_sem); + mutex_unlock(&callback_mutex); return mask; } @@ -2104,7 +2098,7 @@ int cpuset_zonelist_valid_mems_allowed(struct zonelist *zl) /* * nearest_exclusive_ancestor() - Returns the nearest mem_exclusive - * ancestor to the specified cpuset. Call holding callback_sem. + * ancestor to the specified cpuset. Call holding callback_mutex. * If no ancestor is mem_exclusive (an unusual configuration), then * returns the root cpuset. */ @@ -2131,12 +2125,12 @@ static const struct cpuset *nearest_exclusive_ancestor(const struct cpuset *cs) * GFP_KERNEL allocations are not so marked, so can escape to the * nearest mem_exclusive ancestor cpuset. * - * Scanning up parent cpusets requires callback_sem. The __alloc_pages() + * Scanning up parent cpusets requires callback_mutex. The __alloc_pages() * routine only calls here with __GFP_HARDWALL bit _not_ set if * it's a GFP_KERNEL allocation, and all nodes in the current tasks * mems_allowed came up empty on the first pass over the zonelist. * So only GFP_KERNEL allocations, if all nodes in the cpuset are - * short of memory, might require taking the callback_sem semaphore. + * short of memory, might require taking the callback_mutex mutex. * * The first loop over the zonelist in mm/page_alloc.c:__alloc_pages() * calls here with __GFP_HARDWALL always set in gfp_mask, enforcing @@ -2171,31 +2165,31 @@ int __cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask) return 1; /* Not hardwall and node outside mems_allowed: scan up cpusets */ - down(&callback_sem); + mutex_lock(&callback_mutex); task_lock(current); cs = nearest_exclusive_ancestor(current->cpuset); task_unlock(current); allowed = node_isset(node, cs->mems_allowed); - up(&callback_sem); + mutex_unlock(&callback_mutex); return allowed; } /** * cpuset_lock - lock out any changes to cpuset structures * - * The out of memory (oom) code needs to lock down cpusets + * The out of memory (oom) code needs to mutex_lock cpusets * from being changed while it scans the tasklist looking for a - * task in an overlapping cpuset. Expose callback_sem via this + * task in an overlapping cpuset. Expose callback_mutex via this * cpuset_lock() routine, so the oom code can lock it, before * locking the task list. The tasklist_lock is a spinlock, so - * must be taken inside callback_sem. + * must be taken inside callback_mutex. 
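 *
 * The oom path is therefore expected to nest the locks like this
 * (illustrative sketch, not part of this patch):
 *
 *	cpuset_lock();
 *	read_lock(&tasklist_lock);
 *	... scan tasks, e.g. cpuset_excl_nodes_overlap(p) ...
 *	read_unlock(&tasklist_lock);
 *	cpuset_unlock();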
*/ void cpuset_lock(void) { - down(&callback_sem); + mutex_lock(&callback_mutex); } /** @@ -2206,7 +2200,7 @@ void cpuset_lock(void) void cpuset_unlock(void) { - up(&callback_sem); + mutex_unlock(&callback_mutex); } /** @@ -2218,7 +2212,7 @@ void cpuset_unlock(void) * determine if task @p's memory usage might impact the memory * available to the current task. * - * Call while holding callback_sem. + * Call while holding callback_mutex. **/ int cpuset_excl_nodes_overlap(const struct task_struct *p) @@ -2289,7 +2283,7 @@ void __cpuset_memory_pressure_bump(void) * - Used for /proc/<pid>/cpuset. * - No need to task_lock(tsk) on this tsk->cpuset reference, as it * doesn't really matter if tsk->cpuset changes after we read it, - * and we take manage_sem, keeping attach_task() from changing it + * and we take manage_mutex, keeping attach_task() from changing it * anyway. */ @@ -2305,7 +2299,7 @@ static int proc_cpuset_show(struct seq_file *m, void *v) return -ENOMEM; tsk = m->private; - down(&manage_sem); + mutex_lock(&manage_mutex); cs = tsk->cpuset; if (!cs) { retval = -EINVAL; @@ -2318,7 +2312,7 @@ static int proc_cpuset_show(struct seq_file *m, void *v) seq_puts(m, buf); seq_putc(m, '\n'); out: - up(&manage_sem); + mutex_unlock(&manage_mutex); kfree(buf); return retval; } diff --git a/kernel/exit.c b/kernel/exit.c index d1e8d500a7e..8037405e136 100644 --- a/kernel/exit.c +++ b/kernel/exit.c @@ -345,9 +345,9 @@ void daemonize(const char *name, ...) exit_mm(current); set_special_pids(1, 1); - down(&tty_sem); + mutex_lock(&tty_mutex); current->signal->tty = NULL; - up(&tty_sem); + mutex_unlock(&tty_mutex); /* Block and flush all signals */ sigfillset(&blocked); diff --git a/kernel/fork.c b/kernel/fork.c index 9bd7b65ee41..c79ae0b19a4 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -607,12 +607,12 @@ static struct files_struct *alloc_files(void) atomic_set(&newf->count, 1); spin_lock_init(&newf->file_lock); + newf->next_fd = 0; fdt = &newf->fdtab; - fdt->next_fd = 0; fdt->max_fds = NR_OPEN_DEFAULT; - fdt->max_fdset = __FD_SETSIZE; - fdt->close_on_exec = &newf->close_on_exec_init; - fdt->open_fds = &newf->open_fds_init; + fdt->max_fdset = EMBEDDED_FD_SET_SIZE; + fdt->close_on_exec = (fd_set *)&newf->close_on_exec_init; + fdt->open_fds = (fd_set *)&newf->open_fds_init; fdt->fd = &newf->fd_array[0]; INIT_RCU_HEAD(&fdt->rcu); fdt->free_files = NULL; diff --git a/kernel/kprobes.c b/kernel/kprobes.c index fef1af8a73c..1fb9f753ef6 100644 --- a/kernel/kprobes.c +++ b/kernel/kprobes.c @@ -48,7 +48,7 @@ static struct hlist_head kprobe_table[KPROBE_TABLE_SIZE]; static struct hlist_head kretprobe_inst_table[KPROBE_TABLE_SIZE]; -DECLARE_MUTEX(kprobe_mutex); /* Protects kprobe_table */ +DEFINE_MUTEX(kprobe_mutex); /* Protects kprobe_table */ DEFINE_SPINLOCK(kretprobe_lock); /* Protects kretprobe_inst_table */ static DEFINE_PER_CPU(struct kprobe *, kprobe_instance) = NULL; @@ -460,7 +460,7 @@ static int __kprobes __register_kprobe(struct kprobe *p, } p->nmissed = 0; - down(&kprobe_mutex); + mutex_lock(&kprobe_mutex); old_p = get_kprobe(p->addr); if (old_p) { ret = register_aggr_kprobe(old_p, p); @@ -477,7 +477,7 @@ static int __kprobes __register_kprobe(struct kprobe *p, arch_arm_kprobe(p); out: - up(&kprobe_mutex); + mutex_unlock(&kprobe_mutex); if (ret && probed_mod) module_put(probed_mod); @@ -496,10 +496,10 @@ void __kprobes unregister_kprobe(struct kprobe *p) struct kprobe *old_p, *list_p; int cleanup_p; - down(&kprobe_mutex); + mutex_lock(&kprobe_mutex); old_p = get_kprobe(p->addr); if 
(unlikely(!old_p)) { - up(&kprobe_mutex); + mutex_unlock(&kprobe_mutex); return; } if (p != old_p) { @@ -507,7 +507,7 @@ void __kprobes unregister_kprobe(struct kprobe *p) if (list_p == p) /* kprobe p is a valid probe */ goto valid_p; - up(&kprobe_mutex); + mutex_unlock(&kprobe_mutex); return; } valid_p: @@ -523,7 +523,7 @@ valid_p: cleanup_p = 0; } - up(&kprobe_mutex); + mutex_unlock(&kprobe_mutex); synchronize_sched(); if (p->mod_refcounted && diff --git a/kernel/kthread.c b/kernel/kthread.c index e75950a1092..6a5373868a9 100644 --- a/kernel/kthread.c +++ b/kernel/kthread.c @@ -12,6 +12,7 @@ #include <linux/unistd.h> #include <linux/file.h> #include <linux/module.h> +#include <linux/mutex.h> #include <asm/semaphore.h> /* @@ -41,7 +42,7 @@ struct kthread_stop_info /* Thread stopping is done by setthing this var: lock serializes * multiple kthread_stop calls. */ -static DECLARE_MUTEX(kthread_stop_lock); +static DEFINE_MUTEX(kthread_stop_lock); static struct kthread_stop_info kthread_stop_info; int kthread_should_stop(void) @@ -173,7 +174,7 @@ int kthread_stop_sem(struct task_struct *k, struct semaphore *s) { int ret; - down(&kthread_stop_lock); + mutex_lock(&kthread_stop_lock); /* It could exit after stop_info.k set, but before wake_up_process. */ get_task_struct(k); @@ -194,7 +195,7 @@ int kthread_stop_sem(struct task_struct *k, struct semaphore *s) wait_for_completion(&kthread_stop_info.done); kthread_stop_info.k = NULL; ret = kthread_stop_info.err; - up(&kthread_stop_lock); + mutex_unlock(&kthread_stop_lock); return ret; } diff --git a/kernel/module.c b/kernel/module.c index 77764f22f02..fb404299082 100644 --- a/kernel/module.c +++ b/kernel/module.c @@ -39,6 +39,7 @@ #include <linux/device.h> #include <linux/string.h> #include <linux/sched.h> +#include <linux/mutex.h> #include <asm/uaccess.h> #include <asm/semaphore.h> #include <asm/cacheflush.h> @@ -60,18 +61,18 @@ static DEFINE_SPINLOCK(modlist_lock); /* List of modules, protected by module_mutex AND modlist_lock */ -static DECLARE_MUTEX(module_mutex); +static DEFINE_MUTEX(module_mutex); static LIST_HEAD(modules); -static DECLARE_MUTEX(notify_mutex); +static DEFINE_MUTEX(notify_mutex); static struct notifier_block * module_notify_list; int register_module_notifier(struct notifier_block * nb) { int err; - down(&notify_mutex); + mutex_lock(&notify_mutex); err = notifier_chain_register(&module_notify_list, nb); - up(&notify_mutex); + mutex_unlock(&notify_mutex); return err; } EXPORT_SYMBOL(register_module_notifier); @@ -79,9 +80,9 @@ EXPORT_SYMBOL(register_module_notifier); int unregister_module_notifier(struct notifier_block * nb) { int err; - down(&notify_mutex); + mutex_lock(&notify_mutex); err = notifier_chain_unregister(&module_notify_list, nb); - up(&notify_mutex); + mutex_unlock(&notify_mutex); return err; } EXPORT_SYMBOL(unregister_module_notifier); @@ -601,7 +602,7 @@ static void free_module(struct module *mod); static void wait_for_zero_refcount(struct module *mod) { /* Since we might sleep for some time, drop the semaphore first */ - up(&module_mutex); + mutex_unlock(&module_mutex); for (;;) { DEBUGP("Looking at refcount...\n"); set_current_state(TASK_UNINTERRUPTIBLE); @@ -610,7 +611,7 @@ static void wait_for_zero_refcount(struct module *mod) schedule(); } current->state = TASK_RUNNING; - down(&module_mutex); + mutex_lock(&module_mutex); } asmlinkage long @@ -627,7 +628,7 @@ sys_delete_module(const char __user *name_user, unsigned int flags) return -EFAULT; name[MODULE_NAME_LEN-1] = '\0'; - if (down_interruptible(&module_mutex) != 0) + if 
(mutex_lock_interruptible(&module_mutex) != 0) return -EINTR; mod = find_module(name); @@ -676,14 +677,14 @@ sys_delete_module(const char __user *name_user, unsigned int flags) /* Final destruction now noone is using it. */ if (mod->exit != NULL) { - up(&module_mutex); + mutex_unlock(&module_mutex); mod->exit(); - down(&module_mutex); + mutex_lock(&module_mutex); } free_module(mod); out: - up(&module_mutex); + mutex_unlock(&module_mutex); return ret; } @@ -1972,13 +1973,13 @@ sys_init_module(void __user *umod, return -EPERM; /* Only one module load at a time, please */ - if (down_interruptible(&module_mutex) != 0) + if (mutex_lock_interruptible(&module_mutex) != 0) return -EINTR; /* Do all the hard work */ mod = load_module(umod, len, uargs); if (IS_ERR(mod)) { - up(&module_mutex); + mutex_unlock(&module_mutex); return PTR_ERR(mod); } @@ -1987,11 +1988,11 @@ sys_init_module(void __user *umod, stop_machine_run(__link_module, mod, NR_CPUS); /* Drop lock so they can recurse */ - up(&module_mutex); + mutex_unlock(&module_mutex); - down(&notify_mutex); + mutex_lock(&notify_mutex); notifier_call_chain(&module_notify_list, MODULE_STATE_COMING, mod); - up(&notify_mutex); + mutex_unlock(&notify_mutex); /* Start the module */ if (mod->init != NULL) @@ -2006,15 +2007,15 @@ sys_init_module(void __user *umod, mod->name); else { module_put(mod); - down(&module_mutex); + mutex_lock(&module_mutex); free_module(mod); - up(&module_mutex); + mutex_unlock(&module_mutex); } return ret; } /* Now it's a first class citizen! */ - down(&module_mutex); + mutex_lock(&module_mutex); mod->state = MODULE_STATE_LIVE; /* Drop initial reference. */ module_put(mod); @@ -2022,7 +2023,7 @@ sys_init_module(void __user *umod, mod->module_init = NULL; mod->init_size = 0; mod->init_text_size = 0; - up(&module_mutex); + mutex_unlock(&module_mutex); return 0; } @@ -2112,7 +2113,7 @@ struct module *module_get_kallsym(unsigned int symnum, { struct module *mod; - down(&module_mutex); + mutex_lock(&module_mutex); list_for_each_entry(mod, &modules, list) { if (symnum < mod->num_symtab) { *value = mod->symtab[symnum].st_value; @@ -2120,12 +2121,12 @@ struct module *module_get_kallsym(unsigned int symnum, strncpy(namebuf, mod->strtab + mod->symtab[symnum].st_name, 127); - up(&module_mutex); + mutex_unlock(&module_mutex); return mod; } symnum -= mod->num_symtab; } - up(&module_mutex); + mutex_unlock(&module_mutex); return NULL; } @@ -2168,7 +2169,7 @@ static void *m_start(struct seq_file *m, loff_t *pos) struct list_head *i; loff_t n = 0; - down(&module_mutex); + mutex_lock(&module_mutex); list_for_each(i, &modules) { if (n++ == *pos) break; @@ -2189,7 +2190,7 @@ static void *m_next(struct seq_file *m, void *p, loff_t *pos) static void m_stop(struct seq_file *m, void *p) { - up(&module_mutex); + mutex_unlock(&module_mutex); } static int m_show(struct seq_file *m, void *p) diff --git a/kernel/panic.c b/kernel/panic.c index 126dc43f1c7..acd95adddb9 100644 --- a/kernel/panic.c +++ b/kernel/panic.c @@ -20,10 +20,13 @@ #include <linux/nmi.h> #include <linux/kexec.h> -int panic_timeout; int panic_on_oops; int tainted; +static int pause_on_oops; +static int pause_on_oops_flag; +static DEFINE_SPINLOCK(pause_on_oops_lock); +int panic_timeout; EXPORT_SYMBOL(panic_timeout); struct notifier_block *panic_notifier_list; @@ -174,3 +177,95 @@ void add_taint(unsigned flag) tainted |= flag; } EXPORT_SYMBOL(add_taint); + +static int __init pause_on_oops_setup(char *str) +{ + pause_on_oops = simple_strtoul(str, NULL, 0); + return 1; +} +__setup("pause_on_oops=", 
pause_on_oops_setup); + +static void spin_msec(int msecs) +{ + int i; + + for (i = 0; i < msecs; i++) { + touch_nmi_watchdog(); + mdelay(1); + } +} + +/* + * It just happens that oops_enter() and oops_exit() are identically + * implemented... + */ +static void do_oops_enter_exit(void) +{ + unsigned long flags; + static int spin_counter; + + if (!pause_on_oops) + return; + + spin_lock_irqsave(&pause_on_oops_lock, flags); + if (pause_on_oops_flag == 0) { + /* This CPU may now print the oops message */ + pause_on_oops_flag = 1; + } else { + /* We need to stall this CPU */ + if (!spin_counter) { + /* This CPU gets to do the counting */ + spin_counter = pause_on_oops; + do { + spin_unlock(&pause_on_oops_lock); + spin_msec(MSEC_PER_SEC); + spin_lock(&pause_on_oops_lock); + } while (--spin_counter); + pause_on_oops_flag = 0; + } else { + /* This CPU waits for a different one */ + while (spin_counter) { + spin_unlock(&pause_on_oops_lock); + spin_msec(1); + spin_lock(&pause_on_oops_lock); + } + } + } + spin_unlock_irqrestore(&pause_on_oops_lock, flags); +} + +/* + * Return true if the calling CPU is allowed to print oops-related info. This + * is a bit racy.. + */ +int oops_may_print(void) +{ + return pause_on_oops_flag == 0; +} + +/* + * Called when the architecture enters its oops handler, before it prints + * anything. If this is the first CPU to oops, and it's oopsing the first time + * then let it proceed. + * + * This is all enabled by the pause_on_oops kernel boot option. We do all this + * to ensure that oopses don't scroll off the screen. It has the side-effect + * of preventing later-oopsing CPUs from mucking up the display, too. + * + * It turns out that the CPU which is allowed to print ends up pausing for the + * right duration, whereas all the other CPUs pause for twice as long: once in + * oops_enter(), once in oops_exit(). + */ +void oops_enter(void) +{ + do_oops_enter_exit(); +} + +/* + * Called when the architecture exits its oops handler, after printing + * everything. 
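 *
 * An architecture's oops handler is expected to bracket its printing
 * with the pair, roughly (sketch, not part of this patch):
 *
 *	oops_enter();
 *	... print the oops message, registers, call trace ...
 *	oops_exit();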
+ */ +void oops_exit(void) +{ + do_oops_enter_exit(); +} diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c index fa895fc2ecf..9944379360b 100644 --- a/kernel/posix-timers.c +++ b/kernel/posix-timers.c @@ -35,6 +35,7 @@ #include <linux/interrupt.h> #include <linux/slab.h> #include <linux/time.h> +#include <linux/mutex.h> #include <asm/uaccess.h> #include <asm/semaphore.h> diff --git a/kernel/power/Makefile b/kernel/power/Makefile index 04be7d0d96a..8d0af3d37a4 100644 --- a/kernel/power/Makefile +++ b/kernel/power/Makefile @@ -5,7 +5,7 @@ endif obj-y := main.o process.o console.o obj-$(CONFIG_PM_LEGACY) += pm.o -obj-$(CONFIG_SOFTWARE_SUSPEND) += swsusp.o disk.o snapshot.o +obj-$(CONFIG_SOFTWARE_SUSPEND) += swsusp.o disk.o snapshot.o swap.o user.o obj-$(CONFIG_SUSPEND_SMP) += smp.o diff --git a/kernel/power/disk.c b/kernel/power/disk.c index 0b43847dc98..81d4d982f3f 100644 --- a/kernel/power/disk.c +++ b/kernel/power/disk.c @@ -22,17 +22,6 @@ #include "power.h" -extern suspend_disk_method_t pm_disk_mode; - -extern int swsusp_shrink_memory(void); -extern int swsusp_suspend(void); -extern int swsusp_write(struct pbe *pblist, unsigned int nr_pages); -extern int swsusp_check(void); -extern int swsusp_read(struct pbe **pblist_ptr); -extern void swsusp_close(void); -extern int swsusp_resume(void); - - static int noresume = 0; char resume_file[256] = CONFIG_PM_STD_PARTITION; dev_t swsusp_resume_device; @@ -70,10 +59,6 @@ static void power_down(suspend_disk_method_t mode) while(1); } - -static int in_suspend __nosavedata = 0; - - static inline void platform_finish(void) { if (pm_disk_mode == PM_DISK_PLATFORM) { @@ -87,7 +72,6 @@ static int prepare_processes(void) int error; pm_prepare_console(); - sys_sync(); disable_nonboot_cpus(); if (freeze_processes()) { @@ -145,7 +129,7 @@ int pm_suspend_disk(void) if (in_suspend) { device_resume(); pr_debug("PM: writing image.\n"); - error = swsusp_write(pagedir_nosave, nr_copy_pages); + error = swsusp_write(); if (!error) power_down(pm_disk_mode); else { @@ -216,7 +200,7 @@ static int software_resume(void) pr_debug("PM: Reading swsusp image.\n"); - if ((error = swsusp_read(&pagedir_nosave))) { + if ((error = swsusp_read())) { swsusp_free(); goto Thaw; } diff --git a/kernel/power/main.c b/kernel/power/main.c index 9cb235cba4a..ee371f50cca 100644 --- a/kernel/power/main.c +++ b/kernel/power/main.c @@ -103,7 +103,7 @@ static int suspend_prepare(suspend_state_t state) } -static int suspend_enter(suspend_state_t state) +int suspend_enter(suspend_state_t state) { int error = 0; unsigned long flags; diff --git a/kernel/power/pm.c b/kernel/power/pm.c index 33c508e857d..0f6908cce1d 100644 --- a/kernel/power/pm.c +++ b/kernel/power/pm.c @@ -25,6 +25,7 @@ #include <linux/pm.h> #include <linux/pm_legacy.h> #include <linux/interrupt.h> +#include <linux/mutex.h> int pm_active; @@ -40,7 +41,7 @@ int pm_active; * until a resume but that will be fine. 
*/ -static DECLARE_MUTEX(pm_devs_lock); +static DEFINE_MUTEX(pm_devs_lock); static LIST_HEAD(pm_devs); /** @@ -67,9 +68,9 @@ struct pm_dev *pm_register(pm_dev_t type, dev->id = id; dev->callback = callback; - down(&pm_devs_lock); + mutex_lock(&pm_devs_lock); list_add(&dev->entry, &pm_devs); - up(&pm_devs_lock); + mutex_unlock(&pm_devs_lock); } return dev; } @@ -85,9 +86,9 @@ struct pm_dev *pm_register(pm_dev_t type, void pm_unregister(struct pm_dev *dev) { if (dev) { - down(&pm_devs_lock); + mutex_lock(&pm_devs_lock); list_del(&dev->entry); - up(&pm_devs_lock); + mutex_unlock(&pm_devs_lock); kfree(dev); } @@ -118,7 +119,7 @@ void pm_unregister_all(pm_callback callback) if (!callback) return; - down(&pm_devs_lock); + mutex_lock(&pm_devs_lock); entry = pm_devs.next; while (entry != &pm_devs) { struct pm_dev *dev = list_entry(entry, struct pm_dev, entry); @@ -126,7 +127,7 @@ void pm_unregister_all(pm_callback callback) if (dev->callback == callback) __pm_unregister(dev); } - up(&pm_devs_lock); + mutex_unlock(&pm_devs_lock); } /** @@ -234,7 +235,7 @@ int pm_send_all(pm_request_t rqst, void *data) { struct list_head *entry; - down(&pm_devs_lock); + mutex_lock(&pm_devs_lock); entry = pm_devs.next; while (entry != &pm_devs) { struct pm_dev *dev = list_entry(entry, struct pm_dev, entry); @@ -246,13 +247,13 @@ int pm_send_all(pm_request_t rqst, void *data) */ if (rqst == PM_SUSPEND) pm_undo_all(dev); - up(&pm_devs_lock); + mutex_unlock(&pm_devs_lock); return status; } } entry = entry->next; } - up(&pm_devs_lock); + mutex_unlock(&pm_devs_lock); return 0; } diff --git a/kernel/power/power.h b/kernel/power/power.h index 388dba68084..f06f12f2176 100644 --- a/kernel/power/power.h +++ b/kernel/power/power.h @@ -8,6 +8,7 @@ struct swsusp_info { int cpus; unsigned long image_pages; unsigned long pages; + unsigned long size; } __attribute__((aligned(PAGE_SIZE))); @@ -37,21 +38,79 @@ extern struct subsystem power_subsys; /* References to section boundaries */ extern const void __nosave_begin, __nosave_end; -extern unsigned int nr_copy_pages; extern struct pbe *pagedir_nosave; /* Preferred image size in bytes (default 500 MB) */ extern unsigned long image_size; +extern int in_suspend; +extern dev_t swsusp_resume_device; extern asmlinkage int swsusp_arch_suspend(void); extern asmlinkage int swsusp_arch_resume(void); extern unsigned int count_data_pages(void); -extern void free_pagedir(struct pbe *pblist); -extern void release_eaten_pages(void); -extern struct pbe *alloc_pagedir(unsigned nr_pages, gfp_t gfp_mask, int safe_needed); + +struct snapshot_handle { + loff_t offset; + unsigned int page; + unsigned int page_offset; + unsigned int prev; + struct pbe *pbe; + void *buffer; + unsigned int buf_offset; +}; + +#define data_of(handle) ((handle).buffer + (handle).buf_offset) + +extern int snapshot_read_next(struct snapshot_handle *handle, size_t count); +extern int snapshot_write_next(struct snapshot_handle *handle, size_t count); +int snapshot_image_loaded(struct snapshot_handle *handle); + +#define SNAPSHOT_IOC_MAGIC '3' +#define SNAPSHOT_FREEZE _IO(SNAPSHOT_IOC_MAGIC, 1) +#define SNAPSHOT_UNFREEZE _IO(SNAPSHOT_IOC_MAGIC, 2) +#define SNAPSHOT_ATOMIC_SNAPSHOT _IOW(SNAPSHOT_IOC_MAGIC, 3, void *) +#define SNAPSHOT_ATOMIC_RESTORE _IO(SNAPSHOT_IOC_MAGIC, 4) +#define SNAPSHOT_FREE _IO(SNAPSHOT_IOC_MAGIC, 5) +#define SNAPSHOT_SET_IMAGE_SIZE _IOW(SNAPSHOT_IOC_MAGIC, 6, unsigned long) +#define SNAPSHOT_AVAIL_SWAP _IOR(SNAPSHOT_IOC_MAGIC, 7, void *) +#define SNAPSHOT_GET_SWAP_PAGE _IOR(SNAPSHOT_IOC_MAGIC, 8, void *) 
+#define SNAPSHOT_FREE_SWAP_PAGES _IO(SNAPSHOT_IOC_MAGIC, 9) +#define SNAPSHOT_SET_SWAP_FILE _IOW(SNAPSHOT_IOC_MAGIC, 10, unsigned int) +#define SNAPSHOT_S2RAM _IO(SNAPSHOT_IOC_MAGIC, 11) +#define SNAPSHOT_IOC_MAXNR 11 + +/** + * The bitmap is used for tracing allocated swap pages + * + * The entire bitmap consists of a number of bitmap_page + * structures linked with the help of the .next member. + * Thus each page can be allocated individually, so we only + * need to make 0-order memory allocations to create + * the bitmap. + */ + +#define BITMAP_PAGE_SIZE (PAGE_SIZE - sizeof(void *)) +#define BITMAP_PAGE_CHUNKS (BITMAP_PAGE_SIZE / sizeof(long)) +#define BITS_PER_CHUNK (sizeof(long) * 8) +#define BITMAP_PAGE_BITS (BITMAP_PAGE_CHUNKS * BITS_PER_CHUNK) + +struct bitmap_page { + unsigned long chunks[BITMAP_PAGE_CHUNKS]; + struct bitmap_page *next; +}; + +extern void free_bitmap(struct bitmap_page *bitmap); +extern struct bitmap_page *alloc_bitmap(unsigned int nr_bits); +extern unsigned long alloc_swap_page(int swap, struct bitmap_page *bitmap); +extern void free_all_swap_pages(int swap, struct bitmap_page *bitmap); + +extern int swsusp_check(void); +extern int swsusp_shrink_memory(void); extern void swsusp_free(void); -extern int alloc_data_pages(struct pbe *pblist, gfp_t gfp_mask, int safe_needed); -extern unsigned int snapshot_nr_pages(void); -extern struct pbe *snapshot_pblist(void); -extern void snapshot_pblist_set(struct pbe *pblist); +extern int swsusp_suspend(void); +extern int swsusp_resume(void); +extern int swsusp_read(void); +extern int swsusp_write(void); +extern void swsusp_close(void); +extern int suspend_enter(suspend_state_t state); diff --git a/kernel/power/process.c b/kernel/power/process.c index 28de118f7a0..8ac7c35fad7 100644 --- a/kernel/power/process.c +++ b/kernel/power/process.c @@ -12,11 +12,12 @@ #include <linux/interrupt.h> #include <linux/suspend.h> #include <linux/module.h> +#include <linux/syscalls.h> /* * Timeout for stopping processes */ -#define TIMEOUT (6 * HZ) +#define TIMEOUT (20 * HZ) static inline int freezeable(struct task_struct * p) @@ -54,38 +55,62 @@ void refrigerator(void) current->state = save; } +static inline void freeze_process(struct task_struct *p) +{ + unsigned long flags; + + if (!freezing(p)) { + freeze(p); + spin_lock_irqsave(&p->sighand->siglock, flags); + signal_wake_up(p, 0); + spin_unlock_irqrestore(&p->sighand->siglock, flags); + } +} + /* 0 = success, else # of processes that we failed to stop */ int freeze_processes(void) { - int todo; + int todo, nr_user, user_frozen; unsigned long start_time; struct task_struct *g, *p; unsigned long flags; printk( "Stopping tasks: " ); start_time = jiffies; + user_frozen = 0; do { - todo = 0; + nr_user = todo = 0; read_lock(&tasklist_lock); do_each_thread(g, p) { if (!freezeable(p)) continue; if (frozen(p)) continue; - - freeze(p); - spin_lock_irqsave(&p->sighand->siglock, flags); - signal_wake_up(p, 0); - spin_unlock_irqrestore(&p->sighand->siglock, flags); - todo++; + if (p->mm && !(p->flags & PF_BORROWED_MM)) { + /* The task is a user-space one. 
+ * Freeze it unless there's a vfork completion + * pending + */ + if (!p->vfork_done) + freeze_process(p); + nr_user++; + } else { + /* Freeze only if the user space is frozen */ + if (user_frozen) + freeze_process(p); + todo++; + } } while_each_thread(g, p); read_unlock(&tasklist_lock); + todo += nr_user; + if (!user_frozen && !nr_user) { + sys_sync(); + start_time = jiffies; + } + user_frozen = !nr_user; yield(); /* Yield is okay here */ - if (todo && time_after(jiffies, start_time + TIMEOUT)) { - printk( "\n" ); - printk(KERN_ERR " stopping tasks failed (%d tasks remaining)\n", todo ); + if (todo && time_after(jiffies, start_time + TIMEOUT)) break; - } } while(todo); /* This does not unfreeze processes that are already frozen @@ -94,8 +119,14 @@ int freeze_processes(void) * but it cleans up leftover PF_FREEZE requests. */ if (todo) { + printk( "\n" ); + printk(KERN_ERR " stopping tasks timed out " + "after %d seconds (%d tasks remaining):\n", + TIMEOUT / HZ, todo); read_lock(&tasklist_lock); - do_each_thread(g, p) + do_each_thread(g, p) { + if (freezeable(p) && !frozen(p)) + printk(KERN_ERR " %s\n", p->comm); if (freezing(p)) { pr_debug(" clean up: %s\n", p->comm); p->flags &= ~PF_FREEZE; @@ -103,7 +134,7 @@ int freeze_processes(void) recalc_sigpending_tsk(p); spin_unlock_irqrestore(&p->sighand->siglock, flags); } - while_each_thread(g, p); + } while_each_thread(g, p); read_unlock(&tasklist_lock); return todo; } diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c index 8d5a5986d62..c5863d02c89 100644 --- a/kernel/power/snapshot.c +++ b/kernel/power/snapshot.c @@ -10,6 +10,7 @@ */ +#include <linux/version.h> #include <linux/module.h> #include <linux/mm.h> #include <linux/suspend.h> @@ -34,7 +35,9 @@ #include "power.h" struct pbe *pagedir_nosave; -unsigned int nr_copy_pages; +static unsigned int nr_copy_pages; +static unsigned int nr_meta_pages; +static unsigned long *buffer; #ifdef CONFIG_HIGHMEM unsigned int count_highmem_pages(void) @@ -80,7 +83,7 @@ static int save_highmem_zone(struct zone *zone) void *kaddr; unsigned long pfn = zone_pfn + zone->zone_start_pfn; - if (!(pfn%1000)) + if (!(pfn%10000)) printk("."); if (!pfn_valid(pfn)) continue; @@ -119,13 +122,15 @@ int save_highmem(void) struct zone *zone; int res = 0; - pr_debug("swsusp: Saving Highmem\n"); + pr_debug("swsusp: Saving Highmem"); + drain_local_pages(); for_each_zone (zone) { if (is_highmem(zone)) res = save_highmem_zone(zone); if (res) return res; } + printk("\n"); return 0; } @@ -235,7 +240,7 @@ static void copy_data_pages(struct pbe *pblist) * free_pagedir - free pages allocated with alloc_pagedir() */ -void free_pagedir(struct pbe *pblist) +static void free_pagedir(struct pbe *pblist) { struct pbe *pbe; @@ -301,7 +306,7 @@ struct eaten_page { static struct eaten_page *eaten_pages = NULL; -void release_eaten_pages(void) +static void release_eaten_pages(void) { struct eaten_page *p, *q; @@ -376,7 +381,6 @@ struct pbe *alloc_pagedir(unsigned int nr_pages, gfp_t gfp_mask, int safe_needed if (!nr_pages) return NULL; - pr_debug("alloc_pagedir(): nr_pages = %d\n", nr_pages); pblist = alloc_image_page(gfp_mask, safe_needed); /* FIXME: rewrite this ugly loop */ for (pbe = pblist, num = PBES_PER_PAGE; pbe && num < nr_pages; @@ -388,7 +392,7 @@ struct pbe *alloc_pagedir(unsigned int nr_pages, gfp_t gfp_mask, int safe_needed free_pagedir(pblist); pblist = NULL; } else - create_pbe_list(pblist, nr_pages); + create_pbe_list(pblist, nr_pages); return pblist; } @@ -414,6 +418,10 @@ void swsusp_free(void) } } } + 
nr_copy_pages = 0; + nr_meta_pages = 0; + pagedir_nosave = NULL; + buffer = NULL; } @@ -437,7 +445,7 @@ static int enough_free_mem(unsigned int nr_pages) (nr_pages + PBES_PER_PAGE - 1) / PBES_PER_PAGE); } -int alloc_data_pages(struct pbe *pblist, gfp_t gfp_mask, int safe_needed) +static int alloc_data_pages(struct pbe *pblist, gfp_t gfp_mask, int safe_needed) { struct pbe *p; @@ -504,7 +512,318 @@ asmlinkage int swsusp_save(void) */ nr_copy_pages = nr_pages; + nr_meta_pages = (nr_pages * sizeof(long) + PAGE_SIZE - 1) >> PAGE_SHIFT; printk("swsusp: critical section/: done (%d pages copied)\n", nr_pages); return 0; } + +static void init_header(struct swsusp_info *info) +{ + memset(info, 0, sizeof(struct swsusp_info)); + info->version_code = LINUX_VERSION_CODE; + info->num_physpages = num_physpages; + memcpy(&info->uts, &system_utsname, sizeof(system_utsname)); + info->cpus = num_online_cpus(); + info->image_pages = nr_copy_pages; + info->pages = nr_copy_pages + nr_meta_pages + 1; + info->size = info->pages; + info->size <<= PAGE_SHIFT; +} + +/** + * pack_orig_addresses - the .orig_address fields of the PBEs from the + * list starting at @pbe are stored in the array @buf[] (1 page) + */ + +static inline struct pbe *pack_orig_addresses(unsigned long *buf, struct pbe *pbe) +{ + int j; + + for (j = 0; j < PAGE_SIZE / sizeof(long) && pbe; j++) { + buf[j] = pbe->orig_address; + pbe = pbe->next; + } + if (!pbe) + for (; j < PAGE_SIZE / sizeof(long); j++) + buf[j] = 0; + return pbe; +} + +/** + * snapshot_read_next - used for reading the system memory snapshot. + * + * On the first call to it @handle should point to a zeroed + * snapshot_handle structure. The structure gets updated and a pointer + * to it should be passed to this function every next time. + * + * The @count parameter should contain the number of bytes the caller + * wants to read from the snapshot. It must not be zero. + * + * On success the function returns a positive number. Then, the caller + * is allowed to read up to the returned number of bytes from the memory + * location computed by the data_of() macro. The number returned + * may be smaller than @count, but this only happens if the read would + * cross a page boundary otherwise. + * + * The function returns 0 to indicate the end of data stream condition, + * and a negative number is returned on error. In such cases the + * structure pointed to by @handle is not updated and should not be used + * any more. 
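 *
 * A consumer of the image (for example the code that writes it out to
 * swap) would drive it roughly as below; sketch only, not part of this
 * patch, and write_block() is a made-up helper:
 *
 *	struct snapshot_handle handle;
 *	int ret;
 *
 *	memset(&handle, 0, sizeof(handle));
 *	while ((ret = snapshot_read_next(&handle, PAGE_SIZE)) > 0)
 *		write_block(data_of(handle), ret);
 *	if (ret < 0)
 *		return ret;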
+ */ + +int snapshot_read_next(struct snapshot_handle *handle, size_t count) +{ + if (handle->page > nr_meta_pages + nr_copy_pages) + return 0; + if (!buffer) { + /* This makes the buffer be freed by swsusp_free() */ + buffer = alloc_image_page(GFP_ATOMIC, 0); + if (!buffer) + return -ENOMEM; + } + if (!handle->offset) { + init_header((struct swsusp_info *)buffer); + handle->buffer = buffer; + handle->pbe = pagedir_nosave; + } + if (handle->prev < handle->page) { + if (handle->page <= nr_meta_pages) { + handle->pbe = pack_orig_addresses(buffer, handle->pbe); + if (!handle->pbe) + handle->pbe = pagedir_nosave; + } else { + handle->buffer = (void *)handle->pbe->address; + handle->pbe = handle->pbe->next; + } + handle->prev = handle->page; + } + handle->buf_offset = handle->page_offset; + if (handle->page_offset + count >= PAGE_SIZE) { + count = PAGE_SIZE - handle->page_offset; + handle->page_offset = 0; + handle->page++; + } else { + handle->page_offset += count; + } + handle->offset += count; + return count; +} + +/** + * mark_unsafe_pages - mark the pages that cannot be used for storing + * the image during resume, because they conflict with the pages that + * had been used before suspend + */ + +static int mark_unsafe_pages(struct pbe *pblist) +{ + struct zone *zone; + unsigned long zone_pfn; + struct pbe *p; + + if (!pblist) /* a sanity check */ + return -EINVAL; + + /* Clear page flags */ + for_each_zone (zone) { + for (zone_pfn = 0; zone_pfn < zone->spanned_pages; ++zone_pfn) + if (pfn_valid(zone_pfn + zone->zone_start_pfn)) + ClearPageNosaveFree(pfn_to_page(zone_pfn + + zone->zone_start_pfn)); + } + + /* Mark orig addresses */ + for_each_pbe (p, pblist) { + if (virt_addr_valid(p->orig_address)) + SetPageNosaveFree(virt_to_page(p->orig_address)); + else + return -EFAULT; + } + + return 0; +} + +static void copy_page_backup_list(struct pbe *dst, struct pbe *src) +{ + /* We assume both lists contain the same number of elements */ + while (src) { + dst->orig_address = src->orig_address; + dst = dst->next; + src = src->next; + } +} + +static int check_header(struct swsusp_info *info) +{ + char *reason = NULL; + + if (info->version_code != LINUX_VERSION_CODE) + reason = "kernel version"; + if (info->num_physpages != num_physpages) + reason = "memory size"; + if (strcmp(info->uts.sysname,system_utsname.sysname)) + reason = "system type"; + if (strcmp(info->uts.release,system_utsname.release)) + reason = "kernel release"; + if (strcmp(info->uts.version,system_utsname.version)) + reason = "version"; + if (strcmp(info->uts.machine,system_utsname.machine)) + reason = "machine"; + if (reason) { + printk(KERN_ERR "swsusp: Resume mismatch: %s\n", reason); + return -EPERM; + } + return 0; +} + +/** + * load header - check the image header and copy data from it + */ + +static int load_header(struct snapshot_handle *handle, + struct swsusp_info *info) +{ + int error; + struct pbe *pblist; + + error = check_header(info); + if (!error) { + pblist = alloc_pagedir(info->image_pages, GFP_ATOMIC, 0); + if (!pblist) + return -ENOMEM; + pagedir_nosave = pblist; + handle->pbe = pblist; + nr_copy_pages = info->image_pages; + nr_meta_pages = info->pages - info->image_pages - 1; + } + return error; +} + +/** + * unpack_orig_addresses - copy the elements of @buf[] (1 page) to + * the PBEs in the list starting at @pbe + */ + +static inline struct pbe *unpack_orig_addresses(unsigned long *buf, + struct pbe *pbe) +{ + int j; + + for (j = 0; j < PAGE_SIZE / sizeof(long) && pbe; j++) { + pbe->orig_address = buf[j]; + 
pbe = pbe->next; + } + return pbe; +} + +/** + * create_image - use metadata contained in the PBE list + * pointed to by pagedir_nosave to mark the pages that will + * be overwritten in the process of restoring the system + * memory state from the image and allocate memory for + * the image avoiding these pages + */ + +static int create_image(struct snapshot_handle *handle) +{ + int error = 0; + struct pbe *p, *pblist; + + p = pagedir_nosave; + error = mark_unsafe_pages(p); + if (!error) { + pblist = alloc_pagedir(nr_copy_pages, GFP_ATOMIC, 1); + if (pblist) + copy_page_backup_list(pblist, p); + free_pagedir(p); + if (!pblist) + error = -ENOMEM; + } + if (!error) + error = alloc_data_pages(pblist, GFP_ATOMIC, 1); + if (!error) { + release_eaten_pages(); + pagedir_nosave = pblist; + } else { + pagedir_nosave = NULL; + handle->pbe = NULL; + nr_copy_pages = 0; + nr_meta_pages = 0; + } + return error; +} + +/** + * snapshot_write_next - used for writing the system memory snapshot. + * + * On the first call to it @handle should point to a zeroed + * snapshot_handle structure. The structure gets updated and a pointer + * to it should be passed to this function every next time. + * + * The @count parameter should contain the number of bytes the caller + * wants to write to the image. It must not be zero. + * + * On success the function returns a positive number. Then, the caller + * is allowed to write up to the returned number of bytes to the memory + * location computed by the data_of() macro. The number returned + * may be smaller than @count, but this only happens if the write would + * cross a page boundary otherwise. + * + * The function returns 0 to indicate the "end of file" condition, + * and a negative number is returned on error. In such cases the + * structure pointed to by @handle is not updated and should not be used + * any more. 
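 *
 * The resume path uses it as the mirror image of snapshot_read_next():
 * get a buffer, fill it with the next chunk of the saved image, repeat
 * (sketch only, not part of this patch; read_block() is a made-up helper):
 *
 *	memset(&handle, 0, sizeof(handle));
 *	while ((ret = snapshot_write_next(&handle, PAGE_SIZE)) > 0)
 *		read_block(data_of(handle), ret);
 *	if (!ret && !snapshot_image_loaded(&handle))
 *		ret = -ENODATA;	(error code chosen arbitrarily here)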
+ */ + +int snapshot_write_next(struct snapshot_handle *handle, size_t count) +{ + int error = 0; + + if (handle->prev && handle->page > nr_meta_pages + nr_copy_pages) + return 0; + if (!buffer) { + /* This makes the buffer be freed by swsusp_free() */ + buffer = alloc_image_page(GFP_ATOMIC, 0); + if (!buffer) + return -ENOMEM; + } + if (!handle->offset) + handle->buffer = buffer; + if (handle->prev < handle->page) { + if (!handle->prev) { + error = load_header(handle, (struct swsusp_info *)buffer); + if (error) + return error; + } else if (handle->prev <= nr_meta_pages) { + handle->pbe = unpack_orig_addresses(buffer, handle->pbe); + if (!handle->pbe) { + error = create_image(handle); + if (error) + return error; + handle->pbe = pagedir_nosave; + handle->buffer = (void *)handle->pbe->address; + } + } else { + handle->pbe = handle->pbe->next; + handle->buffer = (void *)handle->pbe->address; + } + handle->prev = handle->page; + } + handle->buf_offset = handle->page_offset; + if (handle->page_offset + count >= PAGE_SIZE) { + count = PAGE_SIZE - handle->page_offset; + handle->page_offset = 0; + handle->page++; + } else { + handle->page_offset += count; + } + handle->offset += count; + return count; +} + +int snapshot_image_loaded(struct snapshot_handle *handle) +{ + return !(!handle->pbe || handle->pbe->next || !nr_copy_pages || + handle->page <= nr_meta_pages + nr_copy_pages); +} diff --git a/kernel/power/swap.c b/kernel/power/swap.c new file mode 100644 index 00000000000..9177f3f73a6 --- /dev/null +++ b/kernel/power/swap.c @@ -0,0 +1,544 @@ +/* + * linux/kernel/power/swap.c + * + * This file provides functions for reading the suspend image from + * and writing it to a swap partition. + * + * Copyright (C) 1998,2001-2005 Pavel Machek <pavel@suse.cz> + * Copyright (C) 2006 Rafael J. Wysocki <rjw@sisk.pl> + * + * This file is released under the GPLv2. + * + */ + +#include <linux/module.h> +#include <linux/smp_lock.h> +#include <linux/file.h> +#include <linux/utsname.h> +#include <linux/version.h> +#include <linux/delay.h> +#include <linux/bitops.h> +#include <linux/genhd.h> +#include <linux/device.h> +#include <linux/buffer_head.h> +#include <linux/bio.h> +#include <linux/swap.h> +#include <linux/swapops.h> +#include <linux/pm.h> + +#include "power.h" + +extern char resume_file[]; + +#define SWSUSP_SIG "S1SUSPEND" + +static struct swsusp_header { + char reserved[PAGE_SIZE - 20 - sizeof(swp_entry_t)]; + swp_entry_t image; + char orig_sig[10]; + char sig[10]; +} __attribute__((packed, aligned(PAGE_SIZE))) swsusp_header; + +/* + * Saving part... 
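+ *
+ * swsusp_header above is exactly one page, laid out so that .sig covers
+ * the last ten bytes of the swap area's first page, i.e. the spot where
+ * the SWAP-SPACE/SWAPSPACE2 magic normally sits.  mark_swapfiles() below
+ * replaces that magic with SWSUSP_SIG and keeps the original in
+ * .orig_sig, so that swsusp_check() can put it back when resuming.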
+ */ + +static unsigned short root_swap = 0xffff; + +static int mark_swapfiles(swp_entry_t start) +{ + int error; + + rw_swap_page_sync(READ, + swp_entry(root_swap, 0), + virt_to_page((unsigned long)&swsusp_header)); + if (!memcmp("SWAP-SPACE",swsusp_header.sig, 10) || + !memcmp("SWAPSPACE2",swsusp_header.sig, 10)) { + memcpy(swsusp_header.orig_sig,swsusp_header.sig, 10); + memcpy(swsusp_header.sig,SWSUSP_SIG, 10); + swsusp_header.image = start; + error = rw_swap_page_sync(WRITE, + swp_entry(root_swap, 0), + virt_to_page((unsigned long) + &swsusp_header)); + } else { + pr_debug("swsusp: Partition is not swap space.\n"); + error = -ENODEV; + } + return error; +} + +/** + * swsusp_swap_check - check if the resume device is a swap device + * and get its index (if so) + */ + +static int swsusp_swap_check(void) /* This is called before saving image */ +{ + int res = swap_type_of(swsusp_resume_device); + + if (res >= 0) { + root_swap = res; + return 0; + } + return res; +} + +/** + * write_page - Write one page to given swap location. + * @buf: Address we're writing. + * @offset: Offset of the swap page we're writing to. + */ + +static int write_page(void *buf, unsigned long offset) +{ + swp_entry_t entry; + int error = -ENOSPC; + + if (offset) { + entry = swp_entry(root_swap, offset); + error = rw_swap_page_sync(WRITE, entry, virt_to_page(buf)); + } + return error; +} + +/* + * The swap map is a data structure used for keeping track of each page + * written to a swap partition. It consists of many swap_map_page + * structures that contain each an array of MAP_PAGE_SIZE swap entries. + * These structures are stored on the swap and linked together with the + * help of the .next_swap member. + * + * The swap map is created during suspend. The swap map pages are + * allocated and populated one at a time, so we only need one memory + * page to set up the entire structure. + * + * During resume we also only need to use one swap_map_page structure + * at a time. 
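+ *
+ * Roughly, the layout this produces on swap is:
+ *
+ *	map page			next map page
+ *	entries[0] -> saved page	entries[0] -> saved page
+ *	...				...
+ *	next_swap ---------------------> next_swap -> ... -> 0
+ *
+ * with each entries[] slot holding the swap offset of one page written
+ * through swap_write_page(), and next_swap holding the offset of the
+ * following map page (zero in the last one).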
+ */ + +#define MAP_PAGE_ENTRIES (PAGE_SIZE / sizeof(long) - 1) + +struct swap_map_page { + unsigned long entries[MAP_PAGE_ENTRIES]; + unsigned long next_swap; +}; + +/** + * The swap_map_handle structure is used for handling swap in + * a file-alike way + */ + +struct swap_map_handle { + struct swap_map_page *cur; + unsigned long cur_swap; + struct bitmap_page *bitmap; + unsigned int k; +}; + +static void release_swap_writer(struct swap_map_handle *handle) +{ + if (handle->cur) + free_page((unsigned long)handle->cur); + handle->cur = NULL; + if (handle->bitmap) + free_bitmap(handle->bitmap); + handle->bitmap = NULL; +} + +static int get_swap_writer(struct swap_map_handle *handle) +{ + handle->cur = (struct swap_map_page *)get_zeroed_page(GFP_KERNEL); + if (!handle->cur) + return -ENOMEM; + handle->bitmap = alloc_bitmap(count_swap_pages(root_swap, 0)); + if (!handle->bitmap) { + release_swap_writer(handle); + return -ENOMEM; + } + handle->cur_swap = alloc_swap_page(root_swap, handle->bitmap); + if (!handle->cur_swap) { + release_swap_writer(handle); + return -ENOSPC; + } + handle->k = 0; + return 0; +} + +static int swap_write_page(struct swap_map_handle *handle, void *buf) +{ + int error; + unsigned long offset; + + if (!handle->cur) + return -EINVAL; + offset = alloc_swap_page(root_swap, handle->bitmap); + error = write_page(buf, offset); + if (error) + return error; + handle->cur->entries[handle->k++] = offset; + if (handle->k >= MAP_PAGE_ENTRIES) { + offset = alloc_swap_page(root_swap, handle->bitmap); + if (!offset) + return -ENOSPC; + handle->cur->next_swap = offset; + error = write_page(handle->cur, handle->cur_swap); + if (error) + return error; + memset(handle->cur, 0, PAGE_SIZE); + handle->cur_swap = offset; + handle->k = 0; + } + return 0; +} + +static int flush_swap_writer(struct swap_map_handle *handle) +{ + if (handle->cur && handle->cur_swap) + return write_page(handle->cur, handle->cur_swap); + else + return -EINVAL; +} + +/** + * save_image - save the suspend image data + */ + +static int save_image(struct swap_map_handle *handle, + struct snapshot_handle *snapshot, + unsigned int nr_pages) +{ + unsigned int m; + int ret; + int error = 0; + + printk("Saving image data pages (%u pages) ... ", nr_pages); + m = nr_pages / 100; + if (!m) + m = 1; + nr_pages = 0; + do { + ret = snapshot_read_next(snapshot, PAGE_SIZE); + if (ret > 0) { + error = swap_write_page(handle, data_of(*snapshot)); + if (error) + break; + if (!(nr_pages % m)) + printk("\b\b\b\b%3d%%", nr_pages / m); + nr_pages++; + } + } while (ret > 0); + if (!error) + printk("\b\b\b\bdone\n"); + return error; +} + +/** + * enough_swap - Make sure we have enough swap to save the image. + * + * Returns TRUE or FALSE after checking the total amount of swap + * space avaiable from the resume partition. + */ + +static int enough_swap(unsigned int nr_pages) +{ + unsigned int free_swap = count_swap_pages(root_swap, 1); + + pr_debug("swsusp: free swap pages: %u\n", free_swap); + return free_swap > (nr_pages + PAGES_FOR_IO + + (nr_pages + PBES_PER_PAGE - 1) / PBES_PER_PAGE); +} + +/** + * swsusp_write - Write entire image and metadata. + * + * It is important _NOT_ to umount filesystems at this point. We want + * them synced (in case something goes wrong) but we DO not want to mark + * filesystem clean: it is not. (And it does not matter, if we resume + * correctly, we'll mark system clean, anyway.) 
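+ *
+ * When this succeeds, swap slot 0 holds swsusp_header with the S1SUSPEND
+ * signature and a swap entry pointing at the first swap_map_page in
+ * .image; the map pages in turn point at the swsusp_info header page and
+ * the image data pages, in the order in which snapshot_read_next()
+ * produced them.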
+ */ + +int swsusp_write(void) +{ + struct swap_map_handle handle; + struct snapshot_handle snapshot; + struct swsusp_info *header; + unsigned long start; + int error; + + if ((error = swsusp_swap_check())) { + printk(KERN_ERR "swsusp: Cannot find swap device, try swapon -a.\n"); + return error; + } + memset(&snapshot, 0, sizeof(struct snapshot_handle)); + error = snapshot_read_next(&snapshot, PAGE_SIZE); + if (error < PAGE_SIZE) + return error < 0 ? error : -EFAULT; + header = (struct swsusp_info *)data_of(snapshot); + if (!enough_swap(header->pages)) { + printk(KERN_ERR "swsusp: Not enough free swap\n"); + return -ENOSPC; + } + error = get_swap_writer(&handle); + if (!error) { + start = handle.cur_swap; + error = swap_write_page(&handle, header); + } + if (!error) + error = save_image(&handle, &snapshot, header->pages - 1); + if (!error) { + flush_swap_writer(&handle); + printk("S"); + error = mark_swapfiles(swp_entry(root_swap, start)); + printk("|\n"); + } + if (error) + free_all_swap_pages(root_swap, handle.bitmap); + release_swap_writer(&handle); + return error; +} + +/* + * Using bio to read from swap. + * This code requires a bit more work than just using buffer heads + * but, it is the recommended way for 2.5/2.6. + * The following are to signal the beginning and end of I/O. Bios + * finish asynchronously, while we want them to happen synchronously. + * A simple atomic_t, and a wait loop take care of this problem. + */ + +static atomic_t io_done = ATOMIC_INIT(0); + +static int end_io(struct bio *bio, unsigned int num, int err) +{ + if (!test_bit(BIO_UPTODATE, &bio->bi_flags)) + panic("I/O error reading memory image"); + atomic_set(&io_done, 0); + return 0; +} + +static struct block_device *resume_bdev; + +/** + * submit - submit BIO request. + * @rw: READ or WRITE. + * @off physical offset of page. + * @page: page we're reading or writing. + * + * Straight from the textbook - allocate and initialize the bio. + * If we're writing, make sure the page is marked as dirty. + * Then submit it and wait. 
+ */ + +static int submit(int rw, pgoff_t page_off, void *page) +{ + int error = 0; + struct bio *bio; + + bio = bio_alloc(GFP_ATOMIC, 1); + if (!bio) + return -ENOMEM; + bio->bi_sector = page_off * (PAGE_SIZE >> 9); + bio->bi_bdev = resume_bdev; + bio->bi_end_io = end_io; + + if (bio_add_page(bio, virt_to_page(page), PAGE_SIZE, 0) < PAGE_SIZE) { + printk("swsusp: ERROR: adding page to bio at %ld\n",page_off); + error = -EFAULT; + goto Done; + } + + atomic_set(&io_done, 1); + submit_bio(rw | (1 << BIO_RW_SYNC), bio); + while (atomic_read(&io_done)) + yield(); + if (rw == READ) + bio_set_pages_dirty(bio); + Done: + bio_put(bio); + return error; +} + +static int bio_read_page(pgoff_t page_off, void *page) +{ + return submit(READ, page_off, page); +} + +static int bio_write_page(pgoff_t page_off, void *page) +{ + return submit(WRITE, page_off, page); +} + +/** + * The following functions allow us to read data using a swap map + * in a file-alike way + */ + +static void release_swap_reader(struct swap_map_handle *handle) +{ + if (handle->cur) + free_page((unsigned long)handle->cur); + handle->cur = NULL; +} + +static int get_swap_reader(struct swap_map_handle *handle, + swp_entry_t start) +{ + int error; + + if (!swp_offset(start)) + return -EINVAL; + handle->cur = (struct swap_map_page *)get_zeroed_page(GFP_ATOMIC); + if (!handle->cur) + return -ENOMEM; + error = bio_read_page(swp_offset(start), handle->cur); + if (error) { + release_swap_reader(handle); + return error; + } + handle->k = 0; + return 0; +} + +static int swap_read_page(struct swap_map_handle *handle, void *buf) +{ + unsigned long offset; + int error; + + if (!handle->cur) + return -EINVAL; + offset = handle->cur->entries[handle->k]; + if (!offset) + return -EFAULT; + error = bio_read_page(offset, buf); + if (error) + return error; + if (++handle->k >= MAP_PAGE_ENTRIES) { + handle->k = 0; + offset = handle->cur->next_swap; + if (!offset) + release_swap_reader(handle); + else + error = bio_read_page(offset, handle->cur); + } + return error; +} + +/** + * load_image - load the image using the swap map handle + * @handle and the snapshot handle @snapshot + * (assume there are @nr_pages pages to load) + */ + +static int load_image(struct swap_map_handle *handle, + struct snapshot_handle *snapshot, + unsigned int nr_pages) +{ + unsigned int m; + int ret; + int error = 0; + + printk("Loading image data pages (%u pages) ... ", nr_pages); + m = nr_pages / 100; + if (!m) + m = 1; + nr_pages = 0; + do { + ret = snapshot_write_next(snapshot, PAGE_SIZE); + if (ret > 0) { + error = swap_read_page(handle, data_of(*snapshot)); + if (error) + break; + if (!(nr_pages % m)) + printk("\b\b\b\b%3d%%", nr_pages / m); + nr_pages++; + } + } while (ret > 0); + if (!error) + printk("\b\b\b\bdone\n"); + if (!snapshot_image_loaded(snapshot)) + error = -ENODATA; + return error; +} + +int swsusp_read(void) +{ + int error; + struct swap_map_handle handle; + struct snapshot_handle snapshot; + struct swsusp_info *header; + + if (IS_ERR(resume_bdev)) { + pr_debug("swsusp: block device not initialised\n"); + return PTR_ERR(resume_bdev); + } + + memset(&snapshot, 0, sizeof(struct snapshot_handle)); + error = snapshot_write_next(&snapshot, PAGE_SIZE); + if (error < PAGE_SIZE) + return error < 0 ? 
error : -EFAULT; + header = (struct swsusp_info *)data_of(snapshot); + error = get_swap_reader(&handle, swsusp_header.image); + if (!error) + error = swap_read_page(&handle, header); + if (!error) + error = load_image(&handle, &snapshot, header->pages - 1); + release_swap_reader(&handle); + + blkdev_put(resume_bdev); + + if (!error) + pr_debug("swsusp: Reading resume file was successful\n"); + else + pr_debug("swsusp: Error %d resuming\n", error); + return error; +} + +/** + * swsusp_check - Check for swsusp signature in the resume device + */ + +int swsusp_check(void) +{ + int error; + + resume_bdev = open_by_devnum(swsusp_resume_device, FMODE_READ); + if (!IS_ERR(resume_bdev)) { + set_blocksize(resume_bdev, PAGE_SIZE); + memset(&swsusp_header, 0, sizeof(swsusp_header)); + if ((error = bio_read_page(0, &swsusp_header))) + return error; + if (!memcmp(SWSUSP_SIG, swsusp_header.sig, 10)) { + memcpy(swsusp_header.sig, swsusp_header.orig_sig, 10); + /* Reset swap signature now */ + error = bio_write_page(0, &swsusp_header); + } else { + return -EINVAL; + } + if (error) + blkdev_put(resume_bdev); + else + pr_debug("swsusp: Signature found, resuming\n"); + } else { + error = PTR_ERR(resume_bdev); + } + + if (error) + pr_debug("swsusp: Error %d check for resume file\n", error); + + return error; +} + +/** + * swsusp_close - close swap device. + */ + +void swsusp_close(void) +{ + if (IS_ERR(resume_bdev)) { + pr_debug("swsusp: block device not initialised\n"); + return; + } + + blkdev_put(resume_bdev); +} diff --git a/kernel/power/swsusp.c b/kernel/power/swsusp.c index 2d9d08f72f7..c4016cbbd3e 100644 --- a/kernel/power/swsusp.c +++ b/kernel/power/swsusp.c @@ -31,41 +31,24 @@ * Fixed runaway init * * Rafael J. Wysocki <rjw@sisk.pl> - * Added the swap map data structure and reworked the handling of swap + * Reworked the freeing of memory and the handling of swap * * More state savers are welcome. Especially for the scsi layer... * * For TODOs,FIXMEs also look in Documentation/power/swsusp.txt */ -#include <linux/module.h> #include <linux/mm.h> #include <linux/suspend.h> -#include <linux/smp_lock.h> -#include <linux/file.h> -#include <linux/utsname.h> -#include <linux/version.h> -#include <linux/delay.h> -#include <linux/bitops.h> #include <linux/spinlock.h> -#include <linux/genhd.h> #include <linux/kernel.h> #include <linux/major.h> #include <linux/swap.h> #include <linux/pm.h> -#include <linux/device.h> -#include <linux/buffer_head.h> #include <linux/swapops.h> #include <linux/bootmem.h> #include <linux/syscalls.h> #include <linux/highmem.h> -#include <linux/bio.h> - -#include <asm/uaccess.h> -#include <asm/mmu_context.h> -#include <asm/pgtable.h> -#include <asm/tlbflush.h> -#include <asm/io.h> #include "power.h" @@ -77,6 +60,8 @@ */ unsigned long image_size = 500 * 1024 * 1024; +int in_suspend __nosavedata = 0; + #ifdef CONFIG_HIGHMEM unsigned int count_highmem_pages(void); int save_highmem(void); @@ -87,471 +72,97 @@ static int restore_highmem(void) { return 0; } static unsigned int count_highmem_pages(void) { return 0; } #endif -extern char resume_file[]; - -#define SWSUSP_SIG "S1SUSPEND" - -static struct swsusp_header { - char reserved[PAGE_SIZE - 20 - sizeof(swp_entry_t)]; - swp_entry_t image; - char orig_sig[10]; - char sig[10]; -} __attribute__((packed, aligned(PAGE_SIZE))) swsusp_header; - -static struct swsusp_info swsusp_info; - -/* - * Saving part... 
- */ - -static unsigned short root_swap = 0xffff; - -static int mark_swapfiles(swp_entry_t start) -{ - int error; - - rw_swap_page_sync(READ, - swp_entry(root_swap, 0), - virt_to_page((unsigned long)&swsusp_header)); - if (!memcmp("SWAP-SPACE",swsusp_header.sig, 10) || - !memcmp("SWAPSPACE2",swsusp_header.sig, 10)) { - memcpy(swsusp_header.orig_sig,swsusp_header.sig, 10); - memcpy(swsusp_header.sig,SWSUSP_SIG, 10); - swsusp_header.image = start; - error = rw_swap_page_sync(WRITE, - swp_entry(root_swap, 0), - virt_to_page((unsigned long) - &swsusp_header)); - } else { - pr_debug("swsusp: Partition is not swap space.\n"); - error = -ENODEV; - } - return error; -} - -/* - * Check whether the swap device is the specified resume - * device, irrespective of whether they are specified by - * identical names. - * - * (Thus, device inode aliasing is allowed. You can say /dev/hda4 - * instead of /dev/ide/host0/bus0/target0/lun0/part4 [if using devfs] - * and they'll be considered the same device. This is *necessary* for - * devfs, since the resume code can only recognize the form /dev/hda4, - * but the suspend code would see the long name.) - */ -static inline int is_resume_device(const struct swap_info_struct *swap_info) -{ - struct file *file = swap_info->swap_file; - struct inode *inode = file->f_dentry->d_inode; - - return S_ISBLK(inode->i_mode) && - swsusp_resume_device == MKDEV(imajor(inode), iminor(inode)); -} - -static int swsusp_swap_check(void) /* This is called before saving image */ -{ - int i; - - spin_lock(&swap_lock); - for (i = 0; i < MAX_SWAPFILES; i++) { - if (!(swap_info[i].flags & SWP_WRITEOK)) - continue; - if (!swsusp_resume_device || is_resume_device(swap_info + i)) { - spin_unlock(&swap_lock); - root_swap = i; - return 0; - } - } - spin_unlock(&swap_lock); - return -ENODEV; -} - -/** - * write_page - Write one page to a fresh swap location. - * @addr: Address we're writing. - * @loc: Place to store the entry we used. - * - * Allocate a new swap entry and 'sync' it. Note we discard -EIO - * errors. That is an artifact left over from swsusp. It did not - * check the return of rw_swap_page_sync() at all, since most pages - * written back to swap would return -EIO. - * This is a partial improvement, since we will at least return other - * errors, though we need to eventually fix the damn code. - */ -static int write_page(unsigned long addr, swp_entry_t *loc) -{ - swp_entry_t entry; - int error = -ENOSPC; - - entry = get_swap_page_of_type(root_swap); - if (swp_offset(entry)) { - error = rw_swap_page_sync(WRITE, entry, virt_to_page(addr)); - if (!error || error == -EIO) - *loc = entry; - } - return error; -} - /** - * Swap map-handling functions - * - * The swap map is a data structure used for keeping track of each page - * written to the swap. It consists of many swap_map_page structures - * that contain each an array of MAP_PAGE_SIZE swap entries. - * These structures are linked together with the help of either the - * .next (in memory) or the .next_swap (in swap) member. + * The following functions are used for tracing the allocated + * swap pages, so that they can be freed in case of an error. * - * The swap map is created during suspend. At that time we need to keep - * it in memory, because we have to free all of the allocated swap - * entries if an error occurs. The memory needed is preallocated - * so that we know in advance if there's enough of it. 
- * - * The first swap_map_page structure is filled with the swap entries that - * correspond to the first MAP_PAGE_SIZE data pages written to swap and - * so on. After the all of the data pages have been written, the order - * of the swap_map_page structures in the map is reversed so that they - * can be read from swap in the original order. This causes the data - * pages to be loaded in exactly the same order in which they have been - * saved. - * - * During resume we only need to use one swap_map_page structure - * at a time, which means that we only need to use two memory pages for - * reading the image - one for reading the swap_map_page structures - * and the second for reading the data pages from swap. + * The functions operate on a linked bitmap structure defined + * in power.h */ -#define MAP_PAGE_SIZE ((PAGE_SIZE - sizeof(swp_entry_t) - sizeof(void *)) \ - / sizeof(swp_entry_t)) - -struct swap_map_page { - swp_entry_t entries[MAP_PAGE_SIZE]; - swp_entry_t next_swap; - struct swap_map_page *next; -}; - -static inline void free_swap_map(struct swap_map_page *swap_map) +void free_bitmap(struct bitmap_page *bitmap) { - struct swap_map_page *swp; + struct bitmap_page *bp; - while (swap_map) { - swp = swap_map->next; - free_page((unsigned long)swap_map); - swap_map = swp; + while (bitmap) { + bp = bitmap->next; + free_page((unsigned long)bitmap); + bitmap = bp; } } -static struct swap_map_page *alloc_swap_map(unsigned int nr_pages) +struct bitmap_page *alloc_bitmap(unsigned int nr_bits) { - struct swap_map_page *swap_map, *swp; - unsigned n = 0; + struct bitmap_page *bitmap, *bp; + unsigned int n; - if (!nr_pages) + if (!nr_bits) return NULL; - pr_debug("alloc_swap_map(): nr_pages = %d\n", nr_pages); - swap_map = (struct swap_map_page *)get_zeroed_page(GFP_ATOMIC); - swp = swap_map; - for (n = MAP_PAGE_SIZE; n < nr_pages; n += MAP_PAGE_SIZE) { - swp->next = (struct swap_map_page *)get_zeroed_page(GFP_ATOMIC); - swp = swp->next; - if (!swp) { - free_swap_map(swap_map); + bitmap = (struct bitmap_page *)get_zeroed_page(GFP_KERNEL); + bp = bitmap; + for (n = BITMAP_PAGE_BITS; n < nr_bits; n += BITMAP_PAGE_BITS) { + bp->next = (struct bitmap_page *)get_zeroed_page(GFP_KERNEL); + bp = bp->next; + if (!bp) { + free_bitmap(bitmap); return NULL; } } - return swap_map; + return bitmap; } -/** - * reverse_swap_map - reverse the order of pages in the swap map - * @swap_map - */ - -static inline struct swap_map_page *reverse_swap_map(struct swap_map_page *swap_map) -{ - struct swap_map_page *prev, *next; - - prev = NULL; - while (swap_map) { - next = swap_map->next; - swap_map->next = prev; - prev = swap_map; - swap_map = next; - } - return prev; -} - -/** - * free_swap_map_entries - free the swap entries allocated to store - * the swap map @swap_map (this is only called in case of an error) - */ -static inline void free_swap_map_entries(struct swap_map_page *swap_map) -{ - while (swap_map) { - if (swap_map->next_swap.val) - swap_free(swap_map->next_swap); - swap_map = swap_map->next; - } -} - -/** - * save_swap_map - save the swap map used for tracing the data pages - * stored in the swap - */ - -static int save_swap_map(struct swap_map_page *swap_map, swp_entry_t *start) -{ - swp_entry_t entry = (swp_entry_t){0}; - int error; - - while (swap_map) { - swap_map->next_swap = entry; - if ((error = write_page((unsigned long)swap_map, &entry))) - return error; - swap_map = swap_map->next; - } - *start = entry; - return 0; -} - -/** - * free_image_entries - free the swap entries allocated to store - * 
the image data pages (this is only called in case of an error) - */ - -static inline void free_image_entries(struct swap_map_page *swp) +static int bitmap_set(struct bitmap_page *bitmap, unsigned long bit) { - unsigned k; + unsigned int n; - while (swp) { - for (k = 0; k < MAP_PAGE_SIZE; k++) - if (swp->entries[k].val) - swap_free(swp->entries[k]); - swp = swp->next; + n = BITMAP_PAGE_BITS; + while (bitmap && n <= bit) { + n += BITMAP_PAGE_BITS; + bitmap = bitmap->next; } -} - -/** - * The swap_map_handle structure is used for handling the swap map in - * a file-alike way - */ - -struct swap_map_handle { - struct swap_map_page *cur; - unsigned int k; -}; - -static inline void init_swap_map_handle(struct swap_map_handle *handle, - struct swap_map_page *map) -{ - handle->cur = map; - handle->k = 0; -} - -static inline int swap_map_write_page(struct swap_map_handle *handle, - unsigned long addr) -{ - int error; - - error = write_page(addr, handle->cur->entries + handle->k); - if (error) - return error; - if (++handle->k >= MAP_PAGE_SIZE) { - handle->cur = handle->cur->next; - handle->k = 0; + if (!bitmap) + return -EINVAL; + n -= BITMAP_PAGE_BITS; + bit -= n; + n = 0; + while (bit >= BITS_PER_CHUNK) { + bit -= BITS_PER_CHUNK; + n++; } + bitmap->chunks[n] |= (1UL << bit); return 0; } -/** - * save_image_data - save the data pages pointed to by the PBEs - * from the list @pblist using the swap map handle @handle - * (assume there are @nr_pages data pages to save) - */ - -static int save_image_data(struct pbe *pblist, - struct swap_map_handle *handle, - unsigned int nr_pages) -{ - unsigned int m; - struct pbe *p; - int error = 0; - - printk("Saving image data pages (%u pages) ... ", nr_pages); - m = nr_pages / 100; - if (!m) - m = 1; - nr_pages = 0; - for_each_pbe (p, pblist) { - error = swap_map_write_page(handle, p->address); - if (error) - break; - if (!(nr_pages % m)) - printk("\b\b\b\b%3d%%", nr_pages / m); - nr_pages++; - } - if (!error) - printk("\b\b\b\bdone\n"); - return error; -} - -static void dump_info(void) -{ - pr_debug(" swsusp: Version: %u\n",swsusp_info.version_code); - pr_debug(" swsusp: Num Pages: %ld\n",swsusp_info.num_physpages); - pr_debug(" swsusp: UTS Sys: %s\n",swsusp_info.uts.sysname); - pr_debug(" swsusp: UTS Node: %s\n",swsusp_info.uts.nodename); - pr_debug(" swsusp: UTS Release: %s\n",swsusp_info.uts.release); - pr_debug(" swsusp: UTS Version: %s\n",swsusp_info.uts.version); - pr_debug(" swsusp: UTS Machine: %s\n",swsusp_info.uts.machine); - pr_debug(" swsusp: UTS Domain: %s\n",swsusp_info.uts.domainname); - pr_debug(" swsusp: CPUs: %d\n",swsusp_info.cpus); - pr_debug(" swsusp: Image: %ld Pages\n",swsusp_info.image_pages); - pr_debug(" swsusp: Total: %ld Pages\n", swsusp_info.pages); -} - -static void init_header(unsigned int nr_pages) -{ - memset(&swsusp_info, 0, sizeof(swsusp_info)); - swsusp_info.version_code = LINUX_VERSION_CODE; - swsusp_info.num_physpages = num_physpages; - memcpy(&swsusp_info.uts, &system_utsname, sizeof(system_utsname)); - - swsusp_info.cpus = num_online_cpus(); - swsusp_info.image_pages = nr_pages; - swsusp_info.pages = nr_pages + - ((nr_pages * sizeof(long) + PAGE_SIZE - 1) >> PAGE_SHIFT) + 1; -} - -/** - * pack_orig_addresses - the .orig_address fields of the PBEs from the - * list starting at @pbe are stored in the array @buf[] (1 page) - */ - -static inline struct pbe *pack_orig_addresses(unsigned long *buf, - struct pbe *pbe) -{ - int j; - - for (j = 0; j < PAGE_SIZE / sizeof(long) && pbe; j++) { - buf[j] = pbe->orig_address; - pbe = 
pbe->next; - } - if (!pbe) - for (; j < PAGE_SIZE / sizeof(long); j++) - buf[j] = 0; - return pbe; -} - -/** - * save_image_metadata - save the .orig_address fields of the PBEs - * from the list @pblist using the swap map handle @handle - */ - -static int save_image_metadata(struct pbe *pblist, - struct swap_map_handle *handle) +unsigned long alloc_swap_page(int swap, struct bitmap_page *bitmap) { - unsigned long *buf; - unsigned int n = 0; - struct pbe *p; - int error = 0; + unsigned long offset; - printk("Saving image metadata ... "); - buf = (unsigned long *)get_zeroed_page(GFP_ATOMIC); - if (!buf) - return -ENOMEM; - p = pblist; - while (p) { - p = pack_orig_addresses(buf, p); - error = swap_map_write_page(handle, (unsigned long)buf); - if (error) - break; - n++; + offset = swp_offset(get_swap_page_of_type(swap)); + if (offset) { + if (bitmap_set(bitmap, offset)) { + swap_free(swp_entry(swap, offset)); + offset = 0; + } } - free_page((unsigned long)buf); - if (!error) - printk("done (%u pages saved)\n", n); - return error; + return offset; } -/** - * enough_swap - Make sure we have enough swap to save the image. - * - * Returns TRUE or FALSE after checking the total amount of swap - * space avaiable from the resume partition. - */ - -static int enough_swap(unsigned int nr_pages) +void free_all_swap_pages(int swap, struct bitmap_page *bitmap) { - unsigned int free_swap = swap_info[root_swap].pages - - swap_info[root_swap].inuse_pages; - - pr_debug("swsusp: free swap pages: %u\n", free_swap); - return free_swap > (nr_pages + PAGES_FOR_IO + - (nr_pages + PBES_PER_PAGE - 1) / PBES_PER_PAGE); -} + unsigned int bit, n; + unsigned long test; -/** - * swsusp_write - Write entire image and metadata. - * - * It is important _NOT_ to umount filesystems at this point. We want - * them synced (in case something goes wrong) but we DO not want to mark - * filesystem clean: it is not. (And it does not matter, if we resume - * correctly, we'll mark system clean, anyway.) 
- */ - -int swsusp_write(struct pbe *pblist, unsigned int nr_pages) -{ - struct swap_map_page *swap_map; - struct swap_map_handle handle; - swp_entry_t start; - int error; - - if ((error = swsusp_swap_check())) { - printk(KERN_ERR "swsusp: Cannot find swap device, try swapon -a.\n"); - return error; - } - if (!enough_swap(nr_pages)) { - printk(KERN_ERR "swsusp: Not enough free swap\n"); - return -ENOSPC; + bit = 0; + while (bitmap) { + for (n = 0; n < BITMAP_PAGE_CHUNKS; n++) + for (test = 1UL; test; test <<= 1) { + if (bitmap->chunks[n] & test) + swap_free(swp_entry(swap, bit)); + bit++; + } + bitmap = bitmap->next; } - - init_header(nr_pages); - swap_map = alloc_swap_map(swsusp_info.pages); - if (!swap_map) - return -ENOMEM; - init_swap_map_handle(&handle, swap_map); - - error = swap_map_write_page(&handle, (unsigned long)&swsusp_info); - if (!error) - error = save_image_metadata(pblist, &handle); - if (!error) - error = save_image_data(pblist, &handle, nr_pages); - if (error) - goto Free_image_entries; - - swap_map = reverse_swap_map(swap_map); - error = save_swap_map(swap_map, &start); - if (error) - goto Free_map_entries; - - dump_info(); - printk( "S" ); - error = mark_swapfiles(start); - printk( "|\n" ); - if (error) - goto Free_map_entries; - -Free_swap_map: - free_swap_map(swap_map); - return error; - -Free_map_entries: - free_swap_map_entries(swap_map); -Free_image_entries: - free_image_entries(swap_map); - goto Free_swap_map; } /** @@ -660,379 +271,3 @@ int swsusp_resume(void) local_irq_enable(); return error; } - -/** - * mark_unsafe_pages - mark the pages that cannot be used for storing - * the image during resume, because they conflict with the pages that - * had been used before suspend - */ - -static void mark_unsafe_pages(struct pbe *pblist) -{ - struct zone *zone; - unsigned long zone_pfn; - struct pbe *p; - - if (!pblist) /* a sanity check */ - return; - - /* Clear page flags */ - for_each_zone (zone) { - for (zone_pfn = 0; zone_pfn < zone->spanned_pages; ++zone_pfn) - if (pfn_valid(zone_pfn + zone->zone_start_pfn)) - ClearPageNosaveFree(pfn_to_page(zone_pfn + - zone->zone_start_pfn)); - } - - /* Mark orig addresses */ - for_each_pbe (p, pblist) - SetPageNosaveFree(virt_to_page(p->orig_address)); - -} - -static void copy_page_backup_list(struct pbe *dst, struct pbe *src) -{ - /* We assume both lists contain the same number of elements */ - while (src) { - dst->orig_address = src->orig_address; - dst = dst->next; - src = src->next; - } -} - -/* - * Using bio to read from swap. - * This code requires a bit more work than just using buffer heads - * but, it is the recommended way for 2.5/2.6. - * The following are to signal the beginning and end of I/O. Bios - * finish asynchronously, while we want them to happen synchronously. - * A simple atomic_t, and a wait loop take care of this problem. - */ - -static atomic_t io_done = ATOMIC_INIT(0); - -static int end_io(struct bio *bio, unsigned int num, int err) -{ - if (!test_bit(BIO_UPTODATE, &bio->bi_flags)) - panic("I/O error reading memory image"); - atomic_set(&io_done, 0); - return 0; -} - -static struct block_device *resume_bdev; - -/** - * submit - submit BIO request. - * @rw: READ or WRITE. - * @off physical offset of page. - * @page: page we're reading or writing. - * - * Straight from the textbook - allocate and initialize the bio. - * If we're writing, make sure the page is marked as dirty. - * Then submit it and wait. 
- */ - -static int submit(int rw, pgoff_t page_off, void *page) -{ - int error = 0; - struct bio *bio; - - bio = bio_alloc(GFP_ATOMIC, 1); - if (!bio) - return -ENOMEM; - bio->bi_sector = page_off * (PAGE_SIZE >> 9); - bio->bi_bdev = resume_bdev; - bio->bi_end_io = end_io; - - if (bio_add_page(bio, virt_to_page(page), PAGE_SIZE, 0) < PAGE_SIZE) { - printk("swsusp: ERROR: adding page to bio at %ld\n",page_off); - error = -EFAULT; - goto Done; - } - - - atomic_set(&io_done, 1); - submit_bio(rw | (1 << BIO_RW_SYNC), bio); - while (atomic_read(&io_done)) - yield(); - if (rw == READ) - bio_set_pages_dirty(bio); - Done: - bio_put(bio); - return error; -} - -static int bio_read_page(pgoff_t page_off, void *page) -{ - return submit(READ, page_off, page); -} - -static int bio_write_page(pgoff_t page_off, void *page) -{ - return submit(WRITE, page_off, page); -} - -/** - * The following functions allow us to read data using a swap map - * in a file-alike way - */ - -static inline void release_swap_map_reader(struct swap_map_handle *handle) -{ - if (handle->cur) - free_page((unsigned long)handle->cur); - handle->cur = NULL; -} - -static inline int get_swap_map_reader(struct swap_map_handle *handle, - swp_entry_t start) -{ - int error; - - if (!swp_offset(start)) - return -EINVAL; - handle->cur = (struct swap_map_page *)get_zeroed_page(GFP_ATOMIC); - if (!handle->cur) - return -ENOMEM; - error = bio_read_page(swp_offset(start), handle->cur); - if (error) { - release_swap_map_reader(handle); - return error; - } - handle->k = 0; - return 0; -} - -static inline int swap_map_read_page(struct swap_map_handle *handle, void *buf) -{ - unsigned long offset; - int error; - - if (!handle->cur) - return -EINVAL; - offset = swp_offset(handle->cur->entries[handle->k]); - if (!offset) - return -EINVAL; - error = bio_read_page(offset, buf); - if (error) - return error; - if (++handle->k >= MAP_PAGE_SIZE) { - handle->k = 0; - offset = swp_offset(handle->cur->next_swap); - if (!offset) - release_swap_map_reader(handle); - else - error = bio_read_page(offset, handle->cur); - } - return error; -} - -static int check_header(void) -{ - char *reason = NULL; - - dump_info(); - if (swsusp_info.version_code != LINUX_VERSION_CODE) - reason = "kernel version"; - if (swsusp_info.num_physpages != num_physpages) - reason = "memory size"; - if (strcmp(swsusp_info.uts.sysname,system_utsname.sysname)) - reason = "system type"; - if (strcmp(swsusp_info.uts.release,system_utsname.release)) - reason = "kernel release"; - if (strcmp(swsusp_info.uts.version,system_utsname.version)) - reason = "version"; - if (strcmp(swsusp_info.uts.machine,system_utsname.machine)) - reason = "machine"; - if (reason) { - printk(KERN_ERR "swsusp: Resume mismatch: %s\n", reason); - return -EPERM; - } - return 0; -} - -/** - * load_image_data - load the image data using the swap map handle - * @handle and store them using the page backup list @pblist - * (assume there are @nr_pages pages to load) - */ - -static int load_image_data(struct pbe *pblist, - struct swap_map_handle *handle, - unsigned int nr_pages) -{ - int error; - unsigned int m; - struct pbe *p; - - if (!pblist) - return -EINVAL; - printk("Loading image data pages (%u pages) ... 
", nr_pages); - m = nr_pages / 100; - if (!m) - m = 1; - nr_pages = 0; - p = pblist; - while (p) { - error = swap_map_read_page(handle, (void *)p->address); - if (error) - break; - p = p->next; - if (!(nr_pages % m)) - printk("\b\b\b\b%3d%%", nr_pages / m); - nr_pages++; - } - if (!error) - printk("\b\b\b\bdone\n"); - return error; -} - -/** - * unpack_orig_addresses - copy the elements of @buf[] (1 page) to - * the PBEs in the list starting at @pbe - */ - -static inline struct pbe *unpack_orig_addresses(unsigned long *buf, - struct pbe *pbe) -{ - int j; - - for (j = 0; j < PAGE_SIZE / sizeof(long) && pbe; j++) { - pbe->orig_address = buf[j]; - pbe = pbe->next; - } - return pbe; -} - -/** - * load_image_metadata - load the image metadata using the swap map - * handle @handle and put them into the PBEs in the list @pblist - */ - -static int load_image_metadata(struct pbe *pblist, struct swap_map_handle *handle) -{ - struct pbe *p; - unsigned long *buf; - unsigned int n = 0; - int error = 0; - - printk("Loading image metadata ... "); - buf = (unsigned long *)get_zeroed_page(GFP_ATOMIC); - if (!buf) - return -ENOMEM; - p = pblist; - while (p) { - error = swap_map_read_page(handle, buf); - if (error) - break; - p = unpack_orig_addresses(buf, p); - n++; - } - free_page((unsigned long)buf); - if (!error) - printk("done (%u pages loaded)\n", n); - return error; -} - -int swsusp_read(struct pbe **pblist_ptr) -{ - int error; - struct pbe *p, *pblist; - struct swap_map_handle handle; - unsigned int nr_pages; - - if (IS_ERR(resume_bdev)) { - pr_debug("swsusp: block device not initialised\n"); - return PTR_ERR(resume_bdev); - } - - error = get_swap_map_reader(&handle, swsusp_header.image); - if (!error) - error = swap_map_read_page(&handle, &swsusp_info); - if (!error) - error = check_header(); - if (error) - return error; - nr_pages = swsusp_info.image_pages; - p = alloc_pagedir(nr_pages, GFP_ATOMIC, 0); - if (!p) - return -ENOMEM; - error = load_image_metadata(p, &handle); - if (!error) { - mark_unsafe_pages(p); - pblist = alloc_pagedir(nr_pages, GFP_ATOMIC, 1); - if (pblist) - copy_page_backup_list(pblist, p); - free_pagedir(p); - if (!pblist) - error = -ENOMEM; - - /* Allocate memory for the image and read the data from swap */ - if (!error) - error = alloc_data_pages(pblist, GFP_ATOMIC, 1); - if (!error) { - release_eaten_pages(); - error = load_image_data(pblist, &handle, nr_pages); - } - if (!error) - *pblist_ptr = pblist; - } - release_swap_map_reader(&handle); - - blkdev_put(resume_bdev); - - if (!error) - pr_debug("swsusp: Reading resume file was successful\n"); - else - pr_debug("swsusp: Error %d resuming\n", error); - return error; -} - -/** - * swsusp_check - Check for swsusp signature in the resume device - */ - -int swsusp_check(void) -{ - int error; - - resume_bdev = open_by_devnum(swsusp_resume_device, FMODE_READ); - if (!IS_ERR(resume_bdev)) { - set_blocksize(resume_bdev, PAGE_SIZE); - memset(&swsusp_header, 0, sizeof(swsusp_header)); - if ((error = bio_read_page(0, &swsusp_header))) - return error; - if (!memcmp(SWSUSP_SIG, swsusp_header.sig, 10)) { - memcpy(swsusp_header.sig, swsusp_header.orig_sig, 10); - /* Reset swap signature now */ - error = bio_write_page(0, &swsusp_header); - } else { - return -EINVAL; - } - if (error) - blkdev_put(resume_bdev); - else - pr_debug("swsusp: Signature found, resuming\n"); - } else { - error = PTR_ERR(resume_bdev); - } - - if (error) - pr_debug("swsusp: Error %d check for resume file\n", error); - - return error; -} - -/** - * swsusp_close - 
close swap device. - */ - -void swsusp_close(void) -{ - if (IS_ERR(resume_bdev)) { - pr_debug("swsusp: block device not initialised\n"); - return; - } - - blkdev_put(resume_bdev); -} diff --git a/kernel/power/user.c b/kernel/power/user.c new file mode 100644 index 00000000000..3f1539fbe48 --- /dev/null +++ b/kernel/power/user.c @@ -0,0 +1,333 @@ +/* + * linux/kernel/power/user.c + * + * This file provides the user space interface for software suspend/resume. + * + * Copyright (C) 2006 Rafael J. Wysocki <rjw@sisk.pl> + * + * This file is released under the GPLv2. + * + */ + +#include <linux/suspend.h> +#include <linux/syscalls.h> +#include <linux/string.h> +#include <linux/device.h> +#include <linux/miscdevice.h> +#include <linux/mm.h> +#include <linux/swap.h> +#include <linux/swapops.h> +#include <linux/pm.h> +#include <linux/fs.h> + +#include <asm/uaccess.h> + +#include "power.h" + +#define SNAPSHOT_MINOR 231 + +static struct snapshot_data { + struct snapshot_handle handle; + int swap; + struct bitmap_page *bitmap; + int mode; + char frozen; + char ready; +} snapshot_state; + +static atomic_t device_available = ATOMIC_INIT(1); + +static int snapshot_open(struct inode *inode, struct file *filp) +{ + struct snapshot_data *data; + + if (!atomic_add_unless(&device_available, -1, 0)) + return -EBUSY; + + if ((filp->f_flags & O_ACCMODE) == O_RDWR) + return -ENOSYS; + + nonseekable_open(inode, filp); + data = &snapshot_state; + filp->private_data = data; + memset(&data->handle, 0, sizeof(struct snapshot_handle)); + if ((filp->f_flags & O_ACCMODE) == O_RDONLY) { + data->swap = swsusp_resume_device ? swap_type_of(swsusp_resume_device) : -1; + data->mode = O_RDONLY; + } else { + data->swap = -1; + data->mode = O_WRONLY; + } + data->bitmap = NULL; + data->frozen = 0; + data->ready = 0; + + return 0; +} + +static int snapshot_release(struct inode *inode, struct file *filp) +{ + struct snapshot_data *data; + + swsusp_free(); + data = filp->private_data; + free_all_swap_pages(data->swap, data->bitmap); + free_bitmap(data->bitmap); + if (data->frozen) { + down(&pm_sem); + thaw_processes(); + enable_nonboot_cpus(); + up(&pm_sem); + } + atomic_inc(&device_available); + return 0; +} + +static ssize_t snapshot_read(struct file *filp, char __user *buf, + size_t count, loff_t *offp) +{ + struct snapshot_data *data; + ssize_t res; + + data = filp->private_data; + res = snapshot_read_next(&data->handle, count); + if (res > 0) { + if (copy_to_user(buf, data_of(data->handle), res)) + res = -EFAULT; + else + *offp = data->handle.offset; + } + return res; +} + +static ssize_t snapshot_write(struct file *filp, const char __user *buf, + size_t count, loff_t *offp) +{ + struct snapshot_data *data; + ssize_t res; + + data = filp->private_data; + res = snapshot_write_next(&data->handle, count); + if (res > 0) { + if (copy_from_user(data_of(data->handle), buf, res)) + res = -EFAULT; + else + *offp = data->handle.offset; + } + return res; +} + +static int snapshot_ioctl(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg) +{ + int error = 0; + struct snapshot_data *data; + loff_t offset, avail; + + if (_IOC_TYPE(cmd) != SNAPSHOT_IOC_MAGIC) + return -ENOTTY; + if (_IOC_NR(cmd) > SNAPSHOT_IOC_MAXNR) + return -ENOTTY; + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + + data = filp->private_data; + + switch (cmd) { + + case SNAPSHOT_FREEZE: + if (data->frozen) + break; + down(&pm_sem); + disable_nonboot_cpus(); + if (freeze_processes()) { + thaw_processes(); + enable_nonboot_cpus(); + error = -EBUSY; 
+ } + up(&pm_sem); + if (!error) + data->frozen = 1; + break; + + case SNAPSHOT_UNFREEZE: + if (!data->frozen) + break; + down(&pm_sem); + thaw_processes(); + enable_nonboot_cpus(); + up(&pm_sem); + data->frozen = 0; + break; + + case SNAPSHOT_ATOMIC_SNAPSHOT: + if (data->mode != O_RDONLY || !data->frozen || data->ready) { + error = -EPERM; + break; + } + down(&pm_sem); + /* Free memory before shutting down devices. */ + error = swsusp_shrink_memory(); + if (!error) { + error = device_suspend(PMSG_FREEZE); + if (!error) { + in_suspend = 1; + error = swsusp_suspend(); + device_resume(); + } + } + up(&pm_sem); + if (!error) + error = put_user(in_suspend, (unsigned int __user *)arg); + if (!error) + data->ready = 1; + break; + + case SNAPSHOT_ATOMIC_RESTORE: + if (data->mode != O_WRONLY || !data->frozen || + !snapshot_image_loaded(&data->handle)) { + error = -EPERM; + break; + } + down(&pm_sem); + pm_prepare_console(); + error = device_suspend(PMSG_FREEZE); + if (!error) { + error = swsusp_resume(); + device_resume(); + } + pm_restore_console(); + up(&pm_sem); + break; + + case SNAPSHOT_FREE: + swsusp_free(); + memset(&data->handle, 0, sizeof(struct snapshot_handle)); + data->ready = 0; + break; + + case SNAPSHOT_SET_IMAGE_SIZE: + image_size = arg; + break; + + case SNAPSHOT_AVAIL_SWAP: + avail = count_swap_pages(data->swap, 1); + avail <<= PAGE_SHIFT; + error = put_user(avail, (loff_t __user *)arg); + break; + + case SNAPSHOT_GET_SWAP_PAGE: + if (data->swap < 0 || data->swap >= MAX_SWAPFILES) { + error = -ENODEV; + break; + } + if (!data->bitmap) { + data->bitmap = alloc_bitmap(count_swap_pages(data->swap, 0)); + if (!data->bitmap) { + error = -ENOMEM; + break; + } + } + offset = alloc_swap_page(data->swap, data->bitmap); + if (offset) { + offset <<= PAGE_SHIFT; + error = put_user(offset, (loff_t __user *)arg); + } else { + error = -ENOSPC; + } + break; + + case SNAPSHOT_FREE_SWAP_PAGES: + if (data->swap < 0 || data->swap >= MAX_SWAPFILES) { + error = -ENODEV; + break; + } + free_all_swap_pages(data->swap, data->bitmap); + free_bitmap(data->bitmap); + data->bitmap = NULL; + break; + + case SNAPSHOT_SET_SWAP_FILE: + if (!data->bitmap) { + /* + * User space encodes device types as two-byte values, + * so we need to recode them + */ + if (old_decode_dev(arg)) { + data->swap = swap_type_of(old_decode_dev(arg)); + if (data->swap < 0) + error = -ENODEV; + } else { + data->swap = -1; + error = -EINVAL; + } + } else { + error = -EPERM; + } + break; + + case SNAPSHOT_S2RAM: + if (!data->frozen) { + error = -EPERM; + break; + } + + if (down_trylock(&pm_sem)) { + error = -EBUSY; + break; + } + + if (pm_ops->prepare) { + error = pm_ops->prepare(PM_SUSPEND_MEM); + if (error) + goto OutS3; + } + + /* Put devices to sleep */ + error = device_suspend(PMSG_SUSPEND); + if (error) { + printk(KERN_ERR "Failed to suspend some devices.\n"); + } else { + /* Enter S3, system is already frozen */ + suspend_enter(PM_SUSPEND_MEM); + + /* Wake up devices */ + device_resume(); + } + + if (pm_ops->finish) + pm_ops->finish(PM_SUSPEND_MEM); + +OutS3: + up(&pm_sem); + break; + + default: + error = -ENOTTY; + + } + + return error; +} + +static struct file_operations snapshot_fops = { + .open = snapshot_open, + .release = snapshot_release, + .read = snapshot_read, + .write = snapshot_write, + .llseek = no_llseek, + .ioctl = snapshot_ioctl, +}; + +static struct miscdevice snapshot_device = { + .minor = SNAPSHOT_MINOR, + .name = "snapshot", + .fops = &snapshot_fops, +}; + +static int __init snapshot_device_init(void) +{ + 
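+	/* expose the snapshot interface as a misc character device
+	 * (minor SNAPSHOT_MINOR, typically /dev/snapshot) */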
return misc_register(&snapshot_device); +}; + +device_initcall(snapshot_device_init); diff --git a/kernel/profile.c b/kernel/profile.c index f89248e6d70..ad81f799a9b 100644 --- a/kernel/profile.c +++ b/kernel/profile.c @@ -23,6 +23,7 @@ #include <linux/cpu.h> #include <linux/profile.h> #include <linux/highmem.h> +#include <linux/mutex.h> #include <asm/sections.h> #include <asm/semaphore.h> @@ -44,7 +45,7 @@ static cpumask_t prof_cpu_mask = CPU_MASK_ALL; #ifdef CONFIG_SMP static DEFINE_PER_CPU(struct profile_hit *[2], cpu_profile_hits); static DEFINE_PER_CPU(int, cpu_profile_flip); -static DECLARE_MUTEX(profile_flip_mutex); +static DEFINE_MUTEX(profile_flip_mutex); #endif /* CONFIG_SMP */ static int __init profile_setup(char * str) @@ -243,7 +244,7 @@ static void profile_flip_buffers(void) { int i, j, cpu; - down(&profile_flip_mutex); + mutex_lock(&profile_flip_mutex); j = per_cpu(cpu_profile_flip, get_cpu()); put_cpu(); on_each_cpu(__profile_flip_buffers, NULL, 0, 1); @@ -259,14 +260,14 @@ static void profile_flip_buffers(void) hits[i].hits = hits[i].pc = 0; } } - up(&profile_flip_mutex); + mutex_unlock(&profile_flip_mutex); } static void profile_discard_flip_buffers(void) { int i, cpu; - down(&profile_flip_mutex); + mutex_lock(&profile_flip_mutex); i = per_cpu(cpu_profile_flip, get_cpu()); put_cpu(); on_each_cpu(__profile_flip_buffers, NULL, 0, 1); @@ -274,7 +275,7 @@ static void profile_discard_flip_buffers(void) struct profile_hit *hits = per_cpu(cpu_profile_hits, cpu)[i]; memset(hits, 0, NR_PROFILE_HIT*sizeof(struct profile_hit)); } - up(&profile_flip_mutex); + mutex_unlock(&profile_flip_mutex); } void profile_hit(int type, void *__pc) diff --git a/kernel/rcupdate.c b/kernel/rcupdate.c index fedf5e36975..6df1559b1c0 100644 --- a/kernel/rcupdate.c +++ b/kernel/rcupdate.c @@ -47,15 +47,16 @@ #include <linux/notifier.h> #include <linux/rcupdate.h> #include <linux/cpu.h> +#include <linux/mutex.h> /* Definition for rcupdate control block. 
*/ -struct rcu_ctrlblk rcu_ctrlblk = { +static struct rcu_ctrlblk rcu_ctrlblk = { .cur = -300, .completed = -300, .lock = SPIN_LOCK_UNLOCKED, .cpumask = CPU_MASK_NONE, }; -struct rcu_ctrlblk rcu_bh_ctrlblk = { +static struct rcu_ctrlblk rcu_bh_ctrlblk = { .cur = -300, .completed = -300, .lock = SPIN_LOCK_UNLOCKED, @@ -75,7 +76,7 @@ static int rsinterval = 1000; #endif static atomic_t rcu_barrier_cpu_count; -static struct semaphore rcu_barrier_sema; +static DEFINE_MUTEX(rcu_barrier_mutex); static struct completion rcu_barrier_completion; #ifdef CONFIG_SMP @@ -207,13 +208,13 @@ static void rcu_barrier_func(void *notused) void rcu_barrier(void) { BUG_ON(in_interrupt()); - /* Take cpucontrol semaphore to protect against CPU hotplug */ - down(&rcu_barrier_sema); + /* Take cpucontrol mutex to protect against CPU hotplug */ + mutex_lock(&rcu_barrier_mutex); init_completion(&rcu_barrier_completion); atomic_set(&rcu_barrier_cpu_count, 0); on_each_cpu(rcu_barrier_func, NULL, 0, 1); wait_for_completion(&rcu_barrier_completion); - up(&rcu_barrier_sema); + mutex_unlock(&rcu_barrier_mutex); } EXPORT_SYMBOL_GPL(rcu_barrier); @@ -549,7 +550,6 @@ static struct notifier_block __devinitdata rcu_nb = { */ void __init rcu_init(void) { - sema_init(&rcu_barrier_sema, 1); rcu_cpu_notify(&rcu_nb, CPU_UP_PREPARE, (void *)(long)smp_processor_id()); /* Register notifier for non-boot CPUs */ diff --git a/kernel/sched.c b/kernel/sched.c index 6b6e0d70eb3..7ffaabd64f8 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -237,6 +237,7 @@ struct runqueue { task_t *migration_thread; struct list_head migration_queue; + int cpu; #endif #ifdef CONFIG_SCHEDSTATS @@ -1654,6 +1655,9 @@ unsigned long nr_iowait(void) /* * double_rq_lock - safely lock two runqueues * + * We must take them in cpu order to match code in + * dependent_sleeper and wake_dependent_sleeper. + * * Note this does not disable interrupts like task_rq_lock, * you need to do so manually before calling. 
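+ *
+ * Using one fixed order (here: cpu index, the same order that
+ * dependent_sleeper and wake_dependent_sleeper use) is what prevents
+ * an ABBA deadlock between the two code paths.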
*/ @@ -1665,7 +1669,7 @@ static void double_rq_lock(runqueue_t *rq1, runqueue_t *rq2) spin_lock(&rq1->lock); __acquire(rq2->lock); /* Fake it out ;) */ } else { - if (rq1 < rq2) { + if (rq1->cpu < rq2->cpu) { spin_lock(&rq1->lock); spin_lock(&rq2->lock); } else { @@ -1701,7 +1705,7 @@ static void double_lock_balance(runqueue_t *this_rq, runqueue_t *busiest) __acquires(this_rq->lock) { if (unlikely(!spin_trylock(&busiest->lock))) { - if (busiest < this_rq) { + if (busiest->cpu < this_rq->cpu) { spin_unlock(&this_rq->lock); spin_lock(&busiest->lock); spin_lock(&this_rq->lock); @@ -2869,7 +2873,7 @@ asmlinkage void __sched schedule(void) */ if (likely(!current->exit_state)) { if (unlikely(in_atomic())) { - printk(KERN_ERR "scheduling while atomic: " + printk(KERN_ERR "BUG: scheduling while atomic: " "%s/0x%08x/%d\n", current->comm, preempt_count(), current->pid); dump_stack(); @@ -6029,6 +6033,7 @@ void __init sched_init(void) rq->push_cpu = 0; rq->migration_thread = NULL; INIT_LIST_HEAD(&rq->migration_queue); + rq->cpu = i; #endif atomic_set(&rq->nr_iowait, 0); @@ -6069,7 +6074,7 @@ void __might_sleep(char *file, int line) if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy) return; prev_jiffy = jiffies; - printk(KERN_ERR "Debug: sleeping function called from invalid" + printk(KERN_ERR "BUG: sleeping function called from invalid" " context at %s:%d\n", file, line); printk("in_atomic():%d, irqs_disabled():%d\n", in_atomic(), irqs_disabled()); diff --git a/kernel/signal.c b/kernel/signal.c index ea154104a00..75f7341b0c3 100644 --- a/kernel/signal.c +++ b/kernel/signal.c @@ -1922,6 +1922,8 @@ int get_signal_to_deliver(siginfo_t *info, struct k_sigaction *return_ka, sigset_t *mask = ¤t->blocked; int signr = 0; + try_to_freeze(); + relock: spin_lock_irq(¤t->sighand->siglock); for (;;) { @@ -2099,10 +2101,11 @@ long do_no_restart_syscall(struct restart_block *param) int sigprocmask(int how, sigset_t *set, sigset_t *oldset) { int error; - sigset_t old_block; spin_lock_irq(¤t->sighand->siglock); - old_block = current->blocked; + if (oldset) + *oldset = current->blocked; + error = 0; switch (how) { case SIG_BLOCK: @@ -2119,8 +2122,7 @@ int sigprocmask(int how, sigset_t *set, sigset_t *oldset) } recalc_sigpending(); spin_unlock_irq(¤t->sighand->siglock); - if (oldset) - *oldset = old_block; + return error; } @@ -2307,7 +2309,6 @@ sys_rt_sigtimedwait(const sigset_t __user *uthese, timeout = schedule_timeout_interruptible(timeout); - try_to_freeze(); spin_lock_irq(¤t->sighand->siglock); sig = dequeue_signal(current, &these, &info); current->blocked = current->real_blocked; diff --git a/kernel/spinlock.c b/kernel/spinlock.c index 0375fcd5921..d1b810782bc 100644 --- a/kernel/spinlock.c +++ b/kernel/spinlock.c @@ -179,16 +179,16 @@ EXPORT_SYMBOL(_write_lock); #define BUILD_LOCK_OPS(op, locktype) \ void __lockfunc _##op##_lock(locktype##_t *lock) \ { \ - preempt_disable(); \ for (;;) { \ + preempt_disable(); \ if (likely(_raw_##op##_trylock(lock))) \ break; \ preempt_enable(); \ + \ if (!(lock)->break_lock) \ (lock)->break_lock = 1; \ while (!op##_can_lock(lock) && (lock)->break_lock) \ cpu_relax(); \ - preempt_disable(); \ } \ (lock)->break_lock = 0; \ } \ @@ -199,19 +199,18 @@ unsigned long __lockfunc _##op##_lock_irqsave(locktype##_t *lock) \ { \ unsigned long flags; \ \ - preempt_disable(); \ for (;;) { \ + preempt_disable(); \ local_irq_save(flags); \ if (likely(_raw_##op##_trylock(lock))) \ break; \ local_irq_restore(flags); \ - \ preempt_enable(); \ + \ if (!(lock)->break_lock) \ 
(lock)->break_lock = 1; \ while (!op##_can_lock(lock) && (lock)->break_lock) \ cpu_relax(); \ - preempt_disable(); \ } \ (lock)->break_lock = 0; \ return flags; \ diff --git a/kernel/sys.c b/kernel/sys.c index f91218a5463..c0fcad9f826 100644 --- a/kernel/sys.c +++ b/kernel/sys.c @@ -1227,7 +1227,7 @@ asmlinkage long sys_setsid(void) struct pid *pid; int err = -EPERM; - down(&tty_sem); + mutex_lock(&tty_mutex); write_lock_irq(&tasklist_lock); pid = find_pid(PIDTYPE_PGID, group_leader->pid); @@ -1241,7 +1241,7 @@ asmlinkage long sys_setsid(void) err = process_group(group_leader); out: write_unlock_irq(&tasklist_lock); - up(&tty_sem); + mutex_unlock(&tty_mutex); return err; } @@ -1677,9 +1677,6 @@ asmlinkage long sys_setrlimit(unsigned int resource, struct rlimit __user *rlim) * a lot simpler! (Which we're not doing right now because we're not * measuring them yet). * - * This expects to be called with tasklist_lock read-locked or better, - * and the siglock not locked. It may momentarily take the siglock. - * * When sampling multiple threads for RUSAGE_SELF, under SMP we might have * races with threads incrementing their own counters. But since word * reads are atomic, we either get new values or old values and we don't @@ -1687,6 +1684,25 @@ asmlinkage long sys_setrlimit(unsigned int resource, struct rlimit __user *rlim) * the c* fields from p->signal from races with exit.c updating those * fields when reaping, so a sample either gets all the additions of a * given child after it's reaped, or none so this sample is before reaping. + * + * tasklist_lock locking optimisation: + * If we are current and single threaded, we do not need to take the tasklist + * lock or the siglock. No one else can take our signal_struct away, + * no one else can reap the children to update signal->c* counters, and + * no one else can race with the signal-> fields. + * If we do not take the tasklist_lock, the signal-> fields could be read + * out of order while another thread was just exiting. So we place a + * read memory barrier when we avoid the lock. On the writer side, + * write memory barrier is implied in __exit_signal as __exit_signal releases + * the siglock spinlock after updating the signal-> fields. + * + * We don't really need the siglock when we access the non c* fields + * of the signal_struct (for RUSAGE_SELF) even in multithreaded + * case, since we take the tasklist lock for read and the non c* signal-> + * fields are updated only in __exit_signal, which is called with + * tasklist_lock taken for write, hence these two threads cannot execute + * concurrently. 
+ * */ static void k_getrusage(struct task_struct *p, int who, struct rusage *r) @@ -1694,13 +1710,23 @@ static void k_getrusage(struct task_struct *p, int who, struct rusage *r) struct task_struct *t; unsigned long flags; cputime_t utime, stime; + int need_lock = 0; memset((char *) r, 0, sizeof *r); + utime = stime = cputime_zero; - if (unlikely(!p->signal)) - return; + if (p != current || !thread_group_empty(p)) + need_lock = 1; - utime = stime = cputime_zero; + if (need_lock) { + read_lock(&tasklist_lock); + if (unlikely(!p->signal)) { + read_unlock(&tasklist_lock); + return; + } + } else + /* See locking comments above */ + smp_rmb(); switch (who) { case RUSAGE_BOTH: @@ -1740,6 +1766,8 @@ static void k_getrusage(struct task_struct *p, int who, struct rusage *r) BUG(); } + if (need_lock) + read_unlock(&tasklist_lock); cputime_to_timeval(utime, &r->ru_utime); cputime_to_timeval(stime, &r->ru_stime); } @@ -1747,9 +1775,7 @@ static void k_getrusage(struct task_struct *p, int who, struct rusage *r) int getrusage(struct task_struct *p, int who, struct rusage __user *ru) { struct rusage r; - read_lock(&tasklist_lock); k_getrusage(p, who, &r); - read_unlock(&tasklist_lock); return copy_to_user(ru, &r, sizeof(r)) ? -EFAULT : 0; } diff --git a/lib/reed_solomon/reed_solomon.c b/lib/reed_solomon/reed_solomon.c index f5fef948a41..f8ac9fa95de 100644 --- a/lib/reed_solomon/reed_solomon.c +++ b/lib/reed_solomon/reed_solomon.c @@ -44,12 +44,13 @@ #include <linux/module.h> #include <linux/rslib.h> #include <linux/slab.h> +#include <linux/mutex.h> #include <asm/semaphore.h> /* This list holds all currently allocated rs control structures */ static LIST_HEAD (rslist); /* Protection for the list */ -static DECLARE_MUTEX(rslistlock); +static DEFINE_MUTEX(rslistlock); /** * rs_init - Initialize a Reed-Solomon codec @@ -161,7 +162,7 @@ errrs: */ void free_rs(struct rs_control *rs) { - down(&rslistlock); + mutex_lock(&rslistlock); rs->users--; if(!rs->users) { list_del(&rs->list); @@ -170,7 +171,7 @@ void free_rs(struct rs_control *rs) kfree(rs->genpoly); kfree(rs); } - up(&rslistlock); + mutex_unlock(&rslistlock); } /** @@ -201,7 +202,7 @@ struct rs_control *init_rs(int symsize, int gfpoly, int fcr, int prim, if (nroots < 0 || nroots >= (1<<symsize)) return NULL; - down(&rslistlock); + mutex_lock(&rslistlock); /* Walk through the list and look for a matching entry */ list_for_each(tmp, &rslist) { @@ -228,7 +229,7 @@ struct rs_control *init_rs(int symsize, int gfpoly, int fcr, int prim, list_add(&rs->list, &rslist); } out: - up(&rslistlock); + mutex_unlock(&rslistlock); return rs; } diff --git a/mm/readahead.c b/mm/readahead.c index 301b36c4a0c..0f142a40984 100644 --- a/mm/readahead.c +++ b/mm/readahead.c @@ -555,6 +555,7 @@ recheck: out: return ra->prev_page + 1; } +EXPORT_SYMBOL_GPL(page_cache_readahead); /* * handle_ra_miss() is called when it is known that a page which should have diff --git a/mm/swapfile.c b/mm/swapfile.c index 365ed6ff182..39aa9d12961 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c @@ -45,7 +45,7 @@ static const char Unused_offset[] = "Unused swap offset entry "; struct swap_list_t swap_list = {-1, -1}; -struct swap_info_struct swap_info[MAX_SWAPFILES]; +static struct swap_info_struct swap_info[MAX_SWAPFILES]; static DEFINE_MUTEX(swapon_mutex); @@ -417,6 +417,61 @@ void free_swap_and_cache(swp_entry_t entry) } } +#ifdef CONFIG_SOFTWARE_SUSPEND +/* + * Find the swap type that corresponds to given device (if any) + * + * This is needed for software suspend and is done in such a way that 
inode + * aliasing is allowed. + */ +int swap_type_of(dev_t device) +{ + int i; + + spin_lock(&swap_lock); + for (i = 0; i < nr_swapfiles; i++) { + struct inode *inode; + + if (!(swap_info[i].flags & SWP_WRITEOK)) + continue; + if (!device) { + spin_unlock(&swap_lock); + return i; + } + inode = swap_info->swap_file->f_dentry->d_inode; + if (S_ISBLK(inode->i_mode) && + device == MKDEV(imajor(inode), iminor(inode))) { + spin_unlock(&swap_lock); + return i; + } + } + spin_unlock(&swap_lock); + return -ENODEV; +} + +/* + * Return either the total number of swap pages of given type, or the number + * of free pages of that type (depending on @free) + * + * This is needed for software suspend + */ +unsigned int count_swap_pages(int type, int free) +{ + unsigned int n = 0; + + if (type < nr_swapfiles) { + spin_lock(&swap_lock); + if (swap_info[type].flags & SWP_WRITEOK) { + n = swap_info[type].pages; + if (free) + n -= swap_info[type].inuse_pages; + } + spin_unlock(&swap_lock); + } + return n; +} +#endif + /* * No need to decide whether this PTE shares the swap entry with others, * just let do_wp_page work it out if a write is requested later - to diff --git a/net/core/pktgen.c b/net/core/pktgen.c index 8eedaedba74..c23e9c06ee2 100644 --- a/net/core/pktgen.c +++ b/net/core/pktgen.c @@ -106,6 +106,9 @@ * * interruptible_sleep_on_timeout() replaced Nishanth Aravamudan <nacc@us.ibm.com> * 050103 + * + * MPLS support by Steven Whitehouse <steve@chygwyn.com> + * */ #include <linux/sys.h> #include <linux/types.h> @@ -154,7 +157,7 @@ #include <asm/div64.h> /* do_div */ #include <asm/timex.h> -#define VERSION "pktgen v2.66: Packet Generator for packet performance testing.\n" +#define VERSION "pktgen v2.67: Packet Generator for packet performance testing.\n" /* #define PG_DEBUG(a) a */ #define PG_DEBUG(a) @@ -162,6 +165,8 @@ /* The buckets are exponential in 'width' */ #define LAT_BUCKETS_MAX 32 #define IP_NAME_SZ 32 +#define MAX_MPLS_LABELS 16 /* This is the max label stack depth */ +#define MPLS_STACK_BOTTOM __constant_htonl(0x00000100) /* Device flag bits */ #define F_IPSRC_RND (1<<0) /* IP-Src Random */ @@ -172,6 +177,7 @@ #define F_MACDST_RND (1<<5) /* MAC-Dst Random */ #define F_TXSIZE_RND (1<<6) /* Transmit size is random */ #define F_IPV6 (1<<7) /* Interface in IPV6 Mode */ +#define F_MPLS_RND (1<<8) /* Random MPLS labels */ /* Thread control flag bits */ #define T_TERMINATE (1<<0) @@ -278,6 +284,10 @@ struct pktgen_dev { __u16 udp_dst_min; /* inclusive, dest UDP port */ __u16 udp_dst_max; /* exclusive, dest UDP port */ + /* MPLS */ + unsigned nr_labels; /* Depth of stack, 0 = no MPLS */ + __be32 labels[MAX_MPLS_LABELS]; + __u32 src_mac_count; /* How many MACs to iterate through */ __u32 dst_mac_count; /* How many MACs to iterate through */ @@ -623,9 +633,19 @@ static int pktgen_if_show(struct seq_file *seq, void *v) pkt_dev->udp_dst_min, pkt_dev->udp_dst_max); seq_printf(seq, - " src_mac_count: %d dst_mac_count: %d \n Flags: ", + " src_mac_count: %d dst_mac_count: %d\n", pkt_dev->src_mac_count, pkt_dev->dst_mac_count); + if (pkt_dev->nr_labels) { + unsigned i; + seq_printf(seq, " mpls: "); + for(i = 0; i < pkt_dev->nr_labels; i++) + seq_printf(seq, "%08x%s", ntohl(pkt_dev->labels[i]), + i == pkt_dev->nr_labels-1 ? 
"\n" : ", "); + } + + seq_printf(seq, " Flags: "); + if (pkt_dev->flags & F_IPV6) seq_printf(seq, "IPV6 "); @@ -644,6 +664,9 @@ static int pktgen_if_show(struct seq_file *seq, void *v) if (pkt_dev->flags & F_UDPDST_RND) seq_printf(seq, "UDPDST_RND "); + if (pkt_dev->flags & F_MPLS_RND) + seq_printf(seq, "MPLS_RND "); + if (pkt_dev->flags & F_MACSRC_RND) seq_printf(seq, "MACSRC_RND "); @@ -691,6 +714,29 @@ static int pktgen_if_show(struct seq_file *seq, void *v) return 0; } + +static int hex32_arg(const char __user *user_buffer, __u32 *num) +{ + int i = 0; + *num = 0; + + for(; i < 8; i++) { + char c; + *num <<= 4; + if (get_user(c, &user_buffer[i])) + return -EFAULT; + if ((c >= '0') && (c <= '9')) + *num |= c - '0'; + else if ((c >= 'a') && (c <= 'f')) + *num |= c - 'a' + 10; + else if ((c >= 'A') && (c <= 'F')) + *num |= c - 'A' + 10; + else + break; + } + return i; +} + static int count_trail_chars(const char __user * user_buffer, unsigned int maxlen) { @@ -759,6 +805,35 @@ done_str: return i; } +static ssize_t get_labels(const char __user *buffer, struct pktgen_dev *pkt_dev) +{ + unsigned n = 0; + char c; + ssize_t i = 0; + int len; + + pkt_dev->nr_labels = 0; + do { + __u32 tmp; + len = hex32_arg(&buffer[i], &tmp); + if (len <= 0) + return len; + pkt_dev->labels[n] = htonl(tmp); + if (pkt_dev->labels[n] & MPLS_STACK_BOTTOM) + pkt_dev->flags |= F_MPLS_RND; + i += len; + if (get_user(c, &buffer[i])) + return -EFAULT; + i++; + n++; + if (n >= MAX_MPLS_LABELS) + return -E2BIG; + } while(c == ','); + + pkt_dev->nr_labels = n; + return i; +} + static ssize_t pktgen_if_write(struct file *file, const char __user * user_buffer, size_t count, loff_t * offset) @@ -1059,6 +1134,12 @@ static ssize_t pktgen_if_write(struct file *file, else if (strcmp(f, "!MACDST_RND") == 0) pkt_dev->flags &= ~F_MACDST_RND; + else if (strcmp(f, "MPLS_RND") == 0) + pkt_dev->flags |= F_MPLS_RND; + + else if (strcmp(f, "!MPLS_RND") == 0) + pkt_dev->flags &= ~F_MPLS_RND; + else { sprintf(pg_result, "Flag -:%s:- unknown\nAvailable flags, (prepend ! to un-set flag):\n%s", @@ -1354,6 +1435,19 @@ static ssize_t pktgen_if_write(struct file *file, return count; } + if (!strcmp(name, "mpls")) { + unsigned n, offset; + len = get_labels(&user_buffer[i], pkt_dev); + if (len < 0) { return len; } + i += len; + offset = sprintf(pg_result, "OK: mpls="); + for(n = 0; n < pkt_dev->nr_labels; n++) + offset += sprintf(pg_result + offset, + "%08x%s", ntohl(pkt_dev->labels[n]), + n == pkt_dev->nr_labels-1 ? 
"" : ","); + return count; + } + sprintf(pkt_dev->result, "No such parameter \"%s\"", name); return -EINVAL; } @@ -1846,6 +1940,15 @@ static void mod_cur_headers(struct pktgen_dev *pkt_dev) pkt_dev->hh[1] = tmp; } + if (pkt_dev->flags & F_MPLS_RND) { + unsigned i; + for(i = 0; i < pkt_dev->nr_labels; i++) + if (pkt_dev->labels[i] & MPLS_STACK_BOTTOM) + pkt_dev->labels[i] = MPLS_STACK_BOTTOM | + (pktgen_random() & + htonl(0x000fffff)); + } + if (pkt_dev->udp_src_min < pkt_dev->udp_src_max) { if (pkt_dev->flags & F_UDPSRC_RND) pkt_dev->cur_udp_src = @@ -1968,6 +2071,16 @@ static void mod_cur_headers(struct pktgen_dev *pkt_dev) pkt_dev->flows[flow].count++; } +static void mpls_push(__be32 *mpls, struct pktgen_dev *pkt_dev) +{ + unsigned i; + for(i = 0; i < pkt_dev->nr_labels; i++) { + *mpls++ = pkt_dev->labels[i] & ~MPLS_STACK_BOTTOM; + } + mpls--; + *mpls |= MPLS_STACK_BOTTOM; +} + static struct sk_buff *fill_packet_ipv4(struct net_device *odev, struct pktgen_dev *pkt_dev) { @@ -1977,6 +2090,11 @@ static struct sk_buff *fill_packet_ipv4(struct net_device *odev, int datalen, iplen; struct iphdr *iph; struct pktgen_hdr *pgh = NULL; + __be16 protocol = __constant_htons(ETH_P_IP); + __be32 *mpls; + + if (pkt_dev->nr_labels) + protocol = __constant_htons(ETH_P_MPLS_UC); /* Update any of the values, used when we're incrementing various * fields. @@ -1984,7 +2102,8 @@ static struct sk_buff *fill_packet_ipv4(struct net_device *odev, mod_cur_headers(pkt_dev); datalen = (odev->hard_header_len + 16) & ~0xf; - skb = alloc_skb(pkt_dev->cur_pkt_size + 64 + datalen, GFP_ATOMIC); + skb = alloc_skb(pkt_dev->cur_pkt_size + 64 + datalen + + pkt_dev->nr_labels*sizeof(u32), GFP_ATOMIC); if (!skb) { sprintf(pkt_dev->result, "No memory"); return NULL; @@ -1994,13 +2113,18 @@ static struct sk_buff *fill_packet_ipv4(struct net_device *odev, /* Reserve for ethernet and IP header */ eth = (__u8 *) skb_push(skb, 14); + mpls = (__be32 *)skb_put(skb, pkt_dev->nr_labels*sizeof(__u32)); + if (pkt_dev->nr_labels) + mpls_push(mpls, pkt_dev); iph = (struct iphdr *)skb_put(skb, sizeof(struct iphdr)); udph = (struct udphdr *)skb_put(skb, sizeof(struct udphdr)); memcpy(eth, pkt_dev->hh, 12); - *(u16 *) & eth[12] = __constant_htons(ETH_P_IP); + *(u16 *) & eth[12] = protocol; - datalen = pkt_dev->cur_pkt_size - 14 - 20 - 8; /* Eth + IPh + UDPh */ + /* Eth + IPh + UDPh + mpls */ + datalen = pkt_dev->cur_pkt_size - 14 - 20 - 8 - + pkt_dev->nr_labels*sizeof(u32); if (datalen < sizeof(struct pktgen_hdr)) datalen = sizeof(struct pktgen_hdr); @@ -2021,8 +2145,8 @@ static struct sk_buff *fill_packet_ipv4(struct net_device *odev, iph->tot_len = htons(iplen); iph->check = 0; iph->check = ip_fast_csum((void *)iph, iph->ihl); - skb->protocol = __constant_htons(ETH_P_IP); - skb->mac.raw = ((u8 *) iph) - 14; + skb->protocol = protocol; + skb->mac.raw = ((u8 *) iph) - 14 - pkt_dev->nr_labels*sizeof(u32); skb->dev = odev; skb->pkt_type = PACKET_HOST; @@ -2274,13 +2398,19 @@ static struct sk_buff *fill_packet_ipv6(struct net_device *odev, int datalen; struct ipv6hdr *iph; struct pktgen_hdr *pgh = NULL; + __be16 protocol = __constant_htons(ETH_P_IPV6); + __be32 *mpls; + + if (pkt_dev->nr_labels) + protocol = __constant_htons(ETH_P_MPLS_UC); /* Update any of the values, used when we're incrementing various * fields. 
*/ mod_cur_headers(pkt_dev); - skb = alloc_skb(pkt_dev->cur_pkt_size + 64 + 16, GFP_ATOMIC); + skb = alloc_skb(pkt_dev->cur_pkt_size + 64 + 16 + + pkt_dev->nr_labels*sizeof(u32), GFP_ATOMIC); if (!skb) { sprintf(pkt_dev->result, "No memory"); return NULL; @@ -2290,13 +2420,19 @@ static struct sk_buff *fill_packet_ipv6(struct net_device *odev, /* Reserve for ethernet and IP header */ eth = (__u8 *) skb_push(skb, 14); + mpls = (__be32 *)skb_put(skb, pkt_dev->nr_labels*sizeof(__u32)); + if (pkt_dev->nr_labels) + mpls_push(mpls, pkt_dev); iph = (struct ipv6hdr *)skb_put(skb, sizeof(struct ipv6hdr)); udph = (struct udphdr *)skb_put(skb, sizeof(struct udphdr)); memcpy(eth, pkt_dev->hh, 12); *(u16 *) & eth[12] = __constant_htons(ETH_P_IPV6); - datalen = pkt_dev->cur_pkt_size - 14 - sizeof(struct ipv6hdr) - sizeof(struct udphdr); /* Eth + IPh + UDPh */ + /* Eth + IPh + UDPh + mpls */ + datalen = pkt_dev->cur_pkt_size - 14 - + sizeof(struct ipv6hdr) - sizeof(struct udphdr) - + pkt_dev->nr_labels*sizeof(u32); if (datalen < sizeof(struct pktgen_hdr)) { datalen = sizeof(struct pktgen_hdr); @@ -2320,8 +2456,8 @@ static struct sk_buff *fill_packet_ipv6(struct net_device *odev, ipv6_addr_copy(&iph->daddr, &pkt_dev->cur_in6_daddr); ipv6_addr_copy(&iph->saddr, &pkt_dev->cur_in6_saddr); - skb->mac.raw = ((u8 *) iph) - 14; - skb->protocol = __constant_htons(ETH_P_IPV6); + skb->mac.raw = ((u8 *) iph) - 14 - pkt_dev->nr_labels*sizeof(u32); + skb->protocol = protocol; skb->dev = odev; skb->pkt_type = PACKET_HOST; diff --git a/net/ipv4/fib_rules.c b/net/ipv4/fib_rules.c index 768e8f5d7da..ec566f3e66c 100644 --- a/net/ipv4/fib_rules.c +++ b/net/ipv4/fib_rules.c @@ -104,6 +104,8 @@ static struct hlist_head fib_rules; /* writer func called from netlink -- rtnl_sem hold*/ +static void rtmsg_rule(int, struct fib_rule *); + int inet_rtm_delrule(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg) { struct rtattr **rta = arg; @@ -131,6 +133,7 @@ int inet_rtm_delrule(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg) hlist_del_rcu(&r->hlist); r->r_dead = 1; + rtmsg_rule(RTM_DELRULE, r); fib_rule_put(r); err = 0; break; @@ -253,6 +256,7 @@ int inet_rtm_newrule(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg) else hlist_add_before_rcu(&new_r->hlist, &r->hlist); + rtmsg_rule(RTM_NEWRULE, new_r); return 0; } @@ -382,14 +386,14 @@ static struct notifier_block fib_rules_notifier = { static __inline__ int inet_fill_rule(struct sk_buff *skb, struct fib_rule *r, - struct netlink_callback *cb, + u32 pid, u32 seq, int event, unsigned int flags) { struct rtmsg *rtm; struct nlmsghdr *nlh; unsigned char *b = skb->tail; - nlh = NLMSG_NEW_ANSWER(skb, cb, RTM_NEWRULE, sizeof(*rtm), flags); + nlh = NLMSG_NEW(skb, pid, seq, event, sizeof(*rtm), flags); rtm = NLMSG_DATA(nlh); rtm->rtm_family = AF_INET; rtm->rtm_dst_len = r->r_dst_len; @@ -430,6 +434,21 @@ rtattr_failure: /* callers should hold rtnl semaphore */ +static void rtmsg_rule(int event, struct fib_rule *r) +{ + int size = NLMSG_SPACE(sizeof(struct rtmsg) + 128); + struct sk_buff *skb = alloc_skb(size, GFP_KERNEL); + + if (!skb) + netlink_set_err(rtnl, 0, RTNLGRP_IPV4_RULE, ENOBUFS); + else if (inet_fill_rule(skb, r, 0, 0, event, 0) < 0) { + kfree_skb(skb); + netlink_set_err(rtnl, 0, RTNLGRP_IPV4_RULE, EINVAL); + } else { + netlink_broadcast(rtnl, skb, 0, RTNLGRP_IPV4_RULE, GFP_KERNEL); + } +} + int inet_dump_rules(struct sk_buff *skb, struct netlink_callback *cb) { int idx = 0; @@ -442,7 +461,9 @@ int inet_dump_rules(struct sk_buff *skb, struct netlink_callback *cb) if 
(idx < s_idx) continue; - if (inet_fill_rule(skb, r, cb, NLM_F_MULTI) < 0) + if (inet_fill_rule(skb, r, NETLINK_CB(cb->skb).pid, + cb->nlh->nlmsg_seq, + RTM_NEWRULE, NLM_F_MULTI) < 0) break; idx++; } diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c index 4fbc40b13f1..e46048974f3 100644 --- a/net/ipv6/ip6_output.c +++ b/net/ipv6/ip6_output.c @@ -161,7 +161,7 @@ int ip6_output(struct sk_buff *skb) int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl, struct ipv6_txoptions *opt, int ipfragok) { - struct ipv6_pinfo *np = sk ? inet6_sk(sk) : NULL; + struct ipv6_pinfo *np = inet6_sk(sk); struct in6_addr *first_hop = &fl->fl6_dst; struct dst_entry *dst = skb->dst; struct ipv6hdr *hdr; diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c index 2b670479dde..78e052591fa 100644 --- a/net/sched/cls_u32.c +++ b/net/sched/cls_u32.c @@ -347,8 +347,7 @@ static int u32_destroy_key(struct tcf_proto *tp, struct tc_u_knode *n) if (n->ht_down) n->ht_down->refcnt--; #ifdef CONFIG_CLS_U32_PERF - if (n) - kfree(n->pf); + kfree(n->pf); #endif kfree(n); return 0; @@ -680,8 +679,7 @@ static int u32_change(struct tcf_proto *tp, unsigned long base, u32 handle, return 0; } #ifdef CONFIG_CLS_U32_PERF - if (n) - kfree(n->pf); + kfree(n->pf); #endif kfree(n); return err; diff --git a/security/seclvl.c b/security/seclvl.c index 8529ea6f7aa..441beaf1bbc 100644 --- a/security/seclvl.c +++ b/security/seclvl.c @@ -8,6 +8,7 @@ * Copyright (c) 2001 WireX Communications, Inc <chris@wirex.com> * Copyright (c) 2001 Greg Kroah-Hartman <greg@kroah.com> * Copyright (c) 2002 International Business Machines <robb@austin.ibm.com> + * Copyright (c) 2006 Davi E. M. Arnaut <davi.arnaut@gmail.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -31,6 +32,7 @@ #include <linux/kobject.h> #include <linux/crypto.h> #include <asm/scatterlist.h> +#include <linux/scatterlist.h> #include <linux/gfp.h> #include <linux/sysfs.h> @@ -194,35 +196,27 @@ static unsigned char hashedPassword[SHA1_DIGEST_SIZE]; * people... */ static int -plaintext_to_sha1(unsigned char *hash, const char *plaintext, int len) +plaintext_to_sha1(unsigned char *hash, const char *plaintext, unsigned int len) { - char *pgVirtAddr; struct crypto_tfm *tfm; - struct scatterlist sg[1]; + struct scatterlist sg; if (len > PAGE_SIZE) { seclvl_printk(0, KERN_ERR, "Plaintext password too large (%d " "characters). Largest possible is %lu " "bytes.\n", len, PAGE_SIZE); - return -ENOMEM; + return -EINVAL; } tfm = crypto_alloc_tfm("sha1", CRYPTO_TFM_REQ_MAY_SLEEP); if (tfm == NULL) { seclvl_printk(0, KERN_ERR, "Failed to load transform for SHA1\n"); - return -ENOSYS; + return -EINVAL; } - // Just get a new page; don't play around with page boundaries - // and scatterlists. 
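For reference, a minimal sketch (illustrative, not part of the patch) of the single-entry scatterlist idiom the seclvl hash code switches to below: sg_init_one() points one scatterlist entry at an existing kernel buffer, which removes the need for the old bounce page. The names mirror those in plaintext_to_sha1():

	struct scatterlist sg;

	sg_init_one(&sg, plaintext, len);	/* one entry over the caller's buffer */
	crypto_digest_init(tfm);
	crypto_digest_update(tfm, &sg, 1);	/* nsg == 1 */
	crypto_digest_final(tfm, hash);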
- pgVirtAddr = (char *)__get_free_page(GFP_KERNEL); - sg[0].page = virt_to_page(pgVirtAddr); - sg[0].offset = 0; - sg[0].length = len; - strncpy(pgVirtAddr, plaintext, len); + sg_init_one(&sg, (u8 *)plaintext, len); crypto_digest_init(tfm); - crypto_digest_update(tfm, sg, 1); + crypto_digest_update(tfm, &sg, 1); crypto_digest_final(tfm, hash); crypto_free_tfm(tfm); - free_page((unsigned long)pgVirtAddr); return 0; } @@ -234,11 +228,9 @@ static ssize_t passwd_write_file(struct file * file, const char __user * buf, size_t count, loff_t *ppos) { - int i; - unsigned char tmp[SHA1_DIGEST_SIZE]; - char *page; - int rc; + char *p; int len; + unsigned char tmp[SHA1_DIGEST_SIZE]; if (!*passwd && !*sha1_passwd) { seclvl_printk(0, KERN_ERR, "Attempt to password-unlock the " @@ -251,38 +243,39 @@ passwd_write_file(struct file * file, const char __user * buf, return -EINVAL; } - if (count < 0 || count >= PAGE_SIZE) + if (count >= PAGE_SIZE) return -EINVAL; if (*ppos != 0) return -EINVAL; - page = (char *)get_zeroed_page(GFP_KERNEL); - if (!page) + p = kmalloc(count, GFP_KERNEL); + if (!p) return -ENOMEM; len = -EFAULT; - if (copy_from_user(page, buf, count)) + if (copy_from_user(p, buf, count)) goto out; - len = strlen(page); + len = count; /* ``echo "secret" > seclvl/passwd'' includes a newline */ - if (page[len - 1] == '\n') + if (p[len - 1] == '\n') len--; /* Hash the password, then compare the hashed values */ - if ((rc = plaintext_to_sha1(tmp, page, len))) { + if ((len = plaintext_to_sha1(tmp, p, len))) { seclvl_printk(0, KERN_ERR, "Error hashing password: rc = " - "[%d]\n", rc); - return rc; - } - for (i = 0; i < SHA1_DIGEST_SIZE; i++) { - if (hashedPassword[i] != tmp[i]) - return -EPERM; + "[%d]\n", len); + goto out; } + + len = -EPERM; + if (memcmp(hashedPassword, tmp, SHA1_DIGEST_SIZE)) + goto out; + seclvl_printk(0, KERN_INFO, "Password accepted; seclvl reduced to 0.\n"); seclvl = 0; len = count; out: - free_page((unsigned long)page); + kfree (p); return len; } @@ -295,13 +288,11 @@ static struct file_operations passwd_file_ops = { */ static int seclvl_ptrace(struct task_struct *parent, struct task_struct *child) { - if (seclvl >= 0) { - if (child->pid == 1) { - seclvl_printk(1, KERN_WARNING, "Attempt to ptrace " - "the init process dissallowed in " - "secure level %d\n", seclvl); - return -EPERM; - } + if (seclvl >= 0 && child->pid == 1) { + seclvl_printk(1, KERN_WARNING, "Attempt to ptrace " + "the init process dissallowed in " + "secure level %d\n", seclvl); + return -EPERM; } return 0; } @@ -312,55 +303,54 @@ static int seclvl_ptrace(struct task_struct *parent, struct task_struct *child) */ static int seclvl_capable(struct task_struct *tsk, int cap) { + int rc = 0; + /* init can do anything it wants */ if (tsk->pid == 1) return 0; - switch (seclvl) { - case 2: - /* fall through */ - case 1: - if (cap == CAP_LINUX_IMMUTABLE) { + if (seclvl > 0) { + rc = -EPERM; + + if (cap == CAP_LINUX_IMMUTABLE) seclvl_printk(1, KERN_WARNING, "Attempt to modify " "the IMMUTABLE and/or APPEND extended " "attribute on a file with the IMMUTABLE " "and/or APPEND extended attribute set " "denied in seclvl [%d]\n", seclvl); - return -EPERM; - } else if (cap == CAP_SYS_RAWIO) { // Somewhat broad... 
+ else if (cap == CAP_SYS_RAWIO) seclvl_printk(1, KERN_WARNING, "Attempt to perform " "raw I/O while in secure level [%d] " "denied\n", seclvl); - return -EPERM; - } else if (cap == CAP_NET_ADMIN) { + else if (cap == CAP_NET_ADMIN) seclvl_printk(1, KERN_WARNING, "Attempt to perform " "network administrative task while " "in secure level [%d] denied\n", seclvl); - return -EPERM; - } else if (cap == CAP_SETUID) { + else if (cap == CAP_SETUID) seclvl_printk(1, KERN_WARNING, "Attempt to setuid " "while in secure level [%d] denied\n", seclvl); - return -EPERM; - } else if (cap == CAP_SETGID) { + else if (cap == CAP_SETGID) seclvl_printk(1, KERN_WARNING, "Attempt to setgid " "while in secure level [%d] denied\n", seclvl); - } else if (cap == CAP_SYS_MODULE) { + else if (cap == CAP_SYS_MODULE) seclvl_printk(1, KERN_WARNING, "Attempt to perform " "a module operation while in secure " "level [%d] denied\n", seclvl); - return -EPERM; - } - break; - default: - break; + else + rc = 0; } - /* from dummy.c */ - if (cap_is_fs_cap(cap) ? tsk->fsuid == 0 : tsk->euid == 0) - return 0; /* capability granted */ - seclvl_printk(1, KERN_WARNING, "Capability denied\n"); - return -EPERM; /* capability denied */ + + if (!rc) { + if (!(cap_is_fs_cap(cap) ? tsk->fsuid == 0 : tsk->euid == 0)) + rc = -EPERM; + } + + if (rc) + seclvl_printk(1, KERN_WARNING, "Capability denied\n"); + + return rc; } /** @@ -466,12 +456,9 @@ static int seclvl_inode_setattr(struct dentry *dentry, struct iattr *iattr) static void seclvl_file_free_security(struct file *filp) { struct dentry *dentry = filp->f_dentry; - struct inode *inode = NULL; - if (dentry) { - inode = dentry->d_inode; - seclvl_bd_release(inode); - } + if (dentry) + seclvl_bd_release(dentry->d_inode); } /** @@ -479,9 +466,7 @@ static void seclvl_file_free_security(struct file *filp) */ static int seclvl_umount(struct vfsmount *mnt, int flags) { - if (current->pid == 1) - return 0; - if (seclvl == 2) { + if (current->pid != 1 && seclvl == 2) { seclvl_printk(1, KERN_WARNING, "Attempt to unmount in secure " "level %d\n", seclvl); return -EPERM; @@ -505,8 +490,9 @@ static struct security_operations seclvl_ops = { static int processPassword(void) { int rc = 0; - hashedPassword[0] = '\0'; if (*passwd) { + char *p; + if (*sha1_passwd) { seclvl_printk(0, KERN_ERR, "Error: Both " "passwd and sha1_passwd " @@ -514,12 +500,16 @@ static int processPassword(void) "exclusive.\n"); return -EINVAL; } - if ((rc = plaintext_to_sha1(hashedPassword, passwd, - strlen(passwd)))) { + + p = kstrdup(passwd, GFP_KERNEL); + if (p == NULL) + return -ENOMEM; + + if ((rc = plaintext_to_sha1(hashedPassword, p, strlen(p)))) seclvl_printk(0, KERN_ERR, "Error: SHA1 support not " "in kernel\n"); - return rc; - } + + kfree (p); /* All static data goes to the BSS, which zero's the * plaintext password out for us. 
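A condensed sketch (illustrative, not part of the patch) of the processPassword() change above: the module-parameter string is presumably first duplicated into an ordinary kmalloc'd buffer so that the scatterlist built by plaintext_to_sha1() points at regular kernel memory rather than parameter storage:

	char *p = kstrdup(passwd, GFP_KERNEL);	/* heap copy the digest code can map */
	if (!p)
		return -ENOMEM;
	rc = plaintext_to_sha1(hashedPassword, p, strlen(p));
	kfree(p);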
*/ } else if (*sha1_passwd) { // Base 16 @@ -542,7 +532,7 @@ static int processPassword(void) sha1_passwd[i + 2] = tmp; } } - return 0; + return rc; } /** @@ -552,28 +542,46 @@ struct dentry *dir_ino, *seclvl_ino, *passwd_ino; static int seclvlfs_register(void) { + int rc = 0; + dir_ino = securityfs_create_dir("seclvl", NULL); - if (!dir_ino) - return -EFAULT; + + if (IS_ERR(dir_ino)) + return PTR_ERR(dir_ino); seclvl_ino = securityfs_create_file("seclvl", S_IRUGO | S_IWUSR, dir_ino, &seclvl, &seclvl_file_ops); - if (!seclvl_ino) + if (IS_ERR(seclvl_ino)) { + rc = PTR_ERR(seclvl_ino); goto out_deldir; + } if (*passwd || *sha1_passwd) { passwd_ino = securityfs_create_file("passwd", S_IRUGO | S_IWUSR, dir_ino, NULL, &passwd_file_ops); - if (!passwd_ino) + if (IS_ERR(passwd_ino)) { + rc = PTR_ERR(passwd_ino); goto out_delf; + } } - return 0; + return rc; + +out_delf: + securityfs_remove(seclvl_ino); out_deldir: securityfs_remove(dir_ino); -out_delf: + + return rc; +} + +static void seclvlfs_unregister(void) +{ securityfs_remove(seclvl_ino); - return -EFAULT; + if (*passwd || *sha1_passwd) + securityfs_remove(passwd_ino); + + securityfs_remove(dir_ino); } /** @@ -582,6 +590,8 @@ out_delf: static int __init seclvl_init(void) { int rc = 0; + static char once; + if (verbosity < 0 || verbosity > 1) { printk(KERN_ERR "Error: bad verbosity [%d]; only 0 or 1 " "are valid values\n", verbosity); @@ -600,6 +610,11 @@ static int __init seclvl_init(void) "module parameter(s): rc = [%d]\n", rc); goto exit; } + + if ((rc = seclvlfs_register())) { + seclvl_printk(0, KERN_ERR, "Error registering with sysfs\n"); + goto exit; + } /* register ourselves with the security framework */ if (register_security(&seclvl_ops)) { seclvl_printk(0, KERN_ERR, @@ -611,20 +626,24 @@ static int __init seclvl_init(void) seclvl_printk(0, KERN_ERR, "seclvl: Failure " "registering with primary security " "module.\n"); + seclvlfs_unregister(); goto exit; } /* if primary module registered */ secondary = 1; } /* if we registered ourselves with the security framework */ - if ((rc = seclvlfs_register())) { - seclvl_printk(0, KERN_ERR, "Error registering with sysfs\n"); - goto exit; - } + seclvl_printk(0, KERN_INFO, "seclvl: Successfully initialized.\n"); + + if (once) { + once = 1; + seclvl_printk(0, KERN_INFO, "seclvl is going away. It has been " + "buggy for ages. 
Also, be warned that " + "Securelevels are useless."); + } exit: - if (rc) { + if (rc) printk(KERN_ERR "seclvl: Error during initialization: rc = " "[%d]\n", rc); - } return rc; } @@ -633,17 +652,14 @@ static int __init seclvl_init(void) */ static void __exit seclvl_exit(void) { - securityfs_remove(seclvl_ino); - if (*passwd || *sha1_passwd) - securityfs_remove(passwd_ino); - securityfs_remove(dir_ino); - if (secondary == 1) { + seclvlfs_unregister(); + + if (secondary) mod_unreg_security(MY_NAME, &seclvl_ops); - } else if (unregister_security(&seclvl_ops)) { + else if (unregister_security(&seclvl_ops)) seclvl_printk(0, KERN_INFO, "seclvl: Failure unregistering with the " "kernel\n"); - } } module_init(seclvl_init); diff --git a/sound/oss/ac97_codec.c b/sound/oss/ac97_codec.c index fd25aca2512..972327c9764 100644 --- a/sound/oss/ac97_codec.c +++ b/sound/oss/ac97_codec.c @@ -55,7 +55,7 @@ #include <linux/pci.h> #include <linux/ac97_codec.h> #include <asm/uaccess.h> -#include <asm/semaphore.h> +#include <linux/mutex.h> #define CODEC_ID_BUFSZ 14 @@ -304,7 +304,7 @@ static const unsigned int ac97_oss_rm[] = { static LIST_HEAD(codecs); static LIST_HEAD(codec_drivers); -static DECLARE_MUTEX(codec_sem); +static DEFINE_MUTEX(codec_mutex); /* reads the given OSS mixer from the ac97 the caller must have insured that the ac97 knows about that given mixer, and should be holding a spinlock for the card */ @@ -769,9 +769,9 @@ void ac97_release_codec(struct ac97_codec *codec) { /* Remove from the list first, we don't want to be "rediscovered" */ - down(&codec_sem); + mutex_lock(&codec_mutex); list_del(&codec->list); - up(&codec_sem); + mutex_unlock(&codec_mutex); /* * The driver needs to deal with internal * locking to avoid accidents here. @@ -889,7 +889,7 @@ int ac97_probe_codec(struct ac97_codec *codec) * callbacks. 
*/ - down(&codec_sem); + mutex_lock(&codec_mutex); list_add(&codec->list, &codecs); list_for_each(l, &codec_drivers) { @@ -903,7 +903,7 @@ int ac97_probe_codec(struct ac97_codec *codec) } } - up(&codec_sem); + mutex_unlock(&codec_mutex); return 1; } @@ -1439,7 +1439,7 @@ int ac97_register_driver(struct ac97_driver *driver) struct list_head *l; struct ac97_codec *c; - down(&codec_sem); + mutex_lock(&codec_mutex); INIT_LIST_HEAD(&driver->list); list_add(&driver->list, &codec_drivers); @@ -1452,7 +1452,7 @@ int ac97_register_driver(struct ac97_driver *driver) continue; c->driver = driver; } - up(&codec_sem); + mutex_unlock(&codec_mutex); return 0; } @@ -1471,7 +1471,7 @@ void ac97_unregister_driver(struct ac97_driver *driver) struct list_head *l; struct ac97_codec *c; - down(&codec_sem); + mutex_lock(&codec_mutex); list_del_init(&driver->list); list_for_each(l, &codecs) @@ -1483,7 +1483,7 @@ void ac97_unregister_driver(struct ac97_driver *driver) } } - up(&codec_sem); + mutex_unlock(&codec_mutex); } EXPORT_SYMBOL_GPL(ac97_unregister_driver); @@ -1494,14 +1494,14 @@ static int swap_headphone(int remove_master) struct ac97_codec *c; if (remove_master) { - down(&codec_sem); + mutex_lock(&codec_mutex); list_for_each(l, &codecs) { c = list_entry(l, struct ac97_codec, list); if (supported_mixer(c, SOUND_MIXER_PHONEOUT)) c->supported_mixers &= ~SOUND_MASK_PHONEOUT; } - up(&codec_sem); + mutex_unlock(&codec_mutex); } else ac97_hw[SOUND_MIXER_PHONEOUT].offset = AC97_MASTER_VOL_STEREO; diff --git a/sound/oss/aci.c b/sound/oss/aci.c index 3928c2802cc..3bfac375dbd 100644 --- a/sound/oss/aci.c +++ b/sound/oss/aci.c @@ -56,7 +56,8 @@ #include <linux/module.h> #include <linux/proc_fs.h> #include <linux/slab.h> -#include <asm/semaphore.h> +#include <linux/mutex.h> + #include <asm/io.h> #include <asm/uaccess.h> #include "sound_config.h" @@ -79,7 +80,7 @@ static int aci_micpreamp=3; /* microphone preamp-level that can't be * * checked with ACI versions prior to 0xb0 */ static int mixer_device; -static struct semaphore aci_sem; +static struct mutex aci_mutex; #ifdef MODULE static int reset; @@ -212,7 +213,7 @@ int aci_rw_cmd(int write1, int write2, int write3) int write[] = {write1, write2, write3}; int read = -EINTR, i; - if (down_interruptible(&aci_sem)) + if (mutex_lock_interruptible(&aci_mutex)) goto out; for (i=0; i<3; i++) { @@ -227,7 +228,7 @@ int aci_rw_cmd(int write1, int write2, int write3) } read = aci_rawread(); -out_up: up(&aci_sem); +out_up: mutex_unlock(&aci_mutex); out: return read; } @@ -603,7 +604,7 @@ static int __init attach_aci(void) char *boardname; int i, rc = -EBUSY; - init_MUTEX(&aci_sem); + mutex_init(&aci_mutex); outb(0xE3, 0xf8f); /* Write MAD16 password */ aci_port = (inb(0xf90) & 0x10) ? 
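The sound/oss files from here on repeat one mechanical conversion; as a reader's crib (illustrative, not part of the patch), the semaphore-to-mutex mapping used throughout is:

	old (semaphore)                        new (mutex)
	#include <asm/semaphore.h>             #include <linux/mutex.h>
	static DECLARE_MUTEX(lock);            static DEFINE_MUTEX(lock);
	init_MUTEX(&s->sem);                   mutex_init(&s->mutex);
	down(&sem);                            mutex_lock(&mutex);
	down_interruptible(&sem);              mutex_lock_interruptible(&mutex);
	up(&sem);                              mutex_unlock(&mutex);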
diff --git a/sound/oss/ad1889.c b/sound/oss/ad1889.c index a0d73f34310..54dabf86280 100644 --- a/sound/oss/ad1889.c +++ b/sound/oss/ad1889.c @@ -38,6 +38,7 @@ #include <linux/ac97_codec.h> #include <linux/sound.h> #include <linux/interrupt.h> +#include <linux/mutex.h> #include <asm/delay.h> #include <asm/io.h> @@ -238,7 +239,7 @@ static ad1889_dev_t *ad1889_alloc_dev(struct pci_dev *pci) for (i = 0; i < AD_MAX_STATES; i++) { dev->state[i].card = dev; - init_MUTEX(&dev->state[i].sem); + mutex_init(&dev->state[i].mutex); init_waitqueue_head(&dev->state[i].dmabuf.wait); } @@ -461,7 +462,7 @@ static ssize_t ad1889_write(struct file *file, const char __user *buffer, size_t ssize_t ret = 0; DECLARE_WAITQUEUE(wait, current); - down(&state->sem); + mutex_lock(&state->mutex); #if 0 if (dmabuf->mapped) { ret = -ENXIO; @@ -546,7 +547,7 @@ static ssize_t ad1889_write(struct file *file, const char __user *buffer, size_t err2: remove_wait_queue(&state->dmabuf.wait, &wait); err1: - up(&state->sem); + mutex_unlock(&state->mutex); return ret; } diff --git a/sound/oss/ad1889.h b/sound/oss/ad1889.h index e04affce1dd..861b3213f30 100644 --- a/sound/oss/ad1889.h +++ b/sound/oss/ad1889.h @@ -100,7 +100,7 @@ typedef struct ad1889_state { unsigned int subdivision; } dmabuf; - struct semaphore sem; + struct mutex mutex; } ad1889_state_t; typedef struct ad1889_dev { diff --git a/sound/oss/ali5455.c b/sound/oss/ali5455.c index 9c9e6c0410f..62bb936b1f3 100644 --- a/sound/oss/ali5455.c +++ b/sound/oss/ali5455.c @@ -64,6 +64,8 @@ #include <linux/smp_lock.h> #include <linux/ac97_codec.h> #include <linux/interrupt.h> +#include <linux/mutex.h> + #include <asm/uaccess.h> #ifndef PCI_DEVICE_ID_ALI_5455 @@ -234,7 +236,7 @@ struct ali_state { struct ali_card *card; /* Card info */ /* single open lock mechanism, only used for recording */ - struct semaphore open_sem; + struct mutex open_mutex; wait_queue_head_t open_wait; /* file mode */ @@ -2807,7 +2809,7 @@ found_virt: state->card = card; state->magic = ALI5455_STATE_MAGIC; init_waitqueue_head(&dmabuf->wait); - init_MUTEX(&state->open_sem); + mutex_init(&state->open_mutex); file->private_data = state; dmabuf->trigger = 0; /* allocate hardware channels */ @@ -3359,7 +3361,7 @@ static void __devinit ali_configure_clocking(void) state->card = card; state->magic = ALI5455_STATE_MAGIC; init_waitqueue_head(&dmabuf->wait); - init_MUTEX(&state->open_sem); + mutex_init(&state->open_mutex); dmabuf->fmt = ALI5455_FMT_STEREO | ALI5455_FMT_16BIT; dmabuf->trigger = PCM_ENABLE_OUTPUT; ali_set_dac_rate(state, 48000); diff --git a/sound/oss/au1000.c b/sound/oss/au1000.c index c407de86cbb..fe54de25aaf 100644 --- a/sound/oss/au1000.c +++ b/sound/oss/au1000.c @@ -68,6 +68,8 @@ #include <linux/smp_lock.h> #include <linux/ac97_codec.h> #include <linux/interrupt.h> +#include <linux/mutex.h> + #include <asm/io.h> #include <asm/uaccess.h> #include <asm/mach-au1x00/au1000.h> @@ -120,8 +122,8 @@ struct au1000_state { int no_vra; // do not use VRA spinlock_t lock; - struct semaphore open_sem; - struct semaphore sem; + struct mutex open_mutex; + struct mutex sem; mode_t open_mode; wait_queue_head_t open_wait; @@ -1106,7 +1108,7 @@ static ssize_t au1000_read(struct file *file, char *buffer, count *= db->cnt_factor; - down(&s->sem); + mutex_lock(&s->sem); add_wait_queue(&db->wait, &wait); while (count > 0) { @@ -1125,14 +1127,14 @@ static ssize_t au1000_read(struct file *file, char *buffer, ret = -EAGAIN; goto out; } - up(&s->sem); + mutex_unlock(&s->sem); schedule(); if (signal_pending(current)) { if 
(!ret) ret = -ERESTARTSYS; goto out2; } - down(&s->sem); + mutex_lock(&s->sem); } } while (avail <= 0); @@ -1159,7 +1161,7 @@ static ssize_t au1000_read(struct file *file, char *buffer, } // while (count > 0) out: - up(&s->sem); + mutex_unlock(&s->sem); out2: remove_wait_queue(&db->wait, &wait); set_current_state(TASK_RUNNING); @@ -1187,7 +1189,7 @@ static ssize_t au1000_write(struct file *file, const char *buffer, count *= db->cnt_factor; - down(&s->sem); + mutex_lock(&s->sem); add_wait_queue(&db->wait, &wait); while (count > 0) { @@ -1204,14 +1206,14 @@ static ssize_t au1000_write(struct file *file, const char *buffer, ret = -EAGAIN; goto out; } - up(&s->sem); + mutex_unlock(&s->sem); schedule(); if (signal_pending(current)) { if (!ret) ret = -ERESTARTSYS; goto out2; } - down(&s->sem); + mutex_lock(&s->sem); } } while (avail <= 0); @@ -1240,7 +1242,7 @@ static ssize_t au1000_write(struct file *file, const char *buffer, } // while (count > 0) out: - up(&s->sem); + mutex_unlock(&s->sem); out2: remove_wait_queue(&db->wait, &wait); set_current_state(TASK_RUNNING); @@ -1298,7 +1300,7 @@ static int au1000_mmap(struct file *file, struct vm_area_struct *vma) dbg("%s", __FUNCTION__); lock_kernel(); - down(&s->sem); + mutex_lock(&s->sem); if (vma->vm_flags & VM_WRITE) db = &s->dma_dac; else if (vma->vm_flags & VM_READ) @@ -1324,7 +1326,7 @@ static int au1000_mmap(struct file *file, struct vm_area_struct *vma) vma->vm_flags &= ~VM_IO; db->mapped = 1; out: - up(&s->sem); + mutex_unlock(&s->sem); unlock_kernel(); return ret; } @@ -1829,21 +1831,21 @@ static int au1000_open(struct inode *inode, struct file *file) file->private_data = s; /* wait for device to become free */ - down(&s->open_sem); + mutex_lock(&s->open_mutex); while (s->open_mode & file->f_mode) { if (file->f_flags & O_NONBLOCK) { - up(&s->open_sem); + mutex_unlock(&s->open_mutex); return -EBUSY; } add_wait_queue(&s->open_wait, &wait); __set_current_state(TASK_INTERRUPTIBLE); - up(&s->open_sem); + mutex_unlock(&s->open_mutex); schedule(); remove_wait_queue(&s->open_wait, &wait); set_current_state(TASK_RUNNING); if (signal_pending(current)) return -ERESTARTSYS; - down(&s->open_sem); + mutex_lock(&s->open_mutex); } stop_dac(s); @@ -1879,8 +1881,8 @@ static int au1000_open(struct inode *inode, struct file *file) } s->open_mode |= file->f_mode & (FMODE_READ | FMODE_WRITE); - up(&s->open_sem); - init_MUTEX(&s->sem); + mutex_unlock(&s->open_mutex); + mutex_init(&s->sem); return nonseekable_open(inode, file); } @@ -1896,7 +1898,7 @@ static int au1000_release(struct inode *inode, struct file *file) lock_kernel(); } - down(&s->open_sem); + mutex_lock(&s->open_mutex); if (file->f_mode & FMODE_WRITE) { stop_dac(s); dealloc_dmabuf(s, &s->dma_dac); @@ -1906,7 +1908,7 @@ static int au1000_release(struct inode *inode, struct file *file) dealloc_dmabuf(s, &s->dma_adc); } s->open_mode &= ((~file->f_mode) & (FMODE_READ|FMODE_WRITE)); - up(&s->open_sem); + mutex_unlock(&s->open_mutex); wake_up(&s->open_wait); unlock_kernel(); return 0; @@ -1996,7 +1998,7 @@ static int __devinit au1000_probe(void) init_waitqueue_head(&s->dma_adc.wait); init_waitqueue_head(&s->dma_dac.wait); init_waitqueue_head(&s->open_wait); - init_MUTEX(&s->open_sem); + mutex_init(&s->open_mutex); spin_lock_init(&s->lock); s->codec.private_data = s; s->codec.id = 0; diff --git a/sound/oss/au1550_ac97.c b/sound/oss/au1550_ac97.c index bdee0502f3e..6a4956b8025 100644 --- a/sound/oss/au1550_ac97.c +++ b/sound/oss/au1550_ac97.c @@ -52,6 +52,8 @@ #include <linux/spinlock.h> #include 
<linux/smp_lock.h> #include <linux/ac97_codec.h> +#include <linux/mutex.h> + #include <asm/io.h> #include <asm/uaccess.h> #include <asm/hardirq.h> @@ -90,8 +92,8 @@ static struct au1550_state { int no_vra; /* do not use VRA */ spinlock_t lock; - struct semaphore open_sem; - struct semaphore sem; + struct mutex open_mutex; + struct mutex sem; mode_t open_mode; wait_queue_head_t open_wait; @@ -1044,7 +1046,7 @@ au1550_read(struct file *file, char *buffer, size_t count, loff_t *ppos) count *= db->cnt_factor; - down(&s->sem); + mutex_lock(&s->sem); add_wait_queue(&db->wait, &wait); while (count > 0) { @@ -1064,14 +1066,14 @@ au1550_read(struct file *file, char *buffer, size_t count, loff_t *ppos) ret = -EAGAIN; goto out; } - up(&s->sem); + mutex_unlock(&s->sem); schedule(); if (signal_pending(current)) { if (!ret) ret = -ERESTARTSYS; goto out2; } - down(&s->sem); + mutex_lock(&s->sem); } } while (avail <= 0); @@ -1099,7 +1101,7 @@ au1550_read(struct file *file, char *buffer, size_t count, loff_t *ppos) } /* while (count > 0) */ out: - up(&s->sem); + mutex_unlock(&s->sem); out2: remove_wait_queue(&db->wait, &wait); set_current_state(TASK_RUNNING); @@ -1125,7 +1127,7 @@ au1550_write(struct file *file, const char *buffer, size_t count, loff_t * ppos) count *= db->cnt_factor; - down(&s->sem); + mutex_lock(&s->sem); add_wait_queue(&db->wait, &wait); while (count > 0) { @@ -1143,14 +1145,14 @@ au1550_write(struct file *file, const char *buffer, size_t count, loff_t * ppos) ret = -EAGAIN; goto out; } - up(&s->sem); + mutex_unlock(&s->sem); schedule(); if (signal_pending(current)) { if (!ret) ret = -ERESTARTSYS; goto out2; } - down(&s->sem); + mutex_lock(&s->sem); } } while (avail <= 0); @@ -1196,7 +1198,7 @@ au1550_write(struct file *file, const char *buffer, size_t count, loff_t * ppos) } /* while (count > 0) */ out: - up(&s->sem); + mutex_unlock(&s->sem); out2: remove_wait_queue(&db->wait, &wait); set_current_state(TASK_RUNNING); @@ -1253,7 +1255,7 @@ au1550_mmap(struct file *file, struct vm_area_struct *vma) int ret = 0; lock_kernel(); - down(&s->sem); + mutex_lock(&s->sem); if (vma->vm_flags & VM_WRITE) db = &s->dma_dac; else if (vma->vm_flags & VM_READ) @@ -1279,7 +1281,7 @@ au1550_mmap(struct file *file, struct vm_area_struct *vma) vma->vm_flags &= ~VM_IO; db->mapped = 1; out: - up(&s->sem); + mutex_unlock(&s->sem); unlock_kernel(); return ret; } @@ -1790,21 +1792,21 @@ au1550_open(struct inode *inode, struct file *file) file->private_data = s; /* wait for device to become free */ - down(&s->open_sem); + mutex_lock(&s->open_mutex); while (s->open_mode & file->f_mode) { if (file->f_flags & O_NONBLOCK) { - up(&s->open_sem); + mutex_unlock(&s->open_mutex); return -EBUSY; } add_wait_queue(&s->open_wait, &wait); __set_current_state(TASK_INTERRUPTIBLE); - up(&s->open_sem); + mutex_unlock(&s->open_mutex); schedule(); remove_wait_queue(&s->open_wait, &wait); set_current_state(TASK_RUNNING); if (signal_pending(current)) return -ERESTARTSYS; - down(&s->open_sem); + mutex_lock(&s->open_mutex); } stop_dac(s); @@ -1840,8 +1842,8 @@ au1550_open(struct inode *inode, struct file *file) } s->open_mode |= file->f_mode & (FMODE_READ | FMODE_WRITE); - up(&s->open_sem); - init_MUTEX(&s->sem); + mutex_unlock(&s->open_mutex); + mutex_init(&s->sem); return 0; } @@ -1858,7 +1860,7 @@ au1550_release(struct inode *inode, struct file *file) lock_kernel(); } - down(&s->open_sem); + mutex_lock(&s->open_mutex); if (file->f_mode & FMODE_WRITE) { stop_dac(s); kfree(s->dma_dac.rawbuf); @@ -1870,7 +1872,7 @@ 
au1550_release(struct inode *inode, struct file *file) s->dma_adc.rawbuf = NULL; } s->open_mode &= ((~file->f_mode) & (FMODE_READ|FMODE_WRITE)); - up(&s->open_sem); + mutex_unlock(&s->open_mutex); wake_up(&s->open_wait); unlock_kernel(); return 0; @@ -1902,7 +1904,7 @@ au1550_probe(void) init_waitqueue_head(&s->dma_adc.wait); init_waitqueue_head(&s->dma_dac.wait); init_waitqueue_head(&s->open_wait); - init_MUTEX(&s->open_sem); + mutex_init(&s->open_mutex); spin_lock_init(&s->lock); s->codec = ac97_alloc_codec(); diff --git a/sound/oss/btaudio.c b/sound/oss/btaudio.c index 4007a5680ac..bfe3b534ef3 100644 --- a/sound/oss/btaudio.c +++ b/sound/oss/btaudio.c @@ -32,6 +32,8 @@ #include <linux/soundcard.h> #include <linux/slab.h> #include <linux/kdev_t.h> +#include <linux/mutex.h> + #include <asm/uaccess.h> #include <asm/io.h> @@ -108,7 +110,7 @@ struct btaudio { /* locking */ int users; - struct semaphore lock; + struct mutex lock; /* risc instructions */ unsigned int risc_size; @@ -440,7 +442,7 @@ static struct file_operations btaudio_mixer_fops = { static int btaudio_dsp_open(struct inode *inode, struct file *file, struct btaudio *bta, int analog) { - down(&bta->lock); + mutex_lock(&bta->lock); if (bta->users) goto busy; bta->users++; @@ -452,11 +454,11 @@ static int btaudio_dsp_open(struct inode *inode, struct file *file, bta->read_count = 0; bta->sampleshift = 0; - up(&bta->lock); + mutex_unlock(&bta->lock); return 0; busy: - up(&bta->lock); + mutex_unlock(&bta->lock); return -EBUSY; } @@ -496,11 +498,11 @@ static int btaudio_dsp_release(struct inode *inode, struct file *file) { struct btaudio *bta = file->private_data; - down(&bta->lock); + mutex_lock(&bta->lock); if (bta->recording) stop_recording(bta); bta->users--; - up(&bta->lock); + mutex_unlock(&bta->lock); return 0; } @@ -513,7 +515,7 @@ static ssize_t btaudio_dsp_read(struct file *file, char __user *buffer, DECLARE_WAITQUEUE(wait, current); add_wait_queue(&bta->readq, &wait); - down(&bta->lock); + mutex_lock(&bta->lock); while (swcount > 0) { if (0 == bta->read_count) { if (!bta->recording) { @@ -528,10 +530,10 @@ static ssize_t btaudio_dsp_read(struct file *file, char __user *buffer, ret = -EAGAIN; break; } - up(&bta->lock); + mutex_unlock(&bta->lock); current->state = TASK_INTERRUPTIBLE; schedule(); - down(&bta->lock); + mutex_lock(&bta->lock); if(signal_pending(current)) { if (0 == ret) ret = -EINTR; @@ -604,7 +606,7 @@ static ssize_t btaudio_dsp_read(struct file *file, char __user *buffer, if (bta->read_offset == bta->buf_size) bta->read_offset = 0; } - up(&bta->lock); + mutex_unlock(&bta->lock); remove_wait_queue(&bta->readq, &wait); current->state = TASK_RUNNING; return ret; @@ -651,10 +653,10 @@ static int btaudio_dsp_ioctl(struct inode *inode, struct file *file, bta->decimation = 0; } if (bta->recording) { - down(&bta->lock); + mutex_lock(&bta->lock); stop_recording(bta); start_recording(bta); - up(&bta->lock); + mutex_unlock(&bta->lock); } /* fall through */ case SOUND_PCM_READ_RATE: @@ -716,10 +718,10 @@ static int btaudio_dsp_ioctl(struct inode *inode, struct file *file, else bta->bits = 16; if (bta->recording) { - down(&bta->lock); + mutex_lock(&bta->lock); stop_recording(bta); start_recording(bta); - up(&bta->lock); + mutex_unlock(&bta->lock); } } if (debug) @@ -736,9 +738,9 @@ static int btaudio_dsp_ioctl(struct inode *inode, struct file *file, case SNDCTL_DSP_RESET: if (bta->recording) { - down(&bta->lock); + mutex_lock(&bta->lock); stop_recording(bta); - up(&bta->lock); + mutex_unlock(&bta->lock); } return 0; case 
SNDCTL_DSP_GETBLKSIZE: @@ -941,7 +943,7 @@ static int __devinit btaudio_probe(struct pci_dev *pci_dev, if (rate) bta->rate = rate; - init_MUTEX(&bta->lock); + mutex_init(&bta->lock); init_waitqueue_head(&bta->readq); if (-1 != latency) { diff --git a/sound/oss/cmpci.c b/sound/oss/cmpci.c index 7cfbb08db53..1fbd5137f6d 100644 --- a/sound/oss/cmpci.c +++ b/sound/oss/cmpci.c @@ -138,6 +138,8 @@ #endif #ifdef CONFIG_SOUND_CMPCI_JOYSTICK #include <linux/gameport.h> +#include <linux/mutex.h> + #endif /* --------------------------------------------------------------------- */ @@ -392,7 +394,7 @@ struct cm_state { unsigned char fmt, enable; spinlock_t lock; - struct semaphore open_sem; + struct mutex open_mutex; mode_t open_mode; wait_queue_head_t open_wait; @@ -2825,21 +2827,21 @@ static int cm_open(struct inode *inode, struct file *file) VALIDATE_STATE(s); file->private_data = s; /* wait for device to become free */ - down(&s->open_sem); + mutex_lock(&s->open_mutex); while (s->open_mode & file->f_mode) { if (file->f_flags & O_NONBLOCK) { - up(&s->open_sem); + mutex_unlock(&s->open_mutex); return -EBUSY; } add_wait_queue(&s->open_wait, &wait); __set_current_state(TASK_INTERRUPTIBLE); - up(&s->open_sem); + mutex_unlock(&s->open_mutex); schedule(); remove_wait_queue(&s->open_wait, &wait); set_current_state(TASK_RUNNING); if (signal_pending(current)) return -ERESTARTSYS; - down(&s->open_sem); + mutex_lock(&s->open_mutex); } if (file->f_mode & FMODE_READ) { s->status &= ~DO_BIGENDIAN_R; @@ -2867,7 +2869,7 @@ static int cm_open(struct inode *inode, struct file *file) } set_fmt(s, fmtm, fmts); s->open_mode |= file->f_mode & (FMODE_READ | FMODE_WRITE); - up(&s->open_sem); + mutex_unlock(&s->open_mutex); return nonseekable_open(inode, file); } @@ -2879,7 +2881,7 @@ static int cm_release(struct inode *inode, struct file *file) lock_kernel(); if (file->f_mode & FMODE_WRITE) drain_dac(s, file->f_flags & O_NONBLOCK); - down(&s->open_sem); + mutex_lock(&s->open_mutex); if (file->f_mode & FMODE_WRITE) { stop_dac(s); @@ -2903,7 +2905,7 @@ static int cm_release(struct inode *inode, struct file *file) s->status &= ~DO_BIGENDIAN_R; } s->open_mode &= ~(file->f_mode & (FMODE_READ|FMODE_WRITE)); - up(&s->open_sem); + mutex_unlock(&s->open_mutex); wake_up(&s->open_wait); unlock_kernel(); return 0; @@ -3080,7 +3082,7 @@ static int __devinit cm_probe(struct pci_dev *pcidev, const struct pci_device_id init_waitqueue_head(&s->dma_adc.wait); init_waitqueue_head(&s->dma_dac.wait); init_waitqueue_head(&s->open_wait); - init_MUTEX(&s->open_sem); + mutex_init(&s->open_mutex); spin_lock_init(&s->lock); s->magic = CM_MAGIC; s->dev = pcidev; diff --git a/sound/oss/cs4281/cs4281m.c b/sound/oss/cs4281/cs4281m.c index 0720365f643..0004442f9b7 100644 --- a/sound/oss/cs4281/cs4281m.c +++ b/sound/oss/cs4281/cs4281m.c @@ -245,9 +245,9 @@ struct cs4281_state { void *tmpbuff; // tmp buffer for sample conversions unsigned ena; spinlock_t lock; - struct semaphore open_sem; - struct semaphore open_sem_adc; - struct semaphore open_sem_dac; + struct mutex open_sem; + struct mutex open_sem_adc; + struct mutex open_sem_dac; mode_t open_mode; wait_queue_head_t open_wait; wait_queue_head_t open_wait_adc; @@ -3598,20 +3598,20 @@ static int cs4281_release(struct inode *inode, struct file *file) if (file->f_mode & FMODE_WRITE) { drain_dac(s, file->f_flags & O_NONBLOCK); - down(&s->open_sem_dac); + mutex_lock(&s->open_sem_dac); stop_dac(s); dealloc_dmabuf(s, &s->dma_dac); s->open_mode &= ~FMODE_WRITE; - up(&s->open_sem_dac); + 
mutex_unlock(&s->open_sem_dac); wake_up(&s->open_wait_dac); } if (file->f_mode & FMODE_READ) { drain_adc(s, file->f_flags & O_NONBLOCK); - down(&s->open_sem_adc); + mutex_lock(&s->open_sem_adc); stop_adc(s); dealloc_dmabuf(s, &s->dma_adc); s->open_mode &= ~FMODE_READ; - up(&s->open_sem_adc); + mutex_unlock(&s->open_sem_adc); wake_up(&s->open_wait_adc); } return 0; @@ -3651,33 +3651,33 @@ static int cs4281_open(struct inode *inode, struct file *file) return -ENODEV; } if (file->f_mode & FMODE_WRITE) { - down(&s->open_sem_dac); + mutex_lock(&s->open_sem_dac); while (s->open_mode & FMODE_WRITE) { if (file->f_flags & O_NONBLOCK) { - up(&s->open_sem_dac); + mutex_unlock(&s->open_sem_dac); return -EBUSY; } - up(&s->open_sem_dac); + mutex_unlock(&s->open_sem_dac); interruptible_sleep_on(&s->open_wait_dac); if (signal_pending(current)) return -ERESTARTSYS; - down(&s->open_sem_dac); + mutex_lock(&s->open_sem_dac); } } if (file->f_mode & FMODE_READ) { - down(&s->open_sem_adc); + mutex_lock(&s->open_sem_adc); while (s->open_mode & FMODE_READ) { if (file->f_flags & O_NONBLOCK) { - up(&s->open_sem_adc); + mutex_unlock(&s->open_sem_adc); return -EBUSY; } - up(&s->open_sem_adc); + mutex_unlock(&s->open_sem_adc); interruptible_sleep_on(&s->open_wait_adc); if (signal_pending(current)) return -ERESTARTSYS; - down(&s->open_sem_adc); + mutex_lock(&s->open_sem_adc); } } s->open_mode |= file->f_mode & (FMODE_READ | FMODE_WRITE); @@ -3691,7 +3691,7 @@ static int cs4281_open(struct inode *inode, struct file *file) s->ena &= ~FMODE_READ; s->dma_adc.ossfragshift = s->dma_adc.ossmaxfrags = s->dma_adc.subdivision = 0; - up(&s->open_sem_adc); + mutex_unlock(&s->open_sem_adc); if (prog_dmabuf_adc(s)) { CS_DBGOUT(CS_OPEN | CS_ERROR, 2, printk(KERN_ERR @@ -3711,7 +3711,7 @@ static int cs4281_open(struct inode *inode, struct file *file) s->ena &= ~FMODE_WRITE; s->dma_dac.ossfragshift = s->dma_dac.ossmaxfrags = s->dma_dac.subdivision = 0; - up(&s->open_sem_dac); + mutex_unlock(&s->open_sem_dac); if (prog_dmabuf_dac(s)) { CS_DBGOUT(CS_OPEN | CS_ERROR, 2, printk(KERN_ERR @@ -3978,17 +3978,17 @@ static int cs4281_midi_open(struct inode *inode, struct file *file) VALIDATE_STATE(s); file->private_data = s; // wait for device to become free - down(&s->open_sem); + mutex_lock(&s->open_sem); while (s->open_mode & (file->f_mode << FMODE_MIDI_SHIFT)) { if (file->f_flags & O_NONBLOCK) { - up(&s->open_sem); + mutex_unlock(&s->open_sem); return -EBUSY; } - up(&s->open_sem); + mutex_unlock(&s->open_sem); interruptible_sleep_on(&s->open_wait); if (signal_pending(current)) return -ERESTARTSYS; - down(&s->open_sem); + mutex_lock(&s->open_sem); } spin_lock_irqsave(&s->lock, flags); if (!(s->open_mode & (FMODE_MIDI_READ | FMODE_MIDI_WRITE))) { @@ -4018,7 +4018,7 @@ static int cs4281_midi_open(struct inode *inode, struct file *file) (file-> f_mode << FMODE_MIDI_SHIFT) & (FMODE_MIDI_READ | FMODE_MIDI_WRITE); - up(&s->open_sem); + mutex_unlock(&s->open_sem); return nonseekable_open(inode, file); } @@ -4057,7 +4057,7 @@ static int cs4281_midi_release(struct inode *inode, struct file *file) remove_wait_queue(&s->midi.owait, &wait); current->state = TASK_RUNNING; } - down(&s->open_sem); + mutex_lock(&s->open_sem); s->open_mode &= (~(file->f_mode << FMODE_MIDI_SHIFT)) & (FMODE_MIDI_READ | FMODE_MIDI_WRITE); @@ -4067,7 +4067,7 @@ static int cs4281_midi_release(struct inode *inode, struct file *file) del_timer(&s->midi.timer); } spin_unlock_irqrestore(&s->lock, flags); - up(&s->open_sem); + mutex_unlock(&s->open_sem); wake_up(&s->open_wait); return 
0; } @@ -4300,9 +4300,9 @@ static int __devinit cs4281_probe(struct pci_dev *pcidev, init_waitqueue_head(&s->open_wait_dac); init_waitqueue_head(&s->midi.iwait); init_waitqueue_head(&s->midi.owait); - init_MUTEX(&s->open_sem); - init_MUTEX(&s->open_sem_adc); - init_MUTEX(&s->open_sem_dac); + mutex_init(&s->open_sem); + mutex_init(&s->open_sem_adc); + mutex_init(&s->open_sem_dac); spin_lock_init(&s->lock); s->pBA0phys = pci_resource_start(pcidev, 0); s->pBA1phys = pci_resource_start(pcidev, 1); diff --git a/sound/oss/cs46xx.c b/sound/oss/cs46xx.c index 58e25c82eaf..53881bc91bb 100644 --- a/sound/oss/cs46xx.c +++ b/sound/oss/cs46xx.c @@ -90,6 +90,7 @@ #include <linux/init.h> #include <linux/poll.h> #include <linux/ac97_codec.h> +#include <linux/mutex.h> #include <asm/io.h> #include <asm/dma.h> @@ -238,7 +239,7 @@ struct cs_state { struct cs_card *card; /* Card info */ /* single open lock mechanism, only used for recording */ - struct semaphore open_sem; + struct mutex open_mutex; wait_queue_head_t open_wait; /* file mode */ @@ -297,7 +298,7 @@ struct cs_state { unsigned subdivision; } dmabuf; /* Guard against mmap/write/read races */ - struct semaphore sem; + struct mutex sem; }; struct cs_card { @@ -375,7 +376,7 @@ struct cs_card { unsigned char ibuf[CS_MIDIINBUF]; unsigned char obuf[CS_MIDIOUTBUF]; mode_t open_mode; - struct semaphore open_sem; + struct mutex open_mutex; } midi; struct cs46xx_pm pm; }; @@ -1428,9 +1429,9 @@ static int prog_dmabuf(struct cs_state *state) { int ret; - down(&state->sem); + mutex_lock(&state->sem); ret = __prog_dmabuf(state); - up(&state->sem); + mutex_unlock(&state->sem); return ret; } @@ -1831,17 +1832,17 @@ static int cs_midi_open(struct inode *inode, struct file *file) file->private_data = card; /* wait for device to become free */ - down(&card->midi.open_sem); + mutex_lock(&card->midi.open_mutex); while (card->midi.open_mode & file->f_mode) { if (file->f_flags & O_NONBLOCK) { - up(&card->midi.open_sem); + mutex_unlock(&card->midi.open_mutex); return -EBUSY; } - up(&card->midi.open_sem); + mutex_unlock(&card->midi.open_mutex); interruptible_sleep_on(&card->midi.open_wait); if (signal_pending(current)) return -ERESTARTSYS; - down(&card->midi.open_sem); + mutex_lock(&card->midi.open_mutex); } spin_lock_irqsave(&card->midi.lock, flags); if (!(card->midi.open_mode & (FMODE_READ | FMODE_WRITE))) { @@ -1859,7 +1860,7 @@ static int cs_midi_open(struct inode *inode, struct file *file) } spin_unlock_irqrestore(&card->midi.lock, flags); card->midi.open_mode |= (file->f_mode & (FMODE_READ | FMODE_WRITE)); - up(&card->midi.open_sem); + mutex_unlock(&card->midi.open_mutex); return 0; } @@ -1891,9 +1892,9 @@ static int cs_midi_release(struct inode *inode, struct file *file) remove_wait_queue(&card->midi.owait, &wait); current->state = TASK_RUNNING; } - down(&card->midi.open_sem); + mutex_lock(&card->midi.open_mutex); card->midi.open_mode &= (~(file->f_mode & (FMODE_READ | FMODE_WRITE))); - up(&card->midi.open_sem); + mutex_unlock(&card->midi.open_mutex); wake_up(&card->midi.open_wait); return 0; } @@ -2081,7 +2082,7 @@ static ssize_t cs_read(struct file *file, char __user *buffer, size_t count, lof if (!access_ok(VERIFY_WRITE, buffer, count)) return -EFAULT; - down(&state->sem); + mutex_lock(&state->sem); if (!dmabuf->ready && (ret = __prog_dmabuf(state))) goto out2; @@ -2114,13 +2115,13 @@ static ssize_t cs_read(struct file *file, char __user *buffer, size_t count, lof if (!ret) ret = -EAGAIN; goto out; } - up(&state->sem); + mutex_unlock(&state->sem); schedule(); if 
(signal_pending(current)) { if(!ret) ret = -ERESTARTSYS; goto out; } - down(&state->sem); + mutex_lock(&state->sem); if (dmabuf->mapped) { if(!ret) @@ -2155,7 +2156,7 @@ static ssize_t cs_read(struct file *file, char __user *buffer, size_t count, lof out: remove_wait_queue(&state->dmabuf.wait, &wait); out2: - up(&state->sem); + mutex_unlock(&state->sem); set_current_state(TASK_RUNNING); CS_DBGOUT(CS_WAVE_READ | CS_FUNCTION, 4, printk("cs46xx: cs_read()- %zd\n",ret) ); @@ -2184,7 +2185,7 @@ static ssize_t cs_write(struct file *file, const char __user *buffer, size_t cou return -EFAULT; dmabuf = &state->dmabuf; - down(&state->sem); + mutex_lock(&state->sem); if (dmabuf->mapped) { ret = -ENXIO; @@ -2240,13 +2241,13 @@ static ssize_t cs_write(struct file *file, const char __user *buffer, size_t cou if (!ret) ret = -EAGAIN; goto out; } - up(&state->sem); + mutex_unlock(&state->sem); schedule(); if (signal_pending(current)) { if(!ret) ret = -ERESTARTSYS; goto out; } - down(&state->sem); + mutex_lock(&state->sem); if (dmabuf->mapped) { if(!ret) @@ -2278,7 +2279,7 @@ static ssize_t cs_write(struct file *file, const char __user *buffer, size_t cou start_dac(state); } out: - up(&state->sem); + mutex_unlock(&state->sem); remove_wait_queue(&state->dmabuf.wait, &wait); set_current_state(TASK_RUNNING); @@ -2411,7 +2412,7 @@ static int cs_mmap(struct file *file, struct vm_area_struct *vma) goto out; } - down(&state->sem); + mutex_lock(&state->sem); dmabuf = &state->dmabuf; if (cs4x_pgoff(vma) != 0) { @@ -2438,7 +2439,7 @@ static int cs_mmap(struct file *file, struct vm_area_struct *vma) CS_DBGOUT(CS_FUNCTION, 2, printk("cs46xx: cs_mmap()-\n") ); out: - up(&state->sem); + mutex_unlock(&state->sem); return ret; } @@ -3200,7 +3201,7 @@ static int cs_open(struct inode *inode, struct file *file) if (state == NULL) return -ENOMEM; memset(state, 0, sizeof(struct cs_state)); - init_MUTEX(&state->sem); + mutex_init(&state->sem); dmabuf = &state->dmabuf; dmabuf->pbuf = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); if(dmabuf->pbuf==NULL) @@ -3241,10 +3242,10 @@ static int cs_open(struct inode *inode, struct file *file) state->virt = 0; state->magic = CS_STATE_MAGIC; init_waitqueue_head(&dmabuf->wait); - init_MUTEX(&state->open_sem); + mutex_init(&state->open_mutex); file->private_data = card; - down(&state->open_sem); + mutex_lock(&state->open_mutex); /* set default sample format. According to OSS Programmer's Guide /dev/dsp should be default to unsigned 8-bits, mono, with sample rate 8kHz and @@ -3260,7 +3261,7 @@ static int cs_open(struct inode *inode, struct file *file) cs_set_divisor(dmabuf); state->open_mode |= FMODE_READ; - up(&state->open_sem); + mutex_unlock(&state->open_mutex); } if(file->f_mode & FMODE_WRITE) { @@ -3271,7 +3272,7 @@ static int cs_open(struct inode *inode, struct file *file) if (state == NULL) return -ENOMEM; memset(state, 0, sizeof(struct cs_state)); - init_MUTEX(&state->sem); + mutex_init(&state->sem); dmabuf = &state->dmabuf; dmabuf->pbuf = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); if(dmabuf->pbuf==NULL) @@ -3312,10 +3313,10 @@ static int cs_open(struct inode *inode, struct file *file) state->virt = 1; state->magic = CS_STATE_MAGIC; init_waitqueue_head(&dmabuf->wait); - init_MUTEX(&state->open_sem); + mutex_init(&state->open_mutex); file->private_data = card; - down(&state->open_sem); + mutex_lock(&state->open_mutex); /* set default sample format. 
According to OSS Programmer's Guide /dev/dsp should be default to unsigned 8-bits, mono, with sample rate 8kHz and @@ -3331,7 +3332,7 @@ static int cs_open(struct inode *inode, struct file *file) cs_set_divisor(dmabuf); state->open_mode |= FMODE_WRITE; - up(&state->open_sem); + mutex_unlock(&state->open_mutex); if((ret = prog_dmabuf(state))) return ret; } @@ -3363,14 +3364,14 @@ static int cs_release(struct inode *inode, struct file *file) cs_clear_tail(state); drain_dac(state, file->f_flags & O_NONBLOCK); /* stop DMA state machine and free DMA buffers/channels */ - down(&state->open_sem); + mutex_lock(&state->open_mutex); stop_dac(state); dealloc_dmabuf(state); state->card->free_pcm_channel(state->card, dmabuf->channel->num); free_page((unsigned long)state->dmabuf.pbuf); - /* we're covered by the open_sem */ - up(&state->open_sem); + /* we're covered by the open_mutex */ + mutex_unlock(&state->open_mutex); state->card->states[state->virt] = NULL; state->open_mode &= (~file->f_mode) & (FMODE_READ|FMODE_WRITE); @@ -3395,14 +3396,14 @@ static int cs_release(struct inode *inode, struct file *file) { CS_DBGOUT(CS_RELEASE, 2, printk("cs46xx: cs_release() FMODE_READ\n") ); dmabuf = &state->dmabuf; - down(&state->open_sem); + mutex_lock(&state->open_mutex); stop_adc(state); dealloc_dmabuf(state); state->card->free_pcm_channel(state->card, dmabuf->channel->num); free_page((unsigned long)state->dmabuf.pbuf); - /* we're covered by the open_sem */ - up(&state->open_sem); + /* we're covered by the open_mutex */ + mutex_unlock(&state->open_mutex); state->card->states[state->virt] = NULL; state->open_mode &= (~file->f_mode) & (FMODE_READ|FMODE_WRITE); @@ -5507,7 +5508,7 @@ static int __devinit cs46xx_probe(struct pci_dev *pci_dev, } init_waitqueue_head(&card->midi.open_wait); - init_MUTEX(&card->midi.open_sem); + mutex_init(&card->midi.open_mutex); init_waitqueue_head(&card->midi.iwait); init_waitqueue_head(&card->midi.owait); cs461x_pokeBA0(card, BA0_MIDCR, MIDCR_MRST); diff --git a/sound/oss/dmasound/dmasound_awacs.c b/sound/oss/dmasound/dmasound_awacs.c index 74f975676cc..6ba8d6f45fe 100644 --- a/sound/oss/dmasound/dmasound_awacs.c +++ b/sound/oss/dmasound/dmasound_awacs.c @@ -80,7 +80,7 @@ #include <linux/kmod.h> #include <linux/interrupt.h> #include <linux/input.h> -#include <asm/semaphore.h> +#include <linux/mutex.h> #ifdef CONFIG_ADB_CUDA #include <linux/cuda.h> #endif @@ -88,8 +88,6 @@ #include <linux/pmu.h> #endif -#include <linux/i2c-dev.h> - #include <asm/uaccess.h> #include <asm/prom.h> #include <asm/machdep.h> @@ -130,7 +128,7 @@ static struct resource awacs_rsrc[3]; static char awacs_name[64]; static int awacs_revision; static int awacs_sleeping; -static DECLARE_MUTEX(dmasound_sem); +static DEFINE_MUTEX(dmasound_mutex); static int sound_device_id; /* exists after iMac revA */ static int hw_can_byteswap = 1 ; /* most pmac sound h/w can */ @@ -312,11 +310,11 @@ extern int daca_enter_sleep(void); extern int daca_leave_sleep(void); #define TRY_LOCK() \ - if ((rc = down_interruptible(&dmasound_sem)) != 0) \ + if ((rc = mutex_lock_interruptible(&dmasound_mutex)) != 0) \ return rc; -#define LOCK() down(&dmasound_sem); +#define LOCK() mutex_lock(&dmasound_mutex); -#define UNLOCK() up(&dmasound_sem); +#define UNLOCK() mutex_unlock(&dmasound_mutex); /* We use different versions that the ones provided in dmasound.h * diff --git a/sound/oss/emu10k1/hwaccess.h b/sound/oss/emu10k1/hwaccess.h index 104223a192a..85e27bda694 100644 --- a/sound/oss/emu10k1/hwaccess.h +++ b/sound/oss/emu10k1/hwaccess.h @@ 
-181,7 +181,7 @@ struct emu10k1_card struct emu10k1_mpuout *mpuout; struct emu10k1_mpuin *mpuin; - struct semaphore open_sem; + struct mutex open_sem; mode_t open_mode; wait_queue_head_t open_wait; diff --git a/sound/oss/emu10k1/main.c b/sound/oss/emu10k1/main.c index 23241cbdd90..0cd44a6f7ac 100644 --- a/sound/oss/emu10k1/main.c +++ b/sound/oss/emu10k1/main.c @@ -1320,7 +1320,7 @@ static int __devinit emu10k1_probe(struct pci_dev *pci_dev, const struct pci_dev card->is_aps = (subsysvid == EMU_APS_SUBID); spin_lock_init(&card->lock); - init_MUTEX(&card->open_sem); + mutex_init(&card->open_sem); card->open_mode = 0; init_waitqueue_head(&card->open_wait); diff --git a/sound/oss/emu10k1/midi.c b/sound/oss/emu10k1/midi.c index b40b5f97aac..959a96794db 100644 --- a/sound/oss/emu10k1/midi.c +++ b/sound/oss/emu10k1/midi.c @@ -110,21 +110,21 @@ match: #endif /* Wait for device to become free */ - down(&card->open_sem); + mutex_lock(&card->open_sem); while (card->open_mode & (file->f_mode << FMODE_MIDI_SHIFT)) { if (file->f_flags & O_NONBLOCK) { - up(&card->open_sem); + mutex_unlock(&card->open_sem); return -EBUSY; } - up(&card->open_sem); + mutex_unlock(&card->open_sem); interruptible_sleep_on(&card->open_wait); if (signal_pending(current)) { return -ERESTARTSYS; } - down(&card->open_sem); + mutex_lock(&card->open_sem); } if ((midi_dev = (struct emu10k1_mididevice *) kmalloc(sizeof(*midi_dev), GFP_KERNEL)) == NULL) @@ -183,7 +183,7 @@ match: card->open_mode |= (file->f_mode << FMODE_MIDI_SHIFT) & (FMODE_MIDI_READ | FMODE_MIDI_WRITE); - up(&card->open_sem); + mutex_unlock(&card->open_sem); return nonseekable_open(inode, file); } @@ -234,9 +234,9 @@ static int emu10k1_midi_release(struct inode *inode, struct file *file) kfree(midi_dev); - down(&card->open_sem); + mutex_lock(&card->open_sem); card->open_mode &= ~((file->f_mode << FMODE_MIDI_SHIFT) & (FMODE_MIDI_READ | FMODE_MIDI_WRITE)); - up(&card->open_sem); + mutex_unlock(&card->open_sem); wake_up_interruptible(&card->open_wait); unlock_kernel(); diff --git a/sound/oss/es1370.c b/sound/oss/es1370.c index ae55c536613..094f569cc6e 100644 --- a/sound/oss/es1370.c +++ b/sound/oss/es1370.c @@ -157,6 +157,7 @@ #include <linux/gameport.h> #include <linux/wait.h> #include <linux/dma-mapping.h> +#include <linux/mutex.h> #include <asm/io.h> #include <asm/page.h> @@ -346,7 +347,7 @@ struct es1370_state { unsigned sctrl; spinlock_t lock; - struct semaphore open_sem; + struct mutex open_mutex; mode_t open_mode; wait_queue_head_t open_wait; @@ -393,7 +394,7 @@ struct es1370_state { struct gameport *gameport; #endif - struct semaphore sem; + struct mutex mutex; }; /* --------------------------------------------------------------------- */ @@ -1159,7 +1160,7 @@ static ssize_t es1370_read(struct file *file, char __user *buffer, size_t count, return -ENXIO; if (!access_ok(VERIFY_WRITE, buffer, count)) return -EFAULT; - down(&s->sem); + mutex_lock(&s->mutex); if (!s->dma_adc.ready && (ret = prog_dmabuf_adc(s))) goto out; @@ -1183,14 +1184,14 @@ static ssize_t es1370_read(struct file *file, char __user *buffer, size_t count, ret = -EAGAIN; goto out; } - up(&s->sem); + mutex_unlock(&s->mutex); schedule(); if (signal_pending(current)) { if (!ret) ret = -ERESTARTSYS; goto out; } - down(&s->sem); + mutex_lock(&s->mutex); if (s->dma_adc.mapped) { ret = -ENXIO; @@ -1215,7 +1216,7 @@ static ssize_t es1370_read(struct file *file, char __user *buffer, size_t count, start_adc(s); } out: - up(&s->sem); + mutex_unlock(&s->mutex); remove_wait_queue(&s->dma_adc.wait, &wait); 
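Every sound/oss hunk in this part of the series applies the same mechanical conversion from semaphores-used-as-mutexes to the real mutex API. A side-by-side sketch of the mapping, for reference only (illustrative code, not taken from any single driver):

#include <linux/mutex.h>                        /* replaces <asm/semaphore.h> */

static DEFINE_MUTEX(example_mutex);             /* was: static DECLARE_MUTEX(example_sem); */

static void example_conversion(void)
{
        struct mutex m;

        mutex_init(&m);                         /* was: init_MUTEX(&sem) */
        mutex_lock(&m);                         /* was: down(&sem)       */
        mutex_unlock(&m);                       /* was: up(&sem)         */

        if (mutex_lock_interruptible(&m))       /* was: down_interruptible(&sem) */
                return;                         /* both return 0 on success      */
        mutex_unlock(&m);

        /*
         * Inverted return convention: down_trylock() returns nonzero on
         * failure, mutex_trylock() returns nonzero on success - hence the
         * added '!' in the forte and via82cxxx hunks below.
         */
        if (!mutex_trylock(&m))                 /* was: if (down_trylock(&sem)) */
                return;
        mutex_unlock(&m);
}

Most drivers also rename the struct field (open_sem becomes open_mutex); emu10k1 and the swarm_cs4297a adc/dac locks keep the old field names and only change the type.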
set_current_state(TASK_RUNNING); return ret; @@ -1235,7 +1236,7 @@ static ssize_t es1370_write(struct file *file, const char __user *buffer, size_t return -ENXIO; if (!access_ok(VERIFY_READ, buffer, count)) return -EFAULT; - down(&s->sem); + mutex_lock(&s->mutex); if (!s->dma_dac2.ready && (ret = prog_dmabuf_dac2(s))) goto out; ret = 0; @@ -1263,14 +1264,14 @@ static ssize_t es1370_write(struct file *file, const char __user *buffer, size_t ret = -EAGAIN; goto out; } - up(&s->sem); + mutex_unlock(&s->mutex); schedule(); if (signal_pending(current)) { if (!ret) ret = -ERESTARTSYS; goto out; } - down(&s->sem); + mutex_lock(&s->mutex); if (s->dma_dac2.mapped) { ret = -ENXIO; @@ -1296,7 +1297,7 @@ static ssize_t es1370_write(struct file *file, const char __user *buffer, size_t start_dac2(s); } out: - up(&s->sem); + mutex_unlock(&s->mutex); remove_wait_queue(&s->dma_dac2.wait, &wait); set_current_state(TASK_RUNNING); return ret; @@ -1348,7 +1349,7 @@ static int es1370_mmap(struct file *file, struct vm_area_struct *vma) VALIDATE_STATE(s); lock_kernel(); - down(&s->sem); + mutex_lock(&s->mutex); if (vma->vm_flags & VM_WRITE) { if ((ret = prog_dmabuf_dac2(s)) != 0) { goto out; @@ -1380,7 +1381,7 @@ static int es1370_mmap(struct file *file, struct vm_area_struct *vma) } db->mapped = 1; out: - up(&s->sem); + mutex_unlock(&s->mutex); unlock_kernel(); return ret; } @@ -1752,21 +1753,21 @@ static int es1370_open(struct inode *inode, struct file *file) VALIDATE_STATE(s); file->private_data = s; /* wait for device to become free */ - down(&s->open_sem); + mutex_lock(&s->open_mutex); while (s->open_mode & file->f_mode) { if (file->f_flags & O_NONBLOCK) { - up(&s->open_sem); + mutex_unlock(&s->open_mutex); return -EBUSY; } add_wait_queue(&s->open_wait, &wait); __set_current_state(TASK_INTERRUPTIBLE); - up(&s->open_sem); + mutex_unlock(&s->open_mutex); schedule(); remove_wait_queue(&s->open_wait, &wait); set_current_state(TASK_RUNNING); if (signal_pending(current)) return -ERESTARTSYS; - down(&s->open_sem); + mutex_lock(&s->open_mutex); } spin_lock_irqsave(&s->lock, flags); if (!(s->open_mode & (FMODE_READ|FMODE_WRITE))) @@ -1793,8 +1794,8 @@ static int es1370_open(struct inode *inode, struct file *file) outl(s->ctrl, s->io+ES1370_REG_CONTROL); spin_unlock_irqrestore(&s->lock, flags); s->open_mode |= file->f_mode & (FMODE_READ | FMODE_WRITE); - up(&s->open_sem); - init_MUTEX(&s->sem); + mutex_unlock(&s->open_mutex); + mutex_init(&s->mutex); return nonseekable_open(inode, file); } @@ -1806,7 +1807,7 @@ static int es1370_release(struct inode *inode, struct file *file) lock_kernel(); if (file->f_mode & FMODE_WRITE) drain_dac2(s, file->f_flags & O_NONBLOCK); - down(&s->open_sem); + mutex_lock(&s->open_mutex); if (file->f_mode & FMODE_WRITE) { stop_dac2(s); synchronize_irq(s->irq); @@ -1818,7 +1819,7 @@ static int es1370_release(struct inode *inode, struct file *file) } s->open_mode &= ~(file->f_mode & (FMODE_READ|FMODE_WRITE)); wake_up(&s->open_wait); - up(&s->open_sem); + mutex_unlock(&s->open_mutex); unlock_kernel(); return 0; } @@ -2198,21 +2199,21 @@ static int es1370_open_dac(struct inode *inode, struct file *file) return -EINVAL; file->private_data = s; /* wait for device to become free */ - down(&s->open_sem); + mutex_lock(&s->open_mutex); while (s->open_mode & FMODE_DAC) { if (file->f_flags & O_NONBLOCK) { - up(&s->open_sem); + mutex_unlock(&s->open_mutex); return -EBUSY; } add_wait_queue(&s->open_wait, &wait); __set_current_state(TASK_INTERRUPTIBLE); - up(&s->open_sem); + mutex_unlock(&s->open_mutex); 
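The read and write paths converted in these hunks (cs46xx earlier, es1370 here, es1371 and trident below) all follow one shape: mark the task interruptible, drop the data mutex before schedule() so nothing else is blocked while waiting on the DMA engine, then retake the mutex and re-check state such as dmabuf->mapped, which may have changed while asleep. A condensed sketch under assumed names (my_state and space_available stand in for the drivers' buffer bookkeeping; the real drivers check free space under the card spinlock):

#include <linux/errno.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/wait.h>

struct my_state {
        struct mutex mutex;             /* guards the DMA buffer bookkeeping     */
        wait_queue_head_t wait;         /* woken by the interrupt handler        */
        int space_available;            /* stand-in for the free-space check     */
        int mapped;                     /* buffer mmap()ed: read/write forbidden */
};

static int my_wait_for_space(struct my_state *s)
{
        DECLARE_WAITQUEUE(wait, current);
        int ret = 0;

        add_wait_queue(&s->wait, &wait);
        mutex_lock(&s->mutex);
        for (;;) {
                set_current_state(TASK_INTERRUPTIBLE);
                if (s->space_available)
                        break;
                mutex_unlock(&s->mutex);        /* never sleep holding the mutex */
                schedule();
                if (signal_pending(current)) {
                        ret = -ERESTARTSYS;
                        goto out_nolock;
                }
                mutex_lock(&s->mutex);
                if (s->mapped) {                /* may have changed while asleep */
                        ret = -ENXIO;
                        break;
                }
        }
        mutex_unlock(&s->mutex);
out_nolock:
        remove_wait_queue(&s->wait, &wait);
        set_current_state(TASK_RUNNING);
        return ret;
}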
schedule(); remove_wait_queue(&s->open_wait, &wait); set_current_state(TASK_RUNNING); if (signal_pending(current)) return -ERESTARTSYS; - down(&s->open_sem); + mutex_lock(&s->open_mutex); } s->dma_dac1.ossfragshift = s->dma_dac1.ossmaxfrags = s->dma_dac1.subdivision = 0; s->dma_dac1.enabled = 1; @@ -2227,7 +2228,7 @@ static int es1370_open_dac(struct inode *inode, struct file *file) outl(s->ctrl, s->io+ES1370_REG_CONTROL); spin_unlock_irqrestore(&s->lock, flags); s->open_mode |= FMODE_DAC; - up(&s->open_sem); + mutex_unlock(&s->open_mutex); return nonseekable_open(inode, file); } @@ -2238,12 +2239,12 @@ static int es1370_release_dac(struct inode *inode, struct file *file) VALIDATE_STATE(s); lock_kernel(); drain_dac1(s, file->f_flags & O_NONBLOCK); - down(&s->open_sem); + mutex_lock(&s->open_mutex); stop_dac1(s); dealloc_dmabuf(s, &s->dma_dac1); s->open_mode &= ~FMODE_DAC; wake_up(&s->open_wait); - up(&s->open_sem); + mutex_unlock(&s->open_mutex); unlock_kernel(); return 0; } @@ -2430,21 +2431,21 @@ static int es1370_midi_open(struct inode *inode, struct file *file) VALIDATE_STATE(s); file->private_data = s; /* wait for device to become free */ - down(&s->open_sem); + mutex_lock(&s->open_mutex); while (s->open_mode & (file->f_mode << FMODE_MIDI_SHIFT)) { if (file->f_flags & O_NONBLOCK) { - up(&s->open_sem); + mutex_unlock(&s->open_mutex); return -EBUSY; } add_wait_queue(&s->open_wait, &wait); __set_current_state(TASK_INTERRUPTIBLE); - up(&s->open_sem); + mutex_unlock(&s->open_mutex); schedule(); remove_wait_queue(&s->open_wait, &wait); set_current_state(TASK_RUNNING); if (signal_pending(current)) return -ERESTARTSYS; - down(&s->open_sem); + mutex_lock(&s->open_mutex); } spin_lock_irqsave(&s->lock, flags); if (!(s->open_mode & (FMODE_MIDI_READ | FMODE_MIDI_WRITE))) { @@ -2465,7 +2466,7 @@ static int es1370_midi_open(struct inode *inode, struct file *file) es1370_handle_midi(s); spin_unlock_irqrestore(&s->lock, flags); s->open_mode |= (file->f_mode << FMODE_MIDI_SHIFT) & (FMODE_MIDI_READ | FMODE_MIDI_WRITE); - up(&s->open_sem); + mutex_unlock(&s->open_mutex); return nonseekable_open(inode, file); } @@ -2499,7 +2500,7 @@ static int es1370_midi_release(struct inode *inode, struct file *file) remove_wait_queue(&s->midi.owait, &wait); set_current_state(TASK_RUNNING); } - down(&s->open_sem); + mutex_lock(&s->open_mutex); s->open_mode &= ~((file->f_mode << FMODE_MIDI_SHIFT) & (FMODE_MIDI_READ|FMODE_MIDI_WRITE)); spin_lock_irqsave(&s->lock, flags); if (!(s->open_mode & (FMODE_MIDI_READ | FMODE_MIDI_WRITE))) { @@ -2508,7 +2509,7 @@ static int es1370_midi_release(struct inode *inode, struct file *file) } spin_unlock_irqrestore(&s->lock, flags); wake_up(&s->open_wait); - up(&s->open_sem); + mutex_unlock(&s->open_mutex); unlock_kernel(); return 0; } @@ -2638,7 +2639,7 @@ static int __devinit es1370_probe(struct pci_dev *pcidev, const struct pci_devic init_waitqueue_head(&s->open_wait); init_waitqueue_head(&s->midi.iwait); init_waitqueue_head(&s->midi.owait); - init_MUTEX(&s->open_sem); + mutex_init(&s->open_mutex); spin_lock_init(&s->lock); s->magic = ES1370_MAGIC; s->dev = pcidev; diff --git a/sound/oss/es1371.c b/sound/oss/es1371.c index 5c697f16257..4400c853868 100644 --- a/sound/oss/es1371.c +++ b/sound/oss/es1371.c @@ -129,6 +129,7 @@ #include <linux/gameport.h> #include <linux/wait.h> #include <linux/dma-mapping.h> +#include <linux/mutex.h> #include <asm/io.h> #include <asm/page.h> @@ -419,7 +420,7 @@ struct es1371_state { unsigned dac1rate, dac2rate, adcrate; spinlock_t lock; - struct semaphore 
open_sem; + struct mutex open_mutex; mode_t open_mode; wait_queue_head_t open_wait; @@ -462,7 +463,7 @@ struct es1371_state { struct gameport *gameport; #endif - struct semaphore sem; + struct mutex sem; }; /* --------------------------------------------------------------------- */ @@ -1346,7 +1347,7 @@ static ssize_t es1371_read(struct file *file, char __user *buffer, size_t count, return -ENXIO; if (!access_ok(VERIFY_WRITE, buffer, count)) return -EFAULT; - down(&s->sem); + mutex_lock(&s->sem); if (!s->dma_adc.ready && (ret = prog_dmabuf_adc(s))) goto out2; @@ -1370,14 +1371,14 @@ static ssize_t es1371_read(struct file *file, char __user *buffer, size_t count, ret = -EAGAIN; goto out; } - up(&s->sem); + mutex_unlock(&s->sem); schedule(); if (signal_pending(current)) { if (!ret) ret = -ERESTARTSYS; goto out2; } - down(&s->sem); + mutex_lock(&s->sem); if (s->dma_adc.mapped) { ret = -ENXIO; @@ -1402,7 +1403,7 @@ static ssize_t es1371_read(struct file *file, char __user *buffer, size_t count, start_adc(s); } out: - up(&s->sem); + mutex_unlock(&s->sem); out2: remove_wait_queue(&s->dma_adc.wait, &wait); set_current_state(TASK_RUNNING); @@ -1423,7 +1424,7 @@ static ssize_t es1371_write(struct file *file, const char __user *buffer, size_t return -ENXIO; if (!access_ok(VERIFY_READ, buffer, count)) return -EFAULT; - down(&s->sem); + mutex_lock(&s->sem); if (!s->dma_dac2.ready && (ret = prog_dmabuf_dac2(s))) goto out3; ret = 0; @@ -1451,14 +1452,14 @@ static ssize_t es1371_write(struct file *file, const char __user *buffer, size_t ret = -EAGAIN; goto out; } - up(&s->sem); + mutex_unlock(&s->sem); schedule(); if (signal_pending(current)) { if (!ret) ret = -ERESTARTSYS; goto out2; } - down(&s->sem); + mutex_lock(&s->sem); if (s->dma_dac2.mapped) { ret = -ENXIO; @@ -1484,7 +1485,7 @@ static ssize_t es1371_write(struct file *file, const char __user *buffer, size_t start_dac2(s); } out: - up(&s->sem); + mutex_unlock(&s->sem); out2: remove_wait_queue(&s->dma_dac2.wait, &wait); out3: @@ -1538,7 +1539,7 @@ static int es1371_mmap(struct file *file, struct vm_area_struct *vma) VALIDATE_STATE(s); lock_kernel(); - down(&s->sem); + mutex_lock(&s->sem); if (vma->vm_flags & VM_WRITE) { if ((ret = prog_dmabuf_dac2(s)) != 0) { @@ -1571,7 +1572,7 @@ static int es1371_mmap(struct file *file, struct vm_area_struct *vma) } db->mapped = 1; out: - up(&s->sem); + mutex_unlock(&s->sem); unlock_kernel(); return ret; } @@ -1938,21 +1939,21 @@ static int es1371_open(struct inode *inode, struct file *file) VALIDATE_STATE(s); file->private_data = s; /* wait for device to become free */ - down(&s->open_sem); + mutex_lock(&s->open_mutex); while (s->open_mode & file->f_mode) { if (file->f_flags & O_NONBLOCK) { - up(&s->open_sem); + mutex_unlock(&s->open_mutex); return -EBUSY; } add_wait_queue(&s->open_wait, &wait); __set_current_state(TASK_INTERRUPTIBLE); - up(&s->open_sem); + mutex_unlock(&s->open_mutex); schedule(); remove_wait_queue(&s->open_wait, &wait); set_current_state(TASK_RUNNING); if (signal_pending(current)) return -ERESTARTSYS; - down(&s->open_sem); + mutex_lock(&s->open_mutex); } if (file->f_mode & FMODE_READ) { s->dma_adc.ossfragshift = s->dma_adc.ossmaxfrags = s->dma_adc.subdivision = 0; @@ -1982,8 +1983,8 @@ static int es1371_open(struct inode *inode, struct file *file) outl(s->sctrl, s->io+ES1371_REG_SERIAL_CONTROL); spin_unlock_irqrestore(&s->lock, flags); s->open_mode |= file->f_mode & (FMODE_READ | FMODE_WRITE); - up(&s->open_sem); - init_MUTEX(&s->sem); + mutex_unlock(&s->open_mutex); + mutex_init(&s->sem); 
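Both es1370 and es1371 (like most of the other drivers in this series) serialize opens the same way: an open_mode bitmask protected by open_mutex records which access modes are busy, and a blocked opener sleeps on open_wait with the mutex released so the current owner can get through release. A condensed sketch with assumed names:

#include <linux/fs.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/wait.h>

struct my_dev {
        struct mutex open_mutex;        /* mutex_init() in probe          */
        wait_queue_head_t open_wait;    /* init_waitqueue_head() in probe */
        mode_t open_mode;               /* FMODE_* bits currently in use  */
};

static int my_open(struct my_dev *d, struct file *file)
{
        DECLARE_WAITQUEUE(wait, current);

        mutex_lock(&d->open_mutex);
        while (d->open_mode & file->f_mode) {   /* requested mode is busy */
                if (file->f_flags & O_NONBLOCK) {
                        mutex_unlock(&d->open_mutex);
                        return -EBUSY;
                }
                add_wait_queue(&d->open_wait, &wait);
                __set_current_state(TASK_INTERRUPTIBLE);
                mutex_unlock(&d->open_mutex);   /* sleep without the mutex */
                schedule();
                remove_wait_queue(&d->open_wait, &wait);
                set_current_state(TASK_RUNNING);
                if (signal_pending(current))
                        return -ERESTARTSYS;
                mutex_lock(&d->open_mutex);
        }
        d->open_mode |= file->f_mode & (FMODE_READ | FMODE_WRITE);
        mutex_unlock(&d->open_mutex);
        return 0;
}

static int my_release(struct my_dev *d, struct file *file)
{
        mutex_lock(&d->open_mutex);
        d->open_mode &= ~(file->f_mode & (FMODE_READ | FMODE_WRITE));
        wake_up(&d->open_wait);         /* let a blocked opener re-check */
        mutex_unlock(&d->open_mutex);
        return 0;
}

Whether the wake_up() lands before or after the unlock differs between drivers (es1370 versus es1371, for example); either order works because the woken opener re-checks open_mode under the mutex.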
return nonseekable_open(inode, file); } @@ -1995,7 +1996,7 @@ static int es1371_release(struct inode *inode, struct file *file) lock_kernel(); if (file->f_mode & FMODE_WRITE) drain_dac2(s, file->f_flags & O_NONBLOCK); - down(&s->open_sem); + mutex_lock(&s->open_mutex); if (file->f_mode & FMODE_WRITE) { stop_dac2(s); dealloc_dmabuf(s, &s->dma_dac2); @@ -2005,7 +2006,7 @@ static int es1371_release(struct inode *inode, struct file *file) dealloc_dmabuf(s, &s->dma_adc); } s->open_mode &= ~(file->f_mode & (FMODE_READ|FMODE_WRITE)); - up(&s->open_sem); + mutex_unlock(&s->open_mutex); wake_up(&s->open_wait); unlock_kernel(); return 0; @@ -2377,21 +2378,21 @@ static int es1371_open_dac(struct inode *inode, struct file *file) return -EINVAL; file->private_data = s; /* wait for device to become free */ - down(&s->open_sem); + mutex_lock(&s->open_mutex); while (s->open_mode & FMODE_DAC) { if (file->f_flags & O_NONBLOCK) { - up(&s->open_sem); + mutex_unlock(&s->open_mutex); return -EBUSY; } add_wait_queue(&s->open_wait, &wait); __set_current_state(TASK_INTERRUPTIBLE); - up(&s->open_sem); + mutex_unlock(&s->open_mutex); schedule(); remove_wait_queue(&s->open_wait, &wait); set_current_state(TASK_RUNNING); if (signal_pending(current)) return -ERESTARTSYS; - down(&s->open_sem); + mutex_lock(&s->open_mutex); } s->dma_dac1.ossfragshift = s->dma_dac1.ossmaxfrags = s->dma_dac1.subdivision = 0; s->dma_dac1.enabled = 1; @@ -2405,7 +2406,7 @@ static int es1371_open_dac(struct inode *inode, struct file *file) outl(s->sctrl, s->io+ES1371_REG_SERIAL_CONTROL); spin_unlock_irqrestore(&s->lock, flags); s->open_mode |= FMODE_DAC; - up(&s->open_sem); + mutex_unlock(&s->open_mutex); return nonseekable_open(inode, file); } @@ -2416,11 +2417,11 @@ static int es1371_release_dac(struct inode *inode, struct file *file) VALIDATE_STATE(s); lock_kernel(); drain_dac1(s, file->f_flags & O_NONBLOCK); - down(&s->open_sem); + mutex_lock(&s->open_mutex); stop_dac1(s); dealloc_dmabuf(s, &s->dma_dac1); s->open_mode &= ~FMODE_DAC; - up(&s->open_sem); + mutex_unlock(&s->open_mutex); wake_up(&s->open_wait); unlock_kernel(); return 0; @@ -2608,21 +2609,21 @@ static int es1371_midi_open(struct inode *inode, struct file *file) VALIDATE_STATE(s); file->private_data = s; /* wait for device to become free */ - down(&s->open_sem); + mutex_lock(&s->open_mutex); while (s->open_mode & (file->f_mode << FMODE_MIDI_SHIFT)) { if (file->f_flags & O_NONBLOCK) { - up(&s->open_sem); + mutex_unlock(&s->open_mutex); return -EBUSY; } add_wait_queue(&s->open_wait, &wait); __set_current_state(TASK_INTERRUPTIBLE); - up(&s->open_sem); + mutex_unlock(&s->open_mutex); schedule(); remove_wait_queue(&s->open_wait, &wait); set_current_state(TASK_RUNNING); if (signal_pending(current)) return -ERESTARTSYS; - down(&s->open_sem); + mutex_lock(&s->open_mutex); } spin_lock_irqsave(&s->lock, flags); if (!(s->open_mode & (FMODE_MIDI_READ | FMODE_MIDI_WRITE))) { @@ -2643,7 +2644,7 @@ static int es1371_midi_open(struct inode *inode, struct file *file) es1371_handle_midi(s); spin_unlock_irqrestore(&s->lock, flags); s->open_mode |= (file->f_mode << FMODE_MIDI_SHIFT) & (FMODE_MIDI_READ | FMODE_MIDI_WRITE); - up(&s->open_sem); + mutex_unlock(&s->open_mutex); return nonseekable_open(inode, file); } @@ -2676,7 +2677,7 @@ static int es1371_midi_release(struct inode *inode, struct file *file) remove_wait_queue(&s->midi.owait, &wait); set_current_state(TASK_RUNNING); } - down(&s->open_sem); + mutex_lock(&s->open_mutex); s->open_mode &= ~((file->f_mode << FMODE_MIDI_SHIFT) & 
(FMODE_MIDI_READ|FMODE_MIDI_WRITE)); spin_lock_irqsave(&s->lock, flags); if (!(s->open_mode & (FMODE_MIDI_READ | FMODE_MIDI_WRITE))) { @@ -2684,7 +2685,7 @@ static int es1371_midi_release(struct inode *inode, struct file *file) outl(s->ctrl, s->io+ES1371_REG_CONTROL); } spin_unlock_irqrestore(&s->lock, flags); - up(&s->open_sem); + mutex_unlock(&s->open_mutex); wake_up(&s->open_wait); unlock_kernel(); return 0; @@ -2884,7 +2885,7 @@ static int __devinit es1371_probe(struct pci_dev *pcidev, const struct pci_devic init_waitqueue_head(&s->open_wait); init_waitqueue_head(&s->midi.iwait); init_waitqueue_head(&s->midi.owait); - init_MUTEX(&s->open_sem); + mutex_init(&s->open_mutex); spin_lock_init(&s->lock); s->magic = ES1371_MAGIC; s->dev = pcidev; diff --git a/sound/oss/esssolo1.c b/sound/oss/esssolo1.c index 849b59f67ef..78d3e29ce96 100644 --- a/sound/oss/esssolo1.c +++ b/sound/oss/esssolo1.c @@ -105,6 +105,8 @@ #include <linux/gameport.h> #include <linux/wait.h> #include <linux/dma-mapping.h> +#include <linux/mutex.h> + #include <asm/io.h> #include <asm/page.h> @@ -191,7 +193,7 @@ struct solo1_state { unsigned ena; spinlock_t lock; - struct semaphore open_sem; + struct mutex open_mutex; mode_t open_mode; wait_queue_head_t open_wait; @@ -1581,7 +1583,7 @@ static int solo1_release(struct inode *inode, struct file *file) lock_kernel(); if (file->f_mode & FMODE_WRITE) drain_dac(s, file->f_flags & O_NONBLOCK); - down(&s->open_sem); + mutex_lock(&s->open_mutex); if (file->f_mode & FMODE_WRITE) { stop_dac(s); outb(0, s->iobase+6); /* disable DMA */ @@ -1595,7 +1597,7 @@ static int solo1_release(struct inode *inode, struct file *file) } s->open_mode &= ~(FMODE_READ | FMODE_WRITE); wake_up(&s->open_wait); - up(&s->open_sem); + mutex_unlock(&s->open_mutex); unlock_kernel(); return 0; } @@ -1624,21 +1626,21 @@ static int solo1_open(struct inode *inode, struct file *file) VALIDATE_STATE(s); file->private_data = s; /* wait for device to become free */ - down(&s->open_sem); + mutex_lock(&s->open_mutex); while (s->open_mode & (FMODE_READ | FMODE_WRITE)) { if (file->f_flags & O_NONBLOCK) { - up(&s->open_sem); + mutex_unlock(&s->open_mutex); return -EBUSY; } add_wait_queue(&s->open_wait, &wait); __set_current_state(TASK_INTERRUPTIBLE); - up(&s->open_sem); + mutex_unlock(&s->open_mutex); schedule(); remove_wait_queue(&s->open_wait, &wait); set_current_state(TASK_RUNNING); if (signal_pending(current)) return -ERESTARTSYS; - down(&s->open_sem); + mutex_lock(&s->open_mutex); } s->fmt = AFMT_U8; s->channels = 1; @@ -1650,7 +1652,7 @@ static int solo1_open(struct inode *inode, struct file *file) s->dma_dac.ossfragshift = s->dma_dac.ossmaxfrags = s->dma_dac.subdivision = 0; s->dma_dac.enabled = 1; s->open_mode |= file->f_mode & (FMODE_READ | FMODE_WRITE); - up(&s->open_sem); + mutex_unlock(&s->open_mutex); prog_codec(s); return nonseekable_open(inode, file); } @@ -1911,21 +1913,21 @@ static int solo1_midi_open(struct inode *inode, struct file *file) VALIDATE_STATE(s); file->private_data = s; /* wait for device to become free */ - down(&s->open_sem); + mutex_lock(&s->open_mutex); while (s->open_mode & (file->f_mode << FMODE_MIDI_SHIFT)) { if (file->f_flags & O_NONBLOCK) { - up(&s->open_sem); + mutex_unlock(&s->open_mutex); return -EBUSY; } add_wait_queue(&s->open_wait, &wait); __set_current_state(TASK_INTERRUPTIBLE); - up(&s->open_sem); + mutex_unlock(&s->open_mutex); schedule(); remove_wait_queue(&s->open_wait, &wait); set_current_state(TASK_RUNNING); if (signal_pending(current)) return -ERESTARTSYS; - 
down(&s->open_sem); + mutex_lock(&s->open_mutex); } spin_lock_irqsave(&s->lock, flags); if (!(s->open_mode & (FMODE_MIDI_READ | FMODE_MIDI_WRITE))) { @@ -1951,7 +1953,7 @@ static int solo1_midi_open(struct inode *inode, struct file *file) } spin_unlock_irqrestore(&s->lock, flags); s->open_mode |= (file->f_mode << FMODE_MIDI_SHIFT) & (FMODE_MIDI_READ | FMODE_MIDI_WRITE); - up(&s->open_sem); + mutex_unlock(&s->open_mutex); return nonseekable_open(inode, file); } @@ -1985,7 +1987,7 @@ static int solo1_midi_release(struct inode *inode, struct file *file) remove_wait_queue(&s->midi.owait, &wait); set_current_state(TASK_RUNNING); } - down(&s->open_sem); + mutex_lock(&s->open_mutex); s->open_mode &= ~((file->f_mode << FMODE_MIDI_SHIFT) & (FMODE_MIDI_READ|FMODE_MIDI_WRITE)); spin_lock_irqsave(&s->lock, flags); if (!(s->open_mode & (FMODE_MIDI_READ | FMODE_MIDI_WRITE))) { @@ -1994,7 +1996,7 @@ static int solo1_midi_release(struct inode *inode, struct file *file) } spin_unlock_irqrestore(&s->lock, flags); wake_up(&s->open_wait); - up(&s->open_sem); + mutex_unlock(&s->open_mutex); unlock_kernel(); return 0; } @@ -2132,24 +2134,24 @@ static int solo1_dmfm_open(struct inode *inode, struct file *file) VALIDATE_STATE(s); file->private_data = s; /* wait for device to become free */ - down(&s->open_sem); + mutex_lock(&s->open_mutex); while (s->open_mode & FMODE_DMFM) { if (file->f_flags & O_NONBLOCK) { - up(&s->open_sem); + mutex_unlock(&s->open_mutex); return -EBUSY; } add_wait_queue(&s->open_wait, &wait); __set_current_state(TASK_INTERRUPTIBLE); - up(&s->open_sem); + mutex_unlock(&s->open_mutex); schedule(); remove_wait_queue(&s->open_wait, &wait); set_current_state(TASK_RUNNING); if (signal_pending(current)) return -ERESTARTSYS; - down(&s->open_sem); + mutex_lock(&s->open_mutex); } if (!request_region(s->sbbase, FMSYNTH_EXTENT, "ESS Solo1")) { - up(&s->open_sem); + mutex_unlock(&s->open_mutex); printk(KERN_ERR "solo1: FM synth io ports in use, opl3 loaded?\n"); return -EBUSY; } @@ -2161,7 +2163,7 @@ static int solo1_dmfm_open(struct inode *inode, struct file *file) outb(5, s->sbbase+2); outb(1, s->sbbase+3); /* enable OPL3 */ s->open_mode |= FMODE_DMFM; - up(&s->open_sem); + mutex_unlock(&s->open_mutex); return nonseekable_open(inode, file); } @@ -2172,7 +2174,7 @@ static int solo1_dmfm_release(struct inode *inode, struct file *file) VALIDATE_STATE(s); lock_kernel(); - down(&s->open_sem); + mutex_lock(&s->open_mutex); s->open_mode &= ~FMODE_DMFM; for (regb = 0xb0; regb < 0xb9; regb++) { outb(regb, s->sbbase); @@ -2182,7 +2184,7 @@ static int solo1_dmfm_release(struct inode *inode, struct file *file) } release_region(s->sbbase, FMSYNTH_EXTENT); wake_up(&s->open_wait); - up(&s->open_sem); + mutex_unlock(&s->open_mutex); unlock_kernel(); return 0; } @@ -2362,7 +2364,7 @@ static int __devinit solo1_probe(struct pci_dev *pcidev, const struct pci_device init_waitqueue_head(&s->open_wait); init_waitqueue_head(&s->midi.iwait); init_waitqueue_head(&s->midi.owait); - init_MUTEX(&s->open_sem); + mutex_init(&s->open_mutex); spin_lock_init(&s->lock); s->magic = SOLO1_MAGIC; s->dev = pcidev; diff --git a/sound/oss/forte.c b/sound/oss/forte.c index 8406bc90c4f..0294eec8ad9 100644 --- a/sound/oss/forte.c +++ b/sound/oss/forte.c @@ -43,6 +43,7 @@ #include <linux/interrupt.h> #include <linux/proc_fs.h> +#include <linux/mutex.h> #include <asm/uaccess.h> #include <asm/io.h> @@ -185,7 +186,7 @@ struct forte_chip { unsigned long iobase; int irq; - struct semaphore open_sem; /* Device access */ + struct mutex open_mutex; /* 
Device access */ spinlock_t lock; /* State */ spinlock_t ac97_lock; @@ -1242,13 +1243,13 @@ forte_dsp_open (struct inode *inode, struct file *file) struct forte_chip *chip = forte; /* FIXME: HACK FROM HELL! */ if (file->f_flags & O_NONBLOCK) { - if (down_trylock (&chip->open_sem)) { + if (!mutex_trylock(&chip->open_mutex)) { DPRINTK ("%s: returning -EAGAIN\n", __FUNCTION__); return -EAGAIN; } } else { - if (down_interruptible (&chip->open_sem)) { + if (mutex_lock_interruptible(&chip->open_mutex)) { DPRINTK ("%s: returning -ERESTARTSYS\n", __FUNCTION__); return -ERESTARTSYS; } @@ -1302,7 +1303,7 @@ forte_dsp_release (struct inode *inode, struct file *file) spin_unlock_irq (&chip->lock); } - up (&chip->open_sem); + mutex_unlock(&chip->open_mutex); return ret; } @@ -2011,7 +2012,7 @@ forte_probe (struct pci_dev *pci_dev, const struct pci_device_id *pci_id) memset (chip, 0, sizeof (struct forte_chip)); chip->pci_dev = pci_dev; - init_MUTEX(&chip->open_sem); + mutex_init(&chip->open_mutex); spin_lock_init (&chip->lock); spin_lock_init (&chip->ac97_lock); diff --git a/sound/oss/hal2.c b/sound/oss/hal2.c index afe97c4ce06..dd4f59d30a3 100644 --- a/sound/oss/hal2.c +++ b/sound/oss/hal2.c @@ -32,6 +32,8 @@ #include <linux/dma-mapping.h> #include <linux/sound.h> #include <linux/soundcard.h> +#include <linux/mutex.h> + #include <asm/io.h> #include <asm/sgi/hpc3.h> @@ -92,7 +94,7 @@ struct hal2_codec { wait_queue_head_t dma_wait; spinlock_t lock; - struct semaphore sem; + struct mutex sem; int usecount; /* recording and playback are * independent */ @@ -1178,7 +1180,7 @@ static ssize_t hal2_read(struct file *file, char *buffer, if (!count) return 0; - if (down_interruptible(&adc->sem)) + if (mutex_lock_interruptible(&adc->sem)) return -EINTR; if (file->f_flags & O_NONBLOCK) { err = hal2_get_buffer(hal2, buffer, count); @@ -1217,7 +1219,7 @@ static ssize_t hal2_read(struct file *file, char *buffer, } } while (count > 0 && err >= 0); } - up(&adc->sem); + mutex_unlock(&adc->sem); return err; } @@ -1232,7 +1234,7 @@ static ssize_t hal2_write(struct file *file, const char *buffer, if (!count) return 0; - if (down_interruptible(&dac->sem)) + if (mutex_lock_interruptible(&dac->sem)) return -EINTR; if (file->f_flags & O_NONBLOCK) { err = hal2_add_buffer(hal2, buf, count); @@ -1271,7 +1273,7 @@ static ssize_t hal2_write(struct file *file, const char *buffer, } } while (count > 0 && err >= 0); } - up(&dac->sem); + mutex_unlock(&dac->sem); return err; } @@ -1356,20 +1358,20 @@ static int hal2_release(struct inode *inode, struct file *file) if (file->f_mode & FMODE_READ) { struct hal2_codec *adc = &hal2->adc; - down(&adc->sem); + mutex_lock(&adc->sem); hal2_stop_adc(hal2); hal2_free_adc_dmabuf(adc); adc->usecount--; - up(&adc->sem); + mutex_unlock(&adc->sem); } if (file->f_mode & FMODE_WRITE) { struct hal2_codec *dac = &hal2->dac; - down(&dac->sem); + mutex_lock(&dac->sem); hal2_sync_dac(hal2); hal2_free_dac_dmabuf(dac); dac->usecount--; - up(&dac->sem); + mutex_unlock(&dac->sem); } return 0; @@ -1400,7 +1402,7 @@ static void hal2_init_codec(struct hal2_codec *codec, struct hpc3_regs *hpc3, codec->pbus.pbusnr = index; codec->pbus.pbus = &hpc3->pbdma[index]; init_waitqueue_head(&codec->dma_wait); - init_MUTEX(&codec->sem); + mutex_init(&codec->sem); spin_lock_init(&codec->lock); } diff --git a/sound/oss/i810_audio.c b/sound/oss/i810_audio.c index abc242abd5b..dd2b871cdac 100644 --- a/sound/oss/i810_audio.c +++ b/sound/oss/i810_audio.c @@ -100,6 +100,8 @@ #include <linux/smp_lock.h> #include <linux/ac97_codec.h> 
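forte (and via82cxxx later in the series) takes a different approach from the open_mode scheme above: the open path simply acquires open_mutex and keeps it until release, so the lock itself is the single-open flag. The conversion therefore maps down_trylock() for the O_NONBLOCK case to mutex_trylock() (with the added negation, since the return sense is inverted) and down_interruptible() to mutex_lock_interruptible(). Reduced sketch, names assumed:

#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/mutex.h>

struct my_chip {
        struct mutex open_mutex;        /* held from open until release */
};

static int my_dsp_open(struct my_chip *chip, struct file *file)
{
        if (file->f_flags & O_NONBLOCK) {
                /* mutex_trylock(): nonzero on success, 0 if already locked */
                if (!mutex_trylock(&chip->open_mutex))
                        return -EAGAIN;
        } else {
                /* 0 on success, -EINTR if a signal arrived while waiting */
                if (mutex_lock_interruptible(&chip->open_mutex))
                        return -ERESTARTSYS;
        }
        /* ... allocate channels, program the codec ... */
        return 0;
}

static int my_dsp_release(struct my_chip *chip)
{
        /* ... stop DMA, free channels ... */
        mutex_unlock(&chip->open_mutex);
        return 0;
}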
#include <linux/bitops.h> +#include <linux/mutex.h> + #include <asm/uaccess.h> #define DRIVER_VERSION "1.01" @@ -331,7 +333,7 @@ struct i810_state { struct i810_card *card; /* Card info */ /* single open lock mechanism, only used for recording */ - struct semaphore open_sem; + struct mutex open_mutex; wait_queue_head_t open_wait; /* file mode */ @@ -2597,7 +2599,7 @@ found_virt: state->card = card; state->magic = I810_STATE_MAGIC; init_waitqueue_head(&dmabuf->wait); - init_MUTEX(&state->open_sem); + mutex_init(&state->open_mutex); file->private_data = state; dmabuf->trigger = 0; @@ -3213,7 +3215,7 @@ static void __devinit i810_configure_clocking (void) state->card = card; state->magic = I810_STATE_MAGIC; init_waitqueue_head(&dmabuf->wait); - init_MUTEX(&state->open_sem); + mutex_init(&state->open_mutex); dmabuf->fmt = I810_FMT_STEREO | I810_FMT_16BIT; dmabuf->trigger = PCM_ENABLE_OUTPUT; i810_set_spdif_output(state, -1, 0); diff --git a/sound/oss/ite8172.c b/sound/oss/ite8172.c index 8fd2f9a9e66..ffcb910f5c3 100644 --- a/sound/oss/ite8172.c +++ b/sound/oss/ite8172.c @@ -71,6 +71,8 @@ #include <linux/smp_lock.h> #include <linux/ac97_codec.h> #include <linux/interrupt.h> +#include <linux/mutex.h> + #include <asm/io.h> #include <asm/dma.h> #include <asm/uaccess.h> @@ -304,7 +306,7 @@ struct it8172_state { unsigned dacrate, adcrate; spinlock_t lock; - struct semaphore open_sem; + struct mutex open_mutex; mode_t open_mode; wait_queue_head_t open_wait; @@ -1801,21 +1803,21 @@ static int it8172_open(struct inode *inode, struct file *file) } file->private_data = s; /* wait for device to become free */ - down(&s->open_sem); + mutex_lock(&s->open_mutex); while (s->open_mode & file->f_mode) { if (file->f_flags & O_NONBLOCK) { - up(&s->open_sem); + mutex_unlock(&s->open_mutex); return -EBUSY; } add_wait_queue(&s->open_wait, &wait); __set_current_state(TASK_INTERRUPTIBLE); - up(&s->open_sem); + mutex_unlock(&s->open_mutex); schedule(); remove_wait_queue(&s->open_wait, &wait); set_current_state(TASK_RUNNING); if (signal_pending(current)) return -ERESTARTSYS; - down(&s->open_sem); + mutex_lock(&s->open_mutex); } spin_lock_irqsave(&s->lock, flags); @@ -1850,7 +1852,7 @@ static int it8172_open(struct inode *inode, struct file *file) spin_unlock_irqrestore(&s->lock, flags); s->open_mode |= (file->f_mode & (FMODE_READ | FMODE_WRITE)); - up(&s->open_sem); + mutex_unlock(&s->open_mutex); return nonseekable_open(inode, file); } @@ -1864,7 +1866,7 @@ static int it8172_release(struct inode *inode, struct file *file) lock_kernel(); if (file->f_mode & FMODE_WRITE) drain_dac(s, file->f_flags & O_NONBLOCK); - down(&s->open_sem); + mutex_lock(&s->open_mutex); if (file->f_mode & FMODE_WRITE) { stop_dac(s); dealloc_dmabuf(s, &s->dma_dac); @@ -1874,7 +1876,7 @@ static int it8172_release(struct inode *inode, struct file *file) dealloc_dmabuf(s, &s->dma_adc); } s->open_mode &= ((~file->f_mode) & (FMODE_READ|FMODE_WRITE)); - up(&s->open_sem); + mutex_unlock(&s->open_mutex); wake_up(&s->open_wait); unlock_kernel(); return 0; @@ -1997,7 +1999,7 @@ static int __devinit it8172_probe(struct pci_dev *pcidev, init_waitqueue_head(&s->dma_adc.wait); init_waitqueue_head(&s->dma_dac.wait); init_waitqueue_head(&s->open_wait); - init_MUTEX(&s->open_sem); + mutex_init(&s->open_mutex); spin_lock_init(&s->lock); s->dev = pcidev; s->io = pci_resource_start(pcidev, 0); diff --git a/sound/oss/maestro.c b/sound/oss/maestro.c index d4b569acf76..e647f2f8627 100644 --- a/sound/oss/maestro.c +++ b/sound/oss/maestro.c @@ -223,6 +223,8 @@ #include 
<linux/reboot.h> #include <linux/bitops.h> #include <linux/wait.h> +#include <linux/mutex.h> + #include <asm/current.h> #include <asm/dma.h> @@ -397,7 +399,7 @@ struct ess_state { /* this locks around the oss state in the driver */ spinlock_t lock; /* only let 1 be opening at a time */ - struct semaphore open_sem; + struct mutex open_mutex; wait_queue_head_t open_wait; mode_t open_mode; @@ -3020,26 +3022,26 @@ ess_open(struct inode *inode, struct file *file) VALIDATE_STATE(s); file->private_data = s; /* wait for device to become free */ - down(&s->open_sem); + mutex_lock(&s->open_mutex); while (s->open_mode & file->f_mode) { if (file->f_flags & O_NONBLOCK) { - up(&s->open_sem); + mutex_unlock(&s->open_mutex); return -EWOULDBLOCK; } - up(&s->open_sem); + mutex_unlock(&s->open_mutex); interruptible_sleep_on(&s->open_wait); if (signal_pending(current)) return -ERESTARTSYS; - down(&s->open_sem); + mutex_lock(&s->open_mutex); } /* under semaphore.. */ if ((s->card->dmapages==NULL) && allocate_buffers(s)) { - up(&s->open_sem); + mutex_unlock(&s->open_mutex); return -ENOMEM; } - /* we're covered by the open_sem */ + /* we're covered by the open_mutex */ if( ! s->card->dsps_open ) { maestro_power(s->card,ACPI_D0); start_bob(s); @@ -3076,7 +3078,7 @@ ess_open(struct inode *inode, struct file *file) set_fmt(s, fmtm, fmts); s->open_mode |= file->f_mode & (FMODE_READ | FMODE_WRITE); - up(&s->open_sem); + mutex_unlock(&s->open_mutex); return nonseekable_open(inode, file); } @@ -3089,7 +3091,7 @@ ess_release(struct inode *inode, struct file *file) lock_kernel(); if (file->f_mode & FMODE_WRITE) drain_dac(s, file->f_flags & O_NONBLOCK); - down(&s->open_sem); + mutex_lock(&s->open_mutex); if (file->f_mode & FMODE_WRITE) { stop_dac(s); } @@ -3098,7 +3100,7 @@ ess_release(struct inode *inode, struct file *file) } s->open_mode &= (~file->f_mode) & (FMODE_READ|FMODE_WRITE); - /* we're covered by the open_sem */ + /* we're covered by the open_mutex */ M_printk("maestro: %d dsps now alive\n",s->card->dsps_open-1); if( --s->card->dsps_open <= 0) { s->card->dsps_open = 0; @@ -3106,7 +3108,7 @@ ess_release(struct inode *inode, struct file *file) free_buffers(s); maestro_power(s->card,ACPI_D2); } - up(&s->open_sem); + mutex_unlock(&s->open_mutex); wake_up(&s->open_wait); unlock_kernel(); return 0; @@ -3466,7 +3468,7 @@ maestro_probe(struct pci_dev *pcidev,const struct pci_device_id *pdid) init_waitqueue_head(&s->dma_dac.wait); init_waitqueue_head(&s->open_wait); spin_lock_init(&s->lock); - init_MUTEX(&s->open_sem); + mutex_init(&s->open_mutex); s->magic = ESS_STATE_MAGIC; s->apu[0] = 6*i; diff --git a/sound/oss/maestro3.c b/sound/oss/maestro3.c index f3dec70fcb9..66044aff258 100644 --- a/sound/oss/maestro3.c +++ b/sound/oss/maestro3.c @@ -144,6 +144,8 @@ #include <linux/spinlock.h> #include <linux/ac97_codec.h> #include <linux/wait.h> +#include <linux/mutex.h> + #include <asm/io.h> #include <asm/dma.h> @@ -205,7 +207,7 @@ struct m3_state { when irqhandler uses s->lock and m3_assp_read uses card->lock ? 
*/ - struct semaphore open_sem; + struct mutex open_mutex; wait_queue_head_t open_wait; mode_t open_mode; @@ -2013,17 +2015,17 @@ static int m3_open(struct inode *inode, struct file *file) file->private_data = s; /* wait for device to become free */ - down(&s->open_sem); + mutex_lock(&s->open_mutex); while (s->open_mode & file->f_mode) { if (file->f_flags & O_NONBLOCK) { - up(&s->open_sem); + mutex_unlock(&s->open_mutex); return -EWOULDBLOCK; } - up(&s->open_sem); + mutex_unlock(&s->open_mutex); interruptible_sleep_on(&s->open_wait); if (signal_pending(current)) return -ERESTARTSYS; - down(&s->open_sem); + mutex_lock(&s->open_mutex); } spin_lock_irqsave(&c->lock, flags); @@ -2047,7 +2049,7 @@ static int m3_open(struct inode *inode, struct file *file) set_fmt(s, fmtm, fmts); s->open_mode |= file->f_mode & (FMODE_READ | FMODE_WRITE); - up(&s->open_sem); + mutex_unlock(&s->open_mutex); spin_unlock_irqrestore(&c->lock, flags); return nonseekable_open(inode, file); } @@ -2062,7 +2064,7 @@ static int m3_release(struct inode *inode, struct file *file) if (file->f_mode & FMODE_WRITE) drain_dac(s, file->f_flags & O_NONBLOCK); - down(&s->open_sem); + mutex_lock(&s->open_mutex); spin_lock_irqsave(&card->lock, flags); if (file->f_mode & FMODE_WRITE) { @@ -2083,7 +2085,7 @@ static int m3_release(struct inode *inode, struct file *file) s->open_mode &= (~file->f_mode) & (FMODE_READ|FMODE_WRITE); spin_unlock_irqrestore(&card->lock, flags); - up(&s->open_sem); + mutex_unlock(&s->open_mutex); wake_up(&s->open_wait); return 0; @@ -2679,7 +2681,7 @@ static int __devinit m3_probe(struct pci_dev *pci_dev, const struct pci_device_i init_waitqueue_head(&s->dma_adc.wait); init_waitqueue_head(&s->dma_dac.wait); init_waitqueue_head(&s->open_wait); - init_MUTEX(&(s->open_sem)); + mutex_init(&(s->open_mutex)); s->magic = M3_STATE_MAGIC; m3_assp_client_init(s); diff --git a/sound/oss/nec_vrc5477.c b/sound/oss/nec_vrc5477.c index fbb9170e8e0..21c1954d910 100644 --- a/sound/oss/nec_vrc5477.c +++ b/sound/oss/nec_vrc5477.c @@ -78,6 +78,8 @@ #include <linux/spinlock.h> #include <linux/smp_lock.h> #include <linux/ac97_codec.h> +#include <linux/mutex.h> + #include <asm/io.h> #include <asm/dma.h> #include <asm/uaccess.h> @@ -198,7 +200,7 @@ struct vrc5477_ac97_state { unsigned short extended_status; spinlock_t lock; - struct semaphore open_sem; + struct mutex open_mutex; mode_t open_mode; wait_queue_head_t open_wait; @@ -1617,22 +1619,22 @@ static int vrc5477_ac97_open(struct inode *inode, struct file *file) file->private_data = s; /* wait for device to become free */ - down(&s->open_sem); + mutex_lock(&s->open_mutex); while (s->open_mode & file->f_mode) { if (file->f_flags & O_NONBLOCK) { - up(&s->open_sem); + mutex_unlock(&s->open_mutex); return -EBUSY; } add_wait_queue(&s->open_wait, &wait); __set_current_state(TASK_INTERRUPTIBLE); - up(&s->open_sem); + mutex_unlock(&s->open_mutex); schedule(); remove_wait_queue(&s->open_wait, &wait); set_current_state(TASK_RUNNING); if (signal_pending(current)) return -ERESTARTSYS; - down(&s->open_sem); + mutex_lock(&s->open_mutex); } spin_lock_irqsave(&s->lock, flags); @@ -1659,7 +1661,7 @@ static int vrc5477_ac97_open(struct inode *inode, struct file *file) bailout: spin_unlock_irqrestore(&s->lock, flags); - up(&s->open_sem); + mutex_unlock(&s->open_mutex); return ret; } @@ -1671,7 +1673,7 @@ static int vrc5477_ac97_release(struct inode *inode, struct file *file) lock_kernel(); if (file->f_mode & FMODE_WRITE) drain_dac(s, file->f_flags & O_NONBLOCK); - down(&s->open_sem); + 
mutex_lock(&s->open_mutex); if (file->f_mode & FMODE_WRITE) { stop_dac(s); dealloc_dmabuf(s, &s->dma_dac); @@ -1681,7 +1683,7 @@ static int vrc5477_ac97_release(struct inode *inode, struct file *file) dealloc_dmabuf(s, &s->dma_adc); } s->open_mode &= (~file->f_mode) & (FMODE_READ|FMODE_WRITE); - up(&s->open_sem); + mutex_unlock(&s->open_mutex); wake_up(&s->open_wait); unlock_kernel(); return 0; @@ -1867,7 +1869,7 @@ static int __devinit vrc5477_ac97_probe(struct pci_dev *pcidev, init_waitqueue_head(&s->dma_adc.wait); init_waitqueue_head(&s->dma_dac.wait); init_waitqueue_head(&s->open_wait); - init_MUTEX(&s->open_sem); + mutex_init(&s->open_mutex); spin_lock_init(&s->lock); s->dev = pcidev; diff --git a/sound/oss/rme96xx.c b/sound/oss/rme96xx.c index faa0b7919b6..a1ec9d131ab 100644 --- a/sound/oss/rme96xx.c +++ b/sound/oss/rme96xx.c @@ -58,6 +58,7 @@ TODO: #include <linux/interrupt.h> #include <linux/poll.h> #include <linux/wait.h> +#include <linux/mutex.h> #include <asm/dma.h> #include <asm/page.h> @@ -326,7 +327,7 @@ typedef struct _rme96xx_info { /* waiting and locking */ wait_queue_head_t wait; - struct semaphore open_sem; + struct mutex open_mutex; wait_queue_head_t open_wait; } dma[RME96xx_MAX_DEVS]; @@ -842,7 +843,7 @@ static void busmaster_free(void* ptr,int size) { static int rme96xx_dmabuf_init(rme96xx_info * s,struct dmabuf* dma,int ioffset,int ooffset) { - init_MUTEX(&dma->open_sem); + mutex_init(&dma->open_mutex); init_waitqueue_head(&dma->open_wait); init_waitqueue_head(&dma->wait); dma->s = s; @@ -1469,21 +1470,21 @@ static int rme96xx_open(struct inode *in, struct file *f) dma = &s->dma[devnum]; f->private_data = dma; /* wait for device to become free */ - down(&dma->open_sem); + mutex_lock(&dma->open_mutex); while (dma->open_mode & f->f_mode) { if (f->f_flags & O_NONBLOCK) { - up(&dma->open_sem); + mutex_unlock(&dma->open_mutex); return -EBUSY; } add_wait_queue(&dma->open_wait, &wait); __set_current_state(TASK_INTERRUPTIBLE); - up(&dma->open_sem); + mutex_unlock(&dma->open_mutex); schedule(); remove_wait_queue(&dma->open_wait, &wait); set_current_state(TASK_RUNNING); if (signal_pending(current)) return -ERESTARTSYS; - down(&dma->open_sem); + mutex_lock(&dma->open_mutex); } COMM ("hardware open") @@ -1492,7 +1493,7 @@ static int rme96xx_open(struct inode *in, struct file *f) dma->open_mode |= (f->f_mode & (FMODE_READ | FMODE_WRITE)); dma->opened = 1; - up(&dma->open_sem); + mutex_unlock(&dma->open_mutex); DBG(printk("device num %d open finished\n",devnum)); return 0; @@ -1524,7 +1525,7 @@ static int rme96xx_release(struct inode *in, struct file *file) } wake_up(&dma->open_wait); - up(&dma->open_sem); + mutex_unlock(&dma->open_mutex); return 0; } diff --git a/sound/oss/sonicvibes.c b/sound/oss/sonicvibes.c index 71b05e2f697..69a4b8778b5 100644 --- a/sound/oss/sonicvibes.c +++ b/sound/oss/sonicvibes.c @@ -116,6 +116,8 @@ #include <linux/spinlock.h> #include <linux/smp_lock.h> #include <linux/gameport.h> +#include <linux/mutex.h> + #include <asm/io.h> #include <asm/uaccess.h> @@ -328,7 +330,7 @@ struct sv_state { unsigned char fmt, enable; spinlock_t lock; - struct semaphore open_sem; + struct mutex open_mutex; mode_t open_mode; wait_queue_head_t open_wait; @@ -1922,21 +1924,21 @@ static int sv_open(struct inode *inode, struct file *file) VALIDATE_STATE(s); file->private_data = s; /* wait for device to become free */ - down(&s->open_sem); + mutex_lock(&s->open_mutex); while (s->open_mode & file->f_mode) { if (file->f_flags & O_NONBLOCK) { - up(&s->open_sem); + 
mutex_unlock(&s->open_mutex); return -EBUSY; } add_wait_queue(&s->open_wait, &wait); __set_current_state(TASK_INTERRUPTIBLE); - up(&s->open_sem); + mutex_unlock(&s->open_mutex); schedule(); remove_wait_queue(&s->open_wait, &wait); set_current_state(TASK_RUNNING); if (signal_pending(current)) return -ERESTARTSYS; - down(&s->open_sem); + mutex_lock(&s->open_mutex); } if (file->f_mode & FMODE_READ) { fmtm &= ~((SV_CFMT_STEREO | SV_CFMT_16BIT) << SV_CFMT_CSHIFT); @@ -1956,7 +1958,7 @@ static int sv_open(struct inode *inode, struct file *file) } set_fmt(s, fmtm, fmts); s->open_mode |= file->f_mode & (FMODE_READ | FMODE_WRITE); - up(&s->open_sem); + mutex_unlock(&s->open_mutex); return nonseekable_open(inode, file); } @@ -1968,7 +1970,7 @@ static int sv_release(struct inode *inode, struct file *file) lock_kernel(); if (file->f_mode & FMODE_WRITE) drain_dac(s, file->f_flags & O_NONBLOCK); - down(&s->open_sem); + mutex_lock(&s->open_mutex); if (file->f_mode & FMODE_WRITE) { stop_dac(s); dealloc_dmabuf(s, &s->dma_dac); @@ -1979,7 +1981,7 @@ static int sv_release(struct inode *inode, struct file *file) } s->open_mode &= ~(file->f_mode & (FMODE_READ|FMODE_WRITE)); wake_up(&s->open_wait); - up(&s->open_sem); + mutex_unlock(&s->open_mutex); unlock_kernel(); return 0; } @@ -2167,21 +2169,21 @@ static int sv_midi_open(struct inode *inode, struct file *file) VALIDATE_STATE(s); file->private_data = s; /* wait for device to become free */ - down(&s->open_sem); + mutex_lock(&s->open_mutex); while (s->open_mode & (file->f_mode << FMODE_MIDI_SHIFT)) { if (file->f_flags & O_NONBLOCK) { - up(&s->open_sem); + mutex_unlock(&s->open_mutex); return -EBUSY; } add_wait_queue(&s->open_wait, &wait); __set_current_state(TASK_INTERRUPTIBLE); - up(&s->open_sem); + mutex_unlock(&s->open_mutex); schedule(); remove_wait_queue(&s->open_wait, &wait); set_current_state(TASK_RUNNING); if (signal_pending(current)) return -ERESTARTSYS; - down(&s->open_sem); + mutex_lock(&s->open_mutex); } spin_lock_irqsave(&s->lock, flags); if (!(s->open_mode & (FMODE_MIDI_READ | FMODE_MIDI_WRITE))) { @@ -2210,7 +2212,7 @@ static int sv_midi_open(struct inode *inode, struct file *file) } spin_unlock_irqrestore(&s->lock, flags); s->open_mode |= (file->f_mode << FMODE_MIDI_SHIFT) & (FMODE_MIDI_READ | FMODE_MIDI_WRITE); - up(&s->open_sem); + mutex_unlock(&s->open_mutex); return nonseekable_open(inode, file); } @@ -2248,7 +2250,7 @@ static int sv_midi_release(struct inode *inode, struct file *file) remove_wait_queue(&s->midi.owait, &wait); set_current_state(TASK_RUNNING); } - down(&s->open_sem); + mutex_lock(&s->open_mutex); s->open_mode &= ~((file->f_mode << FMODE_MIDI_SHIFT) & (FMODE_MIDI_READ|FMODE_MIDI_WRITE)); spin_lock_irqsave(&s->lock, flags); if (!(s->open_mode & (FMODE_MIDI_READ | FMODE_MIDI_WRITE))) { @@ -2257,7 +2259,7 @@ static int sv_midi_release(struct inode *inode, struct file *file) } spin_unlock_irqrestore(&s->lock, flags); wake_up(&s->open_wait); - up(&s->open_sem); + mutex_unlock(&s->open_mutex); unlock_kernel(); return 0; } @@ -2388,21 +2390,21 @@ static int sv_dmfm_open(struct inode *inode, struct file *file) VALIDATE_STATE(s); file->private_data = s; /* wait for device to become free */ - down(&s->open_sem); + mutex_lock(&s->open_mutex); while (s->open_mode & FMODE_DMFM) { if (file->f_flags & O_NONBLOCK) { - up(&s->open_sem); + mutex_unlock(&s->open_mutex); return -EBUSY; } add_wait_queue(&s->open_wait, &wait); __set_current_state(TASK_INTERRUPTIBLE); - up(&s->open_sem); + mutex_unlock(&s->open_mutex); schedule(); 
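The MIDI open/release hunks here (sonicvibes) and earlier (es1370, es1371, solo1, emu10k1) reuse the same open_mode word and open_mutex as the PCM paths: the MIDI opener's f_mode bits are shifted up by the driver's private FMODE_MIDI_SHIFT so PCM and MIDI occupancy coexist in one mask. Roughly (the shift constant and helper names below are assumptions; my_dev refers to the fields from the open/release sketch above):

#define MY_FMODE_MIDI_SHIFT     3
#define MY_FMODE_MIDI_READ      (FMODE_READ  << MY_FMODE_MIDI_SHIFT)
#define MY_FMODE_MIDI_WRITE     (FMODE_WRITE << MY_FMODE_MIDI_SHIFT)

static void my_midi_mark_open(struct my_dev *d, struct file *file)
{
        mutex_lock(&d->open_mutex);
        d->open_mode |= (file->f_mode << MY_FMODE_MIDI_SHIFT)
                        & (MY_FMODE_MIDI_READ | MY_FMODE_MIDI_WRITE);
        mutex_unlock(&d->open_mutex);
}

static void my_midi_mark_closed(struct my_dev *d, struct file *file)
{
        mutex_lock(&d->open_mutex);
        d->open_mode &= ~((file->f_mode << MY_FMODE_MIDI_SHIFT)
                          & (MY_FMODE_MIDI_READ | MY_FMODE_MIDI_WRITE));
        wake_up(&d->open_wait);
        mutex_unlock(&d->open_mutex);
}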
remove_wait_queue(&s->open_wait, &wait); set_current_state(TASK_RUNNING); if (signal_pending(current)) return -ERESTARTSYS; - down(&s->open_sem); + mutex_lock(&s->open_mutex); } /* init the stuff */ outb(1, s->iosynth); @@ -2412,7 +2414,7 @@ static int sv_dmfm_open(struct inode *inode, struct file *file) outb(5, s->iosynth+2); outb(1, s->iosynth+3); /* enable OPL3 */ s->open_mode |= FMODE_DMFM; - up(&s->open_sem); + mutex_unlock(&s->open_mutex); return nonseekable_open(inode, file); } @@ -2423,7 +2425,7 @@ static int sv_dmfm_release(struct inode *inode, struct file *file) VALIDATE_STATE(s); lock_kernel(); - down(&s->open_sem); + mutex_lock(&s->open_mutex); s->open_mode &= ~FMODE_DMFM; for (regb = 0xb0; regb < 0xb9; regb++) { outb(regb, s->iosynth); @@ -2432,7 +2434,7 @@ static int sv_dmfm_release(struct inode *inode, struct file *file) outb(0, s->iosynth+3); } wake_up(&s->open_wait); - up(&s->open_sem); + mutex_unlock(&s->open_mutex); unlock_kernel(); return 0; } @@ -2582,7 +2584,7 @@ static int __devinit sv_probe(struct pci_dev *pcidev, const struct pci_device_id init_waitqueue_head(&s->open_wait); init_waitqueue_head(&s->midi.iwait); init_waitqueue_head(&s->midi.owait); - init_MUTEX(&s->open_sem); + mutex_init(&s->open_mutex); spin_lock_init(&s->lock); s->magic = SV_MAGIC; s->dev = pcidev; diff --git a/sound/oss/swarm_cs4297a.c b/sound/oss/swarm_cs4297a.c index df4d3771fa8..dce9016cbcf 100644 --- a/sound/oss/swarm_cs4297a.c +++ b/sound/oss/swarm_cs4297a.c @@ -76,6 +76,7 @@ #include <linux/init.h> #include <linux/poll.h> #include <linux/smp_lock.h> +#include <linux/mutex.h> #include <asm/byteorder.h> #include <asm/dma.h> @@ -291,9 +292,9 @@ struct cs4297a_state { unsigned conversion:1; // conversion from 16 to 8 bit in progress unsigned ena; spinlock_t lock; - struct semaphore open_sem; - struct semaphore open_sem_adc; - struct semaphore open_sem_dac; + struct mutex open_mutex; + struct mutex open_sem_adc; + struct mutex open_sem_dac; mode_t open_mode; wait_queue_head_t open_wait; wait_queue_head_t open_wait_adc; @@ -2352,20 +2353,20 @@ static int cs4297a_release(struct inode *inode, struct file *file) if (file->f_mode & FMODE_WRITE) { drain_dac(s, file->f_flags & O_NONBLOCK); - down(&s->open_sem_dac); + mutex_lock(&s->open_sem_dac); stop_dac(s); dealloc_dmabuf(s, &s->dma_dac); s->open_mode &= ~FMODE_WRITE; - up(&s->open_sem_dac); + mutex_unlock(&s->open_sem_dac); wake_up(&s->open_wait_dac); } if (file->f_mode & FMODE_READ) { drain_adc(s, file->f_flags & O_NONBLOCK); - down(&s->open_sem_adc); + mutex_lock(&s->open_sem_adc); stop_adc(s); dealloc_dmabuf(s, &s->dma_adc); s->open_mode &= ~FMODE_READ; - up(&s->open_sem_adc); + mutex_unlock(&s->open_sem_adc); wake_up(&s->open_wait_adc); } return 0; @@ -2413,37 +2414,37 @@ static int cs4297a_open(struct inode *inode, struct file *file) ; } - down(&s->open_sem_dac); + mutex_lock(&s->open_sem_dac); while (s->open_mode & FMODE_WRITE) { if (file->f_flags & O_NONBLOCK) { - up(&s->open_sem_dac); + mutex_unlock(&s->open_sem_dac); return -EBUSY; } - up(&s->open_sem_dac); + mutex_unlock(&s->open_sem_dac); interruptible_sleep_on(&s->open_wait_dac); if (signal_pending(current)) { printk("open - sig pending\n"); return -ERESTARTSYS; } - down(&s->open_sem_dac); + mutex_lock(&s->open_sem_dac); } } if (file->f_mode & FMODE_READ) { - down(&s->open_sem_adc); + mutex_lock(&s->open_sem_adc); while (s->open_mode & FMODE_READ) { if (file->f_flags & O_NONBLOCK) { - up(&s->open_sem_adc); + mutex_unlock(&s->open_sem_adc); return -EBUSY; } - up(&s->open_sem_adc); + 
mutex_unlock(&s->open_sem_adc); interruptible_sleep_on(&s->open_wait_adc); if (signal_pending(current)) { printk("open - sig pending\n"); return -ERESTARTSYS; } - down(&s->open_sem_adc); + mutex_lock(&s->open_sem_adc); } } s->open_mode |= file->f_mode & (FMODE_READ | FMODE_WRITE); @@ -2456,7 +2457,7 @@ static int cs4297a_open(struct inode *inode, struct file *file) s->ena &= ~FMODE_READ; s->dma_adc.ossfragshift = s->dma_adc.ossmaxfrags = s->dma_adc.subdivision = 0; - up(&s->open_sem_adc); + mutex_unlock(&s->open_sem_adc); if (prog_dmabuf_adc(s)) { CS_DBGOUT(CS_OPEN | CS_ERROR, 2, printk(KERN_ERR @@ -2474,7 +2475,7 @@ static int cs4297a_open(struct inode *inode, struct file *file) s->ena &= ~FMODE_WRITE; s->dma_dac.ossfragshift = s->dma_dac.ossmaxfrags = s->dma_dac.subdivision = 0; - up(&s->open_sem_dac); + mutex_unlock(&s->open_sem_dac); if (prog_dmabuf_dac(s)) { CS_DBGOUT(CS_OPEN | CS_ERROR, 2, printk(KERN_ERR @@ -2631,8 +2632,8 @@ static int __init cs4297a_init(void) init_waitqueue_head(&s->open_wait); init_waitqueue_head(&s->open_wait_adc); init_waitqueue_head(&s->open_wait_dac); - init_MUTEX(&s->open_sem_adc); - init_MUTEX(&s->open_sem_dac); + mutex_init(&s->open_sem_adc); + mutex_init(&s->open_sem_dac); spin_lock_init(&s->lock); s->irq = K_INT_SER_1; diff --git a/sound/oss/trident.c b/sound/oss/trident.c index a21c663e7e1..e61a454a815 100644 --- a/sound/oss/trident.c +++ b/sound/oss/trident.c @@ -190,7 +190,7 @@ * * Lock order (high->low) * lock - hardware lock - * open_sem - guard opens + * open_mutex - guard opens * sem - guard dmabuf, write re-entry etc */ @@ -216,6 +216,8 @@ #include <linux/pm.h> #include <linux/gameport.h> #include <linux/kernel.h> +#include <linux/mutex.h> + #include <asm/uaccess.h> #include <asm/io.h> #include <asm/dma.h> @@ -349,7 +351,7 @@ struct trident_state { unsigned chans_num; unsigned long fmt_flag; /* Guard against mmap/write/read races */ - struct semaphore sem; + struct mutex sem; }; @@ -402,7 +404,7 @@ struct trident_card { struct trident_card *next; /* single open lock mechanism, only used for recording */ - struct semaphore open_sem; + struct mutex open_mutex; /* The trident has a certain amount of cross channel interaction so we use a single per card lock */ @@ -1881,7 +1883,7 @@ trident_read(struct file *file, char __user *buffer, size_t count, loff_t * ppos if (!access_ok(VERIFY_WRITE, buffer, count)) return -EFAULT; - down(&state->sem); + mutex_lock(&state->sem); if (!dmabuf->ready && (ret = prog_dmabuf_record(state))) goto out; @@ -1913,7 +1915,7 @@ trident_read(struct file *file, char __user *buffer, size_t count, loff_t * ppos goto out; } - up(&state->sem); + mutex_unlock(&state->sem); /* No matter how much space left in the buffer, */ /* we have to wait until CSO == ESO/2 or CSO == ESO */ /* when address engine interrupts */ @@ -1940,7 +1942,7 @@ trident_read(struct file *file, char __user *buffer, size_t count, loff_t * ppos ret = -ERESTARTSYS; goto out; } - down(&state->sem); + mutex_lock(&state->sem); if (dmabuf->mapped) { if (!ret) ret = -ENXIO; @@ -1968,7 +1970,7 @@ trident_read(struct file *file, char __user *buffer, size_t count, loff_t * ppos start_adc(state); } out: - up(&state->sem); + mutex_unlock(&state->sem); return ret; } @@ -1996,7 +1998,7 @@ trident_write(struct file *file, const char __user *buffer, size_t count, loff_t * Guard against an mmap or ioctl while writing */ - down(&state->sem); + mutex_lock(&state->sem); if (dmabuf->mapped) { ret = -ENXIO; @@ -2045,7 +2047,7 @@ trident_write(struct file *file, const char __user 
*buffer, size_t count, loff_t tmo = (dmabuf->dmasize * HZ) / (dmabuf->rate * 2); tmo >>= sample_shift[dmabuf->fmt]; unlock_set_fmt(state); - up(&state->sem); + mutex_unlock(&state->sem); /* There are two situations when sleep_on_timeout */ /* returns, one is when the interrupt is serviced */ @@ -2073,7 +2075,7 @@ trident_write(struct file *file, const char __user *buffer, size_t count, loff_t ret = -ERESTARTSYS; goto out_nolock; } - down(&state->sem); + mutex_lock(&state->sem); if (dmabuf->mapped) { if (!ret) ret = -ENXIO; @@ -2131,7 +2133,7 @@ trident_write(struct file *file, const char __user *buffer, size_t count, loff_t start_dac(state); } out: - up(&state->sem); + mutex_unlock(&state->sem); out_nolock: return ret; } @@ -2152,24 +2154,24 @@ trident_poll(struct file *file, struct poll_table_struct *wait) * prog_dmabuf events */ - down(&state->sem); + mutex_lock(&state->sem); if (file->f_mode & FMODE_WRITE) { if (!dmabuf->ready && prog_dmabuf_playback(state)) { - up(&state->sem); + mutex_unlock(&state->sem); return 0; } poll_wait(file, &dmabuf->wait, wait); } if (file->f_mode & FMODE_READ) { if (!dmabuf->ready && prog_dmabuf_record(state)) { - up(&state->sem); + mutex_unlock(&state->sem); return 0; } poll_wait(file, &dmabuf->wait, wait); } - up(&state->sem); + mutex_unlock(&state->sem); spin_lock_irqsave(&state->card->lock, flags); trident_update_ptr(state); @@ -2207,7 +2209,7 @@ trident_mmap(struct file *file, struct vm_area_struct *vma) * a read or write against an mmap. */ - down(&state->sem); + mutex_lock(&state->sem); if (vma->vm_flags & VM_WRITE) { if ((ret = prog_dmabuf_playback(state)) != 0) @@ -2232,7 +2234,7 @@ trident_mmap(struct file *file, struct vm_area_struct *vma) dmabuf->mapped = 1; ret = 0; out: - up(&state->sem); + mutex_unlock(&state->sem); return ret; } @@ -2429,15 +2431,15 @@ trident_ioctl(struct inode *inode, struct file *file, unlock_set_fmt(state); break; } - down(&state->card->open_sem); + mutex_lock(&state->card->open_mutex); ret = ali_allocate_other_states_resources(state, 6); if (ret < 0) { - up(&state->card->open_sem); + mutex_unlock(&state->card->open_mutex); unlock_set_fmt(state); break; } state->card->multi_channel_use_count++; - up(&state->card->open_sem); + mutex_unlock(&state->card->open_mutex); } else val = 2; /*yield to 2-channels */ } else @@ -2727,11 +2729,11 @@ trident_open(struct inode *inode, struct file *file) /* find an available virtual channel (instance of /dev/dsp) */ while (card != NULL) { - down(&card->open_sem); + mutex_lock(&card->open_mutex); if (file->f_mode & FMODE_READ) { /* Skip opens on cards that are in 6 channel mode */ if (card->multi_channel_use_count > 0) { - up(&card->open_sem); + mutex_unlock(&card->open_mutex); card = card->next; continue; } @@ -2740,16 +2742,16 @@ trident_open(struct inode *inode, struct file *file) if (card->states[i] == NULL) { state = card->states[i] = kmalloc(sizeof(*state), GFP_KERNEL); if (state == NULL) { - up(&card->open_sem); + mutex_unlock(&card->open_mutex); return -ENOMEM; } memset(state, 0, sizeof(*state)); - init_MUTEX(&state->sem); + mutex_init(&state->sem); dmabuf = &state->dmabuf; goto found_virt; } } - up(&card->open_sem); + mutex_unlock(&card->open_mutex); card = card->next; } /* no more virtual channel avaiable */ @@ -2816,7 +2818,7 @@ trident_open(struct inode *inode, struct file *file) } state->open_mode |= file->f_mode & (FMODE_READ | FMODE_WRITE); - up(&card->open_sem); + mutex_unlock(&card->open_mutex); pr_debug("trident: open virtual channel %d, hard channel %d\n", state->virt, 
dmabuf->channel->num); @@ -2845,7 +2847,7 @@ trident_release(struct inode *inode, struct file *file) state->virt, dmabuf->channel->num); /* stop DMA state machine and free DMA buffers/channels */ - down(&card->open_sem); + mutex_lock(&card->open_mutex); if (file->f_mode & FMODE_WRITE) { stop_dac(state); @@ -2878,8 +2880,8 @@ trident_release(struct inode *inode, struct file *file) card->states[state->virt] = NULL; kfree(state); - /* we're covered by the open_sem */ - up(&card->open_sem); + /* we're covered by the open_mutex */ + mutex_unlock(&card->open_mutex); return 0; } @@ -4405,7 +4407,7 @@ trident_probe(struct pci_dev *pci_dev, const struct pci_device_id *pci_id) card->banks[BANK_B].addresses = &bank_b_addrs; card->banks[BANK_B].bitmap = 0UL; - init_MUTEX(&card->open_sem); + mutex_init(&card->open_mutex); spin_lock_init(&card->lock); init_timer(&card->timer); diff --git a/sound/oss/via82cxxx_audio.c b/sound/oss/via82cxxx_audio.c index 83edda93f0b..1a921ee71ab 100644 --- a/sound/oss/via82cxxx_audio.c +++ b/sound/oss/via82cxxx_audio.c @@ -38,7 +38,8 @@ #include <linux/dma-mapping.h> #include <asm/io.h> #include <asm/uaccess.h> -#include <asm/semaphore.h> +#include <linux/mutex.h> + #include "sound_config.h" #include "dev_table.h" #include "mpu401.h" @@ -311,8 +312,8 @@ struct via_info { int mixer_vol; /* 8233/35 volume - not yet implemented */ - struct semaphore syscall_sem; - struct semaphore open_sem; + struct mutex syscall_mutex; + struct mutex open_mutex; /* The 8233/8235 have 4 DX audio channels, two record and one six channel out. We bind ch_in to DX 1, ch_out to multichannel @@ -505,10 +506,10 @@ static inline int via_syscall_down (struct via_info *card, int nonblock) nonblock = 0; if (nonblock) { - if (down_trylock (&card->syscall_sem)) + if (!mutex_trylock(&card->syscall_mutex)) return -EAGAIN; } else { - if (down_interruptible (&card->syscall_sem)) + if (mutex_lock_interruptible(&card->syscall_mutex)) return -ERESTARTSYS; } @@ -1609,7 +1610,7 @@ static int via_mixer_ioctl (struct inode *inode, struct file *file, unsigned int #endif rc = codec->mixer_ioctl(codec, cmd, arg); - up (&card->syscall_sem); + mutex_unlock(&card->syscall_mutex); out: DPRINTK ("EXIT, returning %d\n", rc); @@ -2228,7 +2229,7 @@ static int via_dsp_mmap(struct file *file, struct vm_area_struct *vma) if (wr) card->ch_out.is_mapped = 1; - up (&card->syscall_sem); + mutex_unlock(&card->syscall_mutex); rc = 0; out: @@ -2256,7 +2257,7 @@ handle_one_block: /* Thomas Sailer: * But also to ourselves, release semaphore if we do so */ if (need_resched()) { - up(&card->syscall_sem); + mutex_unlock(&card->syscall_mutex); schedule (); ret = via_syscall_down (card, nonblock); if (ret) @@ -2286,7 +2287,7 @@ handle_one_block: break; } - up(&card->syscall_sem); + mutex_unlock(&card->syscall_mutex); DPRINTK ("Sleeping on block %d\n", n); schedule(); @@ -2402,7 +2403,7 @@ static ssize_t via_dsp_read(struct file *file, char __user *buffer, size_t count rc = via_dsp_do_read (card, buffer, count, nonblock); out_up: - up (&card->syscall_sem); + mutex_unlock(&card->syscall_mutex); out: DPRINTK ("EXIT, returning %ld\n",(long) rc); return rc; @@ -2426,7 +2427,7 @@ handle_one_block: /* Thomas Sailer: * But also to ourselves, release semaphore if we do so */ if (need_resched()) { - up(&card->syscall_sem); + mutex_unlock(&card->syscall_mutex); schedule (); ret = via_syscall_down (card, nonblock); if (ret) @@ -2456,7 +2457,7 @@ handle_one_block: break; } - up(&card->syscall_sem); + mutex_unlock(&card->syscall_mutex); DPRINTK ("Sleeping 
on page %d, tmp==%d, ir==%d\n", n, tmp, chan->is_record); schedule(); @@ -2585,7 +2586,7 @@ static ssize_t via_dsp_write(struct file *file, const char __user *buffer, size_ rc = via_dsp_do_write (card, buffer, count, nonblock); out_up: - up (&card->syscall_sem); + mutex_unlock(&card->syscall_mutex); out: DPRINTK ("EXIT, returning %ld\n",(long) rc); return rc; @@ -2634,7 +2635,7 @@ static unsigned int via_dsp_poll(struct file *file, struct poll_table_struct *wa * Sleeps until all playback has been flushed to the audio * hardware. * - * Locking: inside card->syscall_sem + * Locking: inside card->syscall_mutex */ static int via_dsp_drain_playback (struct via_info *card, @@ -2692,7 +2693,7 @@ static int via_dsp_drain_playback (struct via_info *card, printk (KERN_ERR "sleeping but not active\n"); #endif - up(&card->syscall_sem); + mutex_unlock(&card->syscall_mutex); DPRINTK ("sleeping, nbufs=%d\n", atomic_read (&chan->n_frags)); schedule(); @@ -2748,7 +2749,7 @@ out: * * Handles SNDCTL_DSP_GETISPACE and SNDCTL_DSP_GETOSPACE. * - * Locking: inside card->syscall_sem + * Locking: inside card->syscall_mutex */ static int via_dsp_ioctl_space (struct via_info *card, @@ -2793,7 +2794,7 @@ static int via_dsp_ioctl_space (struct via_info *card, * * Handles SNDCTL_DSP_GETIPTR and SNDCTL_DSP_GETOPTR. * - * Locking: inside card->syscall_sem + * Locking: inside card->syscall_mutex */ static int via_dsp_ioctl_ptr (struct via_info *card, @@ -3221,7 +3222,7 @@ static int via_dsp_ioctl (struct inode *inode, struct file *file, break; } - up (&card->syscall_sem); + mutex_unlock(&card->syscall_mutex); DPRINTK ("EXIT, returning %d\n", rc); return rc; } @@ -3264,12 +3265,12 @@ static int via_dsp_open (struct inode *inode, struct file *file) match: if (nonblock) { - if (down_trylock (&card->open_sem)) { + if (!mutex_trylock(&card->open_mutex)) { DPRINTK ("EXIT, returning -EAGAIN\n"); return -EAGAIN; } } else { - if (down_interruptible (&card->open_sem)) { + if (mutex_lock_interruptible(&card->open_mutex)) { DPRINTK ("EXIT, returning -ERESTARTSYS\n"); return -ERESTARTSYS; } @@ -3355,8 +3356,8 @@ static int via_dsp_release(struct inode *inode, struct file *file) via_chan_buffer_free (card, &card->ch_in); } - up (&card->syscall_sem); - up (&card->open_sem); + mutex_unlock(&card->syscall_mutex); + mutex_unlock(&card->open_mutex); DPRINTK ("EXIT, returning 0\n"); return 0; @@ -3414,8 +3415,8 @@ static int __devinit via_init_one (struct pci_dev *pdev, const struct pci_device card->card_num = via_num_cards++; spin_lock_init (&card->lock); spin_lock_init (&card->ac97_lock); - init_MUTEX (&card->syscall_sem); - init_MUTEX (&card->open_sem); + mutex_init(&card->syscall_mutex); + mutex_init(&card->open_mutex); /* we must init these now, in case the intr handler needs them */ via_chan_init_defaults (card, &card->ch_out); diff --git a/sound/oss/vwsnd.c b/sound/oss/vwsnd.c index 265423054ca..b372e88e857 100644 --- a/sound/oss/vwsnd.c +++ b/sound/oss/vwsnd.c @@ -94,7 +94,7 @@ * Open will block until the previous client has closed the * device, unless O_NONBLOCK is specified. * - * The semaphore devc->io_sema serializes PCM I/O syscalls. This + * The semaphore devc->io_mutex serializes PCM I/O syscalls. This * is unnecessary in Linux 2.2, because the kernel lock * serializes read, write, and ioctl globally, but it's there, * ready for the brave, new post-kernel-lock world. @@ -105,7 +105,7 @@ * area it owns and update its pointers. See pcm_output() and * pcm_input() for most of the gory stuff. 
* - * devc->mix_sema serializes all mixer ioctls. This is also + * devc->mix_mutex serializes all mixer ioctls. This is also * redundant because of the kernel lock. * * The lowest level lock is lith->lithium_lock. It is a @@ -148,7 +148,8 @@ #include <linux/smp_lock.h> #include <linux/wait.h> #include <linux/interrupt.h> -#include <asm/semaphore.h> +#include <linux/mutex.h> + #include <asm/mach-visws/cobalt.h> #include "sound_config.h" @@ -1447,11 +1448,11 @@ typedef enum vwsnd_port_flags { * * port->lock protects: hwstate, flags, swb_[iu]_avail. * - * devc->io_sema protects: swstate, sw_*, swb_[iu]_idx. + * devc->io_mutex protects: swstate, sw_*, swb_[iu]_idx. * * everything else is only written by open/release or * pcm_{setup,shutdown}(), which are serialized by a - * combination of devc->open_sema and devc->io_sema. + * combination of devc->open_mutex and devc->io_mutex. */ typedef struct vwsnd_port { @@ -1507,9 +1508,9 @@ typedef struct vwsnd_dev { int audio_minor; /* minor number of audio device */ int mixer_minor; /* minor number of mixer device */ - struct semaphore open_sema; - struct semaphore io_sema; - struct semaphore mix_sema; + struct mutex open_mutex; + struct mutex io_mutex; + struct mutex mix_mutex; mode_t open_mode; wait_queue_head_t open_wait; @@ -1633,7 +1634,7 @@ static __inline__ unsigned int swb_inc_i(vwsnd_port_t *port, int inc) * mode-setting ioctls have been done, but before the first I/O is * done. * - * Locking: called with devc->io_sema held. + * Locking: called with devc->io_mutex held. * * Returns 0 on success, -errno on failure. */ @@ -2319,9 +2320,9 @@ static ssize_t vwsnd_audio_read(struct file *file, vwsnd_dev_t *devc = file->private_data; ssize_t ret; - down(&devc->io_sema); + mutex_lock(&devc->io_mutex); ret = vwsnd_audio_do_read(file, buffer, count, ppos); - up(&devc->io_sema); + mutex_unlock(&devc->io_mutex); return ret; } @@ -2394,9 +2395,9 @@ static ssize_t vwsnd_audio_write(struct file *file, vwsnd_dev_t *devc = file->private_data; ssize_t ret; - down(&devc->io_sema); + mutex_lock(&devc->io_mutex); ret = vwsnd_audio_do_write(file, buffer, count, ppos); - up(&devc->io_sema); + mutex_unlock(&devc->io_mutex); return ret; } @@ -2891,9 +2892,9 @@ static int vwsnd_audio_ioctl(struct inode *inode, vwsnd_dev_t *devc = (vwsnd_dev_t *) file->private_data; int ret; - down(&devc->io_sema); + mutex_lock(&devc->io_mutex); ret = vwsnd_audio_do_ioctl(inode, file, cmd, arg); - up(&devc->io_sema); + mutex_unlock(&devc->io_mutex); return ret; } @@ -2929,9 +2930,9 @@ static int vwsnd_audio_open(struct inode *inode, struct file *file) return -ENODEV; } - down(&devc->open_sema); + mutex_lock(&devc->open_mutex); while (devc->open_mode & file->f_mode) { - up(&devc->open_sema); + mutex_unlock(&devc->open_mutex); if (file->f_flags & O_NONBLOCK) { DEC_USE_COUNT; return -EBUSY; @@ -2941,10 +2942,10 @@ static int vwsnd_audio_open(struct inode *inode, struct file *file) DEC_USE_COUNT; return -ERESTARTSYS; } - down(&devc->open_sema); + mutex_lock(&devc->open_mutex); } devc->open_mode |= file->f_mode & (FMODE_READ | FMODE_WRITE); - up(&devc->open_sema); + mutex_unlock(&devc->open_mutex); /* get default sample format from minor number. */ @@ -2960,7 +2961,7 @@ static int vwsnd_audio_open(struct inode *inode, struct file *file) /* Initialize vwsnd_ports. 
*/ - down(&devc->io_sema); + mutex_lock(&devc->io_mutex); { if (file->f_mode & FMODE_READ) { devc->rport.swstate = SW_INITIAL; @@ -2987,7 +2988,7 @@ static int vwsnd_audio_open(struct inode *inode, struct file *file) devc->wport.frag_count = 0; } } - up(&devc->io_sema); + mutex_unlock(&devc->io_mutex); file->private_data = devc; DBGRV(); @@ -3005,7 +3006,7 @@ static int vwsnd_audio_release(struct inode *inode, struct file *file) int err = 0; lock_kernel(); - down(&devc->io_sema); + mutex_lock(&devc->io_mutex); { DBGEV("(inode=0x%p, file=0x%p)\n", inode, file); @@ -3022,13 +3023,13 @@ static int vwsnd_audio_release(struct inode *inode, struct file *file) if (wport) wport->swstate = SW_OFF; } - up(&devc->io_sema); + mutex_unlock(&devc->io_mutex); - down(&devc->open_sema); + mutex_lock(&devc->open_mutex); { devc->open_mode &= ~file->f_mode; } - up(&devc->open_sema); + mutex_unlock(&devc->open_mutex); wake_up(&devc->open_wait); DEC_USE_COUNT; DBGR(); @@ -3213,7 +3214,7 @@ static int vwsnd_mixer_ioctl(struct inode *ioctl, DBGEV("(devc=0x%p, cmd=0x%x, arg=0x%lx)\n", devc, cmd, arg); - down(&devc->mix_sema); + mutex_lock(&devc->mix_mutex); { if ((cmd & ~nrmask) == MIXER_READ(0)) retval = mixer_read_ioctl(devc, nr, (void __user *) arg); @@ -3222,7 +3223,7 @@ static int vwsnd_mixer_ioctl(struct inode *ioctl, else retval = -EINVAL; } - up(&devc->mix_sema); + mutex_unlock(&devc->mix_mutex); return retval; } @@ -3376,9 +3377,9 @@ static int __init attach_vwsnd(struct address_info *hw_config) /* Initialize as much of *devc as possible */ - init_MUTEX(&devc->open_sema); - init_MUTEX(&devc->io_sema); - init_MUTEX(&devc->mix_sema); + mutex_init(&devc->open_mutex); + mutex_init(&devc->io_mutex); + mutex_init(&devc->mix_mutex); devc->open_mode = 0; spin_lock_init(&devc->rport.lock); init_waitqueue_head(&devc->rport.queue); diff --git a/sound/oss/ymfpci.c b/sound/oss/ymfpci.c index f8bd72e46f5..bf90c124a7e 100644 --- a/sound/oss/ymfpci.c +++ b/sound/oss/ymfpci.c @@ -1918,10 +1918,10 @@ static int ymf_open(struct inode *inode, struct file *file) if (unit == NULL) return -ENODEV; - down(&unit->open_sem); + mutex_lock(&unit->open_mutex); if ((state = ymf_state_alloc(unit)) == NULL) { - up(&unit->open_sem); + mutex_unlock(&unit->open_mutex); return -ENOMEM; } list_add_tail(&state->chain, &unit->states); @@ -1956,7 +1956,7 @@ static int ymf_open(struct inode *inode, struct file *file) ymfpci_writeb(unit, YDSXGR_TIMERCTRL, (YDSXGR_TIMERCTRL_TEN|YDSXGR_TIMERCTRL_TIEN)); #endif - up(&unit->open_sem); + mutex_unlock(&unit->open_mutex); return nonseekable_open(inode, file); @@ -1974,7 +1974,7 @@ out_nodma: list_del(&state->chain); kfree(state); - up(&unit->open_sem); + mutex_unlock(&unit->open_mutex); return err; } @@ -1987,7 +1987,7 @@ static int ymf_release(struct inode *inode, struct file *file) ymfpci_writeb(unit, YDSXGR_TIMERCTRL, 0); #endif - down(&unit->open_sem); + mutex_lock(&unit->open_mutex); /* * XXX Solve the case of O_NONBLOCK close - don't deallocate here. 
@@ -2004,7 +2004,7 @@ static int ymf_release(struct inode *inode, struct file *file) file->private_data = NULL; /* Can you tell I programmed Solaris */ kfree(state); - up(&unit->open_sem); + mutex_unlock(&unit->open_mutex); return 0; } @@ -2532,7 +2532,7 @@ static int __devinit ymf_probe_one(struct pci_dev *pcidev, const struct pci_devi spin_lock_init(&codec->reg_lock); spin_lock_init(&codec->voice_lock); spin_lock_init(&codec->ac97_lock); - init_MUTEX(&codec->open_sem); + mutex_init(&codec->open_mutex); INIT_LIST_HEAD(&codec->states); codec->pci = pcidev; diff --git a/sound/oss/ymfpci.h b/sound/oss/ymfpci.h index f810a100c64..ac1785f2b7e 100644 --- a/sound/oss/ymfpci.h +++ b/sound/oss/ymfpci.h @@ -22,6 +22,7 @@ * */ #include <linux/config.h> +#include <linux/mutex.h> /* * Direct registers @@ -279,7 +280,7 @@ struct ymf_unit { /* soundcore stuff */ int dev_audio; - struct semaphore open_sem; + struct mutex open_mutex; struct list_head ymf_devs; struct list_head states; /* List of states for this unit */ diff --git a/sound/ppc/daca.c b/sound/ppc/daca.c index 08cde51177d..aa09ebd9ffb 100644 --- a/sound/ppc/daca.c +++ b/sound/ppc/daca.c @@ -22,7 +22,6 @@ #include <sound/driver.h> #include <linux/init.h> #include <linux/i2c.h> -#include <linux/i2c-dev.h> #include <linux/kmod.h> #include <linux/slab.h> #include <sound/core.h> diff --git a/sound/ppc/keywest.c b/sound/ppc/keywest.c index 6058c2dd1b7..fb05938dcbd 100644 --- a/sound/ppc/keywest.c +++ b/sound/ppc/keywest.c @@ -23,7 +23,6 @@ #include <linux/init.h> #include <linux/i2c.h> #include <linux/delay.h> -#include <linux/i2c-dev.h> #include <linux/slab.h> #include <sound/core.h> #include "pmac.h" diff --git a/sound/ppc/toonie.c b/sound/ppc/toonie.c index 053b8f24e4d..210be20dc27 100644 --- a/sound/ppc/toonie.c +++ b/sound/ppc/toonie.c @@ -22,7 +22,6 @@ #include <linux/init.h> #include <linux/delay.h> #include <linux/i2c.h> -#include <linux/i2c-dev.h> #include <linux/kmod.h> #include <linux/slab.h> #include <linux/interrupt.h> diff --git a/sound/ppc/tumbler.c b/sound/ppc/tumbler.c index 838fc113c44..1146dd882bb 100644 --- a/sound/ppc/tumbler.c +++ b/sound/ppc/tumbler.c @@ -28,7 +28,6 @@ #include <linux/init.h> #include <linux/delay.h> #include <linux/i2c.h> -#include <linux/i2c-dev.h> #include <linux/kmod.h> #include <linux/slab.h> #include <linux/interrupt.h>
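
The hunks above all apply the same mechanical conversion of the old OSS sound drivers from <asm/semaphore.h> semaphores (used as binary locks) to <linux/mutex.h> mutexes. A minimal sketch of the idiom follows; the foo_card structure and foo_open() helper are hypothetical stand-ins, not code from any driver in this patch:

#include <linux/mutex.h>
#include <linux/fs.h>
#include <linux/errno.h>

struct foo_card {
	struct mutex open_mutex;		/* was: struct semaphore open_sem; */
};

static void foo_card_init(struct foo_card *card)
{
	mutex_init(&card->open_mutex);		/* was: init_MUTEX(&card->open_sem); */
}

static int foo_open(struct foo_card *card, struct file *file)
{
	if (file->f_flags & O_NONBLOCK) {
		/*
		 * The test inverts: down_trylock() returns non-zero on
		 * failure, while mutex_trylock() returns 1 on success.
		 */
		if (!mutex_trylock(&card->open_mutex))
			return -EAGAIN;
	} else {
		/* Both return 0 on success, non-zero when interrupted. */
		if (mutex_lock_interruptible(&card->open_mutex))
			return -ERESTARTSYS;
	}

	/* ... claim the device under the lock ... */

	mutex_unlock(&card->open_mutex);	/* was: up(&card->open_sem); */
	return 0;
}

Apart from the inverted trylock test, the substitution is one-to-one: down() becomes mutex_lock(), up() becomes mutex_unlock(), and down_interruptible() becomes mutex_lock_interruptible().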
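
The vwsnd and ymfpci hunks also preserve the open/release serialization these drivers rely on: open_mutex guards only the open_mode bookkeeping and is dropped before the opener sleeps, so release() can clear the mode and wake the waiter. A rough sketch of that pattern, again with hypothetical foo_dev names and wait_event_interruptible() standing in for the drivers' own sleep primitives:

#include <linux/mutex.h>
#include <linux/wait.h>
#include <linux/fs.h>
#include <linux/errno.h>

struct foo_dev {
	struct mutex open_mutex;	/* serializes open_mode updates */
	wait_queue_head_t open_wait;	/* blocked openers sleep here */
	mode_t open_mode;		/* FMODE_READ/FMODE_WRITE currently in use */
};

static int foo_dev_open(struct foo_dev *devc, struct file *file)
{
	mutex_lock(&devc->open_mutex);
	while (devc->open_mode & file->f_mode) {
		mutex_unlock(&devc->open_mutex);
		if (file->f_flags & O_NONBLOCK)
			return -EBUSY;
		/* sleep until release() drops the conflicting mode */
		if (wait_event_interruptible(devc->open_wait,
				!(devc->open_mode & file->f_mode)))
			return -ERESTARTSYS;
		mutex_lock(&devc->open_mutex);
	}
	devc->open_mode |= file->f_mode & (FMODE_READ | FMODE_WRITE);
	mutex_unlock(&devc->open_mutex);
	return 0;
}

static int foo_dev_release(struct foo_dev *devc, struct file *file)
{
	mutex_lock(&devc->open_mutex);
	devc->open_mode &= ~file->f_mode;
	mutex_unlock(&devc->open_mutex);
	wake_up(&devc->open_wait);
	return 0;
}

Sleeping only after the mutex has been released is what keeps the conversion safe here; holding open_mutex across the wait would deadlock against release().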