547 files changed, 21273 insertions, 8234 deletions
diff --git a/Documentation/ABI/testing/sysfs-devices-memory b/Documentation/ABI/testing/sysfs-devices-memory index 9fe91c02ee4..bf1627b02a0 100644 --- a/Documentation/ABI/testing/sysfs-devices-memory +++ b/Documentation/ABI/testing/sysfs-devices-memory @@ -60,6 +60,19 @@ Description: Users: hotplug memory remove tools https://w3.opensource.ibm.com/projects/powerpc-utils/ + +What: /sys/devices/system/memoryX/nodeY +Date: October 2009 +Contact: Linux Memory Management list <linux-mm@kvack.org> +Description: + When CONFIG_NUMA is enabled, a symbolic link that + points to the corresponding NUMA node directory. + + For example, the following symbolic link is created for + memory section 9 on node0: + /sys/devices/system/memory/memory9/node0 -> ../../node/node0 + + What: /sys/devices/system/node/nodeX/memoryY Date: September 2008 Contact: Gary Hade <garyhade@us.ibm.com> @@ -70,4 +83,3 @@ Description: memory section directory. For example, the following symbolic link is created for memory section 9 on node0. /sys/devices/system/node/node0/memory9 -> ../../memory/memory9 - diff --git a/Documentation/ABI/testing/sysfs-devices-system-cpu b/Documentation/ABI/testing/sysfs-devices-system-cpu index 2aae06fcbed..84a710f87c6 100644 --- a/Documentation/ABI/testing/sysfs-devices-system-cpu +++ b/Documentation/ABI/testing/sysfs-devices-system-cpu @@ -92,6 +92,20 @@ Description: Discover NUMA node a CPU belongs to /sys/devices/system/cpu/cpu42/node2 -> ../../node/node2 +What: /sys/devices/system/cpu/cpu#/node +Date: October 2009 +Contact: Linux memory management mailing list <linux-mm@kvack.org> +Description: Discover NUMA node a CPU belongs to + + When CONFIG_NUMA is enabled, a symbolic link that points + to the corresponding NUMA node directory. + + For example, the following symlink is created for cpu42 + in NUMA node 2: + + /sys/devices/system/cpu/cpu42/node2 -> ../../node/node2 + + What: /sys/devices/system/cpu/cpu#/topology/core_id /sys/devices/system/cpu/cpu#/topology/core_siblings /sys/devices/system/cpu/cpu#/topology/core_siblings_list diff --git a/Documentation/device-mapper/snapshot.txt b/Documentation/device-mapper/snapshot.txt index a5009c8300f..e3a77b21513 100644 --- a/Documentation/device-mapper/snapshot.txt +++ b/Documentation/device-mapper/snapshot.txt @@ -8,13 +8,19 @@ the block device which are also writable without interfering with the original content; *) To create device "forks", i.e. multiple different versions of the same data stream. +*) To merge a snapshot of a block device back into the snapshot's origin +device. +In the first two cases, dm copies only the chunks of data that get +changed and uses a separate copy-on-write (COW) block device for +storage. -In both cases, dm copies only the chunks of data that get changed and -uses a separate copy-on-write (COW) block device for storage. +For snapshot merge the contents of the COW storage are merged back into +the origin device. -There are two dm targets available: snapshot and snapshot-origin. +There are three dm targets available: +snapshot, snapshot-origin, and snapshot-merge. *) snapshot-origin <origin> @@ -40,8 +46,25 @@ The difference is that for transient snapshots less metadata must be saved on disk - they can be kept in memory by the kernel. -How this is used by LVM2 -======================== +* snapshot-merge <origin> <COW device> <persistent> <chunksize> + +takes the same table arguments as the snapshot target except it only +works with persistent snapshots. 
This target assumes the role of the +"snapshot-origin" target and must not be loaded if the "snapshot-origin" +is still present for <origin>. + +Creates a merging snapshot that takes control of the changed chunks +stored in the <COW device> of an existing snapshot, through a handover +procedure, and merges these chunks back into the <origin>. Once merging +has started (in the background) the <origin> may be opened and the merge +will continue while I/O is flowing to it. Changes to the <origin> are +deferred until the merging snapshot's corresponding chunk(s) have been +merged. Once merging has started the snapshot device, associated with +the "snapshot" target, will return -EIO when accessed. + + +How snapshot is used by LVM2 +============================ When you create the first LVM2 snapshot of a volume, four dm devices are used: 1) a device containing the original mapping table of the source volume; @@ -72,3 +95,30 @@ brw------- 1 root root 254, 12 29 ago 18:15 /dev/mapper/volumeGroup-snap-cow brw------- 1 root root 254, 13 29 ago 18:15 /dev/mapper/volumeGroup-snap brw------- 1 root root 254, 10 29 ago 18:14 /dev/mapper/volumeGroup-base + +How snapshot-merge is used by LVM2 +================================== +A merging snapshot assumes the role of the "snapshot-origin" while +merging. As such the "snapshot-origin" is replaced with +"snapshot-merge". The "-real" device is not changed and the "-cow" +device is renamed to <origin name>-cow to aid LVM2's cleanup of the +merging snapshot after it completes. The "snapshot" that hands over its +COW device to the "snapshot-merge" is deactivated (unless using lvchange +--refresh); but if it is left active it will simply return I/O errors. + +A snapshot will merge into its origin with the following command: + +lvconvert --merge volumeGroup/snap + +we'll now have this situation: + +# dmsetup table|grep volumeGroup + +volumeGroup-base-real: 0 2097152 linear 8:19 384 +volumeGroup-base-cow: 0 204800 linear 8:19 2097536 +volumeGroup-base: 0 2097152 snapshot-merge 254:11 254:12 P 16 + +# ls -lL /dev/mapper/volumeGroup-* +brw------- 1 root root 254, 11 29 ago 18:15 /dev/mapper/volumeGroup-base-real +brw------- 1 root root 254, 12 29 ago 18:16 /dev/mapper/volumeGroup-base-cow +brw------- 1 root root 254, 10 29 ago 18:16 /dev/mapper/volumeGroup-base diff --git a/Documentation/feature-removal-schedule.txt b/Documentation/feature-removal-schedule.txt index eb2c138c277..21ab9357326 100644 --- a/Documentation/feature-removal-schedule.txt +++ b/Documentation/feature-removal-schedule.txt @@ -291,15 +291,6 @@ Who: Michael Buesch <mb@bu3sch.de> --------------------------- -What: print_fn_descriptor_symbol() -When: October 2009 -Why: The %pF vsprintf format provides the same functionality in a - simpler way. print_fn_descriptor_symbol() is deprecated but - still present to give out-of-tree modules time to change. 
-Who: Bjorn Helgaas <bjorn.helgaas@hp.com> - ---------------------------- - What: /sys/o2cb symlink When: January 2010 Why: /sys/fs/o2cb is the proper location for this information - /sys/o2cb diff --git a/Documentation/filesystems/proc.txt b/Documentation/filesystems/proc.txt index 94b9f2056f4..220cc6376ef 100644 --- a/Documentation/filesystems/proc.txt +++ b/Documentation/filesystems/proc.txt @@ -38,6 +38,7 @@ Table of Contents 3.3 /proc/<pid>/io - Display the IO accounting fields 3.4 /proc/<pid>/coredump_filter - Core dump filtering settings 3.5 /proc/<pid>/mountinfo - Information about mounts + 3.6 /proc/<pid>/comm & /proc/<pid>/task/<tid>/comm ------------------------------------------------------------------------------ @@ -1409,3 +1410,11 @@ For more information on mount propagation see: Documentation/filesystems/sharedsubtree.txt + +3.6 /proc/<pid>/comm & /proc/<pid>/task/<tid>/comm +-------------------------------------------------------- +These files provide a method to access a tasks comm value. It also allows for +a task to set its own or one of its thread siblings comm value. The comm value +is limited in size compared to the cmdline value, so writing anything longer +then the kernel's TASK_COMM_LEN (currently 16 chars) will result in a truncated +comm value. diff --git a/Documentation/hwmon/lis3lv02d b/Documentation/hwmon/lis3lv02d index effe949a728..06534f25e64 100644 --- a/Documentation/hwmon/lis3lv02d +++ b/Documentation/hwmon/lis3lv02d @@ -3,7 +3,8 @@ Kernel driver lis3lv02d Supported chips: - * STMicroelectronics LIS3LV02DL and LIS3LV02DQ + * STMicroelectronics LIS3LV02DL, LIS3LV02DQ (12 bits precision) + * STMicroelectronics LIS302DL, LIS3L02DQ, LIS331DL (8 bits) Authors: Yan Burman <burman.yan@gmail.com> @@ -13,32 +14,52 @@ Authors: Description ----------- -This driver provides support for the accelerometer found in various HP -laptops sporting the feature officially called "HP Mobile Data -Protection System 3D" or "HP 3D DriveGuard". It detects automatically -laptops with this sensor. Known models (for now the HP 2133, nc6420, -nc2510, nc8510, nc84x0, nw9440 and nx9420) will have their axis -automatically oriented on standard way (eg: you can directly play -neverball). The accelerometer data is readable via -/sys/devices/platform/lis3lv02d. +This driver provides support for the accelerometer found in various HP laptops +sporting the feature officially called "HP Mobile Data Protection System 3D" or +"HP 3D DriveGuard". It detects automatically laptops with this sensor. Known +models (full list can be found in drivers/hwmon/hp_accel.c) will have their +axis automatically oriented on standard way (eg: you can directly play +neverball). The accelerometer data is readable via +/sys/devices/platform/lis3lv02d. Reported values are scaled +to mg values (1/1000th of earth gravity). Sysfs attributes under /sys/devices/platform/lis3lv02d/: position - 3D position that the accelerometer reports. Format: "(x,y,z)" -calibrate - read: values (x, y, z) that are used as the base for input - class device operation. - write: forces the base to be recalibrated with the current - position. -rate - reports the sampling rate of the accelerometer device in HZ +rate - read reports the sampling rate of the accelerometer device in HZ. + write changes sampling rate of the accelerometer device. + Only values which are supported by HW are accepted. +selftest - performs selftest for the chip as specified by chip manufacturer. 
This driver also provides an absolute input class device, allowing -the laptop to act as a pinball machine-esque joystick. +the laptop to act as a pinball machine-esque joystick. Joystick device can be +calibrated. Joystick device can be in two different modes. +By default output values are scaled between -32768 .. 32767. In joystick raw +mode, joystick and sysfs position entry have the same scale. There can be +small difference due to input system fuzziness feature. +Events are also available as input event device. + +Selftest is meant only for hardware diagnostic purposes. It is not meant to be +used during normal operations. Position data is not corrupted during selftest +but interrupt behaviour is not guaranteed to work reliably. In test mode, the +sensing element is internally moved little bit. Selftest measures difference +between normal mode and test mode. Chip specifications tell the acceptance +limit for each type of the chip. Limits are provided via platform data +to allow adjustment of the limits without a change to the actual driver. +Seltest returns either "OK x y z" or "FAIL x y z" where x, y and z are +measured difference between modes. Axes are not remapped in selftest mode. +Measurement values are provided to help HW diagnostic applications to make +final decision. + +On HP laptops, if the led infrastructure is activated, support for a led +indicating disk protection will be provided as /sys/class/leds/hp::hddprotect. Another feature of the driver is misc device called "freefall" that acts similar to /dev/rtc and reacts on free-fall interrupts received from the device. It supports blocking operations, poll/select and fasync operation modes. You must read 1 bytes from the device. The result is number of free-fall interrupts since the last successful -read (or 255 if number of interrupts would not fit). +read (or 255 if number of interrupts would not fit). See the hpfall.c +file for an example on using the device. Axes orientation @@ -55,7 +76,7 @@ the accelerometer are converted into a "standard" organisation of the axes * If the laptop is put upside-down, Z becomes negative If your laptop model is not recognized (cf "dmesg"), you can send an -email to the authors to add it to the database. When reporting a new +email to the maintainer to add it to the database. When reporting a new laptop, please include the output of "dmidecode" plus the value of /sys/devices/platform/lis3lv02d/position in these four cases. 
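An illustrative user-space sketch (not taken from the patch) of reading the interfaces described above; the sysfs path comes from the text, and the free-fall misc device is assumed to appear as /dev/freefall:

    /* Read the lis3lv02d position attribute and one free-fall event count. */
    #include <stdio.h>
    #include <fcntl.h>
    #include <unistd.h>

    int main(void)
    {
            char buf[64];
            int fd, n;

            /* Current 3D position, reported as "(x,y,z)" in mg. */
            fd = open("/sys/devices/platform/lis3lv02d/position", O_RDONLY);
            if (fd >= 0) {
                    n = read(fd, buf, sizeof(buf) - 1);
                    if (n > 0) {
                            buf[n] = '\0';
                            printf("position: %s\n", buf);
                    }
                    close(fd);
            }

            /* One byte: the number of free-fall interrupts since the last
             * successful read (255 if the count would not fit).  The read
             * blocks until at least one interrupt has been received. */
            fd = open("/dev/freefall", O_RDONLY);
            if (fd >= 0) {
                    unsigned char count;

                    if (read(fd, &count, 1) == 1)
                            printf("free-fall interrupts: %u\n", count);
                    close(fd);
            }
            return 0;
    }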
diff --git a/Documentation/hwmon/w83627ehf b/Documentation/hwmon/w83627ehf index 02b74899eda..b7e42ec4b26 100644 --- a/Documentation/hwmon/w83627ehf +++ b/Documentation/hwmon/w83627ehf @@ -81,8 +81,14 @@ pwm[1-4] - this file stores PWM duty cycle or DC value (fan speed) in range: 0 (stop) to 255 (full) pwm[1-4]_enable - this file controls mode of fan/temperature control: - * 1 Manual Mode, write to pwm file any value 0-255 (full speed) - * 2 Thermal Cruise + * 1 Manual mode, write to pwm file any value 0-255 (full speed) + * 2 "Thermal Cruise" mode + * 3 "Fan Speed Cruise" mode + * 4 "Smart Fan III" mode + +pwm[1-4]_mode - controls if output is PWM or DC level + * 0 DC output (0 - 12v) + * 1 PWM output Thermal Cruise mode ------------------- diff --git a/Documentation/i2c/writing-clients b/Documentation/i2c/writing-clients index 7860aafb483..0a74603eb67 100644 --- a/Documentation/i2c/writing-clients +++ b/Documentation/i2c/writing-clients @@ -44,7 +44,7 @@ static struct i2c_driver foo_driver = { /* if device autodetection is needed: */ .class = I2C_CLASS_SOMETHING, .detect = foo_detect, - .address_data = &addr_data, + .address_list = normal_i2c, .shutdown = foo_shutdown, /* optional */ .suspend = foo_suspend, /* optional */ diff --git a/Documentation/memory-hotplug.txt b/Documentation/memory-hotplug.txt index bbc8a6a3692..57e7e9cc187 100644 --- a/Documentation/memory-hotplug.txt +++ b/Documentation/memory-hotplug.txt @@ -160,12 +160,15 @@ Under each section, you can see 4 files. NOTE: These directories/files appear after physical memory hotplug phase. -If CONFIG_NUMA is enabled the -/sys/devices/system/memory/memoryXXX memory section -directories can also be accessed via symbolic links located in -the /sys/devices/system/node/node* directories. For example: +If CONFIG_NUMA is enabled the memoryXXX/ directories can also be accessed +via symbolic links located in the /sys/devices/system/node/node* directories. + +For example: /sys/devices/system/node/node0/memory9 -> ../../memory/memory9 +A backlink will also be created: +/sys/devices/system/memory/memory9/node0 -> ../../node/node0 + -------------------------------- 4. Physical memory hot-add phase -------------------------------- diff --git a/Documentation/misc-devices/ad525x_dpot.txt b/Documentation/misc-devices/ad525x_dpot.txt new file mode 100644 index 00000000000..0c9413b1cbf --- /dev/null +++ b/Documentation/misc-devices/ad525x_dpot.txt @@ -0,0 +1,57 @@ +--------------------------------- + AD525x Digital Potentiometers +--------------------------------- + +The ad525x_dpot driver exports a simple sysfs interface. This allows you to +work with the immediate resistance settings as well as update the saved startup +settings. Access to the factory programmed tolerance is also provided, but +interpretation of this settings is required by the end application according to +the specific part in use. + +--------- + Files +--------- + +Each dpot device will have a set of eeprom, rdac, and tolerance files. How +many depends on the actual part you have, as will the range of allowed values. + +The eeprom files are used to program the startup value of the device. + +The rdac files are used to program the immediate value of the device. + +The tolerance files are the read-only factory programmed tolerance settings +and may vary greatly on a part-by-part basis. For exact interpretation of +this field, please consult the datasheet for your part. This is presented +as a hex file for easier parsing. 
+ +----------- + Example +----------- + +Locate the device in your sysfs tree. This is probably easiest by going into +the common i2c directory and locating the device by the i2c slave address. + + # ls /sys/bus/i2c/devices/ + 0-0022 0-0027 0-002f + +So assuming the device in question is on the first i2c bus and has the slave +address of 0x2f, we descend (unrelated sysfs entries have been trimmed). + + # ls /sys/bus/i2c/devices/0-002f/ + eeprom0 rdac0 tolerance0 + +You can use simple reads/writes to access these files: + + # cd /sys/bus/i2c/devices/0-002f/ + + # cat eeprom0 + 0 + # echo 10 > eeprom0 + # cat eeprom0 + 10 + + # cat rdac0 + 5 + # echo 3 > rdac0 + # cat rdac0 + 3 diff --git a/Documentation/nommu-mmap.txt b/Documentation/nommu-mmap.txt index b565e8279d1..8e1ddec2c78 100644 --- a/Documentation/nommu-mmap.txt +++ b/Documentation/nommu-mmap.txt @@ -119,6 +119,32 @@ FURTHER NOTES ON NO-MMU MMAP granule but will only discard the excess if appropriately configured as this has an effect on fragmentation. + (*) The memory allocated by a request for an anonymous mapping will normally + be cleared by the kernel before being returned in accordance with the + Linux man pages (ver 2.22 or later). + + In the MMU case this can be achieved with reasonable performance as + regions are backed by virtual pages, with the contents only being mapped + to cleared physical pages when a write happens on that specific page + (prior to which, the pages are effectively mapped to the global zero page + from which reads can take place). This spreads out the time it takes to + initialize the contents of a page - depending on the write-usage of the + mapping. + + In the no-MMU case, however, anonymous mappings are backed by physical + pages, and the entire map is cleared at allocation time. This can cause + significant delays during a userspace malloc() as the C library does an + anonymous mapping and the kernel then does a memset for the entire map. + + However, for memory that isn't required to be precleared - such as that + returned by malloc() - mmap() can take a MAP_UNINITIALIZED flag to + indicate to the kernel that it shouldn't bother clearing the memory before + returning it. Note that CONFIG_MMAP_ALLOW_UNINITIALIZED must be enabled + to permit this, otherwise the flag will be ignored. + + uClibc uses this to speed up malloc(), and the ELF-FDPIC binfmt uses this + to allocate the brk and stack region. + (*) A list of all the private copy and anonymous mappings on the system is visible through /proc/maps in no-MMU mode. diff --git a/Documentation/vm/hugetlbpage.txt b/Documentation/vm/hugetlbpage.txt index 82a7bd1800b..bc31636973e 100644 --- a/Documentation/vm/hugetlbpage.txt +++ b/Documentation/vm/hugetlbpage.txt @@ -11,23 +11,21 @@ This optimization is more critical now as bigger and bigger physical memories (several GBs) are more readily available. Users can use the huge page support in Linux kernel by either using the mmap -system call or standard SYSv shared memory system calls (shmget, shmat). +system call or standard SYSV shared memory system calls (shmget, shmat). First the Linux kernel needs to be built with the CONFIG_HUGETLBFS (present under "File systems") and CONFIG_HUGETLB_PAGE (selected automatically when CONFIG_HUGETLBFS is selected) configuration options. -The kernel built with huge page support should show the number of configured -huge pages in the system by running the "cat /proc/meminfo" command. 
+The /proc/meminfo file provides information about the total number of +persistent hugetlb pages in the kernel's huge page pool. It also displays +information about the number of free, reserved and surplus huge pages and the +default huge page size. The huge page size is needed for generating the +proper alignment and size of the arguments to system calls that map huge page +regions. -/proc/meminfo also provides information about the total number of hugetlb -pages configured in the kernel. It also displays information about the -number of free hugetlb pages at any time. It also displays information about -the configured huge page size - this is needed for generating the proper -alignment and size of the arguments to the above system calls. - -The output of "cat /proc/meminfo" will have lines like: +The output of "cat /proc/meminfo" will include lines like: ..... HugePages_Total: vvv @@ -53,59 +51,63 @@ HugePages_Surp is short for "surplus," and is the number of huge pages in /proc/filesystems should also show a filesystem of type "hugetlbfs" configured in the kernel. -/proc/sys/vm/nr_hugepages indicates the current number of configured hugetlb -pages in the kernel. Super user can dynamically request more (or free some -pre-configured) huge pages. -The allocation (or deallocation) of hugetlb pages is possible only if there are -enough physically contiguous free pages in system (freeing of huge pages is -possible only if there are enough hugetlb pages free that can be transferred -back to regular memory pool). +/proc/sys/vm/nr_hugepages indicates the current number of "persistent" huge +pages in the kernel's huge page pool. "Persistent" huge pages will be +returned to the huge page pool when freed by a task. A user with root +privileges can dynamically allocate more or free some persistent huge pages +by increasing or decreasing the value of 'nr_hugepages'. -Pages that are used as hugetlb pages are reserved inside the kernel and cannot -be used for other purposes. +Pages that are used as huge pages are reserved inside the kernel and cannot +be used for other purposes. Huge pages cannot be swapped out under +memory pressure. -Once the kernel with Hugetlb page support is built and running, a user can -use either the mmap system call or shared memory system calls to start using -the huge pages. It is required that the system administrator preallocate -enough memory for huge page purposes. +Once a number of huge pages have been pre-allocated to the kernel huge page +pool, a user with appropriate privilege can use either the mmap system call +or shared memory system calls to use the huge pages. See the discussion of +Using Huge Pages, below. -The administrator can preallocate huge pages on the kernel boot command line by -specifying the "hugepages=N" parameter, where 'N' = the number of huge pages -requested. This is the most reliable method for preallocating huge pages as -memory has not yet become fragmented. +The administrator can allocate persistent huge pages on the kernel boot +command line by specifying the "hugepages=N" parameter, where 'N' = the +number of huge pages requested. This is the most reliable method of +allocating huge pages as memory has not yet become fragmented. -Some platforms support multiple huge page sizes. To preallocate huge pages +Some platforms support multiple huge page sizes. To allocate huge pages of a specific size, one must preceed the huge pages boot command parameters with a huge page size selection parameter "hugepagesz=<size>". 
<size> must be specified in bytes with optional scale suffix [kKmMgG]. The default huge page size may be selected with the "default_hugepagesz=<size>" boot parameter. -/proc/sys/vm/nr_hugepages indicates the current number of configured [default -size] hugetlb pages in the kernel. Super user can dynamically request more -(or free some pre-configured) huge pages. - -Use the following command to dynamically allocate/deallocate default sized -huge pages: +When multiple huge page sizes are supported, /proc/sys/vm/nr_hugepages +indicates the current number of pre-allocated huge pages of the default size. +Thus, one can use the following command to dynamically allocate/deallocate +default sized persistent huge pages: echo 20 > /proc/sys/vm/nr_hugepages -This command will try to configure 20 default sized huge pages in the system. +This command will try to adjust the number of default sized huge pages in the +huge page pool to 20, allocating or freeing huge pages, as required. + On a NUMA platform, the kernel will attempt to distribute the huge page pool -over the all on-line nodes. These huge pages, allocated when nr_hugepages -is increased, are called "persistent huge pages". +over all the set of allowed nodes specified by the NUMA memory policy of the +task that modifies nr_hugepages. The default for the allowed nodes--when the +task has default memory policy--is all on-line nodes with memory. Allowed +nodes with insufficient available, contiguous memory for a huge page will be +silently skipped when allocating persistent huge pages. See the discussion +below of the interaction of task memory policy, cpusets and per node attributes +with the allocation and freeing of persistent huge pages. The success or failure of huge page allocation depends on the amount of -physically contiguous memory that is preset in system at the time of the +physically contiguous memory that is present in system at the time of the allocation attempt. If the kernel is unable to allocate huge pages from some nodes in a NUMA system, it will attempt to make up the difference by allocating extra pages on other nodes with sufficient available contiguous memory, if any. -System administrators may want to put this command in one of the local rc init -files. This will enable the kernel to request huge pages early in the boot -process when the possibility of getting physical contiguous pages is still -very high. Administrators can verify the number of huge pages actually -allocated by checking the sysctl or meminfo. To check the per node +System administrators may want to put this command in one of the local rc +init files. This will enable the kernel to allocate huge pages early in +the boot process when the possibility of getting physical contiguous pages +is still very high. Administrators can verify the number of huge pages +actually allocated by checking the sysctl or meminfo. To check the per node distribution of huge pages in a NUMA system, use: cat /sys/devices/system/node/node*/meminfo | fgrep Huge @@ -113,45 +115,47 @@ distribution of huge pages in a NUMA system, use: /proc/sys/vm/nr_overcommit_hugepages specifies how large the pool of huge pages can grow, if more huge pages than /proc/sys/vm/nr_hugepages are requested by applications. Writing any non-zero value into this file -indicates that the hugetlb subsystem is allowed to try to obtain "surplus" -huge pages from the buddy allocator, when the normal pool is exhausted. As -these surplus huge pages go out of use, they are freed back to the buddy -allocator. 
+indicates that the hugetlb subsystem is allowed to try to obtain that +number of "surplus" huge pages from the kernel's normal page pool, when the +persistent huge page pool is exhausted. As these surplus huge pages become +unused, they are freed back to the kernel's normal page pool. -When increasing the huge page pool size via nr_hugepages, any surplus +When increasing the huge page pool size via nr_hugepages, any existing surplus pages will first be promoted to persistent huge pages. Then, additional huge pages will be allocated, if necessary and if possible, to fulfill -the new huge page pool size. +the new persistent huge page pool size. -The administrator may shrink the pool of preallocated huge pages for +The administrator may shrink the pool of persistent huge pages for the default huge page size by setting the nr_hugepages sysctl to a smaller value. The kernel will attempt to balance the freeing of huge pages -across all on-line nodes. Any free huge pages on the selected nodes will -be freed back to the buddy allocator. - -Caveat: Shrinking the pool via nr_hugepages such that it becomes less -than the number of huge pages in use will convert the balance to surplus -huge pages even if it would exceed the overcommit value. As long as -this condition holds, however, no more surplus huge pages will be -allowed on the system until one of the two sysctls are increased -sufficiently, or the surplus huge pages go out of use and are freed. +across all nodes in the memory policy of the task modifying nr_hugepages. +Any free huge pages on the selected nodes will be freed back to the kernel's +normal page pool. + +Caveat: Shrinking the persistent huge page pool via nr_hugepages such that +it becomes less than the number of huge pages in use will convert the balance +of the in-use huge pages to surplus huge pages. This will occur even if +the number of surplus pages it would exceed the overcommit value. As long as +this condition holds--that is, until nr_hugepages+nr_overcommit_hugepages is +increased sufficiently, or the surplus huge pages go out of use and are freed-- +no more surplus huge pages will be allowed to be allocated. With support for multiple huge page pools at run-time available, much of -the huge page userspace interface has been duplicated in sysfs. The above -information applies to the default huge page size which will be -controlled by the /proc interfaces for backwards compatibility. The root -huge page control directory in sysfs is: +the huge page userspace interface in /proc/sys/vm has been duplicated in sysfs. +The /proc interfaces discussed above have been retained for backwards +compatibility. The root huge page control directory in sysfs is: /sys/kernel/mm/hugepages For each huge page size supported by the running kernel, a subdirectory -will exist, of the form +will exist, of the form: hugepages-${size}kB Inside each of these directories, the same set of files will exist: nr_hugepages + nr_hugepages_mempolicy nr_overcommit_hugepages free_hugepages resv_hugepages @@ -159,6 +163,102 @@ Inside each of these directories, the same set of files will exist: which function as described above for the default huge page-sized case. 
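As a brief illustration of the per-size sysfs files listed above, the following user-space sketch (requiring root) resizes one pool and reads back its free count; "hugepages-2048kB" is only an example directory name, so substitute a size reported on your system:

    #include <stdio.h>

    #define POOL "/sys/kernel/mm/hugepages/hugepages-2048kB/"

    int main(void)
    {
            FILE *f;
            unsigned long free_pages;

            /* Request 20 persistent huge pages of this size; equivalent to
             * "echo 20 > nr_hugepages" in that directory. */
            f = fopen(POOL "nr_hugepages", "w");
            if (f) {
                    fprintf(f, "20\n");
                    fclose(f);
            }

            /* Read back how many huge pages of this size are currently free. */
            f = fopen(POOL "free_hugepages", "r");
            if (f) {
                    if (fscanf(f, "%lu", &free_pages) == 1)
                            printf("free huge pages: %lu\n", free_pages);
                    fclose(f);
            }
            return 0;
    }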
+ +Interaction of Task Memory Policy with Huge Page Allocation/Freeing + +Whether huge pages are allocated and freed via the /proc interface or +the /sysfs interface using the nr_hugepages_mempolicy attribute, the NUMA +nodes from which huge pages are allocated or freed are controlled by the +NUMA memory policy of the task that modifies the nr_hugepages_mempolicy +sysctl or attribute. When the nr_hugepages attribute is used, mempolicy +is ignored. + +The recommended method to allocate or free huge pages to/from the kernel +huge page pool, using the nr_hugepages example above, is: + + numactl --interleave <node-list> echo 20 \ + >/proc/sys/vm/nr_hugepages_mempolicy + +or, more succinctly: + + numactl -m <node-list> echo 20 >/proc/sys/vm/nr_hugepages_mempolicy + +This will allocate or free abs(20 - nr_hugepages) to or from the nodes +specified in <node-list>, depending on whether number of persistent huge pages +is initially less than or greater than 20, respectively. No huge pages will be +allocated nor freed on any node not included in the specified <node-list>. + +When adjusting the persistent hugepage count via nr_hugepages_mempolicy, any +memory policy mode--bind, preferred, local or interleave--may be used. The +resulting effect on persistent huge page allocation is as follows: + +1) Regardless of mempolicy mode [see Documentation/vm/numa_memory_policy.txt], + persistent huge pages will be distributed across the node or nodes + specified in the mempolicy as if "interleave" had been specified. + However, if a node in the policy does not contain sufficient contiguous + memory for a huge page, the allocation will not "fallback" to the nearest + neighbor node with sufficient contiguous memory. To do this would cause + undesirable imbalance in the distribution of the huge page pool, or + possibly, allocation of persistent huge pages on nodes not allowed by + the task's memory policy. + +2) One or more nodes may be specified with the bind or interleave policy. + If more than one node is specified with the preferred policy, only the + lowest numeric id will be used. Local policy will select the node where + the task is running at the time the nodes_allowed mask is constructed. + For local policy to be deterministic, the task must be bound to a cpu or + cpus in a single node. Otherwise, the task could be migrated to some + other node at any time after launch and the resulting node will be + indeterminate. Thus, local policy is not very useful for this purpose. + Any of the other mempolicy modes may be used to specify a single node. + +3) The nodes allowed mask will be derived from any non-default task mempolicy, + whether this policy was set explicitly by the task itself or one of its + ancestors, such as numactl. This means that if the task is invoked from a + shell with non-default policy, that policy will be used. One can specify a + node list of "all" with numactl --interleave or --membind [-m] to achieve + interleaving over all nodes in the system or cpuset. + +4) Any task mempolicy specifed--e.g., using numactl--will be constrained by + the resource limits of any cpuset in which the task runs. Thus, there will + be no way for a task with non-default policy running in a cpuset with a + subset of the system nodes to allocate huge pages outside the cpuset + without first moving to a cpuset that contains all of the desired nodes. + +5) Boot-time huge page allocation attempts to distribute the requested number + of huge pages over all on-lines nodes with memory. 
+ +Per Node Hugepages Attributes + +A subset of the contents of the root huge page control directory in sysfs, +described above, will be replicated under each the system device of each +NUMA node with memory in: + + /sys/devices/system/node/node[0-9]*/hugepages/ + +Under this directory, the subdirectory for each supported huge page size +contains the following attribute files: + + nr_hugepages + free_hugepages + surplus_hugepages + +The free_' and surplus_' attribute files are read-only. They return the number +of free and surplus [overcommitted] huge pages, respectively, on the parent +node. + +The nr_hugepages attribute returns the total number of huge pages on the +specified node. When this attribute is written, the number of persistent huge +pages on the parent node will be adjusted to the specified value, if sufficient +resources exist, regardless of the task's mempolicy or cpuset constraints. + +Note that the number of overcommit and reserve pages remain global quantities, +as we don't know until fault time, when the faulting task's mempolicy is +applied, from which node the huge page allocation will be attempted. + + +Using Huge Pages + If the user applications are going to request huge pages using mmap system call, then it is required that system administrator mount a file system of type hugetlbfs: @@ -206,9 +306,11 @@ map_hugetlb.c. * requesting huge pages. * * For the ia64 architecture, the Linux kernel reserves Region number 4 for - * huge pages. That means the addresses starting with 0x800000... will need - * to be specified. Specifying a fixed address is not required on ppc64, - * i386 or x86_64. + * huge pages. That means that if one requires a fixed address, a huge page + * aligned address starting with 0x800000... will be required. If a fixed + * address is not required, the kernel will select an address in the proper + * range. + * Other architectures, such as ppc64, i386 or x86_64 are not so constrained. * * Note: The default shared memory limit is quite low on many kernels, * you may need to increase it via: @@ -237,14 +339,8 @@ map_hugetlb.c. #define dprintf(x) printf(x) -/* Only ia64 requires this */ -#ifdef __ia64__ -#define ADDR (void *)(0x8000000000000000UL) -#define SHMAT_FLAGS (SHM_RND) -#else -#define ADDR (void *)(0x0UL) +#define ADDR (void *)(0x0UL) /* let kernel choose address */ #define SHMAT_FLAGS (0) -#endif int main(void) { @@ -302,10 +398,12 @@ int main(void) * example, the app is requesting memory of size 256MB that is backed by * huge pages. * - * For ia64 architecture, Linux kernel reserves Region number 4 for huge pages. - * That means the addresses starting with 0x800000... will need to be - * specified. Specifying a fixed address is not required on ppc64, i386 - * or x86_64. + * For the ia64 architecture, the Linux kernel reserves Region number 4 for + * huge pages. That means that if one requires a fixed address, a huge page + * aligned address starting with 0x800000... will be required. If a fixed + * address is not required, the kernel will select an address in the proper + * range. + * Other architectures, such as ppc64, i386 or x86_64 are not so constrained. 
*/ #include <stdlib.h> #include <stdio.h> @@ -317,14 +415,8 @@ int main(void) #define LENGTH (256UL*1024*1024) #define PROTECTION (PROT_READ | PROT_WRITE) -/* Only ia64 requires this */ -#ifdef __ia64__ -#define ADDR (void *)(0x8000000000000000UL) -#define FLAGS (MAP_SHARED | MAP_FIXED) -#else -#define ADDR (void *)(0x0UL) +#define ADDR (void *)(0x0UL) /* let kernel choose address */ #define FLAGS (MAP_SHARED) -#endif void check_bytes(char *addr) { diff --git a/Documentation/vm/ksm.txt b/Documentation/vm/ksm.txt index 262d8e6793a..b392e496f81 100644 --- a/Documentation/vm/ksm.txt +++ b/Documentation/vm/ksm.txt @@ -16,9 +16,9 @@ by sharing the data common between them. But it can be useful to any application which generates many instances of the same data. KSM only merges anonymous (private) pages, never pagecache (file) pages. -KSM's merged pages are at present locked into kernel memory for as long -as they are shared: so cannot be swapped out like the user pages they -replace (but swapping KSM pages should follow soon in a later release). +KSM's merged pages were originally locked into kernel memory, but can now +be swapped out just like other user pages (but sharing is broken when they +are swapped back in: ksmd must rediscover their identity and merge again). KSM only operates on those areas of address space which an application has advised to be likely candidates for merging, by using the madvise(2) @@ -44,20 +44,12 @@ includes unmapped gaps (though working on the intervening mapped areas), and might fail with EAGAIN if not enough memory for internal structures. Applications should be considerate in their use of MADV_MERGEABLE, -restricting its use to areas likely to benefit. KSM's scans may use -a lot of processing power, and its kernel-resident pages are a limited -resource. Some installations will disable KSM for these reasons. +restricting its use to areas likely to benefit. KSM's scans may use a lot +of processing power: some installations will disable KSM for that reason. The KSM daemon is controlled by sysfs files in /sys/kernel/mm/ksm/, readable by all but writable only by root: -max_kernel_pages - set to maximum number of kernel pages that KSM may use - e.g. "echo 100000 > /sys/kernel/mm/ksm/max_kernel_pages" - Value 0 imposes no limit on the kernel pages KSM may use; - but note that any process using MADV_MERGEABLE can cause - KSM to allocate these pages, unswappable until it exits. - Default: quarter of memory (chosen to not pin too much) - pages_to_scan - how many present pages to scan before ksmd goes to sleep e.g. "echo 100 > /sys/kernel/mm/ksm/pages_to_scan" Default: 100 (chosen for demonstration purposes) @@ -75,7 +67,7 @@ run - set 0 to stop ksmd from running but keep merged pages, The effectiveness of KSM and MADV_MERGEABLE is shown in /sys/kernel/mm/ksm/: -pages_shared - how many shared unswappable kernel pages KSM is using +pages_shared - how many shared pages are being used pages_sharing - how many more sites are sharing them i.e. how much saved pages_unshared - how many pages unique but repeatedly checked for merging pages_volatile - how many pages changing too fast to be placed in a tree @@ -87,4 +79,4 @@ pages_volatile embraces several different kinds of activity, but a high proportion there would also indicate poor use of madvise MADV_MERGEABLE. 
Izik Eidus, -Hugh Dickins, 24 Sept 2009 +Hugh Dickins, 17 Nov 2009 diff --git a/Documentation/vm/page-types.c b/Documentation/vm/page-types.c index ea44ea502da..7a7d9bab32e 100644 --- a/Documentation/vm/page-types.c +++ b/Documentation/vm/page-types.c @@ -100,7 +100,7 @@ #define BIT(name) (1ULL << KPF_##name) #define BITS_COMPOUND (BIT(COMPOUND_HEAD) | BIT(COMPOUND_TAIL)) -static char *page_flag_names[] = { +static const char *page_flag_names[] = { [KPF_LOCKED] = "L:locked", [KPF_ERROR] = "E:error", [KPF_REFERENCED] = "R:referenced", @@ -173,7 +173,7 @@ static int kpageflags_fd; static int opt_hwpoison; static int opt_unpoison; -static char *hwpoison_debug_fs = "/debug/hwpoison"; +static const char hwpoison_debug_fs[] = "/debug/hwpoison"; static int hwpoison_inject_fd; static int hwpoison_forget_fd; @@ -560,7 +560,7 @@ static void walk_pfn(unsigned long voffset, { uint64_t buf[KPAGEFLAGS_BATCH]; unsigned long batch; - unsigned long pages; + long pages; unsigned long i; while (count) { @@ -673,30 +673,35 @@ static void usage(void) printf( "page-types [options]\n" -" -r|--raw Raw mode, for kernel developers\n" -" -a|--addr addr-spec Walk a range of pages\n" -" -b|--bits bits-spec Walk pages with specified bits\n" -" -p|--pid pid Walk process address space\n" +" -r|--raw Raw mode, for kernel developers\n" +" -d|--describe flags Describe flags\n" +" -a|--addr addr-spec Walk a range of pages\n" +" -b|--bits bits-spec Walk pages with specified bits\n" +" -p|--pid pid Walk process address space\n" #if 0 /* planned features */ -" -f|--file filename Walk file address space\n" +" -f|--file filename Walk file address space\n" #endif -" -l|--list Show page details in ranges\n" -" -L|--list-each Show page details one by one\n" -" -N|--no-summary Don't show summay info\n" -" -X|--hwpoison hwpoison pages\n" -" -x|--unpoison unpoison pages\n" -" -h|--help Show this usage message\n" +" -l|--list Show page details in ranges\n" +" -L|--list-each Show page details one by one\n" +" -N|--no-summary Don't show summay info\n" +" -X|--hwpoison hwpoison pages\n" +" -x|--unpoison unpoison pages\n" +" -h|--help Show this usage message\n" +"flags:\n" +" 0x10 bitfield format, e.g.\n" +" anon bit-name, e.g.\n" +" 0x10,anon comma-separated list, e.g.\n" "addr-spec:\n" -" N one page at offset N (unit: pages)\n" -" N+M pages range from N to N+M-1\n" -" N,M pages range from N to M-1\n" -" N, pages range from N to end\n" -" ,M pages range from 0 to M-1\n" +" N one page at offset N (unit: pages)\n" +" N+M pages range from N to N+M-1\n" +" N,M pages range from N to M-1\n" +" N, pages range from N to end\n" +" ,M pages range from 0 to M-1\n" "bits-spec:\n" -" bit1,bit2 (flags & (bit1|bit2)) != 0\n" -" bit1,bit2=bit1 (flags & (bit1|bit2)) == bit1\n" -" bit1,~bit2 (flags & (bit1|bit2)) == bit1\n" -" =bit1,bit2 flags == (bit1|bit2)\n" +" bit1,bit2 (flags & (bit1|bit2)) != 0\n" +" bit1,bit2=bit1 (flags & (bit1|bit2)) == bit1\n" +" bit1,~bit2 (flags & (bit1|bit2)) == bit1\n" +" =bit1,bit2 flags == (bit1|bit2)\n" "bit-names:\n" ); @@ -884,13 +889,23 @@ static void parse_bits_mask(const char *optarg) add_bits_filter(mask, bits); } +static void describe_flags(const char *optarg) +{ + uint64_t flags = parse_flag_names(optarg, 0); -static struct option opts[] = { + printf("0x%016llx\t%s\t%s\n", + (unsigned long long)flags, + page_flag_name(flags), + page_flag_longname(flags)); +} + +static const struct option opts[] = { { "raw" , 0, NULL, 'r' }, { "pid" , 1, NULL, 'p' }, { "file" , 1, NULL, 'f' }, { "addr" , 1, NULL, 'a' }, { "bits" , 1, 
NULL, 'b' }, + { "describe" , 1, NULL, 'd' }, { "list" , 0, NULL, 'l' }, { "list-each" , 0, NULL, 'L' }, { "no-summary", 0, NULL, 'N' }, @@ -907,7 +922,7 @@ int main(int argc, char *argv[]) page_size = getpagesize(); while ((c = getopt_long(argc, argv, - "rp:f:a:b:lLNXxh", opts, NULL)) != -1) { + "rp:f:a:b:d:lLNXxh", opts, NULL)) != -1) { switch (c) { case 'r': opt_raw = 1; @@ -924,6 +939,9 @@ int main(int argc, char *argv[]) case 'b': parse_bits_mask(optarg); break; + case 'd': + describe_flags(optarg); + exit(0); case 'l': opt_list = 1; break; diff --git a/MAINTAINERS b/MAINTAINERS index 1f21c34124d..0a32c3ec6b1 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -835,13 +835,13 @@ F: arch/arm/mach-pxa/palmte2.c F: arch/arm/mach-pxa/include/mach/palmtc.h F: arch/arm/mach-pxa/palmtc.c -ARM/PALM TREO 680 SUPPORT +ARM/PALM TREO SUPPORT M: Tomas Cech <sleep_walker@suse.cz> L: linux-arm-kernel@lists.infradead.org W: http://hackndev.com S: Maintained -F: arch/arm/mach-pxa/include/mach/treo680.h -F: arch/arm/mach-pxa/treo680.c +F: arch/arm/mach-pxa/include/mach/palmtreo.h +F: arch/arm/mach-pxa/palmtreo.c ARM/PALMZ72 SUPPORT M: Sergey Lapin <slapin@ossfans.org> @@ -1482,8 +1482,8 @@ F: include/linux/coda*.h COMMON INTERNET FILE SYSTEM (CIFS) M: Steve French <sfrench@samba.org> -L: linux-cifs-client@lists.samba.org -L: samba-technical@lists.samba.org +L: linux-cifs-client@lists.samba.org (moderated for non-subscribers) +L: samba-technical@lists.samba.org (moderated for non-subscribers) W: http://linux-cifs.samba.org/ T: git git://git.kernel.org/pub/scm/linux/kernel/git/sfrench/cifs-2.6.git S: Supported @@ -3081,8 +3081,11 @@ S: Maintained F: fs/autofs4/ KERNEL BUILD +M: Michal Marek <mmarek@suse.cz> +T: git git://repo.or.cz/linux-kbuild.git for-next +T: git git://repo.or.cz/linux-kbuild.git for-linus L: linux-kbuild@vger.kernel.org -S: Orphan +S: Maintained F: Documentation/kbuild/ F: Makefile F: scripts/Makefile.* @@ -3124,7 +3127,6 @@ L: kvm@vger.kernel.org W: http://kvm.qumranet.com S: Supported F: arch/x86/include/asm/svm.h -F: arch/x86/kvm/kvm_svm.h F: arch/x86/kvm/svm.c KERNEL VIRTUAL MACHINE (KVM) FOR POWERPC @@ -5974,6 +5976,7 @@ M: Mark Brown <broonie@opensource.wolfsonmicro.com> T: git git://opensource.wolfsonmicro.com/linux-2.6-audioplus W: http://opensource.wolfsonmicro.com/node/8 S: Supported +F: Documentation/hwmon/wm83?? 
F: drivers/leds/leds-wm83*.c F: drivers/mfd/wm8*.c F: drivers/power/wm83*.c @@ -5983,9 +5986,9 @@ F: drivers/video/backlight/wm83*_bl.c F: drivers/watchdog/wm83*_wdt.c F: include/linux/mfd/wm831x/ F: include/linux/mfd/wm8350/ -F: include/linux/mfd/wm8400/ -F: sound/soc/codecs/wm8350.c -F: sound/soc/codecs/wm8400.c +F: include/linux/mfd/wm8400* +F: sound/soc/codecs/wm8350.* +F: sound/soc/codecs/wm8400.* X.25 NETWORK LAYER M: Henner Eisen <eis@baty.hanse.de> diff --git a/arch/alpha/include/asm/core_t2.h b/arch/alpha/include/asm/core_t2.h index 46bfff58f67..471c07292e0 100644 --- a/arch/alpha/include/asm/core_t2.h +++ b/arch/alpha/include/asm/core_t2.h @@ -435,7 +435,7 @@ extern inline void t2_outl(u32 b, unsigned long addr) set_hae(msb); \ } -extern spinlock_t t2_hae_lock; +extern raw_spinlock_t t2_hae_lock; /* * NOTE: take T2_DENSE_MEM off in each readX/writeX routine, since @@ -448,12 +448,12 @@ __EXTERN_INLINE u8 t2_readb(const volatile void __iomem *xaddr) unsigned long addr = (unsigned long) xaddr - T2_DENSE_MEM; unsigned long result, msb; unsigned long flags; - spin_lock_irqsave(&t2_hae_lock, flags); + raw_spin_lock_irqsave(&t2_hae_lock, flags); t2_set_hae; result = *(vip) ((addr << 5) + T2_SPARSE_MEM + 0x00); - spin_unlock_irqrestore(&t2_hae_lock, flags); + raw_spin_unlock_irqrestore(&t2_hae_lock, flags); return __kernel_extbl(result, addr & 3); } @@ -462,12 +462,12 @@ __EXTERN_INLINE u16 t2_readw(const volatile void __iomem *xaddr) unsigned long addr = (unsigned long) xaddr - T2_DENSE_MEM; unsigned long result, msb; unsigned long flags; - spin_lock_irqsave(&t2_hae_lock, flags); + raw_spin_lock_irqsave(&t2_hae_lock, flags); t2_set_hae; result = *(vuip) ((addr << 5) + T2_SPARSE_MEM + 0x08); - spin_unlock_irqrestore(&t2_hae_lock, flags); + raw_spin_unlock_irqrestore(&t2_hae_lock, flags); return __kernel_extwl(result, addr & 3); } @@ -480,12 +480,12 @@ __EXTERN_INLINE u32 t2_readl(const volatile void __iomem *xaddr) unsigned long addr = (unsigned long) xaddr - T2_DENSE_MEM; unsigned long result, msb; unsigned long flags; - spin_lock_irqsave(&t2_hae_lock, flags); + raw_spin_lock_irqsave(&t2_hae_lock, flags); t2_set_hae; result = *(vuip) ((addr << 5) + T2_SPARSE_MEM + 0x18); - spin_unlock_irqrestore(&t2_hae_lock, flags); + raw_spin_unlock_irqrestore(&t2_hae_lock, flags); return result & 0xffffffffUL; } @@ -494,14 +494,14 @@ __EXTERN_INLINE u64 t2_readq(const volatile void __iomem *xaddr) unsigned long addr = (unsigned long) xaddr - T2_DENSE_MEM; unsigned long r0, r1, work, msb; unsigned long flags; - spin_lock_irqsave(&t2_hae_lock, flags); + raw_spin_lock_irqsave(&t2_hae_lock, flags); t2_set_hae; work = (addr << 5) + T2_SPARSE_MEM + 0x18; r0 = *(vuip)(work); r1 = *(vuip)(work + (4 << 5)); - spin_unlock_irqrestore(&t2_hae_lock, flags); + raw_spin_unlock_irqrestore(&t2_hae_lock, flags); return r1 << 32 | r0; } @@ -510,13 +510,13 @@ __EXTERN_INLINE void t2_writeb(u8 b, volatile void __iomem *xaddr) unsigned long addr = (unsigned long) xaddr - T2_DENSE_MEM; unsigned long msb, w; unsigned long flags; - spin_lock_irqsave(&t2_hae_lock, flags); + raw_spin_lock_irqsave(&t2_hae_lock, flags); t2_set_hae; w = __kernel_insbl(b, addr & 3); *(vuip) ((addr << 5) + T2_SPARSE_MEM + 0x00) = w; - spin_unlock_irqrestore(&t2_hae_lock, flags); + raw_spin_unlock_irqrestore(&t2_hae_lock, flags); } __EXTERN_INLINE void t2_writew(u16 b, volatile void __iomem *xaddr) @@ -524,13 +524,13 @@ __EXTERN_INLINE void t2_writew(u16 b, volatile void __iomem *xaddr) unsigned long addr = (unsigned long) xaddr - T2_DENSE_MEM; 
unsigned long msb, w; unsigned long flags; - spin_lock_irqsave(&t2_hae_lock, flags); + raw_spin_lock_irqsave(&t2_hae_lock, flags); t2_set_hae; w = __kernel_inswl(b, addr & 3); *(vuip) ((addr << 5) + T2_SPARSE_MEM + 0x08) = w; - spin_unlock_irqrestore(&t2_hae_lock, flags); + raw_spin_unlock_irqrestore(&t2_hae_lock, flags); } /* @@ -542,12 +542,12 @@ __EXTERN_INLINE void t2_writel(u32 b, volatile void __iomem *xaddr) unsigned long addr = (unsigned long) xaddr - T2_DENSE_MEM; unsigned long msb; unsigned long flags; - spin_lock_irqsave(&t2_hae_lock, flags); + raw_spin_lock_irqsave(&t2_hae_lock, flags); t2_set_hae; *(vuip) ((addr << 5) + T2_SPARSE_MEM + 0x18) = b; - spin_unlock_irqrestore(&t2_hae_lock, flags); + raw_spin_unlock_irqrestore(&t2_hae_lock, flags); } __EXTERN_INLINE void t2_writeq(u64 b, volatile void __iomem *xaddr) @@ -555,14 +555,14 @@ __EXTERN_INLINE void t2_writeq(u64 b, volatile void __iomem *xaddr) unsigned long addr = (unsigned long) xaddr - T2_DENSE_MEM; unsigned long msb, work; unsigned long flags; - spin_lock_irqsave(&t2_hae_lock, flags); + raw_spin_lock_irqsave(&t2_hae_lock, flags); t2_set_hae; work = (addr << 5) + T2_SPARSE_MEM + 0x18; *(vuip)work = b; *(vuip)(work + (4 << 5)) = b >> 32; - spin_unlock_irqrestore(&t2_hae_lock, flags); + raw_spin_unlock_irqrestore(&t2_hae_lock, flags); } __EXTERN_INLINE void __iomem *t2_ioportmap(unsigned long addr) diff --git a/arch/alpha/include/asm/spinlock.h b/arch/alpha/include/asm/spinlock.h index e38fb95cb33..d0faca1e992 100644 --- a/arch/alpha/include/asm/spinlock.h +++ b/arch/alpha/include/asm/spinlock.h @@ -12,18 +12,18 @@ * We make no fairness assumptions. They have a cost. */ -#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock) -#define __raw_spin_is_locked(x) ((x)->lock != 0) -#define __raw_spin_unlock_wait(x) \ +#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock) +#define arch_spin_is_locked(x) ((x)->lock != 0) +#define arch_spin_unlock_wait(x) \ do { cpu_relax(); } while ((x)->lock) -static inline void __raw_spin_unlock(raw_spinlock_t * lock) +static inline void arch_spin_unlock(arch_spinlock_t * lock) { mb(); lock->lock = 0; } -static inline void __raw_spin_lock(raw_spinlock_t * lock) +static inline void arch_spin_lock(arch_spinlock_t * lock) { long tmp; @@ -43,24 +43,24 @@ static inline void __raw_spin_lock(raw_spinlock_t * lock) : "m"(lock->lock) : "memory"); } -static inline int __raw_spin_trylock(raw_spinlock_t *lock) +static inline int arch_spin_trylock(arch_spinlock_t *lock) { return !test_and_set_bit(0, &lock->lock); } /***********************************************************/ -static inline int __raw_read_can_lock(raw_rwlock_t *lock) +static inline int arch_read_can_lock(arch_rwlock_t *lock) { return (lock->lock & 1) == 0; } -static inline int __raw_write_can_lock(raw_rwlock_t *lock) +static inline int arch_write_can_lock(arch_rwlock_t *lock) { return lock->lock == 0; } -static inline void __raw_read_lock(raw_rwlock_t *lock) +static inline void arch_read_lock(arch_rwlock_t *lock) { long regx; @@ -80,7 +80,7 @@ static inline void __raw_read_lock(raw_rwlock_t *lock) : "m" (*lock) : "memory"); } -static inline void __raw_write_lock(raw_rwlock_t *lock) +static inline void arch_write_lock(arch_rwlock_t *lock) { long regx; @@ -100,7 +100,7 @@ static inline void __raw_write_lock(raw_rwlock_t *lock) : "m" (*lock) : "memory"); } -static inline int __raw_read_trylock(raw_rwlock_t * lock) +static inline int arch_read_trylock(arch_rwlock_t * lock) { long regx; int success; @@ -122,7 +122,7 @@ static 
inline int __raw_read_trylock(raw_rwlock_t * lock) return success; } -static inline int __raw_write_trylock(raw_rwlock_t * lock) +static inline int arch_write_trylock(arch_rwlock_t * lock) { long regx; int success; @@ -144,7 +144,7 @@ static inline int __raw_write_trylock(raw_rwlock_t * lock) return success; } -static inline void __raw_read_unlock(raw_rwlock_t * lock) +static inline void arch_read_unlock(arch_rwlock_t * lock) { long regx; __asm__ __volatile__( @@ -160,17 +160,17 @@ static inline void __raw_read_unlock(raw_rwlock_t * lock) : "m" (*lock) : "memory"); } -static inline void __raw_write_unlock(raw_rwlock_t * lock) +static inline void arch_write_unlock(arch_rwlock_t * lock) { mb(); lock->lock = 0; } -#define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock) -#define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock) +#define arch_read_lock_flags(lock, flags) arch_read_lock(lock) +#define arch_write_lock_flags(lock, flags) arch_write_lock(lock) -#define _raw_spin_relax(lock) cpu_relax() -#define _raw_read_relax(lock) cpu_relax() -#define _raw_write_relax(lock) cpu_relax() +#define arch_spin_relax(lock) cpu_relax() +#define arch_read_relax(lock) cpu_relax() +#define arch_write_relax(lock) cpu_relax() #endif /* _ALPHA_SPINLOCK_H */ diff --git a/arch/alpha/include/asm/spinlock_types.h b/arch/alpha/include/asm/spinlock_types.h index 8141eb5ebf0..54c2afce0a1 100644 --- a/arch/alpha/include/asm/spinlock_types.h +++ b/arch/alpha/include/asm/spinlock_types.h @@ -7,14 +7,14 @@ typedef struct { volatile unsigned int lock; -} raw_spinlock_t; +} arch_spinlock_t; -#define __RAW_SPIN_LOCK_UNLOCKED { 0 } +#define __ARCH_SPIN_LOCK_UNLOCKED { 0 } typedef struct { volatile unsigned int lock; -} raw_rwlock_t; +} arch_rwlock_t; -#define __RAW_RW_LOCK_UNLOCKED { 0 } +#define __ARCH_RW_LOCK_UNLOCKED { 0 } #endif diff --git a/arch/alpha/kernel/core_t2.c b/arch/alpha/kernel/core_t2.c index d9980d47ab8..e6d90568b65 100644 --- a/arch/alpha/kernel/core_t2.c +++ b/arch/alpha/kernel/core_t2.c @@ -74,7 +74,7 @@ # define DBG(args) #endif -DEFINE_SPINLOCK(t2_hae_lock); +DEFINE_RAW_SPINLOCK(t2_hae_lock); static volatile unsigned int t2_mcheck_any_expected; static volatile unsigned int t2_mcheck_last_taken; diff --git a/arch/alpha/kernel/irq.c b/arch/alpha/kernel/irq.c index c0de072b830..5f2cf23c464 100644 --- a/arch/alpha/kernel/irq.c +++ b/arch/alpha/kernel/irq.c @@ -81,7 +81,7 @@ show_interrupts(struct seq_file *p, void *v) #endif if (irq < ACTUAL_NR_IRQS) { - spin_lock_irqsave(&irq_desc[irq].lock, flags); + raw_spin_lock_irqsave(&irq_desc[irq].lock, flags); action = irq_desc[irq].action; if (!action) goto unlock; @@ -105,7 +105,7 @@ show_interrupts(struct seq_file *p, void *v) seq_putc(p, '\n'); unlock: - spin_unlock_irqrestore(&irq_desc[irq].lock, flags); + raw_spin_unlock_irqrestore(&irq_desc[irq].lock, flags); } else if (irq == ACTUAL_NR_IRQS) { #ifdef CONFIG_SMP seq_puts(p, "IPI: "); diff --git a/arch/alpha/kernel/srm_env.c b/arch/alpha/kernel/srm_env.c index d12af472e1c..dbbf04f9230 100644 --- a/arch/alpha/kernel/srm_env.c +++ b/arch/alpha/kernel/srm_env.c @@ -33,6 +33,7 @@ #include <linux/module.h> #include <linux/init.h> #include <linux/proc_fs.h> +#include <linux/seq_file.h> #include <asm/console.h> #include <asm/uaccess.h> #include <asm/machvec.h> @@ -79,42 +80,41 @@ static srm_env_t srm_named_entries[] = { static srm_env_t srm_numbered_entries[256]; -static int -srm_env_read(char *page, char **start, off_t off, int count, int *eof, - void *data) +static int srm_env_proc_show(struct 
seq_file *m, void *v) { - int nbytes; unsigned long ret; srm_env_t *entry; + char *page; - if (off != 0) { - *eof = 1; - return 0; - } + entry = (srm_env_t *)m->private; + page = (char *)__get_free_page(GFP_USER); + if (!page) + return -ENOMEM; - entry = (srm_env_t *) data; - ret = callback_getenv(entry->id, page, count); + ret = callback_getenv(entry->id, page, PAGE_SIZE); if ((ret >> 61) == 0) { - nbytes = (int) ret; - *eof = 1; + seq_write(m, page, ret); + ret = 0; } else - nbytes = -EFAULT; + ret = -EFAULT; + free_page((unsigned long)page); + return ret; +} - return nbytes; +static int srm_env_proc_open(struct inode *inode, struct file *file) +{ + return single_open(file, srm_env_proc_show, PDE(inode)->data); } -static int -srm_env_write(struct file *file, const char __user *buffer, unsigned long count, - void *data) +static ssize_t srm_env_proc_write(struct file *file, const char __user *buffer, + size_t count, loff_t *pos) { int res; - srm_env_t *entry; + srm_env_t *entry = PDE(file->f_path.dentry->d_inode)->data; char *buf = (char *) __get_free_page(GFP_USER); unsigned long ret1, ret2; - entry = (srm_env_t *) data; - if (!buf) return -ENOMEM; @@ -140,6 +140,15 @@ srm_env_write(struct file *file, const char __user *buffer, unsigned long count, return res; } +static const struct file_operations srm_env_proc_fops = { + .owner = THIS_MODULE, + .open = srm_env_proc_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, + .write = srm_env_proc_write, +}; + static void srm_env_cleanup(void) { @@ -245,15 +254,10 @@ srm_env_init(void) */ entry = srm_named_entries; while (entry->name && entry->id) { - entry->proc_entry = create_proc_entry(entry->name, - 0644, named_dir); + entry->proc_entry = proc_create_data(entry->name, 0644, named_dir, + &srm_env_proc_fops, entry); if (!entry->proc_entry) goto cleanup; - - entry->proc_entry->data = (void *) entry; - entry->proc_entry->read_proc = srm_env_read; - entry->proc_entry->write_proc = srm_env_write; - entry++; } @@ -264,15 +268,12 @@ srm_env_init(void) entry = &srm_numbered_entries[var_num]; entry->name = number[var_num]; - entry->proc_entry = create_proc_entry(entry->name, - 0644, numbered_dir); + entry->proc_entry = proc_create_data(entry->name, 0644, numbered_dir, + &srm_env_proc_fops, entry); if (!entry->proc_entry) goto cleanup; entry->id = var_num; - entry->proc_entry->data = (void *) entry; - entry->proc_entry->read_proc = srm_env_read; - entry->proc_entry->write_proc = srm_env_write; } printk(KERN_INFO "%s: version %s loaded successfully\n", NAME, diff --git a/arch/arm/include/asm/mach/irq.h b/arch/arm/include/asm/mach/irq.h index acac5302e4e..8920b2d6e3b 100644 --- a/arch/arm/include/asm/mach/irq.h +++ b/arch/arm/include/asm/mach/irq.h @@ -26,9 +26,9 @@ extern int show_fiq_list(struct seq_file *, void *); */ #define do_bad_IRQ(irq,desc) \ do { \ - spin_lock(&desc->lock); \ + raw_spin_lock(&desc->lock); \ handle_bad_irq(irq, desc); \ - spin_unlock(&desc->lock); \ + raw_spin_unlock(&desc->lock); \ } while(0) #endif diff --git a/arch/arm/include/asm/spinlock.h b/arch/arm/include/asm/spinlock.h index c13681ac1ed..c91c64cab92 100644 --- a/arch/arm/include/asm/spinlock.h +++ b/arch/arm/include/asm/spinlock.h @@ -17,13 +17,13 @@ * Locked value: 1 */ -#define __raw_spin_is_locked(x) ((x)->lock != 0) -#define __raw_spin_unlock_wait(lock) \ - do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0) +#define arch_spin_is_locked(x) ((x)->lock != 0) +#define arch_spin_unlock_wait(lock) \ + do { while 
(arch_spin_is_locked(lock)) cpu_relax(); } while (0) -#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock) +#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock) -static inline void __raw_spin_lock(raw_spinlock_t *lock) +static inline void arch_spin_lock(arch_spinlock_t *lock) { unsigned long tmp; @@ -43,7 +43,7 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock) smp_mb(); } -static inline int __raw_spin_trylock(raw_spinlock_t *lock) +static inline int arch_spin_trylock(arch_spinlock_t *lock) { unsigned long tmp; @@ -63,7 +63,7 @@ static inline int __raw_spin_trylock(raw_spinlock_t *lock) } } -static inline void __raw_spin_unlock(raw_spinlock_t *lock) +static inline void arch_spin_unlock(arch_spinlock_t *lock) { smp_mb(); @@ -86,7 +86,7 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lock) * just write zero since the lock is exclusively held. */ -static inline void __raw_write_lock(raw_rwlock_t *rw) +static inline void arch_write_lock(arch_rwlock_t *rw) { unsigned long tmp; @@ -106,7 +106,7 @@ static inline void __raw_write_lock(raw_rwlock_t *rw) smp_mb(); } -static inline int __raw_write_trylock(raw_rwlock_t *rw) +static inline int arch_write_trylock(arch_rwlock_t *rw) { unsigned long tmp; @@ -126,7 +126,7 @@ static inline int __raw_write_trylock(raw_rwlock_t *rw) } } -static inline void __raw_write_unlock(raw_rwlock_t *rw) +static inline void arch_write_unlock(arch_rwlock_t *rw) { smp_mb(); @@ -142,7 +142,7 @@ static inline void __raw_write_unlock(raw_rwlock_t *rw) } /* write_can_lock - would write_trylock() succeed? */ -#define __raw_write_can_lock(x) ((x)->lock == 0) +#define arch_write_can_lock(x) ((x)->lock == 0) /* * Read locks are a bit more hairy: @@ -156,7 +156,7 @@ static inline void __raw_write_unlock(raw_rwlock_t *rw) * currently active. However, we know we won't have any write * locks. */ -static inline void __raw_read_lock(raw_rwlock_t *rw) +static inline void arch_read_lock(arch_rwlock_t *rw) { unsigned long tmp, tmp2; @@ -176,7 +176,7 @@ static inline void __raw_read_lock(raw_rwlock_t *rw) smp_mb(); } -static inline void __raw_read_unlock(raw_rwlock_t *rw) +static inline void arch_read_unlock(arch_rwlock_t *rw) { unsigned long tmp, tmp2; @@ -198,7 +198,7 @@ static inline void __raw_read_unlock(raw_rwlock_t *rw) : "cc"); } -static inline int __raw_read_trylock(raw_rwlock_t *rw) +static inline int arch_read_trylock(arch_rwlock_t *rw) { unsigned long tmp, tmp2 = 1; @@ -215,13 +215,13 @@ static inline int __raw_read_trylock(raw_rwlock_t *rw) } /* read_can_lock - would read_trylock() succeed? 
*/ -#define __raw_read_can_lock(x) ((x)->lock < 0x80000000) +#define arch_read_can_lock(x) ((x)->lock < 0x80000000) -#define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock) -#define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock) +#define arch_read_lock_flags(lock, flags) arch_read_lock(lock) +#define arch_write_lock_flags(lock, flags) arch_write_lock(lock) -#define _raw_spin_relax(lock) cpu_relax() -#define _raw_read_relax(lock) cpu_relax() -#define _raw_write_relax(lock) cpu_relax() +#define arch_spin_relax(lock) cpu_relax() +#define arch_read_relax(lock) cpu_relax() +#define arch_write_relax(lock) cpu_relax() #endif /* __ASM_SPINLOCK_H */ diff --git a/arch/arm/include/asm/spinlock_types.h b/arch/arm/include/asm/spinlock_types.h index 43e83f6d2ee..d14d197ae04 100644 --- a/arch/arm/include/asm/spinlock_types.h +++ b/arch/arm/include/asm/spinlock_types.h @@ -7,14 +7,14 @@ typedef struct { volatile unsigned int lock; -} raw_spinlock_t; +} arch_spinlock_t; -#define __RAW_SPIN_LOCK_UNLOCKED { 0 } +#define __ARCH_SPIN_LOCK_UNLOCKED { 0 } typedef struct { volatile unsigned int lock; -} raw_rwlock_t; +} arch_rwlock_t; -#define __RAW_RW_LOCK_UNLOCKED { 0 } +#define __ARCH_RW_LOCK_UNLOCKED { 0 } #endif diff --git a/arch/arm/kernel/irq.c b/arch/arm/kernel/irq.c index c9a8619f385..b7cb45bb91e 100644 --- a/arch/arm/kernel/irq.c +++ b/arch/arm/kernel/irq.c @@ -69,7 +69,7 @@ int show_interrupts(struct seq_file *p, void *v) } if (i < NR_IRQS) { - spin_lock_irqsave(&irq_desc[i].lock, flags); + raw_spin_lock_irqsave(&irq_desc[i].lock, flags); action = irq_desc[i].action; if (!action) goto unlock; @@ -84,7 +84,7 @@ int show_interrupts(struct seq_file *p, void *v) seq_putc(p, '\n'); unlock: - spin_unlock_irqrestore(&irq_desc[i].lock, flags); + raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags); } else if (i == NR_IRQS) { #ifdef CONFIG_FIQ show_fiq_list(p, v); @@ -139,7 +139,7 @@ void set_irq_flags(unsigned int irq, unsigned int iflags) } desc = irq_desc + irq; - spin_lock_irqsave(&desc->lock, flags); + raw_spin_lock_irqsave(&desc->lock, flags); desc->status |= IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN; if (iflags & IRQF_VALID) desc->status &= ~IRQ_NOREQUEST; @@ -147,7 +147,7 @@ void set_irq_flags(unsigned int irq, unsigned int iflags) desc->status &= ~IRQ_NOPROBE; if (!(iflags & IRQF_NOAUTOEN)) desc->status &= ~IRQ_NOAUTOEN; - spin_unlock_irqrestore(&desc->lock, flags); + raw_spin_unlock_irqrestore(&desc->lock, flags); } void __init init_IRQ(void) @@ -166,9 +166,9 @@ static void route_irq(struct irq_desc *desc, unsigned int irq, unsigned int cpu) { pr_debug("IRQ%u: moving from cpu%u to cpu%u\n", irq, desc->node, cpu); - spin_lock_irq(&desc->lock); + raw_spin_lock_irq(&desc->lock); desc->chip->set_affinity(irq, cpumask_of(cpu)); - spin_unlock_irq(&desc->lock); + raw_spin_unlock_irq(&desc->lock); } /* diff --git a/arch/arm/mach-at91/include/mach/atmel-mci.h b/arch/arm/mach-at91/include/mach/atmel-mci.h new file mode 100644 index 00000000000..998cb0c0713 --- /dev/null +++ b/arch/arm/mach-at91/include/mach/atmel-mci.h @@ -0,0 +1,24 @@ +#ifndef __MACH_ATMEL_MCI_H +#define __MACH_ATMEL_MCI_H + +#include <mach/at_hdmac.h> + +/** + * struct mci_dma_data - DMA data for MCI interface + */ +struct mci_dma_data { + struct at_dma_slave sdata; +}; + +/* accessor macros */ +#define slave_data_ptr(s) (&(s)->sdata) +#define find_slave_dev(s) ((s)->sdata.dma_dev) + +#define setup_dma_addr(s, t, r) do { \ + if (s) { \ + (s)->sdata.tx_reg = (t); \ + (s)->sdata.rx_reg = (r); \ + } \ +} while (0) + +#endif 
/* __MACH_ATMEL_MCI_H */ diff --git a/arch/arm/mach-ns9xxx/irq.c b/arch/arm/mach-ns9xxx/irq.c index feb0e54a91d..038f24d4702 100644 --- a/arch/arm/mach-ns9xxx/irq.c +++ b/arch/arm/mach-ns9xxx/irq.c @@ -66,7 +66,7 @@ static void handle_prio_irq(unsigned int irq, struct irq_desc *desc) struct irqaction *action; irqreturn_t action_ret; - spin_lock(&desc->lock); + raw_spin_lock(&desc->lock); BUG_ON(desc->status & IRQ_INPROGRESS); @@ -78,7 +78,7 @@ static void handle_prio_irq(unsigned int irq, struct irq_desc *desc) goto out_mask; desc->status |= IRQ_INPROGRESS; - spin_unlock(&desc->lock); + raw_spin_unlock(&desc->lock); action_ret = handle_IRQ_event(irq, action); @@ -87,7 +87,7 @@ static void handle_prio_irq(unsigned int irq, struct irq_desc *desc) * Maybe this function should go to kernel/irq/chip.c? */ note_interrupt(irq, desc, action_ret); - spin_lock(&desc->lock); + raw_spin_lock(&desc->lock); desc->status &= ~IRQ_INPROGRESS; if (desc->status & IRQ_DISABLED) @@ -97,7 +97,7 @@ out_mask: /* ack unconditionally to unmask lower prio irqs */ desc->chip->ack(irq); - spin_unlock(&desc->lock); + raw_spin_unlock(&desc->lock); } #define handle_irq handle_prio_irq #endif diff --git a/arch/arm/mach-s3c2442/mach-gta02.c b/arch/arm/mach-s3c2442/mach-gta02.c index f76d6ff4aeb..0b4a3a03071 100644 --- a/arch/arm/mach-s3c2442/mach-gta02.c +++ b/arch/arm/mach-s3c2442/mach-gta02.c @@ -268,6 +268,9 @@ struct pcf50633_platform_data gta02_pcf_pdata = { .batteries = gta02_batteries, .num_batteries = ARRAY_SIZE(gta02_batteries), + + .charger_reference_current_ma = 1000, + .reg_init_data = { [PCF50633_REGULATOR_AUTO] = { .constraints = { diff --git a/arch/arm/plat-omap/debug-leds.c b/arch/arm/plat-omap/debug-leds.c index 6c768b71ad6..53fcef7c520 100644 --- a/arch/arm/plat-omap/debug-leds.c +++ b/arch/arm/plat-omap/debug-leds.c @@ -293,7 +293,7 @@ static int fpga_resume_noirq(struct device *dev) return 0; } -static struct dev_pm_ops fpga_dev_pm_ops = { +static const struct dev_pm_ops fpga_dev_pm_ops = { .suspend_noirq = fpga_suspend_noirq, .resume_noirq = fpga_resume_noirq, }; diff --git a/arch/arm/plat-omap/gpio.c b/arch/arm/plat-omap/gpio.c index 055160e0620..04846811d0a 100644 --- a/arch/arm/plat-omap/gpio.c +++ b/arch/arm/plat-omap/gpio.c @@ -1431,7 +1431,7 @@ static int omap_mpuio_resume_noirq(struct device *dev) return 0; } -static struct dev_pm_ops omap_mpuio_dev_pm_ops = { +static const struct dev_pm_ops omap_mpuio_dev_pm_ops = { .suspend_noirq = omap_mpuio_suspend_noirq, .resume_noirq = omap_mpuio_resume_noirq, }; diff --git a/arch/avr32/Kconfig b/arch/avr32/Kconfig index d856354f427..f2b31933318 100644 --- a/arch/avr32/Kconfig +++ b/arch/avr32/Kconfig @@ -112,6 +112,11 @@ config CPU_AT32AP7002 bool select CPU_AT32AP700X +# AP700X boards +config BOARD_ATNGW100_COMMON + bool + select CPU_AT32AP7000 + choice prompt "AVR32 board type" default BOARD_ATSTK1000 @@ -119,9 +124,13 @@ choice config BOARD_ATSTK1000 bool "ATSTK1000 evaluation board" -config BOARD_ATNGW100 +config BOARD_ATNGW100_MKI bool "ATNGW100 Network Gateway" - select CPU_AT32AP7000 + select BOARD_ATNGW100_COMMON + +config BOARD_ATNGW100_MKII + bool "ATNGW100 mkII Network Gateway" + select BOARD_ATNGW100_COMMON config BOARD_HAMMERHEAD bool "Hammerhead board" diff --git a/arch/avr32/Makefile b/arch/avr32/Makefile index c21a3290d54..ead8a75203a 100644 --- a/arch/avr32/Makefile +++ b/arch/avr32/Makefile @@ -32,7 +32,7 @@ head-$(CONFIG_LOADER_U_BOOT) += arch/avr32/boot/u-boot/head.o head-y += arch/avr32/kernel/head.o core-y += $(machdirs) 
core-$(CONFIG_BOARD_ATSTK1000) += arch/avr32/boards/atstk1000/ -core-$(CONFIG_BOARD_ATNGW100) += arch/avr32/boards/atngw100/ +core-$(CONFIG_BOARD_ATNGW100_COMMON) += arch/avr32/boards/atngw100/ core-$(CONFIG_BOARD_HAMMERHEAD) += arch/avr32/boards/hammerhead/ core-$(CONFIG_BOARD_FAVR_32) += arch/avr32/boards/favr-32/ core-$(CONFIG_BOARD_MERISC) += arch/avr32/boards/merisc/ diff --git a/arch/avr32/boards/atngw100/Kconfig b/arch/avr32/boards/atngw100/Kconfig index be27a0218ab..4e55617ade2 100644 --- a/arch/avr32/boards/atngw100/Kconfig +++ b/arch/avr32/boards/atngw100/Kconfig @@ -1,6 +1,17 @@ # NGW100 customization -if BOARD_ATNGW100 +if BOARD_ATNGW100_COMMON + +config BOARD_ATNGW100_MKII_LCD + bool "Enable ATNGW100 mkII LCD interface" + depends on BOARD_ATNGW100_MKII + help + This enables the LCD controller (LCDC) in the AT32AP7000. Since the + LCDC is multiplexed with the MACB1 (LAN) Ethernet port, only one can + be enabled at a time. + + This choice enables the LCDC and disables the MACB1 interface marked + LAN on the PCB. choice prompt "Select an NGW100 add-on board to support" @@ -11,15 +22,11 @@ config BOARD_ATNGW100_ADDON_NONE config BOARD_ATNGW100_EVKLCD10X bool "EVKLCD10X addon board" + depends on BOARD_ATNGW100_MKI || BOARD_ATNGW100_MKII_LCD help This enables support for the EVKLCD100 (QVGA) or EVKLCD101 (VGA) - addon board for the NGW100. By enabling this the LCD controller and - AC97 controller is added as platform devices. - - This choice disables the detect pin and the write-protect pin for the - MCI platform device, since it conflicts with the LCD platform device. - The MCI pins can be reenabled by editing the "add device function" but - this may break the setup for other displays that use these pins. + addon board for the NGW100 and NGW100 mkII. By enabling this, the LCD + controller and AC97 controller are added as platform devices.
config BOARD_ATNGW100_MRMT bool "Mediama RMT1/2 add-on board" @@ -55,4 +62,4 @@ if BOARD_ATNGW100_MRMT source "arch/avr32/boards/atngw100/Kconfig_mrmt" endif -endif # BOARD_ATNGW100 +endif # BOARD_ATNGW100_COMMON diff --git a/arch/avr32/boards/atngw100/evklcd10x.c b/arch/avr32/boards/atngw100/evklcd10x.c index 00337112c5a..20388750d56 100644 --- a/arch/avr32/boards/atngw100/evklcd10x.c +++ b/arch/avr32/boards/atngw100/evklcd10x.c @@ -164,7 +164,12 @@ static int __init atevklcd10x_init(void) at32_add_device_lcdc(0, &atevklcd10x_lcdc_data, fbmem_start, fbmem_size, - ATMEL_LCDC_ALT_18BIT | ATMEL_LCDC_PE_DVAL); +#ifdef CONFIG_BOARD_ATNGW100_MKII + ATMEL_LCDC_PRI_18BIT | ATMEL_LCDC_PC_DVAL +#else + ATMEL_LCDC_ALT_18BIT | ATMEL_LCDC_PE_DVAL +#endif + ); at32_add_device_ac97c(0, &ac97c0_data, AC97C_BOTH); diff --git a/arch/avr32/boards/atngw100/mrmt.c b/arch/avr32/boards/atngw100/mrmt.c index bf78e516a85..7919be311f4 100644 --- a/arch/avr32/boards/atngw100/mrmt.c +++ b/arch/avr32/boards/atngw100/mrmt.c @@ -302,6 +302,7 @@ static int __init mrmt1_init(void) at32_select_periph( GPIO_PIOB_BASE, 1 << (PB_EXTINT_BASE+TS_IRQ), GPIO_PERIPH_A, AT32_GPIOF_DEGLITCH); set_irq_type( AT32_EXTINT(TS_IRQ), IRQ_TYPE_EDGE_FALLING ); + at32_spi_setup_slaves(0,spi01_board_info,ARRAY_SIZE(spi01_board_info)); spi_register_board_info(spi01_board_info,ARRAY_SIZE(spi01_board_info)); #endif diff --git a/arch/avr32/boards/atngw100/setup.c b/arch/avr32/boards/atngw100/setup.c index bc299fbbeb4..8c6a2440e34 100644 --- a/arch/avr32/boards/atngw100/setup.c +++ b/arch/avr32/boards/atngw100/setup.c @@ -20,6 +20,7 @@ #include <linux/leds.h> #include <linux/spi/spi.h> #include <linux/atmel-mci.h> +#include <linux/usb/atmel_usba_udc.h> #include <asm/io.h> #include <asm/setup.h> @@ -36,6 +37,75 @@ unsigned long at32_board_osc_rates[3] = { [2] = 12000000, /* 12 MHz on osc1 */ }; +/* + * The ATNGW100 mkII is very similar to the ATNGW100. Both have the AT32AP7000 + * chip on board; the difference is that the ATNGW100 mkII has 128 MB 32-bit + * SDRAM (the ATNGW100 has 32 MB 16-bit SDRAM) and 256 MB 16-bit NAND flash + * (the ATNGW100 has none). + * + * The RAM difference is handled by the boot loader, so the only difference we + * end up handling here is the NAND flash, EBI pin reservation and whether + * the LCDC or MACB1 should be enabled.
+ */ +#ifdef CONFIG_BOARD_ATNGW100_MKII +#include <linux/mtd/partitions.h> +#include <mach/smc.h> + +static struct smc_timing nand_timing __initdata = { + .ncs_read_setup = 0, + .nrd_setup = 10, + .ncs_write_setup = 0, + .nwe_setup = 10, + + .ncs_read_pulse = 30, + .nrd_pulse = 15, + .ncs_write_pulse = 30, + .nwe_pulse = 15, + + .read_cycle = 30, + .write_cycle = 30, + + .ncs_read_recover = 0, + .nrd_recover = 15, + .ncs_write_recover = 0, + /* WE# high -> RE# low min 60 ns */ + .nwe_recover = 50, +}; + +static struct smc_config nand_config __initdata = { + .bus_width = 2, + .nrd_controlled = 1, + .nwe_controlled = 1, + .nwait_mode = 0, + .byte_write = 0, + .tdf_cycles = 2, + .tdf_mode = 0, +}; + +static struct mtd_partition nand_partitions[] = { + { + .name = "main", + .offset = 0x00000000, + .size = MTDPART_SIZ_FULL, + }, +}; + +static struct mtd_partition *nand_part_info(int size, int *num_partitions) +{ + *num_partitions = ARRAY_SIZE(nand_partitions); + return nand_partitions; +} + +static struct atmel_nand_data atngw100mkii_nand_data __initdata = { + .cle = 21, + .ale = 22, + .rdy_pin = GPIO_PIN_PB(28), + .enable_pin = GPIO_PIN_PE(23), + .bus_width_16 = true, + .partition_info = nand_part_info, +}; +#endif + /* Initialized by bootloader-specific startup code. */ struct tag *bootloader_tags __initdata; @@ -56,9 +126,9 @@ static struct spi_board_info spi0_board_info[] __initdata = { static struct mci_platform_data __initdata mci0_data = { .slot[0] = { .bus_width = 4, -#if defined(CONFIG_BOARD_ATNGW100_EVKLCD10X) || defined(CONFIG_BOARD_ATNGW100_MRMT1) - .detect_pin = GPIO_PIN_NONE, - .wp_pin = GPIO_PIN_NONE, +#if defined(CONFIG_BOARD_ATNGW100_MKII) + .detect_pin = GPIO_PIN_PC(25), + .wp_pin = GPIO_PIN_PE(22), #else .detect_pin = GPIO_PIN_PC(25), .wp_pin = GPIO_PIN_PE(0), @@ -66,6 +136,14 @@ static struct mci_platform_data __initdata mci0_data = { }, }; +static struct usba_platform_data atngw100_usba_data __initdata = { +#if defined(CONFIG_BOARD_ATNGW100_MKII) + .vbus_pin = GPIO_PIN_PE(26), +#else + .vbus_pin = -ENODEV, +#endif +}; + /* * The next two functions should go away as the boot loader is * supposed to initialize the macb address registers with a valid @@ -173,18 +251,27 @@ static int __init atngw100_init(void) unsigned i; /* - * ATNGW100 uses 16-bit SDRAM interface, so we don't need to - * reserve any pins for it. + * ATNGW100 mkII uses 32-bit SDRAM interface. Reserve the + * SDRAM-specific pins so that nobody messes with them. */ +#ifdef CONFIG_BOARD_ATNGW100_MKII + at32_reserve_pin(GPIO_PIOE_BASE, ATMEL_EBI_PE_DATA_ALL); + + smc_set_timing(&nand_config, &nand_timing); + smc_set_configuration(3, &nand_config); + at32_add_device_nand(0, &atngw100mkii_nand_data); +#endif at32_add_device_usart(0); set_hw_addr(at32_add_device_eth(0, ð_data[0])); +#ifndef CONFIG_BOARD_ATNGW100_MKII_LCD set_hw_addr(at32_add_device_eth(1, ð_data[1])); +#endif at32_add_device_spi(0, spi0_board_info, ARRAY_SIZE(spi0_board_info)); at32_add_device_mci(0, &mci0_data); - at32_add_device_usba(0, NULL); + at32_add_device_usba(0, &atngw100_usba_data); for (i = 0; i < ARRAY_SIZE(ngw_leds); i++) { at32_select_gpio(ngw_leds[i].gpio, @@ -194,10 +281,14 @@ static int __init atngw100_init(void) /* all these i2c/smbus pins should have external pullups for * open-drain sharing among all I2C devices. SDA and SCL do; - * PB28/EXTINT3 doesn't; it should be SMBALERT# (for PMBus), - * but it's not available off-board. 
+ * PB28/EXTINT3 (ATNGW100) and PE21 (ATNGW100 mkII) doesn't; it should + * be SMBALERT# (for PMBus), but it's not available off-board. */ +#ifdef CONFIG_BOARD_ATNGW100_MKII + at32_select_periph(GPIO_PIOE_BASE, 1 << 21, 0, AT32_GPIOF_PULLUP); +#else at32_select_periph(GPIO_PIOB_BASE, 1 << 28, 0, AT32_GPIOF_PULLUP); +#endif at32_select_gpio(i2c_gpio_data.sda_pin, AT32_GPIOF_MULTIDRV | AT32_GPIOF_OUTPUT | AT32_GPIOF_HIGH); at32_select_gpio(i2c_gpio_data.scl_pin, @@ -211,14 +302,22 @@ postcore_initcall(atngw100_init); static int __init atngw100_arch_init(void) { - /* PB30 is the otherwise unused jumper on the mainboard, with an - * external pullup; the jumper grounds it. Use it however you - * like, including letting U-Boot or Linux tweak boot sequences. + /* PB30 (ATNGW100) and PE30 (ATNGW100 mkII) is the otherwise unused + * jumper on the mainboard, with an external pullup; the jumper grounds + * it. Use it however you like, including letting U-Boot or Linux tweak + * boot sequences. */ +#ifdef CONFIG_BOARD_ATNGW100_MKII + at32_select_gpio(GPIO_PIN_PE(30), 0); + gpio_request(GPIO_PIN_PE(30), "j15"); + gpio_direction_input(GPIO_PIN_PE(30)); + gpio_export(GPIO_PIN_PE(30), false); +#else at32_select_gpio(GPIO_PIN_PB(30), 0); gpio_request(GPIO_PIN_PB(30), "j15"); gpio_direction_input(GPIO_PIN_PB(30)); gpio_export(GPIO_PIN_PB(30), false); +#endif /* set_irq_type() after the arch_initcall for EIC has run, and * before the I2C subsystem could try using this IRQ. diff --git a/arch/avr32/configs/atngw100_defconfig b/arch/avr32/configs/atngw100_defconfig index 574aca97533..32205c9d37d 100644 --- a/arch/avr32/configs/atngw100_defconfig +++ b/arch/avr32/configs/atngw100_defconfig @@ -1,7 +1,7 @@ # # Automatically generated make config: don't edit -# Linux kernel version: 2.6.27-rc1 -# Tue Aug 5 16:00:47 2008 +# Linux kernel version: 2.6.32-rc5 +# Thu Oct 29 09:39:22 2009 # CONFIG_AVR32=y CONFIG_GENERIC_GPIO=y @@ -21,6 +21,7 @@ CONFIG_GENERIC_HWEIGHT=y CONFIG_GENERIC_CALIBRATE_DELAY=y CONFIG_GENERIC_BUG=y CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config" +CONFIG_CONSTRUCTORS=y # # General setup @@ -34,22 +35,37 @@ CONFIG_SWAP=y CONFIG_SYSVIPC=y CONFIG_SYSVIPC_SYSCTL=y CONFIG_POSIX_MQUEUE=y +CONFIG_POSIX_MQUEUE_SYSCTL=y CONFIG_BSD_PROCESS_ACCT=y CONFIG_BSD_PROCESS_ACCT_V3=y # CONFIG_TASKSTATS is not set # CONFIG_AUDIT is not set + +# +# RCU Subsystem +# +CONFIG_TREE_RCU=y +# CONFIG_TREE_PREEMPT_RCU is not set +# CONFIG_RCU_TRACE is not set +CONFIG_RCU_FANOUT=32 +# CONFIG_RCU_FANOUT_EXACT is not set +# CONFIG_TREE_RCU_TRACE is not set # CONFIG_IKCONFIG is not set CONFIG_LOG_BUF_SHIFT=14 -# CONFIG_CGROUPS is not set # CONFIG_GROUP_SCHED is not set +# CONFIG_CGROUPS is not set CONFIG_SYSFS_DEPRECATED=y CONFIG_SYSFS_DEPRECATED_V2=y # CONFIG_RELAY is not set # CONFIG_NAMESPACES is not set CONFIG_BLK_DEV_INITRD=y CONFIG_INITRAMFS_SOURCE="" +CONFIG_RD_GZIP=y +# CONFIG_RD_BZIP2 is not set +# CONFIG_RD_LZMA is not set CONFIG_CC_OPTIMIZE_FOR_SIZE=y CONFIG_SYSCTL=y +CONFIG_ANON_INODES=y CONFIG_EMBEDDED=y # CONFIG_SYSCTL_SYSCALL is not set CONFIG_KALLSYMS=y @@ -59,38 +75,40 @@ CONFIG_HOTPLUG=y CONFIG_PRINTK=y CONFIG_BUG=y CONFIG_ELF_CORE=y -# CONFIG_COMPAT_BRK is not set # CONFIG_BASE_FULL is not set CONFIG_FUTEX=y -CONFIG_ANON_INODES=y CONFIG_EPOLL=y CONFIG_SIGNALFD=y CONFIG_TIMERFD=y CONFIG_EVENTFD=y CONFIG_SHMEM=y +CONFIG_AIO=y + +# +# Kernel Performance Events And Counters +# CONFIG_VM_EVENT_COUNTERS=y CONFIG_SLUB_DEBUG=y +# CONFIG_COMPAT_BRK is not set # CONFIG_SLAB is not set CONFIG_SLUB=y # 
CONFIG_SLOB is not set CONFIG_PROFILING=y -# CONFIG_MARKERS is not set +CONFIG_TRACEPOINTS=y CONFIG_OPROFILE=m CONFIG_HAVE_OPROFILE=y CONFIG_KPROBES=y -# CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS is not set -# CONFIG_HAVE_IOREMAP_PROT is not set CONFIG_HAVE_KPROBES=y -# CONFIG_HAVE_KRETPROBES is not set -# CONFIG_HAVE_ARCH_TRACEHOOK is not set -# CONFIG_HAVE_DMA_ATTRS is not set -# CONFIG_USE_GENERIC_SMP_HELPERS is not set CONFIG_HAVE_CLK=y -CONFIG_PROC_PAGE_MONITOR=y + +# +# GCOV-based kernel profiling +# +# CONFIG_GCOV_KERNEL is not set +CONFIG_SLOW_WORK=y # CONFIG_HAVE_GENERIC_DMA_COHERENT is not set CONFIG_SLABINFO=y CONFIG_RT_MUTEXES=y -# CONFIG_TINY_SHMEM is not set CONFIG_BASE_SMALL=1 CONFIG_MODULES=y # CONFIG_MODULE_FORCE_LOAD is not set @@ -98,11 +116,8 @@ CONFIG_MODULE_UNLOAD=y CONFIG_MODULE_FORCE_UNLOAD=y # CONFIG_MODVERSIONS is not set # CONFIG_MODULE_SRCVERSION_ALL is not set -CONFIG_KMOD=y CONFIG_BLOCK=y -# CONFIG_LBD is not set -# CONFIG_BLK_DEV_IO_TRACE is not set -# CONFIG_LSF is not set +CONFIG_LBDAF=y # CONFIG_BLK_DEV_BSG is not set # CONFIG_BLK_DEV_INTEGRITY is not set @@ -118,7 +133,7 @@ CONFIG_IOSCHED_CFQ=y CONFIG_DEFAULT_CFQ=y # CONFIG_DEFAULT_NOOP is not set CONFIG_DEFAULT_IOSCHED="cfq" -CONFIG_CLASSIC_RCU=y +CONFIG_FREEZER=y # # System Type and features @@ -133,8 +148,23 @@ CONFIG_PERFORMANCE_COUNTERS=y CONFIG_PLATFORM_AT32AP=y CONFIG_CPU_AT32AP700X=y CONFIG_CPU_AT32AP7000=y +CONFIG_BOARD_ATNGW100_COMMON=y # CONFIG_BOARD_ATSTK1000 is not set -CONFIG_BOARD_ATNGW100=y +CONFIG_BOARD_ATNGW100_MKI=y +# CONFIG_BOARD_ATNGW100_MKII is not set +# CONFIG_BOARD_HAMMERHEAD is not set +# CONFIG_BOARD_FAVR_32 is not set +# CONFIG_BOARD_MERISC is not set +# CONFIG_BOARD_MIMC200 is not set +# CONFIG_BOARD_ATSTK1002 is not set +# CONFIG_BOARD_ATSTK1003 is not set +# CONFIG_BOARD_ATSTK1004 is not set +# CONFIG_BOARD_ATSTK1006 is not set +# CONFIG_BOARD_ATSTK1000_J2_LED8 is not set +# CONFIG_BOARD_ATSTK1000_J2_RGB is not set +CONFIG_BOARD_ATNGW100_ADDON_NONE=y +# CONFIG_BOARD_ATNGW100_EVKLCD10X is not set +# CONFIG_BOARD_ATNGW100_MRMT is not set CONFIG_LOADER_U_BOOT=y # @@ -150,7 +180,7 @@ CONFIG_PREEMPT_NONE=y # CONFIG_PREEMPT_VOLUNTARY is not set # CONFIG_PREEMPT is not set CONFIG_QUICKLIST=y -# CONFIG_HAVE_ARCH_BOOTMEM_NODE is not set +# CONFIG_HAVE_ARCH_BOOTMEM is not set # CONFIG_ARCH_HAVE_MEMORY_PRESENT is not set # CONFIG_NEED_NODE_MEMMAP_SIZE is not set CONFIG_ARCH_FLATMEM_ENABLE=y @@ -162,14 +192,16 @@ CONFIG_FLATMEM_MANUAL=y # CONFIG_SPARSEMEM_MANUAL is not set CONFIG_FLATMEM=y CONFIG_FLAT_NODE_MEM_MAP=y -# CONFIG_SPARSEMEM_STATIC is not set -# CONFIG_SPARSEMEM_VMEMMAP_ENABLE is not set CONFIG_PAGEFLAGS_EXTENDED=y CONFIG_SPLIT_PTLOCK_CPUS=4 -# CONFIG_RESOURCES_64BIT is not set +# CONFIG_PHYS_ADDR_T_64BIT is not set CONFIG_ZONE_DMA_FLAG=0 CONFIG_NR_QUICK=2 CONFIG_VIRT_TO_BUS=y +CONFIG_HAVE_MLOCK=y +CONFIG_HAVE_MLOCKED_PAGE_BIT=y +# CONFIG_KSM is not set +CONFIG_DEFAULT_MMAP_MIN_ADDR=4096 # CONFIG_OWNERSHIP_TRACE is not set CONFIG_NMI_DEBUGGING=y # CONFIG_HZ_100 is not set @@ -177,7 +209,7 @@ CONFIG_HZ_250=y # CONFIG_HZ_300 is not set # CONFIG_HZ_1000 is not set CONFIG_HZ=250 -# CONFIG_SCHED_HRTICK is not set +CONFIG_SCHED_HRTICK=y CONFIG_CMDLINE="" # @@ -188,6 +220,7 @@ CONFIG_PM=y CONFIG_PM_SLEEP=y CONFIG_SUSPEND=y CONFIG_SUSPEND_FREEZER=y +# CONFIG_PM_RUNTIME is not set CONFIG_ARCH_SUSPEND_POSSIBLE=y # @@ -219,6 +252,8 @@ CONFIG_CPU_FREQ_AT32AP=y # Executable file formats # CONFIG_BINFMT_ELF=y +# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set +# CONFIG_HAVE_AOUT is not set # 
CONFIG_BINFMT_MISC is not set CONFIG_NET=y @@ -271,7 +306,6 @@ CONFIG_INET_TCP_DIAG=y CONFIG_TCP_CONG_CUBIC=y CONFIG_DEFAULT_TCP_CONG="cubic" # CONFIG_TCP_MD5SIG is not set -# CONFIG_IP_VS is not set CONFIG_IPV6=y # CONFIG_IPV6_PRIVACY is not set # CONFIG_IPV6_ROUTER_PREF is not set @@ -314,10 +348,12 @@ CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m CONFIG_NETFILTER_XT_MATCH_MARK=m CONFIG_NETFILTER_XT_MATCH_POLICY=m CONFIG_NETFILTER_XT_MATCH_STATE=m +# CONFIG_IP_VS is not set # # IP: Netfilter Configuration # +CONFIG_NF_DEFRAG_IPV4=m CONFIG_NF_CONNTRACK_IPV4=m CONFIG_NF_CONNTRACK_PROC_COMPAT=y CONFIG_IP_NF_IPTABLES=m @@ -343,16 +379,18 @@ CONFIG_IP_NF_MANGLE=m CONFIG_NF_CONNTRACK_IPV6=m CONFIG_IP6_NF_IPTABLES=m CONFIG_IP6_NF_MATCH_IPV6HEADER=m -CONFIG_IP6_NF_FILTER=m CONFIG_IP6_NF_TARGET_LOG=m +CONFIG_IP6_NF_FILTER=m CONFIG_IP6_NF_TARGET_REJECT=m CONFIG_IP6_NF_MANGLE=m # CONFIG_IP_DCCP is not set # CONFIG_IP_SCTP is not set +# CONFIG_RDS is not set # CONFIG_TIPC is not set # CONFIG_ATM is not set CONFIG_STP=m CONFIG_BRIDGE=m +# CONFIG_NET_DSA is not set CONFIG_VLAN_8021Q=m # CONFIG_VLAN_8021Q_GVRP is not set # CONFIG_DECNET is not set @@ -364,26 +402,33 @@ CONFIG_LLC=m # CONFIG_LAPB is not set # CONFIG_ECONET is not set # CONFIG_WAN_ROUTER is not set +# CONFIG_PHONET is not set +# CONFIG_IEEE802154 is not set # CONFIG_NET_SCHED is not set +# CONFIG_DCB is not set # # Network testing # # CONFIG_NET_PKTGEN is not set # CONFIG_NET_TCPPROBE is not set +# CONFIG_NET_DROP_MONITOR is not set # CONFIG_HAMRADIO is not set # CONFIG_CAN is not set # CONFIG_IRDA is not set # CONFIG_BT is not set # CONFIG_AF_RXRPC is not set +CONFIG_WIRELESS=y +# CONFIG_CFG80211 is not set +CONFIG_CFG80211_DEFAULT_PS_VALUE=0 +# CONFIG_WIRELESS_OLD_REGULATORY is not set +# CONFIG_WIRELESS_EXT is not set +# CONFIG_LIB80211 is not set # -# Wireless +# CFG80211 needs to be enabled for MAC80211 # -# CONFIG_CFG80211 is not set -# CONFIG_WIRELESS_EXT is not set -# CONFIG_MAC80211 is not set -# CONFIG_IEEE80211 is not set +# CONFIG_WIMAX is not set # CONFIG_RFKILL is not set # CONFIG_NET_9P is not set @@ -395,6 +440,7 @@ CONFIG_LLC=m # Generic Driver Options # CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" +# CONFIG_DEVTMPFS is not set CONFIG_STANDALONE=y # CONFIG_PREVENT_FIRMWARE_BUILD is not set # CONFIG_FW_LOADER is not set @@ -404,6 +450,7 @@ CONFIG_STANDALONE=y # CONFIG_CONNECTOR is not set CONFIG_MTD=y # CONFIG_MTD_DEBUG is not set +# CONFIG_MTD_TESTS is not set # CONFIG_MTD_CONCAT is not set CONFIG_MTD_PARTITIONS=y # CONFIG_MTD_REDBOOT_PARTS is not set @@ -453,16 +500,17 @@ CONFIG_MTD_CFI_UTIL=y # # CONFIG_MTD_COMPLEX_MAPPINGS is not set CONFIG_MTD_PHYSMAP=y -CONFIG_MTD_PHYSMAP_START=0x80000000 -CONFIG_MTD_PHYSMAP_LEN=0x0 -CONFIG_MTD_PHYSMAP_BANKWIDTH=2 +# CONFIG_MTD_PHYSMAP_COMPAT is not set # CONFIG_MTD_PLATRAM is not set # # Self-contained MTD device drivers # CONFIG_MTD_DATAFLASH=y +# CONFIG_MTD_DATAFLASH_WRITE_VERIFY is not set +# CONFIG_MTD_DATAFLASH_OTP is not set # CONFIG_MTD_M25P80 is not set +# CONFIG_MTD_SST25L is not set # CONFIG_MTD_SLRAM is not set # CONFIG_MTD_PHRAM is not set # CONFIG_MTD_MTDRAM is not set @@ -478,9 +526,22 @@ CONFIG_MTD_DATAFLASH=y # CONFIG_MTD_ONENAND is not set # +# LPDDR flash memory drivers +# +# CONFIG_MTD_LPDDR is not set + +# # UBI - Unsorted block images # -# CONFIG_MTD_UBI is not set +CONFIG_MTD_UBI=y +CONFIG_MTD_UBI_WL_THRESHOLD=4096 +CONFIG_MTD_UBI_BEB_RESERVE=1 +# CONFIG_MTD_UBI_GLUEBI is not set + +# +# UBI debugging options +# +# CONFIG_MTD_UBI_DEBUG is not set # CONFIG_PARPORT is not 
set CONFIG_BLK_DEV=y # CONFIG_BLK_DEV_COW_COMMON is not set @@ -498,10 +559,20 @@ CONFIG_MISC_DEVICES=y CONFIG_ATMEL_TCLIB=y CONFIG_ATMEL_TCB_CLKSRC=y CONFIG_ATMEL_TCB_CLKSRC_BLOCK=0 -# CONFIG_EEPROM_93CX6 is not set +# CONFIG_ICS932S401 is not set # CONFIG_ATMEL_SSC is not set # CONFIG_ENCLOSURE_SERVICES is not set -# CONFIG_HAVE_IDE is not set +# CONFIG_ISL29003 is not set +# CONFIG_C2PORT is not set + +# +# EEPROM support +# +CONFIG_EEPROM_AT24=m +# CONFIG_EEPROM_AT25 is not set +# CONFIG_EEPROM_LEGACY is not set +# CONFIG_EEPROM_MAX6875 is not set +# CONFIG_EEPROM_93CX6 is not set # # SCSI device support @@ -534,26 +605,37 @@ CONFIG_PHYLIB=y # CONFIG_BROADCOM_PHY is not set # CONFIG_ICPLUS_PHY is not set # CONFIG_REALTEK_PHY is not set +# CONFIG_NATIONAL_PHY is not set +# CONFIG_STE10XP is not set +# CONFIG_LSI_ET1011C_PHY is not set # CONFIG_FIXED_PHY is not set # CONFIG_MDIO_BITBANG is not set CONFIG_NET_ETHERNET=y # CONFIG_MII is not set CONFIG_MACB=y # CONFIG_ENC28J60 is not set +# CONFIG_ETHOC is not set +# CONFIG_DNET is not set # CONFIG_IBM_NEW_EMAC_ZMII is not set # CONFIG_IBM_NEW_EMAC_RGMII is not set # CONFIG_IBM_NEW_EMAC_TAH is not set # CONFIG_IBM_NEW_EMAC_EMAC4 is not set +# CONFIG_IBM_NEW_EMAC_NO_FLOW_CTRL is not set +# CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set +# CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set # CONFIG_B44 is not set +# CONFIG_KS8842 is not set +# CONFIG_KS8851 is not set +# CONFIG_KS8851_MLL is not set # CONFIG_NETDEV_1000 is not set # CONFIG_NETDEV_10000 is not set +CONFIG_WLAN=y +# CONFIG_WLAN_PRE80211 is not set +# CONFIG_WLAN_80211 is not set # -# Wireless LAN +# Enable WiMAX (Networking options) to see the WiMAX drivers # -# CONFIG_WLAN_PRE80211 is not set -# CONFIG_WLAN_80211 is not set -# CONFIG_IWLWIFI_LEDS is not set # CONFIG_WAN is not set CONFIG_PPP=m # CONFIG_PPP_MULTILINK is not set @@ -603,9 +685,11 @@ CONFIG_SERIAL_ATMEL=y CONFIG_SERIAL_ATMEL_CONSOLE=y CONFIG_SERIAL_ATMEL_PDC=y # CONFIG_SERIAL_ATMEL_TTYAT is not set +# CONFIG_SERIAL_MAX3100 is not set CONFIG_SERIAL_CORE=y CONFIG_SERIAL_CORE_CONSOLE=y CONFIG_UNIX98_PTYS=y +# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set # CONFIG_LEGACY_PTYS is not set # CONFIG_IPMI_HANDLER is not set # CONFIG_HW_RANDOM is not set @@ -614,7 +698,9 @@ CONFIG_UNIX98_PTYS=y # CONFIG_TCG_TPM is not set CONFIG_I2C=m CONFIG_I2C_BOARDINFO=y +CONFIG_I2C_COMPAT=y CONFIG_I2C_CHARDEV=m +CONFIG_I2C_HELPER_AUTO=y CONFIG_I2C_ALGOBIT=m # @@ -624,6 +710,7 @@ CONFIG_I2C_ALGOBIT=m # # I2C system bus drivers (mostly embedded / system-on-chip) # +# CONFIG_I2C_DESIGNWARE is not set CONFIG_I2C_GPIO=m # CONFIG_I2C_OCORES is not set # CONFIG_I2C_SIMTEC is not set @@ -644,14 +731,6 @@ CONFIG_I2C_GPIO=m # Miscellaneous I2C Chip support # # CONFIG_DS1682 is not set -CONFIG_EEPROM_AT24=m -# CONFIG_EEPROM_LEGACY is not set -# CONFIG_SENSORS_PCF8574 is not set -# CONFIG_PCF8575 is not set -# CONFIG_SENSORS_PCA9539 is not set -# CONFIG_SENSORS_PCF8591 is not set -# CONFIG_TPS65010 is not set -# CONFIG_SENSORS_MAX6875 is not set # CONFIG_SENSORS_TSL2550 is not set # CONFIG_I2C_DEBUG_CORE is not set # CONFIG_I2C_DEBUG_ALGO is not set @@ -666,19 +745,28 @@ CONFIG_SPI_MASTER=y # CONFIG_SPI_ATMEL=y # CONFIG_SPI_BITBANG is not set +# CONFIG_SPI_GPIO is not set # # SPI Protocol Masters # -# CONFIG_EEPROM_AT25 is not set CONFIG_SPI_SPIDEV=m # CONFIG_SPI_TLE62X0 is not set + +# +# PPS support +# +# CONFIG_PPS is not set CONFIG_ARCH_REQUIRE_GPIOLIB=y CONFIG_GPIOLIB=y # CONFIG_DEBUG_GPIO is not set CONFIG_GPIO_SYSFS=y # +# Memory mapped GPIO 
expanders: +# + +# # I2C GPIO expanders: # # CONFIG_GPIO_MAX732X is not set @@ -694,11 +782,15 @@ CONFIG_GPIO_SYSFS=y # # CONFIG_GPIO_MAX7301 is not set # CONFIG_GPIO_MCP23S08 is not set +# CONFIG_GPIO_MC33880 is not set + +# +# AC97 GPIO expanders: +# # CONFIG_W1 is not set # CONFIG_POWER_SUPPLY is not set # CONFIG_HWMON is not set # CONFIG_THERMAL is not set -# CONFIG_THERMAL_HWMON is not set CONFIG_WATCHDOG=y # CONFIG_WATCHDOG_NOWAYOUT is not set @@ -707,11 +799,11 @@ CONFIG_WATCHDOG=y # # CONFIG_SOFT_WATCHDOG is not set CONFIG_AT32AP700X_WDT=y +CONFIG_SSB_POSSIBLE=y # # Sonics Silicon Backplane # -CONFIG_SSB_POSSIBLE=y # CONFIG_SSB is not set # @@ -720,22 +812,17 @@ CONFIG_SSB_POSSIBLE=y # CONFIG_MFD_CORE is not set # CONFIG_MFD_SM501 is not set # CONFIG_HTC_PASIC3 is not set - -# -# Multimedia devices -# - -# -# Multimedia core support -# -# CONFIG_VIDEO_DEV is not set -# CONFIG_DVB_CORE is not set -# CONFIG_VIDEO_MEDIA is not set - -# -# Multimedia drivers -# -# CONFIG_DAB is not set +# CONFIG_TPS65010 is not set +# CONFIG_MFD_TMIO is not set +# CONFIG_MFD_WM8400 is not set +# CONFIG_MFD_WM831X is not set +# CONFIG_MFD_WM8350_I2C is not set +# CONFIG_MFD_PCF50633 is not set +# CONFIG_MFD_MC13783 is not set +# CONFIG_AB3100_CORE is not set +# CONFIG_EZX_PCAP is not set +# CONFIG_REGULATOR is not set +# CONFIG_MEDIA_SUPPORT is not set # # Graphics support @@ -756,32 +843,43 @@ CONFIG_USB_SUPPORT=y # CONFIG_USB_ARCH_HAS_EHCI is not set # CONFIG_USB_OTG_WHITELIST is not set # CONFIG_USB_OTG_BLACKLIST_HUB is not set +# CONFIG_USB_GADGET_MUSB_HDRC is not set # -# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support' +# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may # CONFIG_USB_GADGET=y # CONFIG_USB_GADGET_DEBUG is not set # CONFIG_USB_GADGET_DEBUG_FILES is not set +# CONFIG_USB_GADGET_DEBUG_FS is not set +CONFIG_USB_GADGET_VBUS_DRAW=2 CONFIG_USB_GADGET_SELECTED=y -# CONFIG_USB_GADGET_AMD5536UDC is not set +# CONFIG_USB_GADGET_AT91 is not set CONFIG_USB_GADGET_ATMEL_USBA=y CONFIG_USB_ATMEL_USBA=y # CONFIG_USB_GADGET_FSL_USB2 is not set -# CONFIG_USB_GADGET_NET2280 is not set -# CONFIG_USB_GADGET_PXA25X is not set -# CONFIG_USB_GADGET_M66592 is not set -# CONFIG_USB_GADGET_PXA27X is not set -# CONFIG_USB_GADGET_GOKU is not set # CONFIG_USB_GADGET_LH7A40X is not set # CONFIG_USB_GADGET_OMAP is not set +# CONFIG_USB_GADGET_PXA25X is not set +# CONFIG_USB_GADGET_R8A66597 is not set +# CONFIG_USB_GADGET_PXA27X is not set +# CONFIG_USB_GADGET_S3C_HSOTG is not set +# CONFIG_USB_GADGET_IMX is not set # CONFIG_USB_GADGET_S3C2410 is not set -# CONFIG_USB_GADGET_AT91 is not set +# CONFIG_USB_GADGET_M66592 is not set +# CONFIG_USB_GADGET_AMD5536UDC is not set +# CONFIG_USB_GADGET_FSL_QE is not set +# CONFIG_USB_GADGET_CI13XXX is not set +# CONFIG_USB_GADGET_NET2280 is not set +# CONFIG_USB_GADGET_GOKU is not set +# CONFIG_USB_GADGET_LANGWELL is not set # CONFIG_USB_GADGET_DUMMY_HCD is not set CONFIG_USB_GADGET_DUALSPEED=y CONFIG_USB_ZERO=m +# CONFIG_USB_AUDIO is not set CONFIG_USB_ETH=m CONFIG_USB_ETH_RNDIS=y +# CONFIG_USB_ETH_EEM is not set CONFIG_USB_GADGETFS=m CONFIG_USB_FILE_STORAGE=m # CONFIG_USB_FILE_STORAGE_TEST is not set @@ -789,12 +887,18 @@ CONFIG_USB_G_SERIAL=m # CONFIG_USB_MIDI_GADGET is not set # CONFIG_USB_G_PRINTER is not set CONFIG_USB_CDC_COMPOSITE=m + +# +# OTG and related infrastructure +# +# CONFIG_USB_GPIO_VBUS is not set +# CONFIG_NOP_USB_XCEIV is not set CONFIG_MMC=y # CONFIG_MMC_DEBUG is not set # CONFIG_MMC_UNSAFE_RESUME is not set # -# MMC/SD Card Drivers +# 
MMC/SD/SDIO Card Drivers # CONFIG_MMC_BLOCK=y CONFIG_MMC_BLOCK_BOUNCE=y @@ -802,10 +906,12 @@ CONFIG_MMC_BLOCK_BOUNCE=y CONFIG_MMC_TEST=m # -# MMC/SD Host Controller Drivers +# MMC/SD/SDIO Host Controller Drivers # # CONFIG_MMC_SDHCI is not set +# CONFIG_MMC_AT91 is not set CONFIG_MMC_ATMELMCI=y +# CONFIG_MMC_ATMELMCI_DMA is not set CONFIG_MMC_SPI=m # CONFIG_MEMSTICK is not set CONFIG_NEW_LEDS=y @@ -815,7 +921,11 @@ CONFIG_LEDS_CLASS=y # LED drivers # CONFIG_LEDS_GPIO=y +CONFIG_LEDS_GPIO_PLATFORM=y +# CONFIG_LEDS_LP3944 is not set # CONFIG_LEDS_PCA955X is not set +# CONFIG_LEDS_DAC124S085 is not set +# CONFIG_LEDS_BD2802 is not set # # LED Triggers @@ -823,7 +933,13 @@ CONFIG_LEDS_GPIO=y CONFIG_LEDS_TRIGGERS=y CONFIG_LEDS_TRIGGER_TIMER=y CONFIG_LEDS_TRIGGER_HEARTBEAT=y +# CONFIG_LEDS_TRIGGER_BACKLIGHT is not set +# CONFIG_LEDS_TRIGGER_GPIO is not set CONFIG_LEDS_TRIGGER_DEFAULT_ON=y + +# +# iptables trigger is under Netfilter config (LED target) +# # CONFIG_ACCESSIBILITY is not set CONFIG_RTC_LIB=y CONFIG_RTC_CLASS=y @@ -855,25 +971,33 @@ CONFIG_RTC_INTF_DEV=y # CONFIG_RTC_DRV_M41T80 is not set # CONFIG_RTC_DRV_S35390A is not set # CONFIG_RTC_DRV_FM3130 is not set +# CONFIG_RTC_DRV_RX8581 is not set +# CONFIG_RTC_DRV_RX8025 is not set # # SPI RTC drivers # # CONFIG_RTC_DRV_M41T94 is not set # CONFIG_RTC_DRV_DS1305 is not set +# CONFIG_RTC_DRV_DS1390 is not set # CONFIG_RTC_DRV_MAX6902 is not set # CONFIG_RTC_DRV_R9701 is not set # CONFIG_RTC_DRV_RS5C348 is not set +# CONFIG_RTC_DRV_DS3234 is not set +# CONFIG_RTC_DRV_PCF2123 is not set # # Platform RTC drivers # +# CONFIG_RTC_DRV_DS1286 is not set # CONFIG_RTC_DRV_DS1511 is not set # CONFIG_RTC_DRV_DS1553 is not set # CONFIG_RTC_DRV_DS1742 is not set # CONFIG_RTC_DRV_STK17TA8 is not set # CONFIG_RTC_DRV_M48T86 is not set +# CONFIG_RTC_DRV_M48T35 is not set # CONFIG_RTC_DRV_M48T59 is not set +# CONFIG_RTC_DRV_BQ4802 is not set # CONFIG_RTC_DRV_V3020 is not set # @@ -892,24 +1016,38 @@ CONFIG_DMA_ENGINE=y # DMA Clients # # CONFIG_NET_DMA is not set +# CONFIG_ASYNC_TX_DMA is not set # CONFIG_DMATEST is not set +# CONFIG_AUXDISPLAY is not set # CONFIG_UIO is not set # +# TI VLYNQ +# +# CONFIG_STAGING is not set + +# # File systems # -CONFIG_EXT2_FS=m +CONFIG_EXT2_FS=y # CONFIG_EXT2_FS_XATTR is not set # CONFIG_EXT2_FS_XIP is not set -CONFIG_EXT3_FS=m +CONFIG_EXT3_FS=y +# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set # CONFIG_EXT3_FS_XATTR is not set -# CONFIG_EXT4DEV_FS is not set -CONFIG_JBD=m +# CONFIG_EXT4_FS is not set +CONFIG_JBD=y +# CONFIG_JBD_DEBUG is not set # CONFIG_REISERFS_FS is not set # CONFIG_JFS_FS is not set # CONFIG_FS_POSIX_ACL is not set # CONFIG_XFS_FS is not set +# CONFIG_GFS2_FS is not set # CONFIG_OCFS2_FS is not set +# CONFIG_BTRFS_FS is not set +# CONFIG_NILFS2_FS is not set +CONFIG_FILE_LOCKING=y +CONFIG_FSNOTIFY=y # CONFIG_DNOTIFY is not set CONFIG_INOTIFY=y CONFIG_INOTIFY_USER=y @@ -917,6 +1055,12 @@ CONFIG_INOTIFY_USER=y # CONFIG_AUTOFS_FS is not set # CONFIG_AUTOFS4_FS is not set CONFIG_FUSE_FS=m +# CONFIG_CUSE is not set + +# +# Caches +# +# CONFIG_FSCACHE is not set # # CD-ROM/DVD Filesystems @@ -940,15 +1084,13 @@ CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1" CONFIG_PROC_FS=y # CONFIG_PROC_KCORE is not set CONFIG_PROC_SYSCTL=y +CONFIG_PROC_PAGE_MONITOR=y CONFIG_SYSFS=y CONFIG_TMPFS=y # CONFIG_TMPFS_POSIX_ACL is not set # CONFIG_HUGETLB_PAGE is not set CONFIG_CONFIGFS_FS=m - -# -# Miscellaneous filesystems -# +CONFIG_MISC_FILESYSTEMS=y # CONFIG_ADFS_FS is not set # CONFIG_AFFS_FS is not set # CONFIG_HFS_FS is not set 
@@ -967,7 +1109,9 @@ CONFIG_JFFS2_ZLIB=y # CONFIG_JFFS2_LZO is not set CONFIG_JFFS2_RTIME=y # CONFIG_JFFS2_RUBIN is not set +# CONFIG_UBIFS_FS is not set # CONFIG_CRAMFS is not set +# CONFIG_SQUASHFS is not set # CONFIG_VXFS_FS is not set # CONFIG_MINIX_FS is not set # CONFIG_OMFS_FS is not set @@ -975,7 +1119,9 @@ CONFIG_JFFS2_RTIME=y # CONFIG_QNX4FS_FS is not set # CONFIG_ROMFS_FS is not set # CONFIG_SYSV_FS is not set -# CONFIG_UFS_FS is not set +CONFIG_UFS_FS=y +# CONFIG_UFS_FS_WRITE is not set +# CONFIG_UFS_DEBUG is not set CONFIG_NETWORK_FILESYSTEMS=y CONFIG_NFS_FS=y CONFIG_NFS_V3=y @@ -1060,14 +1206,18 @@ CONFIG_ENABLE_WARN_DEPRECATED=y CONFIG_ENABLE_MUST_CHECK=y CONFIG_FRAME_WARN=1024 CONFIG_MAGIC_SYSRQ=y +# CONFIG_STRIP_ASM_SYMS is not set # CONFIG_UNUSED_SYMBOLS is not set -# CONFIG_DEBUG_FS is not set +CONFIG_DEBUG_FS=y # CONFIG_HEADERS_CHECK is not set CONFIG_DEBUG_KERNEL=y # CONFIG_DEBUG_SHIRQ is not set CONFIG_DETECT_SOFTLOCKUP=y # CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0 +CONFIG_DETECT_HUNG_TASK=y +# CONFIG_BOOTPARAM_HUNG_TASK_PANIC is not set +CONFIG_BOOTPARAM_HUNG_TASK_PANIC_VALUE=0 CONFIG_SCHED_DEBUG=y # CONFIG_SCHEDSTATS is not set # CONFIG_TIMER_STATS is not set @@ -1083,6 +1233,7 @@ CONFIG_SCHED_DEBUG=y # CONFIG_LOCK_STAT is not set # CONFIG_DEBUG_SPINLOCK_SLEEP is not set # CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set +CONFIG_STACKTRACE=y # CONFIG_DEBUG_KOBJECT is not set CONFIG_DEBUG_BUGVERBOSE=y # CONFIG_DEBUG_INFO is not set @@ -1091,13 +1242,39 @@ CONFIG_DEBUG_BUGVERBOSE=y # CONFIG_DEBUG_MEMORY_INIT is not set # CONFIG_DEBUG_LIST is not set # CONFIG_DEBUG_SG is not set +# CONFIG_DEBUG_NOTIFIERS is not set +# CONFIG_DEBUG_CREDENTIALS is not set CONFIG_FRAME_POINTER=y # CONFIG_BOOT_PRINTK_DELAY is not set # CONFIG_RCU_TORTURE_TEST is not set +# CONFIG_RCU_CPU_STALL_DETECTOR is not set # CONFIG_KPROBES_SANITY_TEST is not set # CONFIG_BACKTRACE_SELF_TEST is not set +# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set +# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set # CONFIG_LKDTM is not set # CONFIG_FAULT_INJECTION is not set +# CONFIG_PAGE_POISONING is not set +CONFIG_NOP_TRACER=y +CONFIG_RING_BUFFER=y +CONFIG_EVENT_TRACING=y +CONFIG_CONTEXT_SWITCH_TRACER=y +CONFIG_RING_BUFFER_ALLOW_SWAP=y +CONFIG_TRACING=y +CONFIG_TRACING_SUPPORT=y +CONFIG_FTRACE=y +# CONFIG_IRQSOFF_TRACER is not set +# CONFIG_SCHED_TRACER is not set +# CONFIG_ENABLE_DEFAULT_TRACERS is not set +# CONFIG_BOOT_TRACER is not set +CONFIG_BRANCH_PROFILE_NONE=y +# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set +# CONFIG_PROFILE_ALL_BRANCHES is not set +# CONFIG_KMEMTRACE is not set +# CONFIG_WORKQUEUE_TRACER is not set +# CONFIG_BLK_DEV_IO_TRACE is not set +# CONFIG_RING_BUFFER_BENCHMARK is not set +# CONFIG_DYNAMIC_DEBUG is not set # CONFIG_SAMPLES is not set # @@ -1105,19 +1282,30 @@ CONFIG_FRAME_POINTER=y # # CONFIG_KEYS is not set # CONFIG_SECURITY is not set +# CONFIG_SECURITYFS is not set # CONFIG_SECURITY_FILE_CAPABILITIES is not set CONFIG_CRYPTO=y # # Crypto core or helper # +# CONFIG_CRYPTO_FIPS is not set CONFIG_CRYPTO_ALGAPI=y +CONFIG_CRYPTO_ALGAPI2=y CONFIG_CRYPTO_AEAD=y +CONFIG_CRYPTO_AEAD2=y CONFIG_CRYPTO_BLKCIPHER=y +CONFIG_CRYPTO_BLKCIPHER2=y CONFIG_CRYPTO_HASH=y +CONFIG_CRYPTO_HASH2=y +CONFIG_CRYPTO_RNG=m +CONFIG_CRYPTO_RNG2=y +CONFIG_CRYPTO_PCOMP=y CONFIG_CRYPTO_MANAGER=y +CONFIG_CRYPTO_MANAGER2=y # CONFIG_CRYPTO_GF128MUL is not set # CONFIG_CRYPTO_NULL is not set +CONFIG_CRYPTO_WORKQUEUE=y # CONFIG_CRYPTO_CRYPTD is not set CONFIG_CRYPTO_AUTHENC=y # 
CONFIG_CRYPTO_TEST is not set @@ -1145,11 +1333,13 @@ CONFIG_CRYPTO_PCBC=m # CONFIG_CRYPTO_HMAC=y # CONFIG_CRYPTO_XCBC is not set +# CONFIG_CRYPTO_VMAC is not set # # Digest # # CONFIG_CRYPTO_CRC32C is not set +# CONFIG_CRYPTO_GHASH is not set # CONFIG_CRYPTO_MD4 is not set CONFIG_CRYPTO_MD5=y # CONFIG_CRYPTO_MICHAEL_MIC is not set @@ -1166,7 +1356,7 @@ CONFIG_CRYPTO_SHA1=y # # Ciphers # -# CONFIG_CRYPTO_AES is not set +CONFIG_CRYPTO_AES=m # CONFIG_CRYPTO_ANUBIS is not set CONFIG_CRYPTO_ARC4=m # CONFIG_CRYPTO_BLOWFISH is not set @@ -1186,15 +1376,21 @@ CONFIG_CRYPTO_DES=y # Compression # CONFIG_CRYPTO_DEFLATE=y +# CONFIG_CRYPTO_ZLIB is not set # CONFIG_CRYPTO_LZO is not set + +# +# Random Number Generation +# +CONFIG_CRYPTO_ANSI_CPRNG=m CONFIG_CRYPTO_HW=y +CONFIG_BINARY_PRINTF=y # # Library routines # CONFIG_BITREVERSE=y -# CONFIG_GENERIC_FIND_FIRST_BIT is not set -# CONFIG_GENERIC_FIND_NEXT_BIT is not set +CONFIG_GENERIC_FIND_LAST_BIT=y CONFIG_CRC_CCITT=m # CONFIG_CRC16 is not set # CONFIG_CRC_T10DIF is not set @@ -1204,8 +1400,9 @@ CONFIG_CRC7=m # CONFIG_LIBCRC32C is not set CONFIG_ZLIB_INFLATE=y CONFIG_ZLIB_DEFLATE=y +CONFIG_DECOMPRESS_GZIP=y CONFIG_GENERIC_ALLOCATOR=y -CONFIG_PLIST=y CONFIG_HAS_IOMEM=y CONFIG_HAS_IOPORT=y CONFIG_HAS_DMA=y +CONFIG_NLATTR=y diff --git a/arch/avr32/configs/atngw100_evklcd100_defconfig b/arch/avr32/configs/atngw100_evklcd100_defconfig index 86a45b5c9d0..c732cc397ad 100644 --- a/arch/avr32/configs/atngw100_evklcd100_defconfig +++ b/arch/avr32/configs/atngw100_evklcd100_defconfig @@ -1,7 +1,7 @@ # # Automatically generated make config: don't edit -# Linux kernel version: 2.6.25.6 -# Wed Jun 18 16:06:32 2008 +# Linux kernel version: 2.6.32-rc5 +# Thu Oct 29 09:36:39 2009 # CONFIG_AVR32=y CONFIG_GENERIC_GPIO=y @@ -21,6 +21,7 @@ CONFIG_GENERIC_HWEIGHT=y CONFIG_GENERIC_CALIBRATE_DELAY=y CONFIG_GENERIC_BUG=y CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config" +CONFIG_CONSTRUCTORS=y # # General setup @@ -34,22 +35,37 @@ CONFIG_SWAP=y CONFIG_SYSVIPC=y CONFIG_SYSVIPC_SYSCTL=y CONFIG_POSIX_MQUEUE=y +CONFIG_POSIX_MQUEUE_SYSCTL=y CONFIG_BSD_PROCESS_ACCT=y CONFIG_BSD_PROCESS_ACCT_V3=y # CONFIG_TASKSTATS is not set # CONFIG_AUDIT is not set + +# +# RCU Subsystem +# +CONFIG_TREE_RCU=y +# CONFIG_TREE_PREEMPT_RCU is not set +# CONFIG_RCU_TRACE is not set +CONFIG_RCU_FANOUT=32 +# CONFIG_RCU_FANOUT_EXACT is not set +# CONFIG_TREE_RCU_TRACE is not set # CONFIG_IKCONFIG is not set CONFIG_LOG_BUF_SHIFT=14 -# CONFIG_CGROUPS is not set # CONFIG_GROUP_SCHED is not set +# CONFIG_CGROUPS is not set CONFIG_SYSFS_DEPRECATED=y CONFIG_SYSFS_DEPRECATED_V2=y # CONFIG_RELAY is not set # CONFIG_NAMESPACES is not set CONFIG_BLK_DEV_INITRD=y CONFIG_INITRAMFS_SOURCE="" +CONFIG_RD_GZIP=y +# CONFIG_RD_BZIP2 is not set +# CONFIG_RD_LZMA is not set CONFIG_CC_OPTIMIZE_FOR_SIZE=y CONFIG_SYSCTL=y +CONFIG_ANON_INODES=y CONFIG_EMBEDDED=y # CONFIG_SYSCTL_SYSCALL is not set CONFIG_KALLSYMS=y @@ -59,43 +75,51 @@ CONFIG_HOTPLUG=y CONFIG_PRINTK=y CONFIG_BUG=y CONFIG_ELF_CORE=y -# CONFIG_COMPAT_BRK is not set # CONFIG_BASE_FULL is not set CONFIG_FUTEX=y -CONFIG_ANON_INODES=y CONFIG_EPOLL=y CONFIG_SIGNALFD=y CONFIG_TIMERFD=y CONFIG_EVENTFD=y CONFIG_SHMEM=y +CONFIG_AIO=y + +# +# Kernel Performance Events And Counters +# CONFIG_VM_EVENT_COUNTERS=y CONFIG_SLUB_DEBUG=y +# CONFIG_COMPAT_BRK is not set # CONFIG_SLAB is not set CONFIG_SLUB=y # CONFIG_SLOB is not set CONFIG_PROFILING=y -# CONFIG_MARKERS is not set +CONFIG_TRACEPOINTS=y CONFIG_OPROFILE=m CONFIG_HAVE_OPROFILE=y CONFIG_KPROBES=y 
CONFIG_HAVE_KPROBES=y -# CONFIG_HAVE_KRETPROBES is not set -CONFIG_PROC_PAGE_MONITOR=y +CONFIG_HAVE_CLK=y + +# +# GCOV-based kernel profiling +# +# CONFIG_GCOV_KERNEL is not set +CONFIG_SLOW_WORK=y +# CONFIG_HAVE_GENERIC_DMA_COHERENT is not set CONFIG_SLABINFO=y CONFIG_RT_MUTEXES=y -# CONFIG_TINY_SHMEM is not set CONFIG_BASE_SMALL=1 CONFIG_MODULES=y +# CONFIG_MODULE_FORCE_LOAD is not set CONFIG_MODULE_UNLOAD=y CONFIG_MODULE_FORCE_UNLOAD=y # CONFIG_MODVERSIONS is not set # CONFIG_MODULE_SRCVERSION_ALL is not set -CONFIG_KMOD=y CONFIG_BLOCK=y -# CONFIG_LBD is not set -# CONFIG_BLK_DEV_IO_TRACE is not set -# CONFIG_LSF is not set +CONFIG_LBDAF=y # CONFIG_BLK_DEV_BSG is not set +# CONFIG_BLK_DEV_INTEGRITY is not set # # IO Schedulers @@ -109,7 +133,7 @@ CONFIG_IOSCHED_CFQ=y CONFIG_DEFAULT_CFQ=y # CONFIG_DEFAULT_NOOP is not set CONFIG_DEFAULT_IOSCHED="cfq" -CONFIG_CLASSIC_RCU=y +CONFIG_FREEZER=y # # System Type and features @@ -124,13 +148,26 @@ CONFIG_PERFORMANCE_COUNTERS=y CONFIG_PLATFORM_AT32AP=y CONFIG_CPU_AT32AP700X=y CONFIG_CPU_AT32AP7000=y +CONFIG_BOARD_ATNGW100_COMMON=y # CONFIG_BOARD_ATSTK1000 is not set -CONFIG_BOARD_ATNGW100=y +CONFIG_BOARD_ATNGW100_MKI=y +# CONFIG_BOARD_ATNGW100_MKII is not set +# CONFIG_BOARD_HAMMERHEAD is not set +# CONFIG_BOARD_FAVR_32 is not set +# CONFIG_BOARD_MERISC is not set +# CONFIG_BOARD_MIMC200 is not set +# CONFIG_BOARD_ATSTK1002 is not set +# CONFIG_BOARD_ATSTK1003 is not set +# CONFIG_BOARD_ATSTK1004 is not set +# CONFIG_BOARD_ATSTK1006 is not set +# CONFIG_BOARD_ATSTK1000_J2_LED8 is not set +# CONFIG_BOARD_ATSTK1000_J2_RGB is not set +# CONFIG_BOARD_ATNGW100_ADDON_NONE is not set CONFIG_BOARD_ATNGW100_EVKLCD10X=y +# CONFIG_BOARD_ATNGW100_MRMT is not set CONFIG_BOARD_ATNGW100_EVKLCD10X_QVGA=y # CONFIG_BOARD_ATNGW100_EVKLCD10X_VGA is not set # CONFIG_BOARD_ATNGW100_EVKLCD10X_POW_QVGA is not set -CONFIG_BOARD_ATNGW100_I2C_GPIO=y CONFIG_LOADER_U_BOOT=y # @@ -139,14 +176,14 @@ CONFIG_LOADER_U_BOOT=y # CONFIG_AP700X_32_BIT_SMC is not set CONFIG_AP700X_16_BIT_SMC=y # CONFIG_AP700X_8_BIT_SMC is not set -CONFIG_GPIO_DEV=y CONFIG_LOAD_ADDRESS=0x10000000 CONFIG_ENTRY_ADDRESS=0x90000000 CONFIG_PHYS_OFFSET=0x10000000 CONFIG_PREEMPT_NONE=y # CONFIG_PREEMPT_VOLUNTARY is not set # CONFIG_PREEMPT is not set -# CONFIG_HAVE_ARCH_BOOTMEM_NODE is not set +CONFIG_QUICKLIST=y +# CONFIG_HAVE_ARCH_BOOTMEM is not set # CONFIG_ARCH_HAVE_MEMORY_PRESENT is not set # CONFIG_NEED_NODE_MEMMAP_SIZE is not set CONFIG_ARCH_FLATMEM_ENABLE=y @@ -158,33 +195,36 @@ CONFIG_FLATMEM_MANUAL=y # CONFIG_SPARSEMEM_MANUAL is not set CONFIG_FLATMEM=y CONFIG_FLAT_NODE_MEM_MAP=y -# CONFIG_SPARSEMEM_STATIC is not set -# CONFIG_SPARSEMEM_VMEMMAP_ENABLE is not set +CONFIG_PAGEFLAGS_EXTENDED=y CONFIG_SPLIT_PTLOCK_CPUS=4 -# CONFIG_RESOURCES_64BIT is not set +# CONFIG_PHYS_ADDR_T_64BIT is not set CONFIG_ZONE_DMA_FLAG=0 +CONFIG_NR_QUICK=2 CONFIG_VIRT_TO_BUS=y +CONFIG_HAVE_MLOCK=y +CONFIG_HAVE_MLOCKED_PAGE_BIT=y +# CONFIG_KSM is not set +CONFIG_DEFAULT_MMAP_MIN_ADDR=4096 # CONFIG_OWNERSHIP_TRACE is not set CONFIG_NMI_DEBUGGING=y -CONFIG_DW_DMAC=y # CONFIG_HZ_100 is not set CONFIG_HZ_250=y # CONFIG_HZ_300 is not set # CONFIG_HZ_1000 is not set CONFIG_HZ=250 -# CONFIG_SCHED_HRTICK is not set +CONFIG_SCHED_HRTICK=y CONFIG_CMDLINE="" # # Power management options # -CONFIG_ARCH_SUSPEND_POSSIBLE=y CONFIG_PM=y -# CONFIG_PM_LEGACY is not set # CONFIG_PM_DEBUG is not set CONFIG_PM_SLEEP=y CONFIG_SUSPEND=y CONFIG_SUSPEND_FREEZER=y +# CONFIG_PM_RUNTIME is not set +CONFIG_ARCH_SUSPEND_POSSIBLE=y # # CPU Frequency 
scaling @@ -194,6 +234,7 @@ CONFIG_CPU_FREQ_TABLE=y # CONFIG_CPU_FREQ_DEBUG is not set # CONFIG_CPU_FREQ_STAT is not set # CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_POWERSAVE is not set # CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE is not set CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y # CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE is not set @@ -214,11 +255,9 @@ CONFIG_CPU_FREQ_AT32AP=y # Executable file formats # CONFIG_BINFMT_ELF=y +# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set +# CONFIG_HAVE_AOUT is not set # CONFIG_BINFMT_MISC is not set - -# -# Networking -# CONFIG_NET=y # @@ -232,6 +271,7 @@ CONFIG_XFRM_USER=y # CONFIG_XFRM_SUB_POLICY is not set # CONFIG_XFRM_MIGRATE is not set # CONFIG_XFRM_STATISTICS is not set +CONFIG_XFRM_IPCOMP=y CONFIG_NET_KEY=y # CONFIG_NET_KEY_MIGRATE is not set CONFIG_INET=y @@ -269,7 +309,6 @@ CONFIG_INET_TCP_DIAG=y CONFIG_TCP_CONG_CUBIC=y CONFIG_DEFAULT_TCP_CONG="cubic" # CONFIG_TCP_MD5SIG is not set -# CONFIG_IP_VS is not set CONFIG_IPV6=y # CONFIG_IPV6_PRIVACY is not set # CONFIG_IPV6_ROUTER_PREF is not set @@ -285,8 +324,10 @@ CONFIG_INET6_XFRM_MODE_TUNNEL=y CONFIG_INET6_XFRM_MODE_BEET=y # CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION is not set CONFIG_IPV6_SIT=y +CONFIG_IPV6_NDISC_NODETYPE=y # CONFIG_IPV6_TUNNEL is not set # CONFIG_IPV6_MULTIPLE_TABLES is not set +# CONFIG_IPV6_MROUTE is not set # CONFIG_NETWORK_SECMARK is not set CONFIG_NETFILTER=y # CONFIG_NETFILTER_DEBUG is not set @@ -310,10 +351,12 @@ CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m CONFIG_NETFILTER_XT_MATCH_MARK=m CONFIG_NETFILTER_XT_MATCH_POLICY=m CONFIG_NETFILTER_XT_MATCH_STATE=m +# CONFIG_IP_VS is not set # # IP: Netfilter Configuration # +CONFIG_NF_DEFRAG_IPV4=m CONFIG_NF_CONNTRACK_IPV4=m CONFIG_NF_CONNTRACK_PROC_COMPAT=y CONFIG_IP_NF_IPTABLES=m @@ -339,16 +382,20 @@ CONFIG_IP_NF_MANGLE=m CONFIG_NF_CONNTRACK_IPV6=m CONFIG_IP6_NF_IPTABLES=m CONFIG_IP6_NF_MATCH_IPV6HEADER=m -CONFIG_IP6_NF_FILTER=m CONFIG_IP6_NF_TARGET_LOG=m +CONFIG_IP6_NF_FILTER=m CONFIG_IP6_NF_TARGET_REJECT=m CONFIG_IP6_NF_MANGLE=m # CONFIG_IP_DCCP is not set # CONFIG_IP_SCTP is not set +# CONFIG_RDS is not set # CONFIG_TIPC is not set # CONFIG_ATM is not set +CONFIG_STP=m CONFIG_BRIDGE=m +# CONFIG_NET_DSA is not set CONFIG_VLAN_8021Q=m +# CONFIG_VLAN_8021Q_GVRP is not set # CONFIG_DECNET is not set CONFIG_LLC=m # CONFIG_LLC2 is not set @@ -358,26 +405,33 @@ CONFIG_LLC=m # CONFIG_LAPB is not set # CONFIG_ECONET is not set # CONFIG_WAN_ROUTER is not set +# CONFIG_PHONET is not set +# CONFIG_IEEE802154 is not set # CONFIG_NET_SCHED is not set +# CONFIG_DCB is not set # # Network testing # # CONFIG_NET_PKTGEN is not set # CONFIG_NET_TCPPROBE is not set +# CONFIG_NET_DROP_MONITOR is not set # CONFIG_HAMRADIO is not set # CONFIG_CAN is not set # CONFIG_IRDA is not set # CONFIG_BT is not set # CONFIG_AF_RXRPC is not set +CONFIG_WIRELESS=y +# CONFIG_CFG80211 is not set +CONFIG_CFG80211_DEFAULT_PS_VALUE=0 +# CONFIG_WIRELESS_OLD_REGULATORY is not set +# CONFIG_WIRELESS_EXT is not set +# CONFIG_LIB80211 is not set # -# Wireless +# CFG80211 needs to be enabled for MAC80211 # -# CONFIG_CFG80211 is not set -# CONFIG_WIRELESS_EXT is not set -# CONFIG_MAC80211 is not set -# CONFIG_IEEE80211 is not set +# CONFIG_WIMAX is not set # CONFIG_RFKILL is not set # CONFIG_NET_9P is not set @@ -389,6 +443,7 @@ CONFIG_LLC=m # Generic Driver Options # CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" +# CONFIG_DEVTMPFS is not set CONFIG_STANDALONE=y # CONFIG_PREVENT_FIRMWARE_BUILD is not set # CONFIG_FW_LOADER is not set @@ 
-398,10 +453,12 @@ CONFIG_STANDALONE=y # CONFIG_CONNECTOR is not set CONFIG_MTD=y # CONFIG_MTD_DEBUG is not set +# CONFIG_MTD_TESTS is not set # CONFIG_MTD_CONCAT is not set CONFIG_MTD_PARTITIONS=y # CONFIG_MTD_REDBOOT_PARTS is not set CONFIG_MTD_CMDLINE_PARTS=y +# CONFIG_MTD_AR7_PARTS is not set # # User Modules And Translation Layers @@ -446,16 +503,17 @@ CONFIG_MTD_CFI_UTIL=y # # CONFIG_MTD_COMPLEX_MAPPINGS is not set CONFIG_MTD_PHYSMAP=y -CONFIG_MTD_PHYSMAP_START=0x80000000 -CONFIG_MTD_PHYSMAP_LEN=0x0 -CONFIG_MTD_PHYSMAP_BANKWIDTH=2 +# CONFIG_MTD_PHYSMAP_COMPAT is not set # CONFIG_MTD_PLATRAM is not set # # Self-contained MTD device drivers # CONFIG_MTD_DATAFLASH=y +# CONFIG_MTD_DATAFLASH_WRITE_VERIFY is not set +# CONFIG_MTD_DATAFLASH_OTP is not set # CONFIG_MTD_M25P80 is not set +# CONFIG_MTD_SST25L is not set # CONFIG_MTD_SLRAM is not set # CONFIG_MTD_PHRAM is not set # CONFIG_MTD_MTDRAM is not set @@ -471,6 +529,11 @@ CONFIG_MTD_DATAFLASH=y # CONFIG_MTD_ONENAND is not set # +# LPDDR flash memory drivers +# +# CONFIG_MTD_LPDDR is not set + +# # UBI - Unsorted block images # CONFIG_MTD_UBI=y @@ -499,10 +562,20 @@ CONFIG_MISC_DEVICES=y CONFIG_ATMEL_TCLIB=y CONFIG_ATMEL_TCB_CLKSRC=y CONFIG_ATMEL_TCB_CLKSRC_BLOCK=0 -# CONFIG_EEPROM_93CX6 is not set +# CONFIG_ICS932S401 is not set # CONFIG_ATMEL_SSC is not set # CONFIG_ENCLOSURE_SERVICES is not set -# CONFIG_HAVE_IDE is not set +# CONFIG_ISL29003 is not set +# CONFIG_C2PORT is not set + +# +# EEPROM support +# +# CONFIG_EEPROM_AT24 is not set +# CONFIG_EEPROM_AT25 is not set +# CONFIG_EEPROM_LEGACY is not set +# CONFIG_EEPROM_MAX6875 is not set +# CONFIG_EEPROM_93CX6 is not set # # SCSI device support @@ -514,7 +587,6 @@ CONFIG_ATMEL_TCB_CLKSRC_BLOCK=0 # CONFIG_ATA is not set # CONFIG_MD is not set CONFIG_NETDEVICES=y -# CONFIG_NETDEVICES_MULTIQUEUE is not set # CONFIG_DUMMY is not set # CONFIG_BONDING is not set # CONFIG_MACVLAN is not set @@ -536,25 +608,37 @@ CONFIG_PHYLIB=y # CONFIG_BROADCOM_PHY is not set # CONFIG_ICPLUS_PHY is not set # CONFIG_REALTEK_PHY is not set +# CONFIG_NATIONAL_PHY is not set +# CONFIG_STE10XP is not set +# CONFIG_LSI_ET1011C_PHY is not set # CONFIG_FIXED_PHY is not set # CONFIG_MDIO_BITBANG is not set CONFIG_NET_ETHERNET=y # CONFIG_MII is not set CONFIG_MACB=y # CONFIG_ENC28J60 is not set +# CONFIG_ETHOC is not set +# CONFIG_DNET is not set # CONFIG_IBM_NEW_EMAC_ZMII is not set # CONFIG_IBM_NEW_EMAC_RGMII is not set # CONFIG_IBM_NEW_EMAC_TAH is not set # CONFIG_IBM_NEW_EMAC_EMAC4 is not set +# CONFIG_IBM_NEW_EMAC_NO_FLOW_CTRL is not set +# CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set +# CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set # CONFIG_B44 is not set +# CONFIG_KS8842 is not set +# CONFIG_KS8851 is not set +# CONFIG_KS8851_MLL is not set # CONFIG_NETDEV_1000 is not set # CONFIG_NETDEV_10000 is not set +CONFIG_WLAN=y +# CONFIG_WLAN_PRE80211 is not set +# CONFIG_WLAN_80211 is not set # -# Wireless LAN +# Enable WiMAX (Networking options) to see the WiMAX drivers # -# CONFIG_WLAN_PRE80211 is not set -# CONFIG_WLAN_80211 is not set # CONFIG_WAN is not set CONFIG_PPP=m # CONFIG_PPP_MULTILINK is not set @@ -598,15 +682,30 @@ CONFIG_INPUT_EVDEV=m # CONFIG_INPUT_TABLET is not set CONFIG_INPUT_TOUCHSCREEN=y # CONFIG_TOUCHSCREEN_ADS7846 is not set +# CONFIG_TOUCHSCREEN_AD7877 is not set +# CONFIG_TOUCHSCREEN_AD7879_I2C is not set +# CONFIG_TOUCHSCREEN_AD7879_SPI is not set +# CONFIG_TOUCHSCREEN_AD7879 is not set +# CONFIG_TOUCHSCREEN_EETI is not set # CONFIG_TOUCHSCREEN_FUJITSU is not set # 
CONFIG_TOUCHSCREEN_GUNZE is not set # CONFIG_TOUCHSCREEN_ELO is not set +# CONFIG_TOUCHSCREEN_WACOM_W8001 is not set +# CONFIG_TOUCHSCREEN_MCS5000 is not set # CONFIG_TOUCHSCREEN_MTOUCH is not set +# CONFIG_TOUCHSCREEN_INEXIO is not set # CONFIG_TOUCHSCREEN_MK712 is not set # CONFIG_TOUCHSCREEN_PENMOUNT is not set # CONFIG_TOUCHSCREEN_TOUCHRIGHT is not set # CONFIG_TOUCHSCREEN_TOUCHWIN is not set -# CONFIG_TOUCHSCREEN_UCB1400 is not set +CONFIG_TOUCHSCREEN_WM97XX=m +CONFIG_TOUCHSCREEN_WM9705=y +CONFIG_TOUCHSCREEN_WM9712=y +CONFIG_TOUCHSCREEN_WM9713=y +# CONFIG_TOUCHSCREEN_WM97XX_ATMEL is not set +# CONFIG_TOUCHSCREEN_TOUCHIT213 is not set +# CONFIG_TOUCHSCREEN_TSC2007 is not set +# CONFIG_TOUCHSCREEN_W90X900 is not set # CONFIG_INPUT_MISC is not set # @@ -619,9 +718,11 @@ CONFIG_INPUT_TOUCHSCREEN=y # Character devices # CONFIG_VT=y +CONFIG_CONSOLE_TRANSLATIONS=y CONFIG_VT_CONSOLE=y CONFIG_HW_CONSOLE=y # CONFIG_VT_HW_CONSOLE_BINDING is not set +CONFIG_DEVKMEM=y # CONFIG_SERIAL_NONSTANDARD is not set # @@ -636,9 +737,11 @@ CONFIG_SERIAL_ATMEL=y CONFIG_SERIAL_ATMEL_CONSOLE=y CONFIG_SERIAL_ATMEL_PDC=y # CONFIG_SERIAL_ATMEL_TTYAT is not set +# CONFIG_SERIAL_MAX3100 is not set CONFIG_SERIAL_CORE=y CONFIG_SERIAL_CORE_CONSOLE=y CONFIG_UNIX98_PTYS=y +# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set # CONFIG_LEGACY_PTYS is not set # CONFIG_IPMI_HANDLER is not set # CONFIG_HW_RANDOM is not set @@ -647,45 +750,44 @@ CONFIG_UNIX98_PTYS=y # CONFIG_TCG_TPM is not set CONFIG_I2C=m CONFIG_I2C_BOARDINFO=y +CONFIG_I2C_COMPAT=y CONFIG_I2C_CHARDEV=m +CONFIG_I2C_HELPER_AUTO=y +CONFIG_I2C_ALGOBIT=m # -# I2C Algorithms +# I2C Hardware Bus support # -CONFIG_I2C_ALGOBIT=m -# CONFIG_I2C_ALGOPCF is not set -# CONFIG_I2C_ALGOPCA is not set # -# I2C Hardware Bus support +# I2C system bus drivers (mostly embedded / system-on-chip) # -CONFIG_I2C_ATMELTWI=m +# CONFIG_I2C_DESIGNWARE is not set CONFIG_I2C_GPIO=m # CONFIG_I2C_OCORES is not set -# CONFIG_I2C_PARPORT_LIGHT is not set # CONFIG_I2C_SIMTEC is not set + +# +# External I2C/SMBus adapter drivers +# +# CONFIG_I2C_PARPORT_LIGHT is not set # CONFIG_I2C_TAOS_EVM is not set + +# +# Other I2C/SMBus bus drivers +# +# CONFIG_I2C_PCA_PLATFORM is not set # CONFIG_I2C_STUB is not set # # Miscellaneous I2C Chip support # # CONFIG_DS1682 is not set -# CONFIG_EEPROM_LEGACY is not set -# CONFIG_SENSORS_PCF8574 is not set -# CONFIG_PCF8575 is not set -# CONFIG_SENSORS_PCF8591 is not set -# CONFIG_TPS65010 is not set -# CONFIG_SENSORS_MAX6875 is not set # CONFIG_SENSORS_TSL2550 is not set # CONFIG_I2C_DEBUG_CORE is not set # CONFIG_I2C_DEBUG_ALGO is not set # CONFIG_I2C_DEBUG_BUS is not set # CONFIG_I2C_DEBUG_CHIP is not set - -# -# SPI support -# CONFIG_SPI=y # CONFIG_SPI_DEBUG is not set CONFIG_SPI_MASTER=y @@ -695,30 +797,48 @@ CONFIG_SPI_MASTER=y # CONFIG_SPI_ATMEL=y # CONFIG_SPI_BITBANG is not set +# CONFIG_SPI_GPIO is not set # # SPI Protocol Masters # -# CONFIG_EEPROM_AT25 is not set CONFIG_SPI_SPIDEV=m # CONFIG_SPI_TLE62X0 is not set -CONFIG_HAVE_GPIO_LIB=y # -# GPIO Support +# PPS support # +# CONFIG_PPS is not set +CONFIG_ARCH_REQUIRE_GPIOLIB=y +CONFIG_GPIOLIB=y # CONFIG_DEBUG_GPIO is not set +# CONFIG_GPIO_SYSFS is not set + +# +# Memory mapped GPIO expanders: +# # # I2C GPIO expanders: # +# CONFIG_GPIO_MAX732X is not set # CONFIG_GPIO_PCA953X is not set # CONFIG_GPIO_PCF857X is not set # +# PCI GPIO expanders: +# + +# # SPI GPIO expanders: # +# CONFIG_GPIO_MAX7301 is not set # CONFIG_GPIO_MCP23S08 is not set +# CONFIG_GPIO_MC33880 is not set + +# +# AC97 GPIO expanders: +# 
# CONFIG_W1 is not set # CONFIG_POWER_SUPPLY is not set # CONFIG_HWMON is not set @@ -731,24 +851,31 @@ CONFIG_WATCHDOG=y # # CONFIG_SOFT_WATCHDOG is not set CONFIG_AT32AP700X_WDT=y +CONFIG_SSB_POSSIBLE=y # # Sonics Silicon Backplane # -CONFIG_SSB_POSSIBLE=y # CONFIG_SSB is not set # # Multifunction device drivers # +# CONFIG_MFD_CORE is not set # CONFIG_MFD_SM501 is not set - -# -# Multimedia devices -# -# CONFIG_VIDEO_DEV is not set -# CONFIG_DVB_CORE is not set -# CONFIG_DAB is not set +# CONFIG_HTC_PASIC3 is not set +# CONFIG_UCB1400_CORE is not set +# CONFIG_TPS65010 is not set +# CONFIG_MFD_TMIO is not set +# CONFIG_MFD_WM8400 is not set +# CONFIG_MFD_WM831X is not set +# CONFIG_MFD_WM8350_I2C is not set +# CONFIG_MFD_PCF50633 is not set +# CONFIG_MFD_MC13783 is not set +# CONFIG_AB3100_CORE is not set +# CONFIG_EZX_PCAP is not set +# CONFIG_REGULATOR is not set +# CONFIG_MEDIA_SUPPORT is not set # # Graphics support @@ -758,6 +885,7 @@ CONFIG_SSB_POSSIBLE=y CONFIG_FB=y # CONFIG_FIRMWARE_EDID is not set # CONFIG_FB_DDC is not set +# CONFIG_FB_BOOT_VESA_SUPPORT is not set CONFIG_FB_CFB_FILLRECT=y CONFIG_FB_CFB_COPYAREA=y CONFIG_FB_CFB_IMAGEBLIT=y @@ -765,8 +893,8 @@ CONFIG_FB_CFB_IMAGEBLIT=y # CONFIG_FB_SYS_FILLRECT is not set # CONFIG_FB_SYS_COPYAREA is not set # CONFIG_FB_SYS_IMAGEBLIT is not set +# CONFIG_FB_FOREIGN_ENDIAN is not set # CONFIG_FB_SYS_FOPS is not set -CONFIG_FB_DEFERRED_IO=y # CONFIG_FB_SVGALIB is not set # CONFIG_FB_MACMODES is not set # CONFIG_FB_BACKLIGHT is not set @@ -779,6 +907,9 @@ CONFIG_FB_DEFERRED_IO=y # CONFIG_FB_S1D13XXX is not set CONFIG_FB_ATMEL=y # CONFIG_FB_VIRTUAL is not set +# CONFIG_FB_METRONOME is not set +# CONFIG_FB_MB862XX is not set +# CONFIG_FB_BROADSHEET is not set # CONFIG_BACKLIGHT_LCD_SUPPORT is not set # @@ -792,119 +923,124 @@ CONFIG_FB_ATMEL=y CONFIG_DUMMY_CONSOLE=y # CONFIG_FRAMEBUFFER_CONSOLE is not set # CONFIG_LOGO is not set - -# -# Sound -# CONFIG_SOUND=y - -# -# Advanced Linux Sound Architecture -# +CONFIG_SOUND_OSS_CORE=y +CONFIG_SOUND_OSS_CORE_PRECLAIM=y CONFIG_SND=y -CONFIG_SND_TIMER=m +CONFIG_SND_TIMER=y CONFIG_SND_PCM=m # CONFIG_SND_SEQUENCER is not set CONFIG_SND_OSSEMUL=y CONFIG_SND_MIXER_OSS=m CONFIG_SND_PCM_OSS=m CONFIG_SND_PCM_OSS_PLUGINS=y +CONFIG_SND_HRTIMER=y # CONFIG_SND_DYNAMIC_MINORS is not set # CONFIG_SND_SUPPORT_OLD_API is not set CONFIG_SND_VERBOSE_PROCFS=y # CONFIG_SND_VERBOSE_PRINTK is not set # CONFIG_SND_DEBUG is not set - -# -# Generic devices -# +CONFIG_SND_VMASTER=y +# CONFIG_SND_RAWMIDI_SEQ is not set +# CONFIG_SND_OPL3_LIB_SEQ is not set +# CONFIG_SND_OPL4_LIB_SEQ is not set +# CONFIG_SND_SBAWE_SEQ is not set +# CONFIG_SND_EMU10K1_SEQ is not set CONFIG_SND_AC97_CODEC=m -# CONFIG_SND_DUMMY is not set -# CONFIG_SND_MTPAV is not set -# CONFIG_SND_SERIAL_U16550 is not set -# CONFIG_SND_MPU401 is not set +# CONFIG_SND_DRIVERS is not set # -# AVR32 devices -# -CONFIG_SND_ATMEL_AC97=m - -# -# SPI devices -# - -# -# System on Chip audio support +# Atmel devices (AVR32 and AT91) # +# CONFIG_SND_ATMEL_ABDAC is not set +CONFIG_SND_ATMEL_AC97C=m +# CONFIG_SND_SPI is not set # CONFIG_SND_SOC is not set - -# -# SoC Audio support for SuperH -# - -# -# ALSA SoC audio for Freescale SOCs -# - -# -# Open Sound System -# # CONFIG_SOUND_PRIME is not set CONFIG_AC97_BUS=m CONFIG_HID_SUPPORT=y CONFIG_HID=y -# CONFIG_HID_DEBUG is not set # CONFIG_HIDRAW is not set +# CONFIG_HID_PID is not set + +# +# Special HID drivers +# CONFIG_USB_SUPPORT=y # CONFIG_USB_ARCH_HAS_HCD is not set # CONFIG_USB_ARCH_HAS_OHCI is not set # 
CONFIG_USB_ARCH_HAS_EHCI is not set +# CONFIG_USB_OTG_WHITELIST is not set +# CONFIG_USB_OTG_BLACKLIST_HUB is not set +# CONFIG_USB_GADGET_MUSB_HDRC is not set # -# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support' +# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may # CONFIG_USB_GADGET=y # CONFIG_USB_GADGET_DEBUG is not set # CONFIG_USB_GADGET_DEBUG_FILES is not set +# CONFIG_USB_GADGET_DEBUG_FS is not set +CONFIG_USB_GADGET_VBUS_DRAW=350 CONFIG_USB_GADGET_SELECTED=y -# CONFIG_USB_GADGET_AMD5536UDC is not set +# CONFIG_USB_GADGET_AT91 is not set CONFIG_USB_GADGET_ATMEL_USBA=y CONFIG_USB_ATMEL_USBA=y # CONFIG_USB_GADGET_FSL_USB2 is not set -# CONFIG_USB_GADGET_NET2280 is not set -# CONFIG_USB_GADGET_PXA2XX is not set -# CONFIG_USB_GADGET_M66592 is not set -# CONFIG_USB_GADGET_GOKU is not set # CONFIG_USB_GADGET_LH7A40X is not set # CONFIG_USB_GADGET_OMAP is not set +# CONFIG_USB_GADGET_PXA25X is not set +# CONFIG_USB_GADGET_R8A66597 is not set +# CONFIG_USB_GADGET_PXA27X is not set +# CONFIG_USB_GADGET_S3C_HSOTG is not set +# CONFIG_USB_GADGET_IMX is not set # CONFIG_USB_GADGET_S3C2410 is not set -# CONFIG_USB_GADGET_AT91 is not set +# CONFIG_USB_GADGET_M66592 is not set +# CONFIG_USB_GADGET_AMD5536UDC is not set +# CONFIG_USB_GADGET_FSL_QE is not set +# CONFIG_USB_GADGET_CI13XXX is not set +# CONFIG_USB_GADGET_NET2280 is not set +# CONFIG_USB_GADGET_GOKU is not set +# CONFIG_USB_GADGET_LANGWELL is not set # CONFIG_USB_GADGET_DUMMY_HCD is not set CONFIG_USB_GADGET_DUALSPEED=y CONFIG_USB_ZERO=m +# CONFIG_USB_AUDIO is not set CONFIG_USB_ETH=m CONFIG_USB_ETH_RNDIS=y +# CONFIG_USB_ETH_EEM is not set CONFIG_USB_GADGETFS=m CONFIG_USB_FILE_STORAGE=m # CONFIG_USB_FILE_STORAGE_TEST is not set CONFIG_USB_G_SERIAL=m # CONFIG_USB_MIDI_GADGET is not set # CONFIG_USB_G_PRINTER is not set +CONFIG_USB_CDC_COMPOSITE=m + +# +# OTG and related infrastructure +# +# CONFIG_USB_GPIO_VBUS is not set +# CONFIG_NOP_USB_XCEIV is not set CONFIG_MMC=y # CONFIG_MMC_DEBUG is not set # CONFIG_MMC_UNSAFE_RESUME is not set # -# MMC/SD Card Drivers +# MMC/SD/SDIO Card Drivers # CONFIG_MMC_BLOCK=y CONFIG_MMC_BLOCK_BOUNCE=y # CONFIG_SDIO_UART is not set +# CONFIG_MMC_TEST is not set # -# MMC/SD Host Controller Drivers +# MMC/SD/SDIO Host Controller Drivers # +# CONFIG_MMC_SDHCI is not set +# CONFIG_MMC_AT91 is not set CONFIG_MMC_ATMELMCI=y +# CONFIG_MMC_ATMELMCI_DMA is not set # CONFIG_MMC_SPI is not set # CONFIG_MEMSTICK is not set CONFIG_NEW_LEDS=y @@ -913,7 +1049,13 @@ CONFIG_LEDS_CLASS=y # # LED drivers # +# CONFIG_LEDS_PCA9532 is not set CONFIG_LEDS_GPIO=y +CONFIG_LEDS_GPIO_PLATFORM=y +# CONFIG_LEDS_LP3944 is not set +# CONFIG_LEDS_PCA955X is not set +# CONFIG_LEDS_DAC124S085 is not set +# CONFIG_LEDS_BD2802 is not set # # LED Triggers @@ -921,6 +1063,14 @@ CONFIG_LEDS_GPIO=y CONFIG_LEDS_TRIGGERS=y CONFIG_LEDS_TRIGGER_TIMER=y CONFIG_LEDS_TRIGGER_HEARTBEAT=y +# CONFIG_LEDS_TRIGGER_BACKLIGHT is not set +# CONFIG_LEDS_TRIGGER_GPIO is not set +# CONFIG_LEDS_TRIGGER_DEFAULT_ON is not set + +# +# iptables trigger is under Netfilter config (LED target) +# +# CONFIG_ACCESSIBILITY is not set CONFIG_RTC_LIB=y CONFIG_RTC_CLASS=y CONFIG_RTC_HCTOSYS=y @@ -950,51 +1100,84 @@ CONFIG_RTC_INTF_DEV=y # CONFIG_RTC_DRV_PCF8583 is not set # CONFIG_RTC_DRV_M41T80 is not set # CONFIG_RTC_DRV_S35390A is not set +# CONFIG_RTC_DRV_FM3130 is not set +# CONFIG_RTC_DRV_RX8581 is not set +# CONFIG_RTC_DRV_RX8025 is not set # # SPI RTC drivers # +# CONFIG_RTC_DRV_M41T94 is not set +# CONFIG_RTC_DRV_DS1305 is not set +# CONFIG_RTC_DRV_DS1390 
is not set # CONFIG_RTC_DRV_MAX6902 is not set # CONFIG_RTC_DRV_R9701 is not set # CONFIG_RTC_DRV_RS5C348 is not set +# CONFIG_RTC_DRV_DS3234 is not set +# CONFIG_RTC_DRV_PCF2123 is not set # # Platform RTC drivers # +# CONFIG_RTC_DRV_DS1286 is not set # CONFIG_RTC_DRV_DS1511 is not set # CONFIG_RTC_DRV_DS1553 is not set # CONFIG_RTC_DRV_DS1742 is not set # CONFIG_RTC_DRV_STK17TA8 is not set # CONFIG_RTC_DRV_M48T86 is not set +# CONFIG_RTC_DRV_M48T35 is not set # CONFIG_RTC_DRV_M48T59 is not set +# CONFIG_RTC_DRV_BQ4802 is not set # CONFIG_RTC_DRV_V3020 is not set # # on-CPU RTC drivers # CONFIG_RTC_DRV_AT32AP700X=y +CONFIG_DMADEVICES=y # -# Userspace I/O +# DMA Devices # +CONFIG_DW_DMAC=y +CONFIG_DMA_ENGINE=y + +# +# DMA Clients +# +# CONFIG_NET_DMA is not set +# CONFIG_ASYNC_TX_DMA is not set +# CONFIG_DMATEST is not set +# CONFIG_AUXDISPLAY is not set # CONFIG_UIO is not set # +# TI VLYNQ +# +# CONFIG_STAGING is not set + +# # File systems # CONFIG_EXT2_FS=y # CONFIG_EXT2_FS_XATTR is not set # CONFIG_EXT2_FS_XIP is not set CONFIG_EXT3_FS=y +# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set # CONFIG_EXT3_FS_XATTR is not set -# CONFIG_EXT4DEV_FS is not set +# CONFIG_EXT4_FS is not set CONFIG_JBD=y +# CONFIG_JBD_DEBUG is not set # CONFIG_REISERFS_FS is not set # CONFIG_JFS_FS is not set # CONFIG_FS_POSIX_ACL is not set # CONFIG_XFS_FS is not set # CONFIG_GFS2_FS is not set # CONFIG_OCFS2_FS is not set +# CONFIG_BTRFS_FS is not set +# CONFIG_NILFS2_FS is not set +CONFIG_FILE_LOCKING=y +CONFIG_FSNOTIFY=y # CONFIG_DNOTIFY is not set CONFIG_INOTIFY=y CONFIG_INOTIFY_USER=y @@ -1002,6 +1185,12 @@ CONFIG_INOTIFY_USER=y # CONFIG_AUTOFS_FS is not set # CONFIG_AUTOFS4_FS is not set CONFIG_FUSE_FS=m +# CONFIG_CUSE is not set + +# +# Caches +# +# CONFIG_FSCACHE is not set # # CD-ROM/DVD Filesystems @@ -1025,15 +1214,13 @@ CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1" CONFIG_PROC_FS=y # CONFIG_PROC_KCORE is not set CONFIG_PROC_SYSCTL=y +CONFIG_PROC_PAGE_MONITOR=y CONFIG_SYSFS=y CONFIG_TMPFS=y # CONFIG_TMPFS_POSIX_ACL is not set # CONFIG_HUGETLB_PAGE is not set CONFIG_CONFIGFS_FS=y - -# -# Miscellaneous filesystems -# +CONFIG_MISC_FILESYSTEMS=y # CONFIG_ADFS_FS is not set # CONFIG_AFFS_FS is not set # CONFIG_HFS_FS is not set @@ -1059,8 +1246,10 @@ CONFIG_UBIFS_FS_LZO=y CONFIG_UBIFS_FS_ZLIB=y # CONFIG_UBIFS_FS_DEBUG is not set # CONFIG_CRAMFS is not set +# CONFIG_SQUASHFS is not set # CONFIG_VXFS_FS is not set # CONFIG_MINIX_FS is not set +# CONFIG_OMFS_FS is not set # CONFIG_HPFS_FS is not set # CONFIG_QNX4FS_FS is not set # CONFIG_ROMFS_FS is not set @@ -1071,19 +1260,16 @@ CONFIG_NFS_FS=y CONFIG_NFS_V3=y # CONFIG_NFS_V3_ACL is not set # CONFIG_NFS_V4 is not set -# CONFIG_NFS_DIRECTIO is not set +CONFIG_ROOT_NFS=y CONFIG_NFSD=m CONFIG_NFSD_V3=y # CONFIG_NFSD_V3_ACL is not set # CONFIG_NFSD_V4 is not set -CONFIG_NFSD_TCP=y -CONFIG_ROOT_NFS=y CONFIG_LOCKD=y CONFIG_LOCKD_V4=y CONFIG_EXPORTFS=m CONFIG_NFS_COMMON=y CONFIG_SUNRPC=y -# CONFIG_SUNRPC_BIND34 is not set # CONFIG_RPCSEC_GSS_KRB5 is not set # CONFIG_RPCSEC_GSS_SPKM3 is not set CONFIG_SMB_FS=m @@ -1151,16 +1337,24 @@ CONFIG_NLS_UTF8=m # CONFIG_PRINTK_TIME is not set CONFIG_ENABLE_WARN_DEPRECATED=y CONFIG_ENABLE_MUST_CHECK=y +CONFIG_FRAME_WARN=1024 CONFIG_MAGIC_SYSRQ=y +# CONFIG_STRIP_ASM_SYMS is not set # CONFIG_UNUSED_SYMBOLS is not set -# CONFIG_DEBUG_FS is not set +CONFIG_DEBUG_FS=y # CONFIG_HEADERS_CHECK is not set CONFIG_DEBUG_KERNEL=y # CONFIG_DEBUG_SHIRQ is not set CONFIG_DETECT_SOFTLOCKUP=y +# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set 
+CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0 +CONFIG_DETECT_HUNG_TASK=y +# CONFIG_BOOTPARAM_HUNG_TASK_PANIC is not set +CONFIG_BOOTPARAM_HUNG_TASK_PANIC_VALUE=0 CONFIG_SCHED_DEBUG=y # CONFIG_SCHEDSTATS is not set # CONFIG_TIMER_STATS is not set +# CONFIG_DEBUG_OBJECTS is not set # CONFIG_SLUB_DEBUG_ON is not set # CONFIG_SLUB_STATS is not set # CONFIG_DEBUG_RT_MUTEXES is not set @@ -1172,19 +1366,48 @@ CONFIG_SCHED_DEBUG=y # CONFIG_LOCK_STAT is not set # CONFIG_DEBUG_SPINLOCK_SLEEP is not set # CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set +CONFIG_STACKTRACE=y # CONFIG_DEBUG_KOBJECT is not set CONFIG_DEBUG_BUGVERBOSE=y # CONFIG_DEBUG_INFO is not set # CONFIG_DEBUG_VM is not set +# CONFIG_DEBUG_WRITECOUNT is not set +# CONFIG_DEBUG_MEMORY_INIT is not set # CONFIG_DEBUG_LIST is not set # CONFIG_DEBUG_SG is not set +# CONFIG_DEBUG_NOTIFIERS is not set +# CONFIG_DEBUG_CREDENTIALS is not set CONFIG_FRAME_POINTER=y # CONFIG_BOOT_PRINTK_DELAY is not set # CONFIG_RCU_TORTURE_TEST is not set +# CONFIG_RCU_CPU_STALL_DETECTOR is not set # CONFIG_KPROBES_SANITY_TEST is not set # CONFIG_BACKTRACE_SELF_TEST is not set +# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set +# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set # CONFIG_LKDTM is not set # CONFIG_FAULT_INJECTION is not set +# CONFIG_PAGE_POISONING is not set +CONFIG_NOP_TRACER=y +CONFIG_RING_BUFFER=y +CONFIG_EVENT_TRACING=y +CONFIG_CONTEXT_SWITCH_TRACER=y +CONFIG_RING_BUFFER_ALLOW_SWAP=y +CONFIG_TRACING=y +CONFIG_TRACING_SUPPORT=y +CONFIG_FTRACE=y +# CONFIG_IRQSOFF_TRACER is not set +# CONFIG_SCHED_TRACER is not set +# CONFIG_ENABLE_DEFAULT_TRACERS is not set +# CONFIG_BOOT_TRACER is not set +CONFIG_BRANCH_PROFILE_NONE=y +# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set +# CONFIG_PROFILE_ALL_BRANCHES is not set +# CONFIG_KMEMTRACE is not set +# CONFIG_WORKQUEUE_TRACER is not set +# CONFIG_BLK_DEV_IO_TRACE is not set +# CONFIG_RING_BUFFER_BENCHMARK is not set +# CONFIG_DYNAMIC_DEBUG is not set # CONFIG_SAMPLES is not set # @@ -1192,63 +1415,118 @@ CONFIG_FRAME_POINTER=y # # CONFIG_KEYS is not set # CONFIG_SECURITY is not set +# CONFIG_SECURITYFS is not set # CONFIG_SECURITY_FILE_CAPABILITIES is not set CONFIG_CRYPTO=y + +# +# Crypto core or helper +# +# CONFIG_CRYPTO_FIPS is not set CONFIG_CRYPTO_ALGAPI=y +CONFIG_CRYPTO_ALGAPI2=y CONFIG_CRYPTO_AEAD=y +CONFIG_CRYPTO_AEAD2=y CONFIG_CRYPTO_BLKCIPHER=y -# CONFIG_CRYPTO_SEQIV is not set +CONFIG_CRYPTO_BLKCIPHER2=y CONFIG_CRYPTO_HASH=y +CONFIG_CRYPTO_HASH2=y +CONFIG_CRYPTO_RNG=m +CONFIG_CRYPTO_RNG2=y +CONFIG_CRYPTO_PCOMP=y CONFIG_CRYPTO_MANAGER=y +CONFIG_CRYPTO_MANAGER2=y +# CONFIG_CRYPTO_GF128MUL is not set +# CONFIG_CRYPTO_NULL is not set +CONFIG_CRYPTO_WORKQUEUE=y +# CONFIG_CRYPTO_CRYPTD is not set +CONFIG_CRYPTO_AUTHENC=y +# CONFIG_CRYPTO_TEST is not set + +# +# Authenticated Encryption with Associated Data +# +# CONFIG_CRYPTO_CCM is not set +# CONFIG_CRYPTO_GCM is not set +# CONFIG_CRYPTO_SEQIV is not set + +# +# Block modes +# +CONFIG_CRYPTO_CBC=y +# CONFIG_CRYPTO_CTR is not set +# CONFIG_CRYPTO_CTS is not set +CONFIG_CRYPTO_ECB=m +# CONFIG_CRYPTO_LRW is not set +# CONFIG_CRYPTO_PCBC is not set +# CONFIG_CRYPTO_XTS is not set + +# +# Hash modes +# CONFIG_CRYPTO_HMAC=y # CONFIG_CRYPTO_XCBC is not set -# CONFIG_CRYPTO_NULL is not set +# CONFIG_CRYPTO_VMAC is not set + +# +# Digest +# +# CONFIG_CRYPTO_CRC32C is not set +# CONFIG_CRYPTO_GHASH is not set # CONFIG_CRYPTO_MD4 is not set CONFIG_CRYPTO_MD5=y +# CONFIG_CRYPTO_MICHAEL_MIC is not set +# CONFIG_CRYPTO_RMD128 is not set +# CONFIG_CRYPTO_RMD160 is not set +# 
CONFIG_CRYPTO_RMD256 is not set +# CONFIG_CRYPTO_RMD320 is not set CONFIG_CRYPTO_SHA1=y # CONFIG_CRYPTO_SHA256 is not set # CONFIG_CRYPTO_SHA512 is not set -# CONFIG_CRYPTO_WP512 is not set # CONFIG_CRYPTO_TGR192 is not set -# CONFIG_CRYPTO_GF128MUL is not set -CONFIG_CRYPTO_ECB=m -CONFIG_CRYPTO_CBC=y -# CONFIG_CRYPTO_PCBC is not set -# CONFIG_CRYPTO_LRW is not set -# CONFIG_CRYPTO_XTS is not set -# CONFIG_CRYPTO_CTR is not set -# CONFIG_CRYPTO_GCM is not set -# CONFIG_CRYPTO_CCM is not set -# CONFIG_CRYPTO_CRYPTD is not set -CONFIG_CRYPTO_DES=y -# CONFIG_CRYPTO_FCRYPT is not set +# CONFIG_CRYPTO_WP512 is not set + +# +# Ciphers +# +CONFIG_CRYPTO_AES=m +# CONFIG_CRYPTO_ANUBIS is not set +CONFIG_CRYPTO_ARC4=m # CONFIG_CRYPTO_BLOWFISH is not set -# CONFIG_CRYPTO_TWOFISH is not set -# CONFIG_CRYPTO_SERPENT is not set -# CONFIG_CRYPTO_AES is not set +# CONFIG_CRYPTO_CAMELLIA is not set # CONFIG_CRYPTO_CAST5 is not set # CONFIG_CRYPTO_CAST6 is not set -# CONFIG_CRYPTO_TEA is not set -CONFIG_CRYPTO_ARC4=m +CONFIG_CRYPTO_DES=y +# CONFIG_CRYPTO_FCRYPT is not set # CONFIG_CRYPTO_KHAZAD is not set -# CONFIG_CRYPTO_ANUBIS is not set -# CONFIG_CRYPTO_SEED is not set # CONFIG_CRYPTO_SALSA20 is not set +# CONFIG_CRYPTO_SEED is not set +# CONFIG_CRYPTO_SERPENT is not set +# CONFIG_CRYPTO_TEA is not set +# CONFIG_CRYPTO_TWOFISH is not set + +# +# Compression +# CONFIG_CRYPTO_DEFLATE=y -# CONFIG_CRYPTO_MICHAEL_MIC is not set -# CONFIG_CRYPTO_CRC32C is not set -# CONFIG_CRYPTO_CAMELLIA is not set -# CONFIG_CRYPTO_TEST is not set -CONFIG_CRYPTO_AUTHENC=y +# CONFIG_CRYPTO_ZLIB is not set CONFIG_CRYPTO_LZO=y + +# +# Random Number Generation +# +CONFIG_CRYPTO_ANSI_CPRNG=m CONFIG_CRYPTO_HW=y +CONFIG_BINARY_PRINTF=y # # Library routines # CONFIG_BITREVERSE=y +CONFIG_GENERIC_FIND_LAST_BIT=y CONFIG_CRC_CCITT=m CONFIG_CRC16=y +# CONFIG_CRC_T10DIF is not set # CONFIG_CRC_ITU_T is not set CONFIG_CRC32=y # CONFIG_CRC7 is not set @@ -1257,8 +1535,9 @@ CONFIG_ZLIB_INFLATE=y CONFIG_ZLIB_DEFLATE=y CONFIG_LZO_COMPRESS=y CONFIG_LZO_DECOMPRESS=y +CONFIG_DECOMPRESS_GZIP=y CONFIG_GENERIC_ALLOCATOR=y -CONFIG_PLIST=y CONFIG_HAS_IOMEM=y CONFIG_HAS_IOPORT=y CONFIG_HAS_DMA=y +CONFIG_NLATTR=y diff --git a/arch/avr32/configs/atngw100_evklcd101_defconfig b/arch/avr32/configs/atngw100_evklcd101_defconfig index a96b68ea5e8..5ef67da343b 100644 --- a/arch/avr32/configs/atngw100_evklcd101_defconfig +++ b/arch/avr32/configs/atngw100_evklcd101_defconfig @@ -1,7 +1,7 @@ # # Automatically generated make config: don't edit -# Linux kernel version: 2.6.25.6 -# Wed Jun 18 16:09:32 2008 +# Linux kernel version: 2.6.32-rc5 +# Thu Oct 29 09:37:19 2009 # CONFIG_AVR32=y CONFIG_GENERIC_GPIO=y @@ -21,6 +21,7 @@ CONFIG_GENERIC_HWEIGHT=y CONFIG_GENERIC_CALIBRATE_DELAY=y CONFIG_GENERIC_BUG=y CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config" +CONFIG_CONSTRUCTORS=y # # General setup @@ -34,22 +35,37 @@ CONFIG_SWAP=y CONFIG_SYSVIPC=y CONFIG_SYSVIPC_SYSCTL=y CONFIG_POSIX_MQUEUE=y +CONFIG_POSIX_MQUEUE_SYSCTL=y CONFIG_BSD_PROCESS_ACCT=y CONFIG_BSD_PROCESS_ACCT_V3=y # CONFIG_TASKSTATS is not set # CONFIG_AUDIT is not set + +# +# RCU Subsystem +# +CONFIG_TREE_RCU=y +# CONFIG_TREE_PREEMPT_RCU is not set +# CONFIG_RCU_TRACE is not set +CONFIG_RCU_FANOUT=32 +# CONFIG_RCU_FANOUT_EXACT is not set +# CONFIG_TREE_RCU_TRACE is not set # CONFIG_IKCONFIG is not set CONFIG_LOG_BUF_SHIFT=14 -# CONFIG_CGROUPS is not set # CONFIG_GROUP_SCHED is not set +# CONFIG_CGROUPS is not set CONFIG_SYSFS_DEPRECATED=y CONFIG_SYSFS_DEPRECATED_V2=y # CONFIG_RELAY is not set # 
CONFIG_NAMESPACES is not set CONFIG_BLK_DEV_INITRD=y CONFIG_INITRAMFS_SOURCE="" +CONFIG_RD_GZIP=y +# CONFIG_RD_BZIP2 is not set +# CONFIG_RD_LZMA is not set CONFIG_CC_OPTIMIZE_FOR_SIZE=y CONFIG_SYSCTL=y +CONFIG_ANON_INODES=y CONFIG_EMBEDDED=y # CONFIG_SYSCTL_SYSCALL is not set CONFIG_KALLSYMS=y @@ -59,43 +75,51 @@ CONFIG_HOTPLUG=y CONFIG_PRINTK=y CONFIG_BUG=y CONFIG_ELF_CORE=y -# CONFIG_COMPAT_BRK is not set # CONFIG_BASE_FULL is not set CONFIG_FUTEX=y -CONFIG_ANON_INODES=y CONFIG_EPOLL=y CONFIG_SIGNALFD=y CONFIG_TIMERFD=y CONFIG_EVENTFD=y CONFIG_SHMEM=y +CONFIG_AIO=y + +# +# Kernel Performance Events And Counters +# CONFIG_VM_EVENT_COUNTERS=y CONFIG_SLUB_DEBUG=y +# CONFIG_COMPAT_BRK is not set # CONFIG_SLAB is not set CONFIG_SLUB=y # CONFIG_SLOB is not set CONFIG_PROFILING=y -# CONFIG_MARKERS is not set +CONFIG_TRACEPOINTS=y CONFIG_OPROFILE=m CONFIG_HAVE_OPROFILE=y CONFIG_KPROBES=y CONFIG_HAVE_KPROBES=y -# CONFIG_HAVE_KRETPROBES is not set -CONFIG_PROC_PAGE_MONITOR=y +CONFIG_HAVE_CLK=y + +# +# GCOV-based kernel profiling +# +# CONFIG_GCOV_KERNEL is not set +CONFIG_SLOW_WORK=y +# CONFIG_HAVE_GENERIC_DMA_COHERENT is not set CONFIG_SLABINFO=y CONFIG_RT_MUTEXES=y -# CONFIG_TINY_SHMEM is not set CONFIG_BASE_SMALL=1 CONFIG_MODULES=y +# CONFIG_MODULE_FORCE_LOAD is not set CONFIG_MODULE_UNLOAD=y CONFIG_MODULE_FORCE_UNLOAD=y # CONFIG_MODVERSIONS is not set # CONFIG_MODULE_SRCVERSION_ALL is not set -CONFIG_KMOD=y CONFIG_BLOCK=y -# CONFIG_LBD is not set -# CONFIG_BLK_DEV_IO_TRACE is not set -# CONFIG_LSF is not set +CONFIG_LBDAF=y # CONFIG_BLK_DEV_BSG is not set +# CONFIG_BLK_DEV_INTEGRITY is not set # # IO Schedulers @@ -109,7 +133,7 @@ CONFIG_IOSCHED_CFQ=y CONFIG_DEFAULT_CFQ=y # CONFIG_DEFAULT_NOOP is not set CONFIG_DEFAULT_IOSCHED="cfq" -CONFIG_CLASSIC_RCU=y +CONFIG_FREEZER=y # # System Type and features @@ -124,13 +148,20 @@ CONFIG_PERFORMANCE_COUNTERS=y CONFIG_PLATFORM_AT32AP=y CONFIG_CPU_AT32AP700X=y CONFIG_CPU_AT32AP7000=y +CONFIG_BOARD_ATNGW100_COMMON=y # CONFIG_BOARD_ATSTK1000 is not set -CONFIG_BOARD_ATNGW100=y +CONFIG_BOARD_ATNGW100_MKI=y +# CONFIG_BOARD_ATNGW100_MKII is not set +# CONFIG_BOARD_HAMMERHEAD is not set +# CONFIG_BOARD_FAVR_32 is not set +# CONFIG_BOARD_MERISC is not set +# CONFIG_BOARD_MIMC200 is not set +# CONFIG_BOARD_ATNGW100_ADDON_NONE is not set CONFIG_BOARD_ATNGW100_EVKLCD10X=y +# CONFIG_BOARD_ATNGW100_MRMT is not set # CONFIG_BOARD_ATNGW100_EVKLCD10X_QVGA is not set CONFIG_BOARD_ATNGW100_EVKLCD10X_VGA=y # CONFIG_BOARD_ATNGW100_EVKLCD10X_POW_QVGA is not set -CONFIG_BOARD_ATNGW100_I2C_GPIO=y CONFIG_LOADER_U_BOOT=y # @@ -139,14 +170,14 @@ CONFIG_LOADER_U_BOOT=y # CONFIG_AP700X_32_BIT_SMC is not set CONFIG_AP700X_16_BIT_SMC=y # CONFIG_AP700X_8_BIT_SMC is not set -CONFIG_GPIO_DEV=y CONFIG_LOAD_ADDRESS=0x10000000 CONFIG_ENTRY_ADDRESS=0x90000000 CONFIG_PHYS_OFFSET=0x10000000 CONFIG_PREEMPT_NONE=y # CONFIG_PREEMPT_VOLUNTARY is not set # CONFIG_PREEMPT is not set -# CONFIG_HAVE_ARCH_BOOTMEM_NODE is not set +CONFIG_QUICKLIST=y +# CONFIG_HAVE_ARCH_BOOTMEM is not set # CONFIG_ARCH_HAVE_MEMORY_PRESENT is not set # CONFIG_NEED_NODE_MEMMAP_SIZE is not set CONFIG_ARCH_FLATMEM_ENABLE=y @@ -158,33 +189,36 @@ CONFIG_FLATMEM_MANUAL=y # CONFIG_SPARSEMEM_MANUAL is not set CONFIG_FLATMEM=y CONFIG_FLAT_NODE_MEM_MAP=y -# CONFIG_SPARSEMEM_STATIC is not set -# CONFIG_SPARSEMEM_VMEMMAP_ENABLE is not set +CONFIG_PAGEFLAGS_EXTENDED=y CONFIG_SPLIT_PTLOCK_CPUS=4 -# CONFIG_RESOURCES_64BIT is not set +# CONFIG_PHYS_ADDR_T_64BIT is not set CONFIG_ZONE_DMA_FLAG=0 +CONFIG_NR_QUICK=2 CONFIG_VIRT_TO_BUS=y 
+CONFIG_HAVE_MLOCK=y +CONFIG_HAVE_MLOCKED_PAGE_BIT=y +# CONFIG_KSM is not set +CONFIG_DEFAULT_MMAP_MIN_ADDR=4096 # CONFIG_OWNERSHIP_TRACE is not set CONFIG_NMI_DEBUGGING=y -CONFIG_DW_DMAC=y # CONFIG_HZ_100 is not set CONFIG_HZ_250=y # CONFIG_HZ_300 is not set # CONFIG_HZ_1000 is not set CONFIG_HZ=250 -# CONFIG_SCHED_HRTICK is not set +CONFIG_SCHED_HRTICK=y CONFIG_CMDLINE="" # # Power management options # -CONFIG_ARCH_SUSPEND_POSSIBLE=y CONFIG_PM=y -# CONFIG_PM_LEGACY is not set # CONFIG_PM_DEBUG is not set CONFIG_PM_SLEEP=y CONFIG_SUSPEND=y CONFIG_SUSPEND_FREEZER=y +# CONFIG_PM_RUNTIME is not set +CONFIG_ARCH_SUSPEND_POSSIBLE=y # # CPU Frequency scaling @@ -194,6 +228,7 @@ CONFIG_CPU_FREQ_TABLE=y # CONFIG_CPU_FREQ_DEBUG is not set # CONFIG_CPU_FREQ_STAT is not set # CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_POWERSAVE is not set # CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE is not set CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y # CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE is not set @@ -214,11 +249,9 @@ CONFIG_CPU_FREQ_AT32AP=y # Executable file formats # CONFIG_BINFMT_ELF=y +# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set +# CONFIG_HAVE_AOUT is not set # CONFIG_BINFMT_MISC is not set - -# -# Networking -# CONFIG_NET=y # @@ -232,6 +265,7 @@ CONFIG_XFRM_USER=y # CONFIG_XFRM_SUB_POLICY is not set # CONFIG_XFRM_MIGRATE is not set # CONFIG_XFRM_STATISTICS is not set +CONFIG_XFRM_IPCOMP=y CONFIG_NET_KEY=y # CONFIG_NET_KEY_MIGRATE is not set CONFIG_INET=y @@ -269,7 +303,6 @@ CONFIG_INET_TCP_DIAG=y CONFIG_TCP_CONG_CUBIC=y CONFIG_DEFAULT_TCP_CONG="cubic" # CONFIG_TCP_MD5SIG is not set -# CONFIG_IP_VS is not set CONFIG_IPV6=y # CONFIG_IPV6_PRIVACY is not set # CONFIG_IPV6_ROUTER_PREF is not set @@ -285,8 +318,10 @@ CONFIG_INET6_XFRM_MODE_TUNNEL=y CONFIG_INET6_XFRM_MODE_BEET=y # CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION is not set CONFIG_IPV6_SIT=y +CONFIG_IPV6_NDISC_NODETYPE=y # CONFIG_IPV6_TUNNEL is not set # CONFIG_IPV6_MULTIPLE_TABLES is not set +# CONFIG_IPV6_MROUTE is not set # CONFIG_NETWORK_SECMARK is not set CONFIG_NETFILTER=y # CONFIG_NETFILTER_DEBUG is not set @@ -310,10 +345,12 @@ CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m CONFIG_NETFILTER_XT_MATCH_MARK=m CONFIG_NETFILTER_XT_MATCH_POLICY=m CONFIG_NETFILTER_XT_MATCH_STATE=m +# CONFIG_IP_VS is not set # # IP: Netfilter Configuration # +CONFIG_NF_DEFRAG_IPV4=m CONFIG_NF_CONNTRACK_IPV4=m CONFIG_NF_CONNTRACK_PROC_COMPAT=y CONFIG_IP_NF_IPTABLES=m @@ -339,16 +376,20 @@ CONFIG_IP_NF_MANGLE=m CONFIG_NF_CONNTRACK_IPV6=m CONFIG_IP6_NF_IPTABLES=m CONFIG_IP6_NF_MATCH_IPV6HEADER=m -CONFIG_IP6_NF_FILTER=m CONFIG_IP6_NF_TARGET_LOG=m +CONFIG_IP6_NF_FILTER=m CONFIG_IP6_NF_TARGET_REJECT=m CONFIG_IP6_NF_MANGLE=m # CONFIG_IP_DCCP is not set # CONFIG_IP_SCTP is not set +# CONFIG_RDS is not set # CONFIG_TIPC is not set # CONFIG_ATM is not set +CONFIG_STP=m CONFIG_BRIDGE=m +# CONFIG_NET_DSA is not set CONFIG_VLAN_8021Q=m +# CONFIG_VLAN_8021Q_GVRP is not set # CONFIG_DECNET is not set CONFIG_LLC=m # CONFIG_LLC2 is not set @@ -358,26 +399,33 @@ CONFIG_LLC=m # CONFIG_LAPB is not set # CONFIG_ECONET is not set # CONFIG_WAN_ROUTER is not set +# CONFIG_PHONET is not set +# CONFIG_IEEE802154 is not set # CONFIG_NET_SCHED is not set +# CONFIG_DCB is not set # # Network testing # # CONFIG_NET_PKTGEN is not set # CONFIG_NET_TCPPROBE is not set +# CONFIG_NET_DROP_MONITOR is not set # CONFIG_HAMRADIO is not set # CONFIG_CAN is not set # CONFIG_IRDA is not set # CONFIG_BT is not set # CONFIG_AF_RXRPC is not set +CONFIG_WIRELESS=y +# CONFIG_CFG80211 is 
not set +CONFIG_CFG80211_DEFAULT_PS_VALUE=0 +# CONFIG_WIRELESS_OLD_REGULATORY is not set +# CONFIG_WIRELESS_EXT is not set +# CONFIG_LIB80211 is not set # -# Wireless +# CFG80211 needs to be enabled for MAC80211 # -# CONFIG_CFG80211 is not set -# CONFIG_WIRELESS_EXT is not set -# CONFIG_MAC80211 is not set -# CONFIG_IEEE80211 is not set +# CONFIG_WIMAX is not set # CONFIG_RFKILL is not set # CONFIG_NET_9P is not set @@ -389,6 +437,7 @@ CONFIG_LLC=m # Generic Driver Options # CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" +# CONFIG_DEVTMPFS is not set CONFIG_STANDALONE=y # CONFIG_PREVENT_FIRMWARE_BUILD is not set # CONFIG_FW_LOADER is not set @@ -398,10 +447,12 @@ CONFIG_STANDALONE=y # CONFIG_CONNECTOR is not set CONFIG_MTD=y # CONFIG_MTD_DEBUG is not set +# CONFIG_MTD_TESTS is not set # CONFIG_MTD_CONCAT is not set CONFIG_MTD_PARTITIONS=y # CONFIG_MTD_REDBOOT_PARTS is not set CONFIG_MTD_CMDLINE_PARTS=y +# CONFIG_MTD_AR7_PARTS is not set # # User Modules And Translation Layers @@ -446,16 +497,17 @@ CONFIG_MTD_CFI_UTIL=y # # CONFIG_MTD_COMPLEX_MAPPINGS is not set CONFIG_MTD_PHYSMAP=y -CONFIG_MTD_PHYSMAP_START=0x80000000 -CONFIG_MTD_PHYSMAP_LEN=0x0 -CONFIG_MTD_PHYSMAP_BANKWIDTH=2 +# CONFIG_MTD_PHYSMAP_COMPAT is not set # CONFIG_MTD_PLATRAM is not set # # Self-contained MTD device drivers # CONFIG_MTD_DATAFLASH=y +# CONFIG_MTD_DATAFLASH_WRITE_VERIFY is not set +# CONFIG_MTD_DATAFLASH_OTP is not set # CONFIG_MTD_M25P80 is not set +# CONFIG_MTD_SST25L is not set # CONFIG_MTD_SLRAM is not set # CONFIG_MTD_PHRAM is not set # CONFIG_MTD_MTDRAM is not set @@ -471,6 +523,11 @@ CONFIG_MTD_DATAFLASH=y # CONFIG_MTD_ONENAND is not set # +# LPDDR flash memory drivers +# +# CONFIG_MTD_LPDDR is not set + +# # UBI - Unsorted block images # CONFIG_MTD_UBI=y @@ -499,10 +556,20 @@ CONFIG_MISC_DEVICES=y CONFIG_ATMEL_TCLIB=y CONFIG_ATMEL_TCB_CLKSRC=y CONFIG_ATMEL_TCB_CLKSRC_BLOCK=0 -# CONFIG_EEPROM_93CX6 is not set +# CONFIG_ICS932S401 is not set # CONFIG_ATMEL_SSC is not set # CONFIG_ENCLOSURE_SERVICES is not set -# CONFIG_HAVE_IDE is not set +# CONFIG_ISL29003 is not set +# CONFIG_C2PORT is not set + +# +# EEPROM support +# +# CONFIG_EEPROM_AT24 is not set +# CONFIG_EEPROM_AT25 is not set +# CONFIG_EEPROM_LEGACY is not set +# CONFIG_EEPROM_MAX6875 is not set +# CONFIG_EEPROM_93CX6 is not set # # SCSI device support @@ -514,7 +581,6 @@ CONFIG_ATMEL_TCB_CLKSRC_BLOCK=0 # CONFIG_ATA is not set # CONFIG_MD is not set CONFIG_NETDEVICES=y -# CONFIG_NETDEVICES_MULTIQUEUE is not set # CONFIG_DUMMY is not set # CONFIG_BONDING is not set # CONFIG_MACVLAN is not set @@ -536,25 +602,37 @@ CONFIG_PHYLIB=y # CONFIG_BROADCOM_PHY is not set # CONFIG_ICPLUS_PHY is not set # CONFIG_REALTEK_PHY is not set +# CONFIG_NATIONAL_PHY is not set +# CONFIG_STE10XP is not set +# CONFIG_LSI_ET1011C_PHY is not set # CONFIG_FIXED_PHY is not set # CONFIG_MDIO_BITBANG is not set CONFIG_NET_ETHERNET=y # CONFIG_MII is not set CONFIG_MACB=y # CONFIG_ENC28J60 is not set +# CONFIG_ETHOC is not set +# CONFIG_DNET is not set # CONFIG_IBM_NEW_EMAC_ZMII is not set # CONFIG_IBM_NEW_EMAC_RGMII is not set # CONFIG_IBM_NEW_EMAC_TAH is not set # CONFIG_IBM_NEW_EMAC_EMAC4 is not set +# CONFIG_IBM_NEW_EMAC_NO_FLOW_CTRL is not set +# CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set +# CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set # CONFIG_B44 is not set +# CONFIG_KS8842 is not set +# CONFIG_KS8851 is not set +# CONFIG_KS8851_MLL is not set # CONFIG_NETDEV_1000 is not set # CONFIG_NETDEV_10000 is not set +CONFIG_WLAN=y +# CONFIG_WLAN_PRE80211 is not set +# 
CONFIG_WLAN_80211 is not set # -# Wireless LAN +# Enable WiMAX (Networking options) to see the WiMAX drivers # -# CONFIG_WLAN_PRE80211 is not set -# CONFIG_WLAN_80211 is not set # CONFIG_WAN is not set CONFIG_PPP=m # CONFIG_PPP_MULTILINK is not set @@ -598,15 +676,30 @@ CONFIG_INPUT_EVDEV=m # CONFIG_INPUT_TABLET is not set CONFIG_INPUT_TOUCHSCREEN=y # CONFIG_TOUCHSCREEN_ADS7846 is not set +# CONFIG_TOUCHSCREEN_AD7877 is not set +# CONFIG_TOUCHSCREEN_AD7879_I2C is not set +# CONFIG_TOUCHSCREEN_AD7879_SPI is not set +# CONFIG_TOUCHSCREEN_AD7879 is not set +# CONFIG_TOUCHSCREEN_EETI is not set # CONFIG_TOUCHSCREEN_FUJITSU is not set # CONFIG_TOUCHSCREEN_GUNZE is not set # CONFIG_TOUCHSCREEN_ELO is not set +# CONFIG_TOUCHSCREEN_WACOM_W8001 is not set +# CONFIG_TOUCHSCREEN_MCS5000 is not set # CONFIG_TOUCHSCREEN_MTOUCH is not set +# CONFIG_TOUCHSCREEN_INEXIO is not set # CONFIG_TOUCHSCREEN_MK712 is not set # CONFIG_TOUCHSCREEN_PENMOUNT is not set # CONFIG_TOUCHSCREEN_TOUCHRIGHT is not set # CONFIG_TOUCHSCREEN_TOUCHWIN is not set -# CONFIG_TOUCHSCREEN_UCB1400 is not set +CONFIG_TOUCHSCREEN_WM97XX=m +CONFIG_TOUCHSCREEN_WM9705=y +CONFIG_TOUCHSCREEN_WM9712=y +CONFIG_TOUCHSCREEN_WM9713=y +# CONFIG_TOUCHSCREEN_WM97XX_ATMEL is not set +# CONFIG_TOUCHSCREEN_TOUCHIT213 is not set +# CONFIG_TOUCHSCREEN_TSC2007 is not set +# CONFIG_TOUCHSCREEN_W90X900 is not set # CONFIG_INPUT_MISC is not set # @@ -619,9 +712,11 @@ CONFIG_INPUT_TOUCHSCREEN=y # Character devices # CONFIG_VT=y +CONFIG_CONSOLE_TRANSLATIONS=y CONFIG_VT_CONSOLE=y CONFIG_HW_CONSOLE=y # CONFIG_VT_HW_CONSOLE_BINDING is not set +CONFIG_DEVKMEM=y # CONFIG_SERIAL_NONSTANDARD is not set # @@ -636,9 +731,11 @@ CONFIG_SERIAL_ATMEL=y CONFIG_SERIAL_ATMEL_CONSOLE=y CONFIG_SERIAL_ATMEL_PDC=y # CONFIG_SERIAL_ATMEL_TTYAT is not set +# CONFIG_SERIAL_MAX3100 is not set CONFIG_SERIAL_CORE=y CONFIG_SERIAL_CORE_CONSOLE=y CONFIG_UNIX98_PTYS=y +# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set # CONFIG_LEGACY_PTYS is not set # CONFIG_IPMI_HANDLER is not set # CONFIG_HW_RANDOM is not set @@ -647,45 +744,44 @@ CONFIG_UNIX98_PTYS=y # CONFIG_TCG_TPM is not set CONFIG_I2C=m CONFIG_I2C_BOARDINFO=y +CONFIG_I2C_COMPAT=y CONFIG_I2C_CHARDEV=m +CONFIG_I2C_HELPER_AUTO=y +CONFIG_I2C_ALGOBIT=m # -# I2C Algorithms +# I2C Hardware Bus support # -CONFIG_I2C_ALGOBIT=m -# CONFIG_I2C_ALGOPCF is not set -# CONFIG_I2C_ALGOPCA is not set # -# I2C Hardware Bus support +# I2C system bus drivers (mostly embedded / system-on-chip) # -CONFIG_I2C_ATMELTWI=m +# CONFIG_I2C_DESIGNWARE is not set CONFIG_I2C_GPIO=m # CONFIG_I2C_OCORES is not set -# CONFIG_I2C_PARPORT_LIGHT is not set # CONFIG_I2C_SIMTEC is not set + +# +# External I2C/SMBus adapter drivers +# +# CONFIG_I2C_PARPORT_LIGHT is not set # CONFIG_I2C_TAOS_EVM is not set + +# +# Other I2C/SMBus bus drivers +# +# CONFIG_I2C_PCA_PLATFORM is not set # CONFIG_I2C_STUB is not set # # Miscellaneous I2C Chip support # # CONFIG_DS1682 is not set -# CONFIG_EEPROM_LEGACY is not set -# CONFIG_SENSORS_PCF8574 is not set -# CONFIG_PCF8575 is not set -# CONFIG_SENSORS_PCF8591 is not set -# CONFIG_TPS65010 is not set -# CONFIG_SENSORS_MAX6875 is not set # CONFIG_SENSORS_TSL2550 is not set # CONFIG_I2C_DEBUG_CORE is not set # CONFIG_I2C_DEBUG_ALGO is not set # CONFIG_I2C_DEBUG_BUS is not set # CONFIG_I2C_DEBUG_CHIP is not set - -# -# SPI support -# CONFIG_SPI=y # CONFIG_SPI_DEBUG is not set CONFIG_SPI_MASTER=y @@ -695,30 +791,48 @@ CONFIG_SPI_MASTER=y # CONFIG_SPI_ATMEL=y # CONFIG_SPI_BITBANG is not set +# CONFIG_SPI_GPIO is not set # # SPI Protocol Masters # 
-# CONFIG_EEPROM_AT25 is not set CONFIG_SPI_SPIDEV=m # CONFIG_SPI_TLE62X0 is not set -CONFIG_HAVE_GPIO_LIB=y # -# GPIO Support +# PPS support # +# CONFIG_PPS is not set +CONFIG_ARCH_REQUIRE_GPIOLIB=y +CONFIG_GPIOLIB=y # CONFIG_DEBUG_GPIO is not set +# CONFIG_GPIO_SYSFS is not set + +# +# Memory mapped GPIO expanders: +# # # I2C GPIO expanders: # +# CONFIG_GPIO_MAX732X is not set # CONFIG_GPIO_PCA953X is not set # CONFIG_GPIO_PCF857X is not set # +# PCI GPIO expanders: +# + +# # SPI GPIO expanders: # +# CONFIG_GPIO_MAX7301 is not set # CONFIG_GPIO_MCP23S08 is not set +# CONFIG_GPIO_MC33880 is not set + +# +# AC97 GPIO expanders: +# # CONFIG_W1 is not set # CONFIG_POWER_SUPPLY is not set # CONFIG_HWMON is not set @@ -731,24 +845,31 @@ CONFIG_WATCHDOG=y # # CONFIG_SOFT_WATCHDOG is not set CONFIG_AT32AP700X_WDT=y +CONFIG_SSB_POSSIBLE=y # # Sonics Silicon Backplane # -CONFIG_SSB_POSSIBLE=y # CONFIG_SSB is not set # # Multifunction device drivers # +# CONFIG_MFD_CORE is not set # CONFIG_MFD_SM501 is not set - -# -# Multimedia devices -# -# CONFIG_VIDEO_DEV is not set -# CONFIG_DVB_CORE is not set -# CONFIG_DAB is not set +# CONFIG_HTC_PASIC3 is not set +# CONFIG_UCB1400_CORE is not set +# CONFIG_TPS65010 is not set +# CONFIG_MFD_TMIO is not set +# CONFIG_MFD_WM8400 is not set +# CONFIG_MFD_WM831X is not set +# CONFIG_MFD_WM8350_I2C is not set +# CONFIG_MFD_PCF50633 is not set +# CONFIG_MFD_MC13783 is not set +# CONFIG_AB3100_CORE is not set +# CONFIG_EZX_PCAP is not set +# CONFIG_REGULATOR is not set +# CONFIG_MEDIA_SUPPORT is not set # # Graphics support @@ -758,6 +879,7 @@ CONFIG_SSB_POSSIBLE=y CONFIG_FB=y # CONFIG_FIRMWARE_EDID is not set # CONFIG_FB_DDC is not set +# CONFIG_FB_BOOT_VESA_SUPPORT is not set CONFIG_FB_CFB_FILLRECT=y CONFIG_FB_CFB_COPYAREA=y CONFIG_FB_CFB_IMAGEBLIT=y @@ -765,8 +887,8 @@ CONFIG_FB_CFB_IMAGEBLIT=y # CONFIG_FB_SYS_FILLRECT is not set # CONFIG_FB_SYS_COPYAREA is not set # CONFIG_FB_SYS_IMAGEBLIT is not set +# CONFIG_FB_FOREIGN_ENDIAN is not set # CONFIG_FB_SYS_FOPS is not set -CONFIG_FB_DEFERRED_IO=y # CONFIG_FB_SVGALIB is not set # CONFIG_FB_MACMODES is not set # CONFIG_FB_BACKLIGHT is not set @@ -779,6 +901,9 @@ CONFIG_FB_DEFERRED_IO=y # CONFIG_FB_S1D13XXX is not set CONFIG_FB_ATMEL=y # CONFIG_FB_VIRTUAL is not set +# CONFIG_FB_METRONOME is not set +# CONFIG_FB_MB862XX is not set +# CONFIG_FB_BROADSHEET is not set # CONFIG_BACKLIGHT_LCD_SUPPORT is not set # @@ -792,119 +917,124 @@ CONFIG_FB_ATMEL=y CONFIG_DUMMY_CONSOLE=y # CONFIG_FRAMEBUFFER_CONSOLE is not set # CONFIG_LOGO is not set - -# -# Sound -# CONFIG_SOUND=y - -# -# Advanced Linux Sound Architecture -# +CONFIG_SOUND_OSS_CORE=y +CONFIG_SOUND_OSS_CORE_PRECLAIM=y CONFIG_SND=y -CONFIG_SND_TIMER=m +CONFIG_SND_TIMER=y CONFIG_SND_PCM=m # CONFIG_SND_SEQUENCER is not set CONFIG_SND_OSSEMUL=y CONFIG_SND_MIXER_OSS=m CONFIG_SND_PCM_OSS=m CONFIG_SND_PCM_OSS_PLUGINS=y +CONFIG_SND_HRTIMER=y # CONFIG_SND_DYNAMIC_MINORS is not set # CONFIG_SND_SUPPORT_OLD_API is not set CONFIG_SND_VERBOSE_PROCFS=y # CONFIG_SND_VERBOSE_PRINTK is not set # CONFIG_SND_DEBUG is not set - -# -# Generic devices -# +CONFIG_SND_VMASTER=y +# CONFIG_SND_RAWMIDI_SEQ is not set +# CONFIG_SND_OPL3_LIB_SEQ is not set +# CONFIG_SND_OPL4_LIB_SEQ is not set +# CONFIG_SND_SBAWE_SEQ is not set +# CONFIG_SND_EMU10K1_SEQ is not set CONFIG_SND_AC97_CODEC=m -# CONFIG_SND_DUMMY is not set -# CONFIG_SND_MTPAV is not set -# CONFIG_SND_SERIAL_U16550 is not set -# CONFIG_SND_MPU401 is not set +# CONFIG_SND_DRIVERS is not set # -# AVR32 devices -# 
-CONFIG_SND_ATMEL_AC97=m - -# -# SPI devices -# - -# -# System on Chip audio support +# Atmel devices (AVR32 and AT91) # +# CONFIG_SND_ATMEL_ABDAC is not set +CONFIG_SND_ATMEL_AC97C=m +# CONFIG_SND_SPI is not set # CONFIG_SND_SOC is not set - -# -# SoC Audio support for SuperH -# - -# -# ALSA SoC audio for Freescale SOCs -# - -# -# Open Sound System -# # CONFIG_SOUND_PRIME is not set CONFIG_AC97_BUS=m CONFIG_HID_SUPPORT=y CONFIG_HID=y -# CONFIG_HID_DEBUG is not set # CONFIG_HIDRAW is not set +# CONFIG_HID_PID is not set + +# +# Special HID drivers +# CONFIG_USB_SUPPORT=y # CONFIG_USB_ARCH_HAS_HCD is not set # CONFIG_USB_ARCH_HAS_OHCI is not set # CONFIG_USB_ARCH_HAS_EHCI is not set +# CONFIG_USB_OTG_WHITELIST is not set +# CONFIG_USB_OTG_BLACKLIST_HUB is not set +# CONFIG_USB_GADGET_MUSB_HDRC is not set # -# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support' +# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may # CONFIG_USB_GADGET=y # CONFIG_USB_GADGET_DEBUG is not set # CONFIG_USB_GADGET_DEBUG_FILES is not set +# CONFIG_USB_GADGET_DEBUG_FS is not set +CONFIG_USB_GADGET_VBUS_DRAW=350 CONFIG_USB_GADGET_SELECTED=y -# CONFIG_USB_GADGET_AMD5536UDC is not set +# CONFIG_USB_GADGET_AT91 is not set CONFIG_USB_GADGET_ATMEL_USBA=y CONFIG_USB_ATMEL_USBA=y # CONFIG_USB_GADGET_FSL_USB2 is not set -# CONFIG_USB_GADGET_NET2280 is not set -# CONFIG_USB_GADGET_PXA2XX is not set -# CONFIG_USB_GADGET_M66592 is not set -# CONFIG_USB_GADGET_GOKU is not set # CONFIG_USB_GADGET_LH7A40X is not set # CONFIG_USB_GADGET_OMAP is not set +# CONFIG_USB_GADGET_PXA25X is not set +# CONFIG_USB_GADGET_R8A66597 is not set +# CONFIG_USB_GADGET_PXA27X is not set +# CONFIG_USB_GADGET_S3C_HSOTG is not set +# CONFIG_USB_GADGET_IMX is not set # CONFIG_USB_GADGET_S3C2410 is not set -# CONFIG_USB_GADGET_AT91 is not set +# CONFIG_USB_GADGET_M66592 is not set +# CONFIG_USB_GADGET_AMD5536UDC is not set +# CONFIG_USB_GADGET_FSL_QE is not set +# CONFIG_USB_GADGET_CI13XXX is not set +# CONFIG_USB_GADGET_NET2280 is not set +# CONFIG_USB_GADGET_GOKU is not set +# CONFIG_USB_GADGET_LANGWELL is not set # CONFIG_USB_GADGET_DUMMY_HCD is not set CONFIG_USB_GADGET_DUALSPEED=y CONFIG_USB_ZERO=m +# CONFIG_USB_AUDIO is not set CONFIG_USB_ETH=m CONFIG_USB_ETH_RNDIS=y +# CONFIG_USB_ETH_EEM is not set CONFIG_USB_GADGETFS=m CONFIG_USB_FILE_STORAGE=m # CONFIG_USB_FILE_STORAGE_TEST is not set CONFIG_USB_G_SERIAL=m # CONFIG_USB_MIDI_GADGET is not set # CONFIG_USB_G_PRINTER is not set +CONFIG_USB_CDC_COMPOSITE=m + +# +# OTG and related infrastructure +# +# CONFIG_USB_GPIO_VBUS is not set +# CONFIG_NOP_USB_XCEIV is not set CONFIG_MMC=y # CONFIG_MMC_DEBUG is not set # CONFIG_MMC_UNSAFE_RESUME is not set # -# MMC/SD Card Drivers +# MMC/SD/SDIO Card Drivers # CONFIG_MMC_BLOCK=y CONFIG_MMC_BLOCK_BOUNCE=y # CONFIG_SDIO_UART is not set +# CONFIG_MMC_TEST is not set # -# MMC/SD Host Controller Drivers +# MMC/SD/SDIO Host Controller Drivers # +# CONFIG_MMC_SDHCI is not set +# CONFIG_MMC_AT91 is not set CONFIG_MMC_ATMELMCI=y +# CONFIG_MMC_ATMELMCI_DMA is not set # CONFIG_MMC_SPI is not set # CONFIG_MEMSTICK is not set CONFIG_NEW_LEDS=y @@ -913,7 +1043,13 @@ CONFIG_LEDS_CLASS=y # # LED drivers # +# CONFIG_LEDS_PCA9532 is not set CONFIG_LEDS_GPIO=y +CONFIG_LEDS_GPIO_PLATFORM=y +# CONFIG_LEDS_LP3944 is not set +# CONFIG_LEDS_PCA955X is not set +# CONFIG_LEDS_DAC124S085 is not set +# CONFIG_LEDS_BD2802 is not set # # LED Triggers @@ -921,6 +1057,14 @@ CONFIG_LEDS_GPIO=y CONFIG_LEDS_TRIGGERS=y CONFIG_LEDS_TRIGGER_TIMER=y CONFIG_LEDS_TRIGGER_HEARTBEAT=y +# 
CONFIG_LEDS_TRIGGER_BACKLIGHT is not set +# CONFIG_LEDS_TRIGGER_GPIO is not set +# CONFIG_LEDS_TRIGGER_DEFAULT_ON is not set + +# +# iptables trigger is under Netfilter config (LED target) +# +# CONFIG_ACCESSIBILITY is not set CONFIG_RTC_LIB=y CONFIG_RTC_CLASS=y CONFIG_RTC_HCTOSYS=y @@ -950,51 +1094,84 @@ CONFIG_RTC_INTF_DEV=y # CONFIG_RTC_DRV_PCF8583 is not set # CONFIG_RTC_DRV_M41T80 is not set # CONFIG_RTC_DRV_S35390A is not set +# CONFIG_RTC_DRV_FM3130 is not set +# CONFIG_RTC_DRV_RX8581 is not set +# CONFIG_RTC_DRV_RX8025 is not set # # SPI RTC drivers # +# CONFIG_RTC_DRV_M41T94 is not set +# CONFIG_RTC_DRV_DS1305 is not set +# CONFIG_RTC_DRV_DS1390 is not set # CONFIG_RTC_DRV_MAX6902 is not set # CONFIG_RTC_DRV_R9701 is not set # CONFIG_RTC_DRV_RS5C348 is not set +# CONFIG_RTC_DRV_DS3234 is not set +# CONFIG_RTC_DRV_PCF2123 is not set # # Platform RTC drivers # +# CONFIG_RTC_DRV_DS1286 is not set # CONFIG_RTC_DRV_DS1511 is not set # CONFIG_RTC_DRV_DS1553 is not set # CONFIG_RTC_DRV_DS1742 is not set # CONFIG_RTC_DRV_STK17TA8 is not set # CONFIG_RTC_DRV_M48T86 is not set +# CONFIG_RTC_DRV_M48T35 is not set # CONFIG_RTC_DRV_M48T59 is not set +# CONFIG_RTC_DRV_BQ4802 is not set # CONFIG_RTC_DRV_V3020 is not set # # on-CPU RTC drivers # CONFIG_RTC_DRV_AT32AP700X=y +CONFIG_DMADEVICES=y # -# Userspace I/O +# DMA Devices # +CONFIG_DW_DMAC=y +CONFIG_DMA_ENGINE=y + +# +# DMA Clients +# +# CONFIG_NET_DMA is not set +# CONFIG_ASYNC_TX_DMA is not set +# CONFIG_DMATEST is not set +# CONFIG_AUXDISPLAY is not set # CONFIG_UIO is not set # +# TI VLYNQ +# +# CONFIG_STAGING is not set + +# # File systems # CONFIG_EXT2_FS=y # CONFIG_EXT2_FS_XATTR is not set # CONFIG_EXT2_FS_XIP is not set CONFIG_EXT3_FS=y +# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set # CONFIG_EXT3_FS_XATTR is not set -# CONFIG_EXT4DEV_FS is not set +# CONFIG_EXT4_FS is not set CONFIG_JBD=y +# CONFIG_JBD_DEBUG is not set # CONFIG_REISERFS_FS is not set # CONFIG_JFS_FS is not set # CONFIG_FS_POSIX_ACL is not set # CONFIG_XFS_FS is not set # CONFIG_GFS2_FS is not set # CONFIG_OCFS2_FS is not set +# CONFIG_BTRFS_FS is not set +# CONFIG_NILFS2_FS is not set +CONFIG_FILE_LOCKING=y +CONFIG_FSNOTIFY=y # CONFIG_DNOTIFY is not set CONFIG_INOTIFY=y CONFIG_INOTIFY_USER=y @@ -1002,6 +1179,12 @@ CONFIG_INOTIFY_USER=y # CONFIG_AUTOFS_FS is not set # CONFIG_AUTOFS4_FS is not set CONFIG_FUSE_FS=m +# CONFIG_CUSE is not set + +# +# Caches +# +# CONFIG_FSCACHE is not set # # CD-ROM/DVD Filesystems @@ -1025,15 +1208,13 @@ CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1" CONFIG_PROC_FS=y # CONFIG_PROC_KCORE is not set CONFIG_PROC_SYSCTL=y +CONFIG_PROC_PAGE_MONITOR=y CONFIG_SYSFS=y CONFIG_TMPFS=y # CONFIG_TMPFS_POSIX_ACL is not set # CONFIG_HUGETLB_PAGE is not set CONFIG_CONFIGFS_FS=y - -# -# Miscellaneous filesystems -# +CONFIG_MISC_FILESYSTEMS=y # CONFIG_ADFS_FS is not set # CONFIG_AFFS_FS is not set # CONFIG_HFS_FS is not set @@ -1059,8 +1240,10 @@ CONFIG_UBIFS_FS_LZO=y CONFIG_UBIFS_FS_ZLIB=y # CONFIG_UBIFS_FS_DEBUG is not set # CONFIG_CRAMFS is not set +# CONFIG_SQUASHFS is not set # CONFIG_VXFS_FS is not set # CONFIG_MINIX_FS is not set +# CONFIG_OMFS_FS is not set # CONFIG_HPFS_FS is not set # CONFIG_QNX4FS_FS is not set # CONFIG_ROMFS_FS is not set @@ -1071,19 +1254,16 @@ CONFIG_NFS_FS=y CONFIG_NFS_V3=y # CONFIG_NFS_V3_ACL is not set # CONFIG_NFS_V4 is not set -# CONFIG_NFS_DIRECTIO is not set +CONFIG_ROOT_NFS=y CONFIG_NFSD=m CONFIG_NFSD_V3=y # CONFIG_NFSD_V3_ACL is not set # CONFIG_NFSD_V4 is not set -CONFIG_NFSD_TCP=y -CONFIG_ROOT_NFS=y CONFIG_LOCKD=y 
CONFIG_LOCKD_V4=y CONFIG_EXPORTFS=m CONFIG_NFS_COMMON=y CONFIG_SUNRPC=y -# CONFIG_SUNRPC_BIND34 is not set # CONFIG_RPCSEC_GSS_KRB5 is not set # CONFIG_RPCSEC_GSS_SPKM3 is not set CONFIG_SMB_FS=m @@ -1151,16 +1331,24 @@ CONFIG_NLS_UTF8=m # CONFIG_PRINTK_TIME is not set CONFIG_ENABLE_WARN_DEPRECATED=y CONFIG_ENABLE_MUST_CHECK=y +CONFIG_FRAME_WARN=1024 CONFIG_MAGIC_SYSRQ=y +# CONFIG_STRIP_ASM_SYMS is not set # CONFIG_UNUSED_SYMBOLS is not set -# CONFIG_DEBUG_FS is not set +CONFIG_DEBUG_FS=y # CONFIG_HEADERS_CHECK is not set CONFIG_DEBUG_KERNEL=y # CONFIG_DEBUG_SHIRQ is not set CONFIG_DETECT_SOFTLOCKUP=y +# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set +CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0 +CONFIG_DETECT_HUNG_TASK=y +# CONFIG_BOOTPARAM_HUNG_TASK_PANIC is not set +CONFIG_BOOTPARAM_HUNG_TASK_PANIC_VALUE=0 CONFIG_SCHED_DEBUG=y # CONFIG_SCHEDSTATS is not set # CONFIG_TIMER_STATS is not set +# CONFIG_DEBUG_OBJECTS is not set # CONFIG_SLUB_DEBUG_ON is not set # CONFIG_SLUB_STATS is not set # CONFIG_DEBUG_RT_MUTEXES is not set @@ -1172,19 +1360,48 @@ CONFIG_SCHED_DEBUG=y # CONFIG_LOCK_STAT is not set # CONFIG_DEBUG_SPINLOCK_SLEEP is not set # CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set +CONFIG_STACKTRACE=y # CONFIG_DEBUG_KOBJECT is not set CONFIG_DEBUG_BUGVERBOSE=y # CONFIG_DEBUG_INFO is not set # CONFIG_DEBUG_VM is not set +# CONFIG_DEBUG_WRITECOUNT is not set +# CONFIG_DEBUG_MEMORY_INIT is not set # CONFIG_DEBUG_LIST is not set # CONFIG_DEBUG_SG is not set +# CONFIG_DEBUG_NOTIFIERS is not set +# CONFIG_DEBUG_CREDENTIALS is not set CONFIG_FRAME_POINTER=y # CONFIG_BOOT_PRINTK_DELAY is not set # CONFIG_RCU_TORTURE_TEST is not set +# CONFIG_RCU_CPU_STALL_DETECTOR is not set # CONFIG_KPROBES_SANITY_TEST is not set # CONFIG_BACKTRACE_SELF_TEST is not set +# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set +# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set # CONFIG_LKDTM is not set # CONFIG_FAULT_INJECTION is not set +# CONFIG_PAGE_POISONING is not set +CONFIG_NOP_TRACER=y +CONFIG_RING_BUFFER=y +CONFIG_EVENT_TRACING=y +CONFIG_CONTEXT_SWITCH_TRACER=y +CONFIG_RING_BUFFER_ALLOW_SWAP=y +CONFIG_TRACING=y +CONFIG_TRACING_SUPPORT=y +CONFIG_FTRACE=y +# CONFIG_IRQSOFF_TRACER is not set +# CONFIG_SCHED_TRACER is not set +# CONFIG_ENABLE_DEFAULT_TRACERS is not set +# CONFIG_BOOT_TRACER is not set +CONFIG_BRANCH_PROFILE_NONE=y +# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set +# CONFIG_PROFILE_ALL_BRANCHES is not set +# CONFIG_KMEMTRACE is not set +# CONFIG_WORKQUEUE_TRACER is not set +# CONFIG_BLK_DEV_IO_TRACE is not set +# CONFIG_RING_BUFFER_BENCHMARK is not set +# CONFIG_DYNAMIC_DEBUG is not set # CONFIG_SAMPLES is not set # @@ -1192,63 +1409,118 @@ CONFIG_FRAME_POINTER=y # # CONFIG_KEYS is not set # CONFIG_SECURITY is not set +# CONFIG_SECURITYFS is not set # CONFIG_SECURITY_FILE_CAPABILITIES is not set CONFIG_CRYPTO=y + +# +# Crypto core or helper +# +# CONFIG_CRYPTO_FIPS is not set CONFIG_CRYPTO_ALGAPI=y +CONFIG_CRYPTO_ALGAPI2=y CONFIG_CRYPTO_AEAD=y +CONFIG_CRYPTO_AEAD2=y CONFIG_CRYPTO_BLKCIPHER=y -# CONFIG_CRYPTO_SEQIV is not set +CONFIG_CRYPTO_BLKCIPHER2=y CONFIG_CRYPTO_HASH=y +CONFIG_CRYPTO_HASH2=y +CONFIG_CRYPTO_RNG=m +CONFIG_CRYPTO_RNG2=y +CONFIG_CRYPTO_PCOMP=y CONFIG_CRYPTO_MANAGER=y +CONFIG_CRYPTO_MANAGER2=y +# CONFIG_CRYPTO_GF128MUL is not set +# CONFIG_CRYPTO_NULL is not set +CONFIG_CRYPTO_WORKQUEUE=y +# CONFIG_CRYPTO_CRYPTD is not set +CONFIG_CRYPTO_AUTHENC=y +# CONFIG_CRYPTO_TEST is not set + +# +# Authenticated Encryption with Associated Data +# +# CONFIG_CRYPTO_CCM is not set +# CONFIG_CRYPTO_GCM is not set 
+# CONFIG_CRYPTO_SEQIV is not set + +# +# Block modes +# +CONFIG_CRYPTO_CBC=y +# CONFIG_CRYPTO_CTR is not set +# CONFIG_CRYPTO_CTS is not set +CONFIG_CRYPTO_ECB=m +# CONFIG_CRYPTO_LRW is not set +# CONFIG_CRYPTO_PCBC is not set +# CONFIG_CRYPTO_XTS is not set + +# +# Hash modes +# CONFIG_CRYPTO_HMAC=y # CONFIG_CRYPTO_XCBC is not set -# CONFIG_CRYPTO_NULL is not set +# CONFIG_CRYPTO_VMAC is not set + +# +# Digest +# +# CONFIG_CRYPTO_CRC32C is not set +# CONFIG_CRYPTO_GHASH is not set # CONFIG_CRYPTO_MD4 is not set CONFIG_CRYPTO_MD5=y +# CONFIG_CRYPTO_MICHAEL_MIC is not set +# CONFIG_CRYPTO_RMD128 is not set +# CONFIG_CRYPTO_RMD160 is not set +# CONFIG_CRYPTO_RMD256 is not set +# CONFIG_CRYPTO_RMD320 is not set CONFIG_CRYPTO_SHA1=y # CONFIG_CRYPTO_SHA256 is not set # CONFIG_CRYPTO_SHA512 is not set -# CONFIG_CRYPTO_WP512 is not set # CONFIG_CRYPTO_TGR192 is not set -# CONFIG_CRYPTO_GF128MUL is not set -CONFIG_CRYPTO_ECB=m -CONFIG_CRYPTO_CBC=y -# CONFIG_CRYPTO_PCBC is not set -# CONFIG_CRYPTO_LRW is not set -# CONFIG_CRYPTO_XTS is not set -# CONFIG_CRYPTO_CTR is not set -# CONFIG_CRYPTO_GCM is not set -# CONFIG_CRYPTO_CCM is not set -# CONFIG_CRYPTO_CRYPTD is not set -CONFIG_CRYPTO_DES=y -# CONFIG_CRYPTO_FCRYPT is not set +# CONFIG_CRYPTO_WP512 is not set + +# +# Ciphers +# +CONFIG_CRYPTO_AES=m +# CONFIG_CRYPTO_ANUBIS is not set +CONFIG_CRYPTO_ARC4=m # CONFIG_CRYPTO_BLOWFISH is not set -# CONFIG_CRYPTO_TWOFISH is not set -# CONFIG_CRYPTO_SERPENT is not set -# CONFIG_CRYPTO_AES is not set +# CONFIG_CRYPTO_CAMELLIA is not set # CONFIG_CRYPTO_CAST5 is not set # CONFIG_CRYPTO_CAST6 is not set -# CONFIG_CRYPTO_TEA is not set -CONFIG_CRYPTO_ARC4=m +CONFIG_CRYPTO_DES=y +# CONFIG_CRYPTO_FCRYPT is not set # CONFIG_CRYPTO_KHAZAD is not set -# CONFIG_CRYPTO_ANUBIS is not set -# CONFIG_CRYPTO_SEED is not set # CONFIG_CRYPTO_SALSA20 is not set +# CONFIG_CRYPTO_SEED is not set +# CONFIG_CRYPTO_SERPENT is not set +# CONFIG_CRYPTO_TEA is not set +# CONFIG_CRYPTO_TWOFISH is not set + +# +# Compression +# CONFIG_CRYPTO_DEFLATE=y -# CONFIG_CRYPTO_MICHAEL_MIC is not set -# CONFIG_CRYPTO_CRC32C is not set -# CONFIG_CRYPTO_CAMELLIA is not set -# CONFIG_CRYPTO_TEST is not set -CONFIG_CRYPTO_AUTHENC=y +# CONFIG_CRYPTO_ZLIB is not set CONFIG_CRYPTO_LZO=y + +# +# Random Number Generation +# +CONFIG_CRYPTO_ANSI_CPRNG=m CONFIG_CRYPTO_HW=y +CONFIG_BINARY_PRINTF=y # # Library routines # CONFIG_BITREVERSE=y +CONFIG_GENERIC_FIND_LAST_BIT=y CONFIG_CRC_CCITT=m CONFIG_CRC16=y +# CONFIG_CRC_T10DIF is not set # CONFIG_CRC_ITU_T is not set CONFIG_CRC32=y # CONFIG_CRC7 is not set @@ -1257,8 +1529,9 @@ CONFIG_ZLIB_INFLATE=y CONFIG_ZLIB_DEFLATE=y CONFIG_LZO_COMPRESS=y CONFIG_LZO_DECOMPRESS=y +CONFIG_DECOMPRESS_GZIP=y CONFIG_GENERIC_ALLOCATOR=y -CONFIG_PLIST=y CONFIG_HAS_IOMEM=y CONFIG_HAS_IOPORT=y CONFIG_HAS_DMA=y +CONFIG_NLATTR=y diff --git a/arch/avr32/configs/atngw100mkii_defconfig b/arch/avr32/configs/atngw100mkii_defconfig new file mode 100644 index 00000000000..9b8b5b3b9c7 --- /dev/null +++ b/arch/avr32/configs/atngw100mkii_defconfig @@ -0,0 +1,1414 @@ +# +# Automatically generated make config: don't edit +# Linux kernel version: 2.6.32-rc5 +# Thu Nov 5 15:32:26 2009 +# +CONFIG_AVR32=y +CONFIG_GENERIC_GPIO=y +CONFIG_GENERIC_HARDIRQS=y +CONFIG_STACKTRACE_SUPPORT=y +CONFIG_LOCKDEP_SUPPORT=y +CONFIG_TRACE_IRQFLAGS_SUPPORT=y +CONFIG_HARDIRQS_SW_RESEND=y +CONFIG_GENERIC_IRQ_PROBE=y +CONFIG_RWSEM_GENERIC_SPINLOCK=y +CONFIG_GENERIC_TIME=y +CONFIG_GENERIC_CLOCKEVENTS=y +# CONFIG_RWSEM_XCHGADD_ALGORITHM is not set +# 
CONFIG_ARCH_HAS_ILOG2_U32 is not set +# CONFIG_ARCH_HAS_ILOG2_U64 is not set +CONFIG_GENERIC_HWEIGHT=y +CONFIG_GENERIC_CALIBRATE_DELAY=y +CONFIG_GENERIC_BUG=y +CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config" +CONFIG_CONSTRUCTORS=y + +# +# General setup +# +CONFIG_EXPERIMENTAL=y +CONFIG_BROKEN_ON_SMP=y +CONFIG_INIT_ENV_ARG_LIMIT=32 +CONFIG_LOCALVERSION="" +# CONFIG_LOCALVERSION_AUTO is not set +CONFIG_SWAP=y +CONFIG_SYSVIPC=y +CONFIG_SYSVIPC_SYSCTL=y +CONFIG_POSIX_MQUEUE=y +CONFIG_POSIX_MQUEUE_SYSCTL=y +CONFIG_BSD_PROCESS_ACCT=y +CONFIG_BSD_PROCESS_ACCT_V3=y +# CONFIG_TASKSTATS is not set +# CONFIG_AUDIT is not set + +# +# RCU Subsystem +# +CONFIG_TREE_RCU=y +# CONFIG_TREE_PREEMPT_RCU is not set +# CONFIG_RCU_TRACE is not set +CONFIG_RCU_FANOUT=32 +# CONFIG_RCU_FANOUT_EXACT is not set +# CONFIG_TREE_RCU_TRACE is not set +# CONFIG_IKCONFIG is not set +CONFIG_LOG_BUF_SHIFT=14 +# CONFIG_GROUP_SCHED is not set +# CONFIG_CGROUPS is not set +CONFIG_SYSFS_DEPRECATED=y +CONFIG_SYSFS_DEPRECATED_V2=y +# CONFIG_RELAY is not set +# CONFIG_NAMESPACES is not set +CONFIG_BLK_DEV_INITRD=y +CONFIG_INITRAMFS_SOURCE="" +CONFIG_RD_GZIP=y +# CONFIG_RD_BZIP2 is not set +# CONFIG_RD_LZMA is not set +CONFIG_CC_OPTIMIZE_FOR_SIZE=y +CONFIG_SYSCTL=y +CONFIG_ANON_INODES=y +CONFIG_EMBEDDED=y +# CONFIG_SYSCTL_SYSCALL is not set +CONFIG_KALLSYMS=y +# CONFIG_KALLSYMS_ALL is not set +# CONFIG_KALLSYMS_EXTRA_PASS is not set +CONFIG_HOTPLUG=y +CONFIG_PRINTK=y +CONFIG_BUG=y +CONFIG_ELF_CORE=y +# CONFIG_BASE_FULL is not set +CONFIG_FUTEX=y +CONFIG_EPOLL=y +CONFIG_SIGNALFD=y +CONFIG_TIMERFD=y +CONFIG_EVENTFD=y +CONFIG_SHMEM=y +CONFIG_AIO=y + +# +# Kernel Performance Events And Counters +# +CONFIG_VM_EVENT_COUNTERS=y +CONFIG_SLUB_DEBUG=y +# CONFIG_COMPAT_BRK is not set +# CONFIG_SLAB is not set +CONFIG_SLUB=y +# CONFIG_SLOB is not set +CONFIG_PROFILING=y +CONFIG_TRACEPOINTS=y +CONFIG_OPROFILE=m +CONFIG_HAVE_OPROFILE=y +CONFIG_KPROBES=y +CONFIG_HAVE_KPROBES=y +CONFIG_HAVE_CLK=y + +# +# GCOV-based kernel profiling +# +# CONFIG_GCOV_KERNEL is not set +CONFIG_SLOW_WORK=y +# CONFIG_HAVE_GENERIC_DMA_COHERENT is not set +CONFIG_SLABINFO=y +CONFIG_RT_MUTEXES=y +CONFIG_BASE_SMALL=1 +CONFIG_MODULES=y +# CONFIG_MODULE_FORCE_LOAD is not set +CONFIG_MODULE_UNLOAD=y +CONFIG_MODULE_FORCE_UNLOAD=y +# CONFIG_MODVERSIONS is not set +# CONFIG_MODULE_SRCVERSION_ALL is not set +CONFIG_BLOCK=y +CONFIG_LBDAF=y +# CONFIG_BLK_DEV_BSG is not set +# CONFIG_BLK_DEV_INTEGRITY is not set + +# +# IO Schedulers +# +CONFIG_IOSCHED_NOOP=y +# CONFIG_IOSCHED_AS is not set +# CONFIG_IOSCHED_DEADLINE is not set +CONFIG_IOSCHED_CFQ=y +# CONFIG_DEFAULT_AS is not set +# CONFIG_DEFAULT_DEADLINE is not set +CONFIG_DEFAULT_CFQ=y +# CONFIG_DEFAULT_NOOP is not set +CONFIG_DEFAULT_IOSCHED="cfq" +CONFIG_FREEZER=y + +# +# System Type and features +# +CONFIG_TICK_ONESHOT=y +CONFIG_NO_HZ=y +CONFIG_HIGH_RES_TIMERS=y +CONFIG_GENERIC_CLOCKEVENTS_BUILD=y +CONFIG_SUBARCH_AVR32B=y +CONFIG_MMU=y +CONFIG_PERFORMANCE_COUNTERS=y +CONFIG_PLATFORM_AT32AP=y +CONFIG_CPU_AT32AP700X=y +CONFIG_CPU_AT32AP7000=y +CONFIG_BOARD_ATNGW100_COMMON=y +# CONFIG_BOARD_ATSTK1000 is not set +# CONFIG_BOARD_ATNGW100_MKI is not set +CONFIG_BOARD_ATNGW100_MKII=y +# CONFIG_BOARD_HAMMERHEAD is not set +# CONFIG_BOARD_FAVR_32 is not set +# CONFIG_BOARD_MERISC is not set +# CONFIG_BOARD_MIMC200 is not set +# CONFIG_BOARD_ATNGW100_MKII_LCD is not set +CONFIG_BOARD_ATNGW100_ADDON_NONE=y +# CONFIG_BOARD_ATNGW100_EVKLCD10X is not set +# CONFIG_BOARD_ATNGW100_MRMT is not set +CONFIG_LOADER_U_BOOT=y + +# +# 
Atmel AVR32 AP options +# +# CONFIG_AP700X_32_BIT_SMC is not set +CONFIG_AP700X_16_BIT_SMC=y +# CONFIG_AP700X_8_BIT_SMC is not set +CONFIG_LOAD_ADDRESS=0x10000000 +CONFIG_ENTRY_ADDRESS=0x90000000 +CONFIG_PHYS_OFFSET=0x10000000 +CONFIG_PREEMPT_NONE=y +# CONFIG_PREEMPT_VOLUNTARY is not set +# CONFIG_PREEMPT is not set +CONFIG_QUICKLIST=y +# CONFIG_HAVE_ARCH_BOOTMEM is not set +# CONFIG_ARCH_HAVE_MEMORY_PRESENT is not set +# CONFIG_NEED_NODE_MEMMAP_SIZE is not set +CONFIG_ARCH_FLATMEM_ENABLE=y +# CONFIG_ARCH_DISCONTIGMEM_ENABLE is not set +# CONFIG_ARCH_SPARSEMEM_ENABLE is not set +CONFIG_SELECT_MEMORY_MODEL=y +CONFIG_FLATMEM_MANUAL=y +# CONFIG_DISCONTIGMEM_MANUAL is not set +# CONFIG_SPARSEMEM_MANUAL is not set +CONFIG_FLATMEM=y +CONFIG_FLAT_NODE_MEM_MAP=y +CONFIG_PAGEFLAGS_EXTENDED=y +CONFIG_SPLIT_PTLOCK_CPUS=4 +# CONFIG_PHYS_ADDR_T_64BIT is not set +CONFIG_ZONE_DMA_FLAG=0 +CONFIG_NR_QUICK=2 +CONFIG_VIRT_TO_BUS=y +CONFIG_HAVE_MLOCK=y +CONFIG_HAVE_MLOCKED_PAGE_BIT=y +# CONFIG_KSM is not set +CONFIG_DEFAULT_MMAP_MIN_ADDR=4096 +# CONFIG_OWNERSHIP_TRACE is not set +CONFIG_NMI_DEBUGGING=y +# CONFIG_HZ_100 is not set +CONFIG_HZ_250=y +# CONFIG_HZ_300 is not set +# CONFIG_HZ_1000 is not set +CONFIG_HZ=250 +CONFIG_SCHED_HRTICK=y +CONFIG_CMDLINE="" + +# +# Power management options +# +CONFIG_PM=y +# CONFIG_PM_DEBUG is not set +CONFIG_PM_SLEEP=y +CONFIG_SUSPEND=y +CONFIG_SUSPEND_FREEZER=y +# CONFIG_PM_RUNTIME is not set +CONFIG_ARCH_SUSPEND_POSSIBLE=y + +# +# CPU Frequency scaling +# +CONFIG_CPU_FREQ=y +CONFIG_CPU_FREQ_TABLE=y +# CONFIG_CPU_FREQ_DEBUG is not set +# CONFIG_CPU_FREQ_STAT is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_POWERSAVE is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE is not set +CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y +# CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE is not set +CONFIG_CPU_FREQ_GOV_PERFORMANCE=y +# CONFIG_CPU_FREQ_GOV_POWERSAVE is not set +CONFIG_CPU_FREQ_GOV_USERSPACE=y +CONFIG_CPU_FREQ_GOV_ONDEMAND=y +# CONFIG_CPU_FREQ_GOV_CONSERVATIVE is not set +CONFIG_CPU_FREQ_AT32AP=y + +# +# Bus options +# +# CONFIG_ARCH_SUPPORTS_MSI is not set +# CONFIG_PCCARD is not set + +# +# Executable file formats +# +CONFIG_BINFMT_ELF=y +# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set +# CONFIG_HAVE_AOUT is not set +# CONFIG_BINFMT_MISC is not set +CONFIG_NET=y + +# +# Networking options +# +CONFIG_PACKET=y +CONFIG_PACKET_MMAP=y +CONFIG_UNIX=y +CONFIG_XFRM=y +CONFIG_XFRM_USER=y +# CONFIG_XFRM_SUB_POLICY is not set +# CONFIG_XFRM_MIGRATE is not set +# CONFIG_XFRM_STATISTICS is not set +CONFIG_XFRM_IPCOMP=y +CONFIG_NET_KEY=y +# CONFIG_NET_KEY_MIGRATE is not set +CONFIG_INET=y +CONFIG_IP_MULTICAST=y +CONFIG_IP_ADVANCED_ROUTER=y +CONFIG_ASK_IP_FIB_HASH=y +# CONFIG_IP_FIB_TRIE is not set +CONFIG_IP_FIB_HASH=y +# CONFIG_IP_MULTIPLE_TABLES is not set +# CONFIG_IP_ROUTE_MULTIPATH is not set +# CONFIG_IP_ROUTE_VERBOSE is not set +CONFIG_IP_PNP=y +CONFIG_IP_PNP_DHCP=y +# CONFIG_IP_PNP_BOOTP is not set +# CONFIG_IP_PNP_RARP is not set +# CONFIG_NET_IPIP is not set +# CONFIG_NET_IPGRE is not set +CONFIG_IP_MROUTE=y +CONFIG_IP_PIMSM_V1=y +# CONFIG_IP_PIMSM_V2 is not set +# CONFIG_ARPD is not set +CONFIG_SYN_COOKIES=y +CONFIG_INET_AH=y +CONFIG_INET_ESP=y +CONFIG_INET_IPCOMP=y +CONFIG_INET_XFRM_TUNNEL=y +CONFIG_INET_TUNNEL=y +CONFIG_INET_XFRM_MODE_TRANSPORT=y +CONFIG_INET_XFRM_MODE_TUNNEL=y +CONFIG_INET_XFRM_MODE_BEET=y +# CONFIG_INET_LRO is not set +CONFIG_INET_DIAG=y +CONFIG_INET_TCP_DIAG=y +# CONFIG_TCP_CONG_ADVANCED is not set 
+CONFIG_TCP_CONG_CUBIC=y +CONFIG_DEFAULT_TCP_CONG="cubic" +# CONFIG_TCP_MD5SIG is not set +CONFIG_IPV6=y +# CONFIG_IPV6_PRIVACY is not set +# CONFIG_IPV6_ROUTER_PREF is not set +# CONFIG_IPV6_OPTIMISTIC_DAD is not set +CONFIG_INET6_AH=y +CONFIG_INET6_ESP=y +CONFIG_INET6_IPCOMP=y +# CONFIG_IPV6_MIP6 is not set +CONFIG_INET6_XFRM_TUNNEL=y +CONFIG_INET6_TUNNEL=y +CONFIG_INET6_XFRM_MODE_TRANSPORT=y +CONFIG_INET6_XFRM_MODE_TUNNEL=y +CONFIG_INET6_XFRM_MODE_BEET=y +# CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION is not set +CONFIG_IPV6_SIT=y +CONFIG_IPV6_NDISC_NODETYPE=y +# CONFIG_IPV6_TUNNEL is not set +# CONFIG_IPV6_MULTIPLE_TABLES is not set +# CONFIG_IPV6_MROUTE is not set +# CONFIG_NETWORK_SECMARK is not set +CONFIG_NETFILTER=y +# CONFIG_NETFILTER_DEBUG is not set +# CONFIG_NETFILTER_ADVANCED is not set + +# +# Core Netfilter Configuration +# +CONFIG_NETFILTER_NETLINK=m +CONFIG_NETFILTER_NETLINK_LOG=m +CONFIG_NF_CONNTRACK=m +CONFIG_NF_CONNTRACK_FTP=m +CONFIG_NF_CONNTRACK_IRC=m +CONFIG_NF_CONNTRACK_SIP=m +CONFIG_NF_CT_NETLINK=m +CONFIG_NETFILTER_XTABLES=y +CONFIG_NETFILTER_XT_TARGET_MARK=m +CONFIG_NETFILTER_XT_TARGET_NFLOG=m +CONFIG_NETFILTER_XT_TARGET_TCPMSS=m +CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m +CONFIG_NETFILTER_XT_MATCH_MARK=m +CONFIG_NETFILTER_XT_MATCH_POLICY=m +CONFIG_NETFILTER_XT_MATCH_STATE=m +# CONFIG_IP_VS is not set + +# +# IP: Netfilter Configuration +# +CONFIG_NF_DEFRAG_IPV4=m +CONFIG_NF_CONNTRACK_IPV4=m +CONFIG_NF_CONNTRACK_PROC_COMPAT=y +CONFIG_IP_NF_IPTABLES=m +CONFIG_IP_NF_FILTER=m +CONFIG_IP_NF_TARGET_REJECT=m +CONFIG_IP_NF_TARGET_LOG=m +# CONFIG_IP_NF_TARGET_ULOG is not set +CONFIG_NF_NAT=m +CONFIG_NF_NAT_NEEDED=y +CONFIG_IP_NF_TARGET_MASQUERADE=m +CONFIG_NF_NAT_FTP=m +CONFIG_NF_NAT_IRC=m +# CONFIG_NF_NAT_TFTP is not set +# CONFIG_NF_NAT_AMANDA is not set +# CONFIG_NF_NAT_PPTP is not set +# CONFIG_NF_NAT_H323 is not set +CONFIG_NF_NAT_SIP=m +CONFIG_IP_NF_MANGLE=m + +# +# IPv6: Netfilter Configuration +# +CONFIG_NF_CONNTRACK_IPV6=m +CONFIG_IP6_NF_IPTABLES=m +CONFIG_IP6_NF_MATCH_IPV6HEADER=m +CONFIG_IP6_NF_TARGET_LOG=m +CONFIG_IP6_NF_FILTER=m +CONFIG_IP6_NF_TARGET_REJECT=m +CONFIG_IP6_NF_MANGLE=m +# CONFIG_IP_DCCP is not set +# CONFIG_IP_SCTP is not set +# CONFIG_RDS is not set +# CONFIG_TIPC is not set +# CONFIG_ATM is not set +CONFIG_STP=m +CONFIG_BRIDGE=m +# CONFIG_NET_DSA is not set +CONFIG_VLAN_8021Q=m +# CONFIG_VLAN_8021Q_GVRP is not set +# CONFIG_DECNET is not set +CONFIG_LLC=m +# CONFIG_LLC2 is not set +# CONFIG_IPX is not set +# CONFIG_ATALK is not set +# CONFIG_X25 is not set +# CONFIG_LAPB is not set +# CONFIG_ECONET is not set +# CONFIG_WAN_ROUTER is not set +# CONFIG_PHONET is not set +# CONFIG_IEEE802154 is not set +# CONFIG_NET_SCHED is not set +# CONFIG_DCB is not set + +# +# Network testing +# +# CONFIG_NET_PKTGEN is not set +# CONFIG_NET_TCPPROBE is not set +# CONFIG_NET_DROP_MONITOR is not set +# CONFIG_HAMRADIO is not set +# CONFIG_CAN is not set +# CONFIG_IRDA is not set +# CONFIG_BT is not set +# CONFIG_AF_RXRPC is not set +CONFIG_WIRELESS=y +# CONFIG_CFG80211 is not set +CONFIG_CFG80211_DEFAULT_PS_VALUE=0 +# CONFIG_WIRELESS_OLD_REGULATORY is not set +# CONFIG_WIRELESS_EXT is not set +# CONFIG_LIB80211 is not set + +# +# CFG80211 needs to be enabled for MAC80211 +# +# CONFIG_WIMAX is not set +# CONFIG_RFKILL is not set +# CONFIG_NET_9P is not set + +# +# Device Drivers +# + +# +# Generic Driver Options +# +CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" +# CONFIG_DEVTMPFS is not set +CONFIG_STANDALONE=y +# CONFIG_PREVENT_FIRMWARE_BUILD is not set +# 
CONFIG_FW_LOADER is not set +# CONFIG_DEBUG_DRIVER is not set +# CONFIG_DEBUG_DEVRES is not set +# CONFIG_SYS_HYPERVISOR is not set +# CONFIG_CONNECTOR is not set +CONFIG_MTD=y +# CONFIG_MTD_DEBUG is not set +# CONFIG_MTD_TESTS is not set +# CONFIG_MTD_CONCAT is not set +CONFIG_MTD_PARTITIONS=y +# CONFIG_MTD_REDBOOT_PARTS is not set +CONFIG_MTD_CMDLINE_PARTS=y +# CONFIG_MTD_AR7_PARTS is not set + +# +# User Modules And Translation Layers +# +CONFIG_MTD_CHAR=y +CONFIG_MTD_BLKDEVS=y +CONFIG_MTD_BLOCK=y +# CONFIG_FTL is not set +# CONFIG_NFTL is not set +# CONFIG_INFTL is not set +# CONFIG_RFD_FTL is not set +# CONFIG_SSFDC is not set +# CONFIG_MTD_OOPS is not set + +# +# RAM/ROM/Flash chip drivers +# +CONFIG_MTD_CFI=y +# CONFIG_MTD_JEDECPROBE is not set +CONFIG_MTD_GEN_PROBE=y +# CONFIG_MTD_CFI_ADV_OPTIONS is not set +CONFIG_MTD_MAP_BANK_WIDTH_1=y +CONFIG_MTD_MAP_BANK_WIDTH_2=y +CONFIG_MTD_MAP_BANK_WIDTH_4=y +# CONFIG_MTD_MAP_BANK_WIDTH_8 is not set +# CONFIG_MTD_MAP_BANK_WIDTH_16 is not set +# CONFIG_MTD_MAP_BANK_WIDTH_32 is not set +CONFIG_MTD_CFI_I1=y +CONFIG_MTD_CFI_I2=y +# CONFIG_MTD_CFI_I4 is not set +# CONFIG_MTD_CFI_I8 is not set +CONFIG_MTD_CFI_INTELEXT=y +# CONFIG_MTD_CFI_AMDSTD is not set +# CONFIG_MTD_CFI_STAA is not set +CONFIG_MTD_CFI_UTIL=y +# CONFIG_MTD_RAM is not set +# CONFIG_MTD_ROM is not set +# CONFIG_MTD_ABSENT is not set + +# +# Mapping drivers for chip access +# +# CONFIG_MTD_COMPLEX_MAPPINGS is not set +CONFIG_MTD_PHYSMAP=y +# CONFIG_MTD_PHYSMAP_COMPAT is not set +# CONFIG_MTD_PLATRAM is not set + +# +# Self-contained MTD device drivers +# +CONFIG_MTD_DATAFLASH=y +# CONFIG_MTD_DATAFLASH_WRITE_VERIFY is not set +# CONFIG_MTD_DATAFLASH_OTP is not set +# CONFIG_MTD_M25P80 is not set +# CONFIG_MTD_SST25L is not set +# CONFIG_MTD_SLRAM is not set +# CONFIG_MTD_PHRAM is not set +# CONFIG_MTD_MTDRAM is not set +# CONFIG_MTD_BLOCK2MTD is not set + +# +# Disk-On-Chip Device Drivers +# +# CONFIG_MTD_DOC2000 is not set +# CONFIG_MTD_DOC2001 is not set +# CONFIG_MTD_DOC2001PLUS is not set +CONFIG_MTD_NAND=y +# CONFIG_MTD_NAND_VERIFY_WRITE is not set +# CONFIG_MTD_NAND_ECC_SMC is not set +# CONFIG_MTD_NAND_MUSEUM_IDS is not set +CONFIG_MTD_NAND_IDS=y +# CONFIG_MTD_NAND_DISKONCHIP is not set +CONFIG_MTD_NAND_ATMEL=y +CONFIG_MTD_NAND_ATMEL_ECC_HW=y +# CONFIG_MTD_NAND_ATMEL_ECC_SOFT is not set +# CONFIG_MTD_NAND_ATMEL_ECC_NONE is not set +# CONFIG_MTD_NAND_NANDSIM is not set +# CONFIG_MTD_NAND_PLATFORM is not set +# CONFIG_MTD_ONENAND is not set + +# +# LPDDR flash memory drivers +# +# CONFIG_MTD_LPDDR is not set + +# +# UBI - Unsorted block images +# +CONFIG_MTD_UBI=y +CONFIG_MTD_UBI_WL_THRESHOLD=4096 +CONFIG_MTD_UBI_BEB_RESERVE=1 +# CONFIG_MTD_UBI_GLUEBI is not set + +# +# UBI debugging options +# +# CONFIG_MTD_UBI_DEBUG is not set +# CONFIG_PARPORT is not set +CONFIG_BLK_DEV=y +# CONFIG_BLK_DEV_COW_COMMON is not set +CONFIG_BLK_DEV_LOOP=m +# CONFIG_BLK_DEV_CRYPTOLOOP is not set +CONFIG_BLK_DEV_NBD=m +CONFIG_BLK_DEV_RAM=m +CONFIG_BLK_DEV_RAM_COUNT=16 +CONFIG_BLK_DEV_RAM_SIZE=4096 +# CONFIG_BLK_DEV_XIP is not set +# CONFIG_CDROM_PKTCDVD is not set +# CONFIG_ATA_OVER_ETH is not set +CONFIG_MISC_DEVICES=y +# CONFIG_ATMEL_PWM is not set +CONFIG_ATMEL_TCLIB=y +CONFIG_ATMEL_TCB_CLKSRC=y +CONFIG_ATMEL_TCB_CLKSRC_BLOCK=0 +# CONFIG_ICS932S401 is not set +# CONFIG_ATMEL_SSC is not set +# CONFIG_ENCLOSURE_SERVICES is not set +# CONFIG_ISL29003 is not set +# CONFIG_C2PORT is not set + +# +# EEPROM support +# +# CONFIG_EEPROM_AT24 is not set +# CONFIG_EEPROM_AT25 is not set +# 
CONFIG_EEPROM_LEGACY is not set +# CONFIG_EEPROM_MAX6875 is not set +# CONFIG_EEPROM_93CX6 is not set + +# +# SCSI device support +# +# CONFIG_RAID_ATTRS is not set +# CONFIG_SCSI is not set +# CONFIG_SCSI_DMA is not set +# CONFIG_SCSI_NETLINK is not set +# CONFIG_ATA is not set +# CONFIG_MD is not set +CONFIG_NETDEVICES=y +# CONFIG_DUMMY is not set +# CONFIG_BONDING is not set +# CONFIG_MACVLAN is not set +# CONFIG_EQUALIZER is not set +CONFIG_TUN=m +# CONFIG_VETH is not set +CONFIG_PHYLIB=y + +# +# MII PHY device drivers +# +# CONFIG_MARVELL_PHY is not set +# CONFIG_DAVICOM_PHY is not set +# CONFIG_QSEMI_PHY is not set +# CONFIG_LXT_PHY is not set +# CONFIG_CICADA_PHY is not set +# CONFIG_VITESSE_PHY is not set +# CONFIG_SMSC_PHY is not set +# CONFIG_BROADCOM_PHY is not set +# CONFIG_ICPLUS_PHY is not set +# CONFIG_REALTEK_PHY is not set +# CONFIG_NATIONAL_PHY is not set +# CONFIG_STE10XP is not set +# CONFIG_LSI_ET1011C_PHY is not set +# CONFIG_FIXED_PHY is not set +# CONFIG_MDIO_BITBANG is not set +CONFIG_NET_ETHERNET=y +# CONFIG_MII is not set +CONFIG_MACB=y +# CONFIG_ENC28J60 is not set +# CONFIG_ETHOC is not set +# CONFIG_DNET is not set +# CONFIG_IBM_NEW_EMAC_ZMII is not set +# CONFIG_IBM_NEW_EMAC_RGMII is not set +# CONFIG_IBM_NEW_EMAC_TAH is not set +# CONFIG_IBM_NEW_EMAC_EMAC4 is not set +# CONFIG_IBM_NEW_EMAC_NO_FLOW_CTRL is not set +# CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set +# CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set +# CONFIG_B44 is not set +# CONFIG_KS8842 is not set +# CONFIG_KS8851 is not set +# CONFIG_KS8851_MLL is not set +# CONFIG_NETDEV_1000 is not set +# CONFIG_NETDEV_10000 is not set +CONFIG_WLAN=y +# CONFIG_WLAN_PRE80211 is not set +# CONFIG_WLAN_80211 is not set + +# +# Enable WiMAX (Networking options) to see the WiMAX drivers +# +# CONFIG_WAN is not set +CONFIG_PPP=m +# CONFIG_PPP_MULTILINK is not set +CONFIG_PPP_FILTER=y +CONFIG_PPP_ASYNC=m +# CONFIG_PPP_SYNC_TTY is not set +CONFIG_PPP_DEFLATE=m +CONFIG_PPP_BSDCOMP=m +CONFIG_PPP_MPPE=m +CONFIG_PPPOE=m +# CONFIG_PPPOL2TP is not set +# CONFIG_SLIP is not set +CONFIG_SLHC=m +# CONFIG_NETCONSOLE is not set +# CONFIG_NETPOLL is not set +# CONFIG_NET_POLL_CONTROLLER is not set +# CONFIG_ISDN is not set +# CONFIG_PHONE is not set + +# +# Input device support +# +# CONFIG_INPUT is not set + +# +# Hardware I/O ports +# +# CONFIG_SERIO is not set +# CONFIG_GAMEPORT is not set + +# +# Character devices +# +# CONFIG_VT is not set +# CONFIG_DEVKMEM is not set +# CONFIG_SERIAL_NONSTANDARD is not set + +# +# Serial drivers +# +# CONFIG_SERIAL_8250 is not set + +# +# Non-8250 serial port support +# +CONFIG_SERIAL_ATMEL=y +CONFIG_SERIAL_ATMEL_CONSOLE=y +CONFIG_SERIAL_ATMEL_PDC=y +# CONFIG_SERIAL_ATMEL_TTYAT is not set +# CONFIG_SERIAL_MAX3100 is not set +CONFIG_SERIAL_CORE=y +CONFIG_SERIAL_CORE_CONSOLE=y +CONFIG_UNIX98_PTYS=y +# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set +# CONFIG_LEGACY_PTYS is not set +# CONFIG_IPMI_HANDLER is not set +# CONFIG_HW_RANDOM is not set +# CONFIG_R3964 is not set +# CONFIG_RAW_DRIVER is not set +# CONFIG_TCG_TPM is not set +CONFIG_I2C=m +CONFIG_I2C_BOARDINFO=y +CONFIG_I2C_COMPAT=y +CONFIG_I2C_CHARDEV=m +CONFIG_I2C_HELPER_AUTO=y +CONFIG_I2C_ALGOBIT=m + +# +# I2C Hardware Bus support +# + +# +# I2C system bus drivers (mostly embedded / system-on-chip) +# +# CONFIG_I2C_DESIGNWARE is not set +CONFIG_I2C_GPIO=m +# CONFIG_I2C_OCORES is not set +# CONFIG_I2C_SIMTEC is not set + +# +# External I2C/SMBus adapter drivers +# +# CONFIG_I2C_PARPORT_LIGHT is not set +# CONFIG_I2C_TAOS_EVM is not 
set + +# +# Other I2C/SMBus bus drivers +# +# CONFIG_I2C_PCA_PLATFORM is not set +# CONFIG_I2C_STUB is not set + +# +# Miscellaneous I2C Chip support +# +# CONFIG_DS1682 is not set +# CONFIG_SENSORS_TSL2550 is not set +# CONFIG_I2C_DEBUG_CORE is not set +# CONFIG_I2C_DEBUG_ALGO is not set +# CONFIG_I2C_DEBUG_BUS is not set +# CONFIG_I2C_DEBUG_CHIP is not set +CONFIG_SPI=y +# CONFIG_SPI_DEBUG is not set +CONFIG_SPI_MASTER=y + +# +# SPI Master Controller Drivers +# +CONFIG_SPI_ATMEL=y +# CONFIG_SPI_BITBANG is not set +# CONFIG_SPI_GPIO is not set + +# +# SPI Protocol Masters +# +CONFIG_SPI_SPIDEV=m +# CONFIG_SPI_TLE62X0 is not set + +# +# PPS support +# +# CONFIG_PPS is not set +CONFIG_ARCH_REQUIRE_GPIOLIB=y +CONFIG_GPIOLIB=y +# CONFIG_DEBUG_GPIO is not set +CONFIG_GPIO_SYSFS=y + +# +# Memory mapped GPIO expanders: +# + +# +# I2C GPIO expanders: +# +# CONFIG_GPIO_MAX732X is not set +# CONFIG_GPIO_PCA953X is not set +# CONFIG_GPIO_PCF857X is not set + +# +# PCI GPIO expanders: +# + +# +# SPI GPIO expanders: +# +# CONFIG_GPIO_MAX7301 is not set +# CONFIG_GPIO_MCP23S08 is not set +# CONFIG_GPIO_MC33880 is not set + +# +# AC97 GPIO expanders: +# +# CONFIG_W1 is not set +# CONFIG_POWER_SUPPLY is not set +# CONFIG_HWMON is not set +# CONFIG_THERMAL is not set +CONFIG_WATCHDOG=y +# CONFIG_WATCHDOG_NOWAYOUT is not set + +# +# Watchdog Device Drivers +# +# CONFIG_SOFT_WATCHDOG is not set +CONFIG_AT32AP700X_WDT=y +CONFIG_SSB_POSSIBLE=y + +# +# Sonics Silicon Backplane +# +# CONFIG_SSB is not set + +# +# Multifunction device drivers +# +# CONFIG_MFD_CORE is not set +# CONFIG_MFD_SM501 is not set +# CONFIG_HTC_PASIC3 is not set +# CONFIG_TPS65010 is not set +# CONFIG_MFD_TMIO is not set +# CONFIG_MFD_WM8400 is not set +# CONFIG_MFD_WM831X is not set +# CONFIG_MFD_WM8350_I2C is not set +# CONFIG_MFD_PCF50633 is not set +# CONFIG_MFD_MC13783 is not set +# CONFIG_AB3100_CORE is not set +# CONFIG_EZX_PCAP is not set +# CONFIG_REGULATOR is not set +# CONFIG_MEDIA_SUPPORT is not set + +# +# Graphics support +# +# CONFIG_VGASTATE is not set +# CONFIG_VIDEO_OUTPUT_CONTROL is not set +# CONFIG_FB is not set +# CONFIG_BACKLIGHT_LCD_SUPPORT is not set + +# +# Display device support +# +# CONFIG_DISPLAY_SUPPORT is not set +# CONFIG_SOUND is not set +CONFIG_USB_SUPPORT=y +# CONFIG_USB_ARCH_HAS_HCD is not set +# CONFIG_USB_ARCH_HAS_OHCI is not set +# CONFIG_USB_ARCH_HAS_EHCI is not set +# CONFIG_USB_OTG_WHITELIST is not set +# CONFIG_USB_OTG_BLACKLIST_HUB is not set +# CONFIG_USB_GADGET_MUSB_HDRC is not set + +# +# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may +# +CONFIG_USB_GADGET=y +# CONFIG_USB_GADGET_DEBUG is not set +# CONFIG_USB_GADGET_DEBUG_FILES is not set +# CONFIG_USB_GADGET_DEBUG_FS is not set +CONFIG_USB_GADGET_VBUS_DRAW=2 +CONFIG_USB_GADGET_SELECTED=y +# CONFIG_USB_GADGET_AT91 is not set +CONFIG_USB_GADGET_ATMEL_USBA=y +CONFIG_USB_ATMEL_USBA=y +# CONFIG_USB_GADGET_FSL_USB2 is not set +# CONFIG_USB_GADGET_LH7A40X is not set +# CONFIG_USB_GADGET_OMAP is not set +# CONFIG_USB_GADGET_PXA25X is not set +# CONFIG_USB_GADGET_R8A66597 is not set +# CONFIG_USB_GADGET_PXA27X is not set +# CONFIG_USB_GADGET_S3C_HSOTG is not set +# CONFIG_USB_GADGET_IMX is not set +# CONFIG_USB_GADGET_S3C2410 is not set +# CONFIG_USB_GADGET_M66592 is not set +# CONFIG_USB_GADGET_AMD5536UDC is not set +# CONFIG_USB_GADGET_FSL_QE is not set +# CONFIG_USB_GADGET_CI13XXX is not set +# CONFIG_USB_GADGET_NET2280 is not set +# CONFIG_USB_GADGET_GOKU is not set +# CONFIG_USB_GADGET_LANGWELL is not set +# CONFIG_USB_GADGET_DUMMY_HCD 
is not set +CONFIG_USB_GADGET_DUALSPEED=y +CONFIG_USB_ZERO=m +# CONFIG_USB_AUDIO is not set +CONFIG_USB_ETH=m +CONFIG_USB_ETH_RNDIS=y +# CONFIG_USB_ETH_EEM is not set +CONFIG_USB_GADGETFS=m +CONFIG_USB_FILE_STORAGE=m +# CONFIG_USB_FILE_STORAGE_TEST is not set +CONFIG_USB_G_SERIAL=m +# CONFIG_USB_MIDI_GADGET is not set +# CONFIG_USB_G_PRINTER is not set +CONFIG_USB_CDC_COMPOSITE=m + +# +# OTG and related infrastructure +# +# CONFIG_USB_GPIO_VBUS is not set +# CONFIG_NOP_USB_XCEIV is not set +CONFIG_MMC=y +# CONFIG_MMC_DEBUG is not set +# CONFIG_MMC_UNSAFE_RESUME is not set + +# +# MMC/SD/SDIO Card Drivers +# +CONFIG_MMC_BLOCK=y +CONFIG_MMC_BLOCK_BOUNCE=y +# CONFIG_SDIO_UART is not set +CONFIG_MMC_TEST=m + +# +# MMC/SD/SDIO Host Controller Drivers +# +# CONFIG_MMC_SDHCI is not set +# CONFIG_MMC_AT91 is not set +CONFIG_MMC_ATMELMCI=y +# CONFIG_MMC_ATMELMCI_DMA is not set +CONFIG_MMC_SPI=m +# CONFIG_MEMSTICK is not set +CONFIG_NEW_LEDS=y +CONFIG_LEDS_CLASS=y + +# +# LED drivers +# +CONFIG_LEDS_GPIO=y +CONFIG_LEDS_GPIO_PLATFORM=y +# CONFIG_LEDS_LP3944 is not set +# CONFIG_LEDS_PCA955X is not set +# CONFIG_LEDS_DAC124S085 is not set +# CONFIG_LEDS_BD2802 is not set + +# +# LED Triggers +# +CONFIG_LEDS_TRIGGERS=y +CONFIG_LEDS_TRIGGER_TIMER=y +CONFIG_LEDS_TRIGGER_HEARTBEAT=y +# CONFIG_LEDS_TRIGGER_BACKLIGHT is not set +# CONFIG_LEDS_TRIGGER_GPIO is not set +CONFIG_LEDS_TRIGGER_DEFAULT_ON=y + +# +# iptables trigger is under Netfilter config (LED target) +# +# CONFIG_ACCESSIBILITY is not set +CONFIG_RTC_LIB=y +CONFIG_RTC_CLASS=y +CONFIG_RTC_HCTOSYS=y +CONFIG_RTC_HCTOSYS_DEVICE="rtc0" +# CONFIG_RTC_DEBUG is not set + +# +# RTC interfaces +# +CONFIG_RTC_INTF_SYSFS=y +CONFIG_RTC_INTF_PROC=y +CONFIG_RTC_INTF_DEV=y +# CONFIG_RTC_INTF_DEV_UIE_EMUL is not set +# CONFIG_RTC_DRV_TEST is not set + +# +# I2C RTC drivers +# +# CONFIG_RTC_DRV_DS1307 is not set +# CONFIG_RTC_DRV_DS1374 is not set +# CONFIG_RTC_DRV_DS1672 is not set +# CONFIG_RTC_DRV_MAX6900 is not set +# CONFIG_RTC_DRV_RS5C372 is not set +# CONFIG_RTC_DRV_ISL1208 is not set +# CONFIG_RTC_DRV_X1205 is not set +# CONFIG_RTC_DRV_PCF8563 is not set +# CONFIG_RTC_DRV_PCF8583 is not set +# CONFIG_RTC_DRV_M41T80 is not set +# CONFIG_RTC_DRV_S35390A is not set +# CONFIG_RTC_DRV_FM3130 is not set +# CONFIG_RTC_DRV_RX8581 is not set +# CONFIG_RTC_DRV_RX8025 is not set + +# +# SPI RTC drivers +# +# CONFIG_RTC_DRV_M41T94 is not set +# CONFIG_RTC_DRV_DS1305 is not set +# CONFIG_RTC_DRV_DS1390 is not set +# CONFIG_RTC_DRV_MAX6902 is not set +# CONFIG_RTC_DRV_R9701 is not set +# CONFIG_RTC_DRV_RS5C348 is not set +# CONFIG_RTC_DRV_DS3234 is not set +# CONFIG_RTC_DRV_PCF2123 is not set + +# +# Platform RTC drivers +# +# CONFIG_RTC_DRV_DS1286 is not set +# CONFIG_RTC_DRV_DS1511 is not set +# CONFIG_RTC_DRV_DS1553 is not set +# CONFIG_RTC_DRV_DS1742 is not set +# CONFIG_RTC_DRV_STK17TA8 is not set +# CONFIG_RTC_DRV_M48T86 is not set +# CONFIG_RTC_DRV_M48T35 is not set +# CONFIG_RTC_DRV_M48T59 is not set +# CONFIG_RTC_DRV_BQ4802 is not set +# CONFIG_RTC_DRV_V3020 is not set + +# +# on-CPU RTC drivers +# +CONFIG_RTC_DRV_AT32AP700X=y +CONFIG_DMADEVICES=y + +# +# DMA Devices +# +CONFIG_DW_DMAC=y +CONFIG_DMA_ENGINE=y + +# +# DMA Clients +# +# CONFIG_NET_DMA is not set +# CONFIG_ASYNC_TX_DMA is not set +# CONFIG_DMATEST is not set +# CONFIG_AUXDISPLAY is not set +# CONFIG_UIO is not set + +# +# TI VLYNQ +# +# CONFIG_STAGING is not set + +# +# File systems +# +CONFIG_EXT2_FS=y +# CONFIG_EXT2_FS_XATTR is not set +# CONFIG_EXT2_FS_XIP is not set +CONFIG_EXT3_FS=y +# 
CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set +# CONFIG_EXT3_FS_XATTR is not set +# CONFIG_EXT4_FS is not set +CONFIG_JBD=y +# CONFIG_JBD_DEBUG is not set +# CONFIG_REISERFS_FS is not set +# CONFIG_JFS_FS is not set +# CONFIG_FS_POSIX_ACL is not set +# CONFIG_XFS_FS is not set +# CONFIG_GFS2_FS is not set +# CONFIG_OCFS2_FS is not set +# CONFIG_BTRFS_FS is not set +# CONFIG_NILFS2_FS is not set +CONFIG_FILE_LOCKING=y +CONFIG_FSNOTIFY=y +# CONFIG_DNOTIFY is not set +CONFIG_INOTIFY=y +CONFIG_INOTIFY_USER=y +# CONFIG_QUOTA is not set +# CONFIG_AUTOFS_FS is not set +# CONFIG_AUTOFS4_FS is not set +CONFIG_FUSE_FS=m +# CONFIG_CUSE is not set + +# +# Caches +# +# CONFIG_FSCACHE is not set + +# +# CD-ROM/DVD Filesystems +# +# CONFIG_ISO9660_FS is not set +# CONFIG_UDF_FS is not set + +# +# DOS/FAT/NT Filesystems +# +CONFIG_FAT_FS=m +CONFIG_MSDOS_FS=m +CONFIG_VFAT_FS=m +CONFIG_FAT_DEFAULT_CODEPAGE=850 +CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1" +# CONFIG_NTFS_FS is not set + +# +# Pseudo filesystems +# +CONFIG_PROC_FS=y +# CONFIG_PROC_KCORE is not set +CONFIG_PROC_SYSCTL=y +CONFIG_PROC_PAGE_MONITOR=y +CONFIG_SYSFS=y +CONFIG_TMPFS=y +# CONFIG_TMPFS_POSIX_ACL is not set +# CONFIG_HUGETLB_PAGE is not set +CONFIG_CONFIGFS_FS=m +CONFIG_MISC_FILESYSTEMS=y +# CONFIG_ADFS_FS is not set +# CONFIG_AFFS_FS is not set +# CONFIG_HFS_FS is not set +# CONFIG_HFSPLUS_FS is not set +# CONFIG_BEFS_FS is not set +# CONFIG_BFS_FS is not set +# CONFIG_EFS_FS is not set +CONFIG_JFFS2_FS=y +CONFIG_JFFS2_FS_DEBUG=0 +CONFIG_JFFS2_FS_WRITEBUFFER=y +# CONFIG_JFFS2_FS_WBUF_VERIFY is not set +# CONFIG_JFFS2_SUMMARY is not set +# CONFIG_JFFS2_FS_XATTR is not set +# CONFIG_JFFS2_COMPRESSION_OPTIONS is not set +CONFIG_JFFS2_ZLIB=y +# CONFIG_JFFS2_LZO is not set +CONFIG_JFFS2_RTIME=y +# CONFIG_JFFS2_RUBIN is not set +# CONFIG_UBIFS_FS is not set +# CONFIG_CRAMFS is not set +# CONFIG_SQUASHFS is not set +# CONFIG_VXFS_FS is not set +# CONFIG_MINIX_FS is not set +# CONFIG_OMFS_FS is not set +# CONFIG_HPFS_FS is not set +# CONFIG_QNX4FS_FS is not set +# CONFIG_ROMFS_FS is not set +# CONFIG_SYSV_FS is not set +CONFIG_UFS_FS=y +# CONFIG_UFS_FS_WRITE is not set +# CONFIG_UFS_DEBUG is not set +CONFIG_NETWORK_FILESYSTEMS=y +CONFIG_NFS_FS=y +CONFIG_NFS_V3=y +# CONFIG_NFS_V3_ACL is not set +# CONFIG_NFS_V4 is not set +CONFIG_ROOT_NFS=y +CONFIG_NFSD=m +CONFIG_NFSD_V3=y +# CONFIG_NFSD_V3_ACL is not set +# CONFIG_NFSD_V4 is not set +CONFIG_LOCKD=y +CONFIG_LOCKD_V4=y +CONFIG_EXPORTFS=m +CONFIG_NFS_COMMON=y +CONFIG_SUNRPC=y +# CONFIG_RPCSEC_GSS_KRB5 is not set +# CONFIG_RPCSEC_GSS_SPKM3 is not set +CONFIG_SMB_FS=m +# CONFIG_SMB_NLS_DEFAULT is not set +CONFIG_CIFS=m +# CONFIG_CIFS_STATS is not set +# CONFIG_CIFS_WEAK_PW_HASH is not set +# CONFIG_CIFS_XATTR is not set +# CONFIG_CIFS_DEBUG2 is not set +# CONFIG_CIFS_EXPERIMENTAL is not set +# CONFIG_NCP_FS is not set +# CONFIG_CODA_FS is not set +# CONFIG_AFS_FS is not set + +# +# Partition Types +# +# CONFIG_PARTITION_ADVANCED is not set +CONFIG_MSDOS_PARTITION=y +CONFIG_NLS=m +CONFIG_NLS_DEFAULT="iso8859-1" +CONFIG_NLS_CODEPAGE_437=m +# CONFIG_NLS_CODEPAGE_737 is not set +# CONFIG_NLS_CODEPAGE_775 is not set +CONFIG_NLS_CODEPAGE_850=m +# CONFIG_NLS_CODEPAGE_852 is not set +# CONFIG_NLS_CODEPAGE_855 is not set +# CONFIG_NLS_CODEPAGE_857 is not set +# CONFIG_NLS_CODEPAGE_860 is not set +# CONFIG_NLS_CODEPAGE_861 is not set +# CONFIG_NLS_CODEPAGE_862 is not set +# CONFIG_NLS_CODEPAGE_863 is not set +# CONFIG_NLS_CODEPAGE_864 is not set +# CONFIG_NLS_CODEPAGE_865 is not set +# CONFIG_NLS_CODEPAGE_866 is 
not set +# CONFIG_NLS_CODEPAGE_869 is not set +# CONFIG_NLS_CODEPAGE_936 is not set +# CONFIG_NLS_CODEPAGE_950 is not set +# CONFIG_NLS_CODEPAGE_932 is not set +# CONFIG_NLS_CODEPAGE_949 is not set +# CONFIG_NLS_CODEPAGE_874 is not set +# CONFIG_NLS_ISO8859_8 is not set +# CONFIG_NLS_CODEPAGE_1250 is not set +# CONFIG_NLS_CODEPAGE_1251 is not set +# CONFIG_NLS_ASCII is not set +CONFIG_NLS_ISO8859_1=m +# CONFIG_NLS_ISO8859_2 is not set +# CONFIG_NLS_ISO8859_3 is not set +# CONFIG_NLS_ISO8859_4 is not set +# CONFIG_NLS_ISO8859_5 is not set +# CONFIG_NLS_ISO8859_6 is not set +# CONFIG_NLS_ISO8859_7 is not set +# CONFIG_NLS_ISO8859_9 is not set +# CONFIG_NLS_ISO8859_13 is not set +# CONFIG_NLS_ISO8859_14 is not set +# CONFIG_NLS_ISO8859_15 is not set +# CONFIG_NLS_KOI8_R is not set +# CONFIG_NLS_KOI8_U is not set +CONFIG_NLS_UTF8=m +# CONFIG_DLM is not set + +# +# Kernel hacking +# +# CONFIG_PRINTK_TIME is not set +CONFIG_ENABLE_WARN_DEPRECATED=y +CONFIG_ENABLE_MUST_CHECK=y +CONFIG_FRAME_WARN=1024 +CONFIG_MAGIC_SYSRQ=y +# CONFIG_STRIP_ASM_SYMS is not set +# CONFIG_UNUSED_SYMBOLS is not set +CONFIG_DEBUG_FS=y +# CONFIG_HEADERS_CHECK is not set +CONFIG_DEBUG_KERNEL=y +# CONFIG_DEBUG_SHIRQ is not set +CONFIG_DETECT_SOFTLOCKUP=y +# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set +CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0 +CONFIG_DETECT_HUNG_TASK=y +# CONFIG_BOOTPARAM_HUNG_TASK_PANIC is not set +CONFIG_BOOTPARAM_HUNG_TASK_PANIC_VALUE=0 +CONFIG_SCHED_DEBUG=y +# CONFIG_SCHEDSTATS is not set +# CONFIG_TIMER_STATS is not set +# CONFIG_DEBUG_OBJECTS is not set +# CONFIG_SLUB_DEBUG_ON is not set +# CONFIG_SLUB_STATS is not set +# CONFIG_DEBUG_RT_MUTEXES is not set +# CONFIG_RT_MUTEX_TESTER is not set +# CONFIG_DEBUG_SPINLOCK is not set +# CONFIG_DEBUG_MUTEXES is not set +# CONFIG_DEBUG_LOCK_ALLOC is not set +# CONFIG_PROVE_LOCKING is not set +# CONFIG_LOCK_STAT is not set +# CONFIG_DEBUG_SPINLOCK_SLEEP is not set +# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set +CONFIG_STACKTRACE=y +# CONFIG_DEBUG_KOBJECT is not set +CONFIG_DEBUG_BUGVERBOSE=y +# CONFIG_DEBUG_INFO is not set +# CONFIG_DEBUG_VM is not set +# CONFIG_DEBUG_WRITECOUNT is not set +# CONFIG_DEBUG_MEMORY_INIT is not set +# CONFIG_DEBUG_LIST is not set +# CONFIG_DEBUG_SG is not set +# CONFIG_DEBUG_NOTIFIERS is not set +# CONFIG_DEBUG_CREDENTIALS is not set +CONFIG_FRAME_POINTER=y +# CONFIG_BOOT_PRINTK_DELAY is not set +# CONFIG_RCU_TORTURE_TEST is not set +# CONFIG_RCU_CPU_STALL_DETECTOR is not set +# CONFIG_KPROBES_SANITY_TEST is not set +# CONFIG_BACKTRACE_SELF_TEST is not set +# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set +# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set +# CONFIG_LKDTM is not set +# CONFIG_FAULT_INJECTION is not set +# CONFIG_PAGE_POISONING is not set +CONFIG_NOP_TRACER=y +CONFIG_RING_BUFFER=y +CONFIG_EVENT_TRACING=y +CONFIG_CONTEXT_SWITCH_TRACER=y +CONFIG_RING_BUFFER_ALLOW_SWAP=y +CONFIG_TRACING=y +CONFIG_TRACING_SUPPORT=y +CONFIG_FTRACE=y +# CONFIG_IRQSOFF_TRACER is not set +# CONFIG_SCHED_TRACER is not set +# CONFIG_ENABLE_DEFAULT_TRACERS is not set +# CONFIG_BOOT_TRACER is not set +CONFIG_BRANCH_PROFILE_NONE=y +# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set +# CONFIG_PROFILE_ALL_BRANCHES is not set +# CONFIG_KMEMTRACE is not set +# CONFIG_WORKQUEUE_TRACER is not set +# CONFIG_BLK_DEV_IO_TRACE is not set +# CONFIG_RING_BUFFER_BENCHMARK is not set +# CONFIG_DYNAMIC_DEBUG is not set +# CONFIG_SAMPLES is not set + +# +# Security options +# +# CONFIG_KEYS is not set +# CONFIG_SECURITY is not set +# CONFIG_SECURITYFS is not set +# 
CONFIG_SECURITY_FILE_CAPABILITIES is not set +CONFIG_CRYPTO=y + +# +# Crypto core or helper +# +# CONFIG_CRYPTO_FIPS is not set +CONFIG_CRYPTO_ALGAPI=y +CONFIG_CRYPTO_ALGAPI2=y +CONFIG_CRYPTO_AEAD=y +CONFIG_CRYPTO_AEAD2=y +CONFIG_CRYPTO_BLKCIPHER=y +CONFIG_CRYPTO_BLKCIPHER2=y +CONFIG_CRYPTO_HASH=y +CONFIG_CRYPTO_HASH2=y +CONFIG_CRYPTO_RNG=m +CONFIG_CRYPTO_RNG2=y +CONFIG_CRYPTO_PCOMP=y +CONFIG_CRYPTO_MANAGER=y +CONFIG_CRYPTO_MANAGER2=y +# CONFIG_CRYPTO_GF128MUL is not set +# CONFIG_CRYPTO_NULL is not set +CONFIG_CRYPTO_WORKQUEUE=y +# CONFIG_CRYPTO_CRYPTD is not set +CONFIG_CRYPTO_AUTHENC=y +# CONFIG_CRYPTO_TEST is not set + +# +# Authenticated Encryption with Associated Data +# +# CONFIG_CRYPTO_CCM is not set +# CONFIG_CRYPTO_GCM is not set +# CONFIG_CRYPTO_SEQIV is not set + +# +# Block modes +# +CONFIG_CRYPTO_CBC=y +# CONFIG_CRYPTO_CTR is not set +# CONFIG_CRYPTO_CTS is not set +CONFIG_CRYPTO_ECB=m +# CONFIG_CRYPTO_LRW is not set +CONFIG_CRYPTO_PCBC=m +# CONFIG_CRYPTO_XTS is not set + +# +# Hash modes +# +CONFIG_CRYPTO_HMAC=y +# CONFIG_CRYPTO_XCBC is not set +# CONFIG_CRYPTO_VMAC is not set + +# +# Digest +# +# CONFIG_CRYPTO_CRC32C is not set +# CONFIG_CRYPTO_GHASH is not set +# CONFIG_CRYPTO_MD4 is not set +CONFIG_CRYPTO_MD5=y +# CONFIG_CRYPTO_MICHAEL_MIC is not set +# CONFIG_CRYPTO_RMD128 is not set +# CONFIG_CRYPTO_RMD160 is not set +# CONFIG_CRYPTO_RMD256 is not set +# CONFIG_CRYPTO_RMD320 is not set +CONFIG_CRYPTO_SHA1=y +# CONFIG_CRYPTO_SHA256 is not set +# CONFIG_CRYPTO_SHA512 is not set +# CONFIG_CRYPTO_TGR192 is not set +# CONFIG_CRYPTO_WP512 is not set + +# +# Ciphers +# +CONFIG_CRYPTO_AES=m +# CONFIG_CRYPTO_ANUBIS is not set +CONFIG_CRYPTO_ARC4=m +# CONFIG_CRYPTO_BLOWFISH is not set +# CONFIG_CRYPTO_CAMELLIA is not set +# CONFIG_CRYPTO_CAST5 is not set +# CONFIG_CRYPTO_CAST6 is not set +CONFIG_CRYPTO_DES=y +# CONFIG_CRYPTO_FCRYPT is not set +# CONFIG_CRYPTO_KHAZAD is not set +# CONFIG_CRYPTO_SALSA20 is not set +# CONFIG_CRYPTO_SEED is not set +# CONFIG_CRYPTO_SERPENT is not set +# CONFIG_CRYPTO_TEA is not set +# CONFIG_CRYPTO_TWOFISH is not set + +# +# Compression +# +CONFIG_CRYPTO_DEFLATE=y +# CONFIG_CRYPTO_ZLIB is not set +# CONFIG_CRYPTO_LZO is not set + +# +# Random Number Generation +# +CONFIG_CRYPTO_ANSI_CPRNG=m +CONFIG_CRYPTO_HW=y +CONFIG_BINARY_PRINTF=y + +# +# Library routines +# +CONFIG_BITREVERSE=y +CONFIG_GENERIC_FIND_LAST_BIT=y +CONFIG_CRC_CCITT=m +# CONFIG_CRC16 is not set +# CONFIG_CRC_T10DIF is not set +CONFIG_CRC_ITU_T=m +CONFIG_CRC32=y +CONFIG_CRC7=m +# CONFIG_LIBCRC32C is not set +CONFIG_ZLIB_INFLATE=y +CONFIG_ZLIB_DEFLATE=y +CONFIG_DECOMPRESS_GZIP=y +CONFIG_GENERIC_ALLOCATOR=y +CONFIG_HAS_IOMEM=y +CONFIG_HAS_IOPORT=y +CONFIG_HAS_DMA=y +CONFIG_NLATTR=y diff --git a/arch/avr32/configs/atngw100mkii_evklcd100_defconfig b/arch/avr32/configs/atngw100mkii_evklcd100_defconfig new file mode 100644 index 00000000000..01e913d66be --- /dev/null +++ b/arch/avr32/configs/atngw100mkii_evklcd100_defconfig @@ -0,0 +1,1549 @@ +# +# Automatically generated make config: don't edit +# Linux kernel version: 2.6.32-rc5 +# Thu Nov 5 15:33:09 2009 +# +CONFIG_AVR32=y +CONFIG_GENERIC_GPIO=y +CONFIG_GENERIC_HARDIRQS=y +CONFIG_STACKTRACE_SUPPORT=y +CONFIG_LOCKDEP_SUPPORT=y +CONFIG_TRACE_IRQFLAGS_SUPPORT=y +CONFIG_HARDIRQS_SW_RESEND=y +CONFIG_GENERIC_IRQ_PROBE=y +CONFIG_RWSEM_GENERIC_SPINLOCK=y +CONFIG_GENERIC_TIME=y +CONFIG_GENERIC_CLOCKEVENTS=y +# CONFIG_RWSEM_XCHGADD_ALGORITHM is not set +# CONFIG_ARCH_HAS_ILOG2_U32 is not set +# CONFIG_ARCH_HAS_ILOG2_U64 is not set 
+CONFIG_GENERIC_HWEIGHT=y +CONFIG_GENERIC_CALIBRATE_DELAY=y +CONFIG_GENERIC_BUG=y +CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config" +CONFIG_CONSTRUCTORS=y + +# +# General setup +# +CONFIG_EXPERIMENTAL=y +CONFIG_BROKEN_ON_SMP=y +CONFIG_INIT_ENV_ARG_LIMIT=32 +CONFIG_LOCALVERSION="" +# CONFIG_LOCALVERSION_AUTO is not set +CONFIG_SWAP=y +CONFIG_SYSVIPC=y +CONFIG_SYSVIPC_SYSCTL=y +CONFIG_POSIX_MQUEUE=y +CONFIG_POSIX_MQUEUE_SYSCTL=y +CONFIG_BSD_PROCESS_ACCT=y +CONFIG_BSD_PROCESS_ACCT_V3=y +# CONFIG_TASKSTATS is not set +# CONFIG_AUDIT is not set + +# +# RCU Subsystem +# +CONFIG_TREE_RCU=y +# CONFIG_TREE_PREEMPT_RCU is not set +# CONFIG_RCU_TRACE is not set +CONFIG_RCU_FANOUT=32 +# CONFIG_RCU_FANOUT_EXACT is not set +# CONFIG_TREE_RCU_TRACE is not set +# CONFIG_IKCONFIG is not set +CONFIG_LOG_BUF_SHIFT=14 +# CONFIG_GROUP_SCHED is not set +# CONFIG_CGROUPS is not set +CONFIG_SYSFS_DEPRECATED=y +CONFIG_SYSFS_DEPRECATED_V2=y +# CONFIG_RELAY is not set +# CONFIG_NAMESPACES is not set +CONFIG_BLK_DEV_INITRD=y +CONFIG_INITRAMFS_SOURCE="" +CONFIG_RD_GZIP=y +# CONFIG_RD_BZIP2 is not set +# CONFIG_RD_LZMA is not set +CONFIG_CC_OPTIMIZE_FOR_SIZE=y +CONFIG_SYSCTL=y +CONFIG_ANON_INODES=y +CONFIG_EMBEDDED=y +# CONFIG_SYSCTL_SYSCALL is not set +CONFIG_KALLSYMS=y +# CONFIG_KALLSYMS_ALL is not set +# CONFIG_KALLSYMS_EXTRA_PASS is not set +CONFIG_HOTPLUG=y +CONFIG_PRINTK=y +CONFIG_BUG=y +CONFIG_ELF_CORE=y +# CONFIG_BASE_FULL is not set +CONFIG_FUTEX=y +CONFIG_EPOLL=y +CONFIG_SIGNALFD=y +CONFIG_TIMERFD=y +CONFIG_EVENTFD=y +CONFIG_SHMEM=y +CONFIG_AIO=y + +# +# Kernel Performance Events And Counters +# +CONFIG_VM_EVENT_COUNTERS=y +CONFIG_SLUB_DEBUG=y +# CONFIG_COMPAT_BRK is not set +# CONFIG_SLAB is not set +CONFIG_SLUB=y +# CONFIG_SLOB is not set +CONFIG_PROFILING=y +CONFIG_TRACEPOINTS=y +CONFIG_OPROFILE=m +CONFIG_HAVE_OPROFILE=y +CONFIG_KPROBES=y +CONFIG_HAVE_KPROBES=y +CONFIG_HAVE_CLK=y + +# +# GCOV-based kernel profiling +# +# CONFIG_GCOV_KERNEL is not set +CONFIG_SLOW_WORK=y +# CONFIG_HAVE_GENERIC_DMA_COHERENT is not set +CONFIG_SLABINFO=y +CONFIG_RT_MUTEXES=y +CONFIG_BASE_SMALL=1 +CONFIG_MODULES=y +# CONFIG_MODULE_FORCE_LOAD is not set +CONFIG_MODULE_UNLOAD=y +CONFIG_MODULE_FORCE_UNLOAD=y +# CONFIG_MODVERSIONS is not set +# CONFIG_MODULE_SRCVERSION_ALL is not set +CONFIG_BLOCK=y +CONFIG_LBDAF=y +# CONFIG_BLK_DEV_BSG is not set +# CONFIG_BLK_DEV_INTEGRITY is not set + +# +# IO Schedulers +# +CONFIG_IOSCHED_NOOP=y +# CONFIG_IOSCHED_AS is not set +# CONFIG_IOSCHED_DEADLINE is not set +CONFIG_IOSCHED_CFQ=y +# CONFIG_DEFAULT_AS is not set +# CONFIG_DEFAULT_DEADLINE is not set +CONFIG_DEFAULT_CFQ=y +# CONFIG_DEFAULT_NOOP is not set +CONFIG_DEFAULT_IOSCHED="cfq" +CONFIG_FREEZER=y + +# +# System Type and features +# +CONFIG_TICK_ONESHOT=y +CONFIG_NO_HZ=y +CONFIG_HIGH_RES_TIMERS=y +CONFIG_GENERIC_CLOCKEVENTS_BUILD=y +CONFIG_SUBARCH_AVR32B=y +CONFIG_MMU=y +CONFIG_PERFORMANCE_COUNTERS=y +CONFIG_PLATFORM_AT32AP=y +CONFIG_CPU_AT32AP700X=y +CONFIG_CPU_AT32AP7000=y +CONFIG_BOARD_ATNGW100_COMMON=y +# CONFIG_BOARD_ATSTK1000 is not set +# CONFIG_BOARD_ATNGW100_MKI is not set +CONFIG_BOARD_ATNGW100_MKII=y +# CONFIG_BOARD_HAMMERHEAD is not set +# CONFIG_BOARD_FAVR_32 is not set +# CONFIG_BOARD_MERISC is not set +# CONFIG_BOARD_MIMC200 is not set +CONFIG_BOARD_ATNGW100_MKII_LCD=y +# CONFIG_BOARD_ATNGW100_ADDON_NONE is not set +CONFIG_BOARD_ATNGW100_EVKLCD10X=y +# CONFIG_BOARD_ATNGW100_MRMT is not set +CONFIG_BOARD_ATNGW100_EVKLCD10X_QVGA=y +# CONFIG_BOARD_ATNGW100_EVKLCD10X_VGA is not set +# 
CONFIG_BOARD_ATNGW100_EVKLCD10X_POW_QVGA is not set +CONFIG_LOADER_U_BOOT=y + +# +# Atmel AVR32 AP options +# +# CONFIG_AP700X_32_BIT_SMC is not set +CONFIG_AP700X_16_BIT_SMC=y +# CONFIG_AP700X_8_BIT_SMC is not set +CONFIG_LOAD_ADDRESS=0x10000000 +CONFIG_ENTRY_ADDRESS=0x90000000 +CONFIG_PHYS_OFFSET=0x10000000 +CONFIG_PREEMPT_NONE=y +# CONFIG_PREEMPT_VOLUNTARY is not set +# CONFIG_PREEMPT is not set +CONFIG_QUICKLIST=y +# CONFIG_HAVE_ARCH_BOOTMEM is not set +# CONFIG_ARCH_HAVE_MEMORY_PRESENT is not set +# CONFIG_NEED_NODE_MEMMAP_SIZE is not set +CONFIG_ARCH_FLATMEM_ENABLE=y +# CONFIG_ARCH_DISCONTIGMEM_ENABLE is not set +# CONFIG_ARCH_SPARSEMEM_ENABLE is not set +CONFIG_SELECT_MEMORY_MODEL=y +CONFIG_FLATMEM_MANUAL=y +# CONFIG_DISCONTIGMEM_MANUAL is not set +# CONFIG_SPARSEMEM_MANUAL is not set +CONFIG_FLATMEM=y +CONFIG_FLAT_NODE_MEM_MAP=y +CONFIG_PAGEFLAGS_EXTENDED=y +CONFIG_SPLIT_PTLOCK_CPUS=4 +# CONFIG_PHYS_ADDR_T_64BIT is not set +CONFIG_ZONE_DMA_FLAG=0 +CONFIG_NR_QUICK=2 +CONFIG_VIRT_TO_BUS=y +CONFIG_HAVE_MLOCK=y +CONFIG_HAVE_MLOCKED_PAGE_BIT=y +# CONFIG_KSM is not set +CONFIG_DEFAULT_MMAP_MIN_ADDR=4096 +# CONFIG_OWNERSHIP_TRACE is not set +CONFIG_NMI_DEBUGGING=y +# CONFIG_HZ_100 is not set +CONFIG_HZ_250=y +# CONFIG_HZ_300 is not set +# CONFIG_HZ_1000 is not set +CONFIG_HZ=250 +CONFIG_SCHED_HRTICK=y +CONFIG_CMDLINE="" + +# +# Power management options +# +CONFIG_PM=y +# CONFIG_PM_DEBUG is not set +CONFIG_PM_SLEEP=y +CONFIG_SUSPEND=y +CONFIG_SUSPEND_FREEZER=y +# CONFIG_PM_RUNTIME is not set +CONFIG_ARCH_SUSPEND_POSSIBLE=y + +# +# CPU Frequency scaling +# +CONFIG_CPU_FREQ=y +CONFIG_CPU_FREQ_TABLE=y +# CONFIG_CPU_FREQ_DEBUG is not set +# CONFIG_CPU_FREQ_STAT is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_POWERSAVE is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE is not set +CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y +# CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE is not set +CONFIG_CPU_FREQ_GOV_PERFORMANCE=y +# CONFIG_CPU_FREQ_GOV_POWERSAVE is not set +CONFIG_CPU_FREQ_GOV_USERSPACE=y +CONFIG_CPU_FREQ_GOV_ONDEMAND=y +# CONFIG_CPU_FREQ_GOV_CONSERVATIVE is not set +CONFIG_CPU_FREQ_AT32AP=y + +# +# Bus options +# +# CONFIG_ARCH_SUPPORTS_MSI is not set +# CONFIG_PCCARD is not set + +# +# Executable file formats +# +CONFIG_BINFMT_ELF=y +# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set +# CONFIG_HAVE_AOUT is not set +# CONFIG_BINFMT_MISC is not set +CONFIG_NET=y + +# +# Networking options +# +CONFIG_PACKET=y +CONFIG_PACKET_MMAP=y +CONFIG_UNIX=y +CONFIG_XFRM=y +CONFIG_XFRM_USER=y +# CONFIG_XFRM_SUB_POLICY is not set +# CONFIG_XFRM_MIGRATE is not set +# CONFIG_XFRM_STATISTICS is not set +CONFIG_XFRM_IPCOMP=y +CONFIG_NET_KEY=y +# CONFIG_NET_KEY_MIGRATE is not set +CONFIG_INET=y +CONFIG_IP_MULTICAST=y +CONFIG_IP_ADVANCED_ROUTER=y +CONFIG_ASK_IP_FIB_HASH=y +# CONFIG_IP_FIB_TRIE is not set +CONFIG_IP_FIB_HASH=y +# CONFIG_IP_MULTIPLE_TABLES is not set +# CONFIG_IP_ROUTE_MULTIPATH is not set +# CONFIG_IP_ROUTE_VERBOSE is not set +CONFIG_IP_PNP=y +CONFIG_IP_PNP_DHCP=y +# CONFIG_IP_PNP_BOOTP is not set +# CONFIG_IP_PNP_RARP is not set +# CONFIG_NET_IPIP is not set +# CONFIG_NET_IPGRE is not set +CONFIG_IP_MROUTE=y +CONFIG_IP_PIMSM_V1=y +# CONFIG_IP_PIMSM_V2 is not set +# CONFIG_ARPD is not set +CONFIG_SYN_COOKIES=y +CONFIG_INET_AH=y +CONFIG_INET_ESP=y +CONFIG_INET_IPCOMP=y +CONFIG_INET_XFRM_TUNNEL=y +CONFIG_INET_TUNNEL=y +CONFIG_INET_XFRM_MODE_TRANSPORT=y +CONFIG_INET_XFRM_MODE_TUNNEL=y +CONFIG_INET_XFRM_MODE_BEET=y +# CONFIG_INET_LRO is not set 
+CONFIG_INET_DIAG=y +CONFIG_INET_TCP_DIAG=y +# CONFIG_TCP_CONG_ADVANCED is not set +CONFIG_TCP_CONG_CUBIC=y +CONFIG_DEFAULT_TCP_CONG="cubic" +# CONFIG_TCP_MD5SIG is not set +CONFIG_IPV6=y +# CONFIG_IPV6_PRIVACY is not set +# CONFIG_IPV6_ROUTER_PREF is not set +# CONFIG_IPV6_OPTIMISTIC_DAD is not set +CONFIG_INET6_AH=y +CONFIG_INET6_ESP=y +CONFIG_INET6_IPCOMP=y +# CONFIG_IPV6_MIP6 is not set +CONFIG_INET6_XFRM_TUNNEL=y +CONFIG_INET6_TUNNEL=y +CONFIG_INET6_XFRM_MODE_TRANSPORT=y +CONFIG_INET6_XFRM_MODE_TUNNEL=y +CONFIG_INET6_XFRM_MODE_BEET=y +# CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION is not set +CONFIG_IPV6_SIT=y +CONFIG_IPV6_NDISC_NODETYPE=y +# CONFIG_IPV6_TUNNEL is not set +# CONFIG_IPV6_MULTIPLE_TABLES is not set +# CONFIG_IPV6_MROUTE is not set +# CONFIG_NETWORK_SECMARK is not set +CONFIG_NETFILTER=y +# CONFIG_NETFILTER_DEBUG is not set +# CONFIG_NETFILTER_ADVANCED is not set + +# +# Core Netfilter Configuration +# +CONFIG_NETFILTER_NETLINK=m +CONFIG_NETFILTER_NETLINK_LOG=m +CONFIG_NF_CONNTRACK=m +CONFIG_NF_CONNTRACK_FTP=m +CONFIG_NF_CONNTRACK_IRC=m +CONFIG_NF_CONNTRACK_SIP=m +CONFIG_NF_CT_NETLINK=m +CONFIG_NETFILTER_XTABLES=y +CONFIG_NETFILTER_XT_TARGET_MARK=m +CONFIG_NETFILTER_XT_TARGET_NFLOG=m +CONFIG_NETFILTER_XT_TARGET_TCPMSS=m +CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m +CONFIG_NETFILTER_XT_MATCH_MARK=m +CONFIG_NETFILTER_XT_MATCH_POLICY=m +CONFIG_NETFILTER_XT_MATCH_STATE=m +# CONFIG_IP_VS is not set + +# +# IP: Netfilter Configuration +# +CONFIG_NF_DEFRAG_IPV4=m +CONFIG_NF_CONNTRACK_IPV4=m +CONFIG_NF_CONNTRACK_PROC_COMPAT=y +CONFIG_IP_NF_IPTABLES=m +CONFIG_IP_NF_FILTER=m +CONFIG_IP_NF_TARGET_REJECT=m +CONFIG_IP_NF_TARGET_LOG=m +# CONFIG_IP_NF_TARGET_ULOG is not set +CONFIG_NF_NAT=m +CONFIG_NF_NAT_NEEDED=y +CONFIG_IP_NF_TARGET_MASQUERADE=m +CONFIG_NF_NAT_FTP=m +CONFIG_NF_NAT_IRC=m +# CONFIG_NF_NAT_TFTP is not set +# CONFIG_NF_NAT_AMANDA is not set +# CONFIG_NF_NAT_PPTP is not set +# CONFIG_NF_NAT_H323 is not set +CONFIG_NF_NAT_SIP=m +CONFIG_IP_NF_MANGLE=m + +# +# IPv6: Netfilter Configuration +# +CONFIG_NF_CONNTRACK_IPV6=m +CONFIG_IP6_NF_IPTABLES=m +CONFIG_IP6_NF_MATCH_IPV6HEADER=m +CONFIG_IP6_NF_TARGET_LOG=m +CONFIG_IP6_NF_FILTER=m +CONFIG_IP6_NF_TARGET_REJECT=m +CONFIG_IP6_NF_MANGLE=m +# CONFIG_IP_DCCP is not set +# CONFIG_IP_SCTP is not set +# CONFIG_RDS is not set +# CONFIG_TIPC is not set +# CONFIG_ATM is not set +CONFIG_STP=m +CONFIG_BRIDGE=m +# CONFIG_NET_DSA is not set +CONFIG_VLAN_8021Q=m +# CONFIG_VLAN_8021Q_GVRP is not set +# CONFIG_DECNET is not set +CONFIG_LLC=m +# CONFIG_LLC2 is not set +# CONFIG_IPX is not set +# CONFIG_ATALK is not set +# CONFIG_X25 is not set +# CONFIG_LAPB is not set +# CONFIG_ECONET is not set +# CONFIG_WAN_ROUTER is not set +# CONFIG_PHONET is not set +# CONFIG_IEEE802154 is not set +# CONFIG_NET_SCHED is not set +# CONFIG_DCB is not set + +# +# Network testing +# +# CONFIG_NET_PKTGEN is not set +# CONFIG_NET_TCPPROBE is not set +# CONFIG_NET_DROP_MONITOR is not set +# CONFIG_HAMRADIO is not set +# CONFIG_CAN is not set +# CONFIG_IRDA is not set +# CONFIG_BT is not set +# CONFIG_AF_RXRPC is not set +CONFIG_WIRELESS=y +# CONFIG_CFG80211 is not set +CONFIG_CFG80211_DEFAULT_PS_VALUE=0 +# CONFIG_WIRELESS_OLD_REGULATORY is not set +# CONFIG_WIRELESS_EXT is not set +# CONFIG_LIB80211 is not set + +# +# CFG80211 needs to be enabled for MAC80211 +# +# CONFIG_WIMAX is not set +# CONFIG_RFKILL is not set +# CONFIG_NET_9P is not set + +# +# Device Drivers +# + +# +# Generic Driver Options +# +CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" +# CONFIG_DEVTMPFS is not 
set +CONFIG_STANDALONE=y +# CONFIG_PREVENT_FIRMWARE_BUILD is not set +# CONFIG_FW_LOADER is not set +# CONFIG_DEBUG_DRIVER is not set +# CONFIG_DEBUG_DEVRES is not set +# CONFIG_SYS_HYPERVISOR is not set +# CONFIG_CONNECTOR is not set +CONFIG_MTD=y +# CONFIG_MTD_DEBUG is not set +# CONFIG_MTD_TESTS is not set +# CONFIG_MTD_CONCAT is not set +CONFIG_MTD_PARTITIONS=y +# CONFIG_MTD_REDBOOT_PARTS is not set +CONFIG_MTD_CMDLINE_PARTS=y +# CONFIG_MTD_AR7_PARTS is not set + +# +# User Modules And Translation Layers +# +CONFIG_MTD_CHAR=y +CONFIG_MTD_BLKDEVS=y +CONFIG_MTD_BLOCK=y +# CONFIG_FTL is not set +# CONFIG_NFTL is not set +# CONFIG_INFTL is not set +# CONFIG_RFD_FTL is not set +# CONFIG_SSFDC is not set +# CONFIG_MTD_OOPS is not set + +# +# RAM/ROM/Flash chip drivers +# +CONFIG_MTD_CFI=y +# CONFIG_MTD_JEDECPROBE is not set +CONFIG_MTD_GEN_PROBE=y +# CONFIG_MTD_CFI_ADV_OPTIONS is not set +CONFIG_MTD_MAP_BANK_WIDTH_1=y +CONFIG_MTD_MAP_BANK_WIDTH_2=y +CONFIG_MTD_MAP_BANK_WIDTH_4=y +# CONFIG_MTD_MAP_BANK_WIDTH_8 is not set +# CONFIG_MTD_MAP_BANK_WIDTH_16 is not set +# CONFIG_MTD_MAP_BANK_WIDTH_32 is not set +CONFIG_MTD_CFI_I1=y +CONFIG_MTD_CFI_I2=y +# CONFIG_MTD_CFI_I4 is not set +# CONFIG_MTD_CFI_I8 is not set +CONFIG_MTD_CFI_INTELEXT=y +# CONFIG_MTD_CFI_AMDSTD is not set +# CONFIG_MTD_CFI_STAA is not set +CONFIG_MTD_CFI_UTIL=y +# CONFIG_MTD_RAM is not set +# CONFIG_MTD_ROM is not set +# CONFIG_MTD_ABSENT is not set + +# +# Mapping drivers for chip access +# +# CONFIG_MTD_COMPLEX_MAPPINGS is not set +CONFIG_MTD_PHYSMAP=y +# CONFIG_MTD_PHYSMAP_COMPAT is not set +# CONFIG_MTD_PLATRAM is not set + +# +# Self-contained MTD device drivers +# +CONFIG_MTD_DATAFLASH=y +# CONFIG_MTD_DATAFLASH_WRITE_VERIFY is not set +# CONFIG_MTD_DATAFLASH_OTP is not set +# CONFIG_MTD_M25P80 is not set +# CONFIG_MTD_SST25L is not set +# CONFIG_MTD_SLRAM is not set +# CONFIG_MTD_PHRAM is not set +# CONFIG_MTD_MTDRAM is not set +# CONFIG_MTD_BLOCK2MTD is not set + +# +# Disk-On-Chip Device Drivers +# +# CONFIG_MTD_DOC2000 is not set +# CONFIG_MTD_DOC2001 is not set +# CONFIG_MTD_DOC2001PLUS is not set +CONFIG_MTD_NAND=y +# CONFIG_MTD_NAND_VERIFY_WRITE is not set +# CONFIG_MTD_NAND_ECC_SMC is not set +# CONFIG_MTD_NAND_MUSEUM_IDS is not set +CONFIG_MTD_NAND_IDS=y +# CONFIG_MTD_NAND_DISKONCHIP is not set +CONFIG_MTD_NAND_ATMEL=y +CONFIG_MTD_NAND_ATMEL_ECC_HW=y +# CONFIG_MTD_NAND_ATMEL_ECC_SOFT is not set +# CONFIG_MTD_NAND_ATMEL_ECC_NONE is not set +# CONFIG_MTD_NAND_NANDSIM is not set +# CONFIG_MTD_NAND_PLATFORM is not set +# CONFIG_MTD_ONENAND is not set + +# +# LPDDR flash memory drivers +# +# CONFIG_MTD_LPDDR is not set + +# +# UBI - Unsorted block images +# +CONFIG_MTD_UBI=y +CONFIG_MTD_UBI_WL_THRESHOLD=4096 +CONFIG_MTD_UBI_BEB_RESERVE=1 +# CONFIG_MTD_UBI_GLUEBI is not set + +# +# UBI debugging options +# +# CONFIG_MTD_UBI_DEBUG is not set +# CONFIG_PARPORT is not set +CONFIG_BLK_DEV=y +# CONFIG_BLK_DEV_COW_COMMON is not set +CONFIG_BLK_DEV_LOOP=m +# CONFIG_BLK_DEV_CRYPTOLOOP is not set +CONFIG_BLK_DEV_NBD=m +CONFIG_BLK_DEV_RAM=m +CONFIG_BLK_DEV_RAM_COUNT=16 +CONFIG_BLK_DEV_RAM_SIZE=4096 +# CONFIG_BLK_DEV_XIP is not set +# CONFIG_CDROM_PKTCDVD is not set +# CONFIG_ATA_OVER_ETH is not set +CONFIG_MISC_DEVICES=y +# CONFIG_ATMEL_PWM is not set +CONFIG_ATMEL_TCLIB=y +CONFIG_ATMEL_TCB_CLKSRC=y +CONFIG_ATMEL_TCB_CLKSRC_BLOCK=0 +# CONFIG_ICS932S401 is not set +# CONFIG_ATMEL_SSC is not set +# CONFIG_ENCLOSURE_SERVICES is not set +# CONFIG_ISL29003 is not set +# CONFIG_C2PORT is not set + +# +# EEPROM support +# +# 
CONFIG_EEPROM_AT24 is not set +# CONFIG_EEPROM_AT25 is not set +# CONFIG_EEPROM_LEGACY is not set +# CONFIG_EEPROM_MAX6875 is not set +# CONFIG_EEPROM_93CX6 is not set + +# +# SCSI device support +# +# CONFIG_RAID_ATTRS is not set +# CONFIG_SCSI is not set +# CONFIG_SCSI_DMA is not set +# CONFIG_SCSI_NETLINK is not set +# CONFIG_ATA is not set +# CONFIG_MD is not set +CONFIG_NETDEVICES=y +# CONFIG_DUMMY is not set +# CONFIG_BONDING is not set +# CONFIG_MACVLAN is not set +# CONFIG_EQUALIZER is not set +# CONFIG_TUN is not set +# CONFIG_VETH is not set +CONFIG_PHYLIB=y + +# +# MII PHY device drivers +# +# CONFIG_MARVELL_PHY is not set +# CONFIG_DAVICOM_PHY is not set +# CONFIG_QSEMI_PHY is not set +# CONFIG_LXT_PHY is not set +# CONFIG_CICADA_PHY is not set +# CONFIG_VITESSE_PHY is not set +# CONFIG_SMSC_PHY is not set +# CONFIG_BROADCOM_PHY is not set +# CONFIG_ICPLUS_PHY is not set +# CONFIG_REALTEK_PHY is not set +# CONFIG_NATIONAL_PHY is not set +# CONFIG_STE10XP is not set +# CONFIG_LSI_ET1011C_PHY is not set +# CONFIG_FIXED_PHY is not set +# CONFIG_MDIO_BITBANG is not set +CONFIG_NET_ETHERNET=y +# CONFIG_MII is not set +CONFIG_MACB=y +# CONFIG_ENC28J60 is not set +# CONFIG_ETHOC is not set +# CONFIG_DNET is not set +# CONFIG_IBM_NEW_EMAC_ZMII is not set +# CONFIG_IBM_NEW_EMAC_RGMII is not set +# CONFIG_IBM_NEW_EMAC_TAH is not set +# CONFIG_IBM_NEW_EMAC_EMAC4 is not set +# CONFIG_IBM_NEW_EMAC_NO_FLOW_CTRL is not set +# CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set +# CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set +# CONFIG_B44 is not set +# CONFIG_KS8842 is not set +# CONFIG_KS8851 is not set +# CONFIG_KS8851_MLL is not set +# CONFIG_NETDEV_1000 is not set +# CONFIG_NETDEV_10000 is not set +CONFIG_WLAN=y +# CONFIG_WLAN_PRE80211 is not set +# CONFIG_WLAN_80211 is not set + +# +# Enable WiMAX (Networking options) to see the WiMAX drivers +# +# CONFIG_WAN is not set +CONFIG_PPP=m +# CONFIG_PPP_MULTILINK is not set +CONFIG_PPP_FILTER=y +CONFIG_PPP_ASYNC=m +# CONFIG_PPP_SYNC_TTY is not set +CONFIG_PPP_DEFLATE=m +CONFIG_PPP_BSDCOMP=m +CONFIG_PPP_MPPE=m +CONFIG_PPPOE=m +# CONFIG_PPPOL2TP is not set +# CONFIG_SLIP is not set +CONFIG_SLHC=m +# CONFIG_NETCONSOLE is not set +# CONFIG_NETPOLL is not set +# CONFIG_NET_POLL_CONTROLLER is not set +# CONFIG_ISDN is not set +# CONFIG_PHONE is not set + +# +# Input device support +# +CONFIG_INPUT=y +# CONFIG_INPUT_FF_MEMLESS is not set +# CONFIG_INPUT_POLLDEV is not set + +# +# Userland interfaces +# +# CONFIG_INPUT_MOUSEDEV is not set +# CONFIG_INPUT_JOYDEV is not set +CONFIG_INPUT_EVDEV=m +# CONFIG_INPUT_EVBUG is not set + +# +# Input Device Drivers +# +# CONFIG_INPUT_KEYBOARD is not set +# CONFIG_INPUT_MOUSE is not set +# CONFIG_INPUT_JOYSTICK is not set +# CONFIG_INPUT_TABLET is not set +CONFIG_INPUT_TOUCHSCREEN=y +# CONFIG_TOUCHSCREEN_ADS7846 is not set +# CONFIG_TOUCHSCREEN_AD7877 is not set +# CONFIG_TOUCHSCREEN_AD7879_I2C is not set +# CONFIG_TOUCHSCREEN_AD7879_SPI is not set +# CONFIG_TOUCHSCREEN_AD7879 is not set +# CONFIG_TOUCHSCREEN_EETI is not set +# CONFIG_TOUCHSCREEN_FUJITSU is not set +# CONFIG_TOUCHSCREEN_GUNZE is not set +# CONFIG_TOUCHSCREEN_ELO is not set +# CONFIG_TOUCHSCREEN_WACOM_W8001 is not set +# CONFIG_TOUCHSCREEN_MCS5000 is not set +# CONFIG_TOUCHSCREEN_MTOUCH is not set +# CONFIG_TOUCHSCREEN_INEXIO is not set +# CONFIG_TOUCHSCREEN_MK712 is not set +# CONFIG_TOUCHSCREEN_PENMOUNT is not set +# CONFIG_TOUCHSCREEN_TOUCHRIGHT is not set +# CONFIG_TOUCHSCREEN_TOUCHWIN is not set +CONFIG_TOUCHSCREEN_WM97XX=m 
+CONFIG_TOUCHSCREEN_WM9705=y +CONFIG_TOUCHSCREEN_WM9712=y +CONFIG_TOUCHSCREEN_WM9713=y +# CONFIG_TOUCHSCREEN_WM97XX_ATMEL is not set +# CONFIG_TOUCHSCREEN_TOUCHIT213 is not set +# CONFIG_TOUCHSCREEN_TSC2007 is not set +# CONFIG_TOUCHSCREEN_W90X900 is not set +# CONFIG_INPUT_MISC is not set + +# +# Hardware I/O ports +# +# CONFIG_SERIO is not set +# CONFIG_GAMEPORT is not set + +# +# Character devices +# +CONFIG_VT=y +CONFIG_CONSOLE_TRANSLATIONS=y +CONFIG_VT_CONSOLE=y +CONFIG_HW_CONSOLE=y +# CONFIG_VT_HW_CONSOLE_BINDING is not set +CONFIG_DEVKMEM=y +# CONFIG_SERIAL_NONSTANDARD is not set + +# +# Serial drivers +# +# CONFIG_SERIAL_8250 is not set + +# +# Non-8250 serial port support +# +CONFIG_SERIAL_ATMEL=y +CONFIG_SERIAL_ATMEL_CONSOLE=y +CONFIG_SERIAL_ATMEL_PDC=y +# CONFIG_SERIAL_ATMEL_TTYAT is not set +# CONFIG_SERIAL_MAX3100 is not set +CONFIG_SERIAL_CORE=y +CONFIG_SERIAL_CORE_CONSOLE=y +CONFIG_UNIX98_PTYS=y +# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set +# CONFIG_LEGACY_PTYS is not set +# CONFIG_IPMI_HANDLER is not set +# CONFIG_HW_RANDOM is not set +# CONFIG_R3964 is not set +# CONFIG_RAW_DRIVER is not set +# CONFIG_TCG_TPM is not set +CONFIG_I2C=m +CONFIG_I2C_BOARDINFO=y +CONFIG_I2C_COMPAT=y +CONFIG_I2C_CHARDEV=m +CONFIG_I2C_HELPER_AUTO=y +CONFIG_I2C_ALGOBIT=m + +# +# I2C Hardware Bus support +# + +# +# I2C system bus drivers (mostly embedded / system-on-chip) +# +# CONFIG_I2C_DESIGNWARE is not set +CONFIG_I2C_GPIO=m +# CONFIG_I2C_OCORES is not set +# CONFIG_I2C_SIMTEC is not set + +# +# External I2C/SMBus adapter drivers +# +# CONFIG_I2C_PARPORT_LIGHT is not set +# CONFIG_I2C_TAOS_EVM is not set + +# +# Other I2C/SMBus bus drivers +# +# CONFIG_I2C_PCA_PLATFORM is not set +# CONFIG_I2C_STUB is not set + +# +# Miscellaneous I2C Chip support +# +# CONFIG_DS1682 is not set +# CONFIG_SENSORS_TSL2550 is not set +# CONFIG_I2C_DEBUG_CORE is not set +# CONFIG_I2C_DEBUG_ALGO is not set +# CONFIG_I2C_DEBUG_BUS is not set +# CONFIG_I2C_DEBUG_CHIP is not set +CONFIG_SPI=y +# CONFIG_SPI_DEBUG is not set +CONFIG_SPI_MASTER=y + +# +# SPI Master Controller Drivers +# +CONFIG_SPI_ATMEL=y +# CONFIG_SPI_BITBANG is not set +# CONFIG_SPI_GPIO is not set + +# +# SPI Protocol Masters +# +CONFIG_SPI_SPIDEV=m +# CONFIG_SPI_TLE62X0 is not set + +# +# PPS support +# +# CONFIG_PPS is not set +CONFIG_ARCH_REQUIRE_GPIOLIB=y +CONFIG_GPIOLIB=y +# CONFIG_DEBUG_GPIO is not set +# CONFIG_GPIO_SYSFS is not set + +# +# Memory mapped GPIO expanders: +# + +# +# I2C GPIO expanders: +# +# CONFIG_GPIO_MAX732X is not set +# CONFIG_GPIO_PCA953X is not set +# CONFIG_GPIO_PCF857X is not set + +# +# PCI GPIO expanders: +# + +# +# SPI GPIO expanders: +# +# CONFIG_GPIO_MAX7301 is not set +# CONFIG_GPIO_MCP23S08 is not set +# CONFIG_GPIO_MC33880 is not set + +# +# AC97 GPIO expanders: +# +# CONFIG_W1 is not set +# CONFIG_POWER_SUPPLY is not set +# CONFIG_HWMON is not set +# CONFIG_THERMAL is not set +CONFIG_WATCHDOG=y +# CONFIG_WATCHDOG_NOWAYOUT is not set + +# +# Watchdog Device Drivers +# +# CONFIG_SOFT_WATCHDOG is not set +CONFIG_AT32AP700X_WDT=y +CONFIG_SSB_POSSIBLE=y + +# +# Sonics Silicon Backplane +# +# CONFIG_SSB is not set + +# +# Multifunction device drivers +# +# CONFIG_MFD_CORE is not set +# CONFIG_MFD_SM501 is not set +# CONFIG_HTC_PASIC3 is not set +# CONFIG_UCB1400_CORE is not set +# CONFIG_TPS65010 is not set +# CONFIG_MFD_TMIO is not set +# CONFIG_MFD_WM8400 is not set +# CONFIG_MFD_WM831X is not set +# CONFIG_MFD_WM8350_I2C is not set +# CONFIG_MFD_PCF50633 is not set +# CONFIG_MFD_MC13783 is not set +# 
CONFIG_AB3100_CORE is not set +# CONFIG_EZX_PCAP is not set +# CONFIG_REGULATOR is not set +# CONFIG_MEDIA_SUPPORT is not set + +# +# Graphics support +# +# CONFIG_VGASTATE is not set +# CONFIG_VIDEO_OUTPUT_CONTROL is not set +CONFIG_FB=y +# CONFIG_FIRMWARE_EDID is not set +# CONFIG_FB_DDC is not set +# CONFIG_FB_BOOT_VESA_SUPPORT is not set +CONFIG_FB_CFB_FILLRECT=y +CONFIG_FB_CFB_COPYAREA=y +CONFIG_FB_CFB_IMAGEBLIT=y +# CONFIG_FB_CFB_REV_PIXELS_IN_BYTE is not set +# CONFIG_FB_SYS_FILLRECT is not set +# CONFIG_FB_SYS_COPYAREA is not set +# CONFIG_FB_SYS_IMAGEBLIT is not set +# CONFIG_FB_FOREIGN_ENDIAN is not set +# CONFIG_FB_SYS_FOPS is not set +# CONFIG_FB_SVGALIB is not set +# CONFIG_FB_MACMODES is not set +# CONFIG_FB_BACKLIGHT is not set +# CONFIG_FB_MODE_HELPERS is not set +# CONFIG_FB_TILEBLITTING is not set + +# +# Frame buffer hardware drivers +# +# CONFIG_FB_S1D13XXX is not set +CONFIG_FB_ATMEL=y +# CONFIG_FB_VIRTUAL is not set +# CONFIG_FB_METRONOME is not set +# CONFIG_FB_MB862XX is not set +# CONFIG_FB_BROADSHEET is not set +# CONFIG_BACKLIGHT_LCD_SUPPORT is not set + +# +# Display device support +# +# CONFIG_DISPLAY_SUPPORT is not set + +# +# Console display driver support +# +CONFIG_DUMMY_CONSOLE=y +# CONFIG_FRAMEBUFFER_CONSOLE is not set +# CONFIG_LOGO is not set +CONFIG_SOUND=y +CONFIG_SOUND_OSS_CORE=y +CONFIG_SOUND_OSS_CORE_PRECLAIM=y +CONFIG_SND=y +CONFIG_SND_TIMER=y +CONFIG_SND_PCM=m +# CONFIG_SND_SEQUENCER is not set +CONFIG_SND_OSSEMUL=y +CONFIG_SND_MIXER_OSS=m +CONFIG_SND_PCM_OSS=m +CONFIG_SND_PCM_OSS_PLUGINS=y +CONFIG_SND_HRTIMER=y +# CONFIG_SND_DYNAMIC_MINORS is not set +# CONFIG_SND_SUPPORT_OLD_API is not set +CONFIG_SND_VERBOSE_PROCFS=y +# CONFIG_SND_VERBOSE_PRINTK is not set +# CONFIG_SND_DEBUG is not set +CONFIG_SND_VMASTER=y +# CONFIG_SND_RAWMIDI_SEQ is not set +# CONFIG_SND_OPL3_LIB_SEQ is not set +# CONFIG_SND_OPL4_LIB_SEQ is not set +# CONFIG_SND_SBAWE_SEQ is not set +# CONFIG_SND_EMU10K1_SEQ is not set +CONFIG_SND_AC97_CODEC=m +# CONFIG_SND_DRIVERS is not set + +# +# Atmel devices (AVR32 and AT91) +# +# CONFIG_SND_ATMEL_ABDAC is not set +CONFIG_SND_ATMEL_AC97C=m +# CONFIG_SND_SPI is not set +# CONFIG_SND_SOC is not set +# CONFIG_SOUND_PRIME is not set +CONFIG_AC97_BUS=m +CONFIG_HID_SUPPORT=y +CONFIG_HID=y +# CONFIG_HIDRAW is not set +# CONFIG_HID_PID is not set + +# +# Special HID drivers +# +CONFIG_USB_SUPPORT=y +# CONFIG_USB_ARCH_HAS_HCD is not set +# CONFIG_USB_ARCH_HAS_OHCI is not set +# CONFIG_USB_ARCH_HAS_EHCI is not set +# CONFIG_USB_OTG_WHITELIST is not set +# CONFIG_USB_OTG_BLACKLIST_HUB is not set +# CONFIG_USB_GADGET_MUSB_HDRC is not set + +# +# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may +# +CONFIG_USB_GADGET=y +# CONFIG_USB_GADGET_DEBUG is not set +# CONFIG_USB_GADGET_DEBUG_FILES is not set +# CONFIG_USB_GADGET_DEBUG_FS is not set +CONFIG_USB_GADGET_VBUS_DRAW=350 +CONFIG_USB_GADGET_SELECTED=y +# CONFIG_USB_GADGET_AT91 is not set +CONFIG_USB_GADGET_ATMEL_USBA=y +CONFIG_USB_ATMEL_USBA=y +# CONFIG_USB_GADGET_FSL_USB2 is not set +# CONFIG_USB_GADGET_LH7A40X is not set +# CONFIG_USB_GADGET_OMAP is not set +# CONFIG_USB_GADGET_PXA25X is not set +# CONFIG_USB_GADGET_R8A66597 is not set +# CONFIG_USB_GADGET_PXA27X is not set +# CONFIG_USB_GADGET_S3C_HSOTG is not set +# CONFIG_USB_GADGET_IMX is not set +# CONFIG_USB_GADGET_S3C2410 is not set +# CONFIG_USB_GADGET_M66592 is not set +# CONFIG_USB_GADGET_AMD5536UDC is not set +# CONFIG_USB_GADGET_FSL_QE is not set +# CONFIG_USB_GADGET_CI13XXX is not set +# CONFIG_USB_GADGET_NET2280 is not set +# 
CONFIG_USB_GADGET_GOKU is not set +# CONFIG_USB_GADGET_LANGWELL is not set +# CONFIG_USB_GADGET_DUMMY_HCD is not set +CONFIG_USB_GADGET_DUALSPEED=y +CONFIG_USB_ZERO=m +# CONFIG_USB_AUDIO is not set +CONFIG_USB_ETH=m +CONFIG_USB_ETH_RNDIS=y +# CONFIG_USB_ETH_EEM is not set +CONFIG_USB_GADGETFS=m +CONFIG_USB_FILE_STORAGE=m +# CONFIG_USB_FILE_STORAGE_TEST is not set +CONFIG_USB_G_SERIAL=m +# CONFIG_USB_MIDI_GADGET is not set +# CONFIG_USB_G_PRINTER is not set +CONFIG_USB_CDC_COMPOSITE=m + +# +# OTG and related infrastructure +# +# CONFIG_USB_GPIO_VBUS is not set +# CONFIG_NOP_USB_XCEIV is not set +CONFIG_MMC=y +# CONFIG_MMC_DEBUG is not set +# CONFIG_MMC_UNSAFE_RESUME is not set + +# +# MMC/SD/SDIO Card Drivers +# +CONFIG_MMC_BLOCK=y +CONFIG_MMC_BLOCK_BOUNCE=y +# CONFIG_SDIO_UART is not set +# CONFIG_MMC_TEST is not set + +# +# MMC/SD/SDIO Host Controller Drivers +# +# CONFIG_MMC_SDHCI is not set +# CONFIG_MMC_AT91 is not set +CONFIG_MMC_ATMELMCI=y +# CONFIG_MMC_ATMELMCI_DMA is not set +# CONFIG_MMC_SPI is not set +# CONFIG_MEMSTICK is not set +CONFIG_NEW_LEDS=y +CONFIG_LEDS_CLASS=y + +# +# LED drivers +# +# CONFIG_LEDS_PCA9532 is not set +CONFIG_LEDS_GPIO=y +CONFIG_LEDS_GPIO_PLATFORM=y +# CONFIG_LEDS_LP3944 is not set +# CONFIG_LEDS_PCA955X is not set +# CONFIG_LEDS_DAC124S085 is not set +# CONFIG_LEDS_BD2802 is not set + +# +# LED Triggers +# +CONFIG_LEDS_TRIGGERS=y +CONFIG_LEDS_TRIGGER_TIMER=y +CONFIG_LEDS_TRIGGER_HEARTBEAT=y +# CONFIG_LEDS_TRIGGER_BACKLIGHT is not set +# CONFIG_LEDS_TRIGGER_GPIO is not set +# CONFIG_LEDS_TRIGGER_DEFAULT_ON is not set + +# +# iptables trigger is under Netfilter config (LED target) +# +# CONFIG_ACCESSIBILITY is not set +CONFIG_RTC_LIB=y +CONFIG_RTC_CLASS=y +CONFIG_RTC_HCTOSYS=y +CONFIG_RTC_HCTOSYS_DEVICE="rtc0" +# CONFIG_RTC_DEBUG is not set + +# +# RTC interfaces +# +CONFIG_RTC_INTF_SYSFS=y +CONFIG_RTC_INTF_PROC=y +CONFIG_RTC_INTF_DEV=y +# CONFIG_RTC_INTF_DEV_UIE_EMUL is not set +# CONFIG_RTC_DRV_TEST is not set + +# +# I2C RTC drivers +# +# CONFIG_RTC_DRV_DS1307 is not set +# CONFIG_RTC_DRV_DS1374 is not set +# CONFIG_RTC_DRV_DS1672 is not set +# CONFIG_RTC_DRV_MAX6900 is not set +# CONFIG_RTC_DRV_RS5C372 is not set +# CONFIG_RTC_DRV_ISL1208 is not set +# CONFIG_RTC_DRV_X1205 is not set +# CONFIG_RTC_DRV_PCF8563 is not set +# CONFIG_RTC_DRV_PCF8583 is not set +# CONFIG_RTC_DRV_M41T80 is not set +# CONFIG_RTC_DRV_S35390A is not set +# CONFIG_RTC_DRV_FM3130 is not set +# CONFIG_RTC_DRV_RX8581 is not set +# CONFIG_RTC_DRV_RX8025 is not set + +# +# SPI RTC drivers +# +# CONFIG_RTC_DRV_M41T94 is not set +# CONFIG_RTC_DRV_DS1305 is not set +# CONFIG_RTC_DRV_DS1390 is not set +# CONFIG_RTC_DRV_MAX6902 is not set +# CONFIG_RTC_DRV_R9701 is not set +# CONFIG_RTC_DRV_RS5C348 is not set +# CONFIG_RTC_DRV_DS3234 is not set +# CONFIG_RTC_DRV_PCF2123 is not set + +# +# Platform RTC drivers +# +# CONFIG_RTC_DRV_DS1286 is not set +# CONFIG_RTC_DRV_DS1511 is not set +# CONFIG_RTC_DRV_DS1553 is not set +# CONFIG_RTC_DRV_DS1742 is not set +# CONFIG_RTC_DRV_STK17TA8 is not set +# CONFIG_RTC_DRV_M48T86 is not set +# CONFIG_RTC_DRV_M48T35 is not set +# CONFIG_RTC_DRV_M48T59 is not set +# CONFIG_RTC_DRV_BQ4802 is not set +# CONFIG_RTC_DRV_V3020 is not set + +# +# on-CPU RTC drivers +# +CONFIG_RTC_DRV_AT32AP700X=y +CONFIG_DMADEVICES=y + +# +# DMA Devices +# +CONFIG_DW_DMAC=y +CONFIG_DMA_ENGINE=y + +# +# DMA Clients +# +# CONFIG_NET_DMA is not set +# CONFIG_ASYNC_TX_DMA is not set +# CONFIG_DMATEST is not set +# CONFIG_AUXDISPLAY is not set +# CONFIG_UIO is not set + +# +# TI 
VLYNQ +# +# CONFIG_STAGING is not set + +# +# File systems +# +CONFIG_EXT2_FS=y +# CONFIG_EXT2_FS_XATTR is not set +# CONFIG_EXT2_FS_XIP is not set +CONFIG_EXT3_FS=y +# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set +# CONFIG_EXT3_FS_XATTR is not set +# CONFIG_EXT4_FS is not set +CONFIG_JBD=y +# CONFIG_JBD_DEBUG is not set +# CONFIG_REISERFS_FS is not set +# CONFIG_JFS_FS is not set +# CONFIG_FS_POSIX_ACL is not set +# CONFIG_XFS_FS is not set +# CONFIG_GFS2_FS is not set +# CONFIG_OCFS2_FS is not set +# CONFIG_BTRFS_FS is not set +# CONFIG_NILFS2_FS is not set +CONFIG_FILE_LOCKING=y +CONFIG_FSNOTIFY=y +# CONFIG_DNOTIFY is not set +CONFIG_INOTIFY=y +CONFIG_INOTIFY_USER=y +# CONFIG_QUOTA is not set +# CONFIG_AUTOFS_FS is not set +# CONFIG_AUTOFS4_FS is not set +CONFIG_FUSE_FS=m +# CONFIG_CUSE is not set + +# +# Caches +# +# CONFIG_FSCACHE is not set + +# +# CD-ROM/DVD Filesystems +# +# CONFIG_ISO9660_FS is not set +# CONFIG_UDF_FS is not set + +# +# DOS/FAT/NT Filesystems +# +CONFIG_FAT_FS=m +CONFIG_MSDOS_FS=m +CONFIG_VFAT_FS=m +CONFIG_FAT_DEFAULT_CODEPAGE=850 +CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1" +# CONFIG_NTFS_FS is not set + +# +# Pseudo filesystems +# +CONFIG_PROC_FS=y +# CONFIG_PROC_KCORE is not set +CONFIG_PROC_SYSCTL=y +CONFIG_PROC_PAGE_MONITOR=y +CONFIG_SYSFS=y +CONFIG_TMPFS=y +# CONFIG_TMPFS_POSIX_ACL is not set +# CONFIG_HUGETLB_PAGE is not set +CONFIG_CONFIGFS_FS=y +CONFIG_MISC_FILESYSTEMS=y +# CONFIG_ADFS_FS is not set +# CONFIG_AFFS_FS is not set +# CONFIG_HFS_FS is not set +# CONFIG_HFSPLUS_FS is not set +# CONFIG_BEFS_FS is not set +# CONFIG_BFS_FS is not set +# CONFIG_EFS_FS is not set +CONFIG_JFFS2_FS=y +CONFIG_JFFS2_FS_DEBUG=0 +CONFIG_JFFS2_FS_WRITEBUFFER=y +# CONFIG_JFFS2_FS_WBUF_VERIFY is not set +# CONFIG_JFFS2_SUMMARY is not set +# CONFIG_JFFS2_FS_XATTR is not set +# CONFIG_JFFS2_COMPRESSION_OPTIONS is not set +CONFIG_JFFS2_ZLIB=y +# CONFIG_JFFS2_LZO is not set +CONFIG_JFFS2_RTIME=y +# CONFIG_JFFS2_RUBIN is not set +CONFIG_UBIFS_FS=y +# CONFIG_UBIFS_FS_XATTR is not set +# CONFIG_UBIFS_FS_ADVANCED_COMPR is not set +CONFIG_UBIFS_FS_LZO=y +CONFIG_UBIFS_FS_ZLIB=y +# CONFIG_UBIFS_FS_DEBUG is not set +# CONFIG_CRAMFS is not set +# CONFIG_SQUASHFS is not set +# CONFIG_VXFS_FS is not set +# CONFIG_MINIX_FS is not set +# CONFIG_OMFS_FS is not set +# CONFIG_HPFS_FS is not set +# CONFIG_QNX4FS_FS is not set +# CONFIG_ROMFS_FS is not set +# CONFIG_SYSV_FS is not set +# CONFIG_UFS_FS is not set +CONFIG_NETWORK_FILESYSTEMS=y +CONFIG_NFS_FS=y +CONFIG_NFS_V3=y +# CONFIG_NFS_V3_ACL is not set +# CONFIG_NFS_V4 is not set +CONFIG_ROOT_NFS=y +CONFIG_NFSD=m +CONFIG_NFSD_V3=y +# CONFIG_NFSD_V3_ACL is not set +# CONFIG_NFSD_V4 is not set +CONFIG_LOCKD=y +CONFIG_LOCKD_V4=y +CONFIG_EXPORTFS=m +CONFIG_NFS_COMMON=y +CONFIG_SUNRPC=y +# CONFIG_RPCSEC_GSS_KRB5 is not set +# CONFIG_RPCSEC_GSS_SPKM3 is not set +CONFIG_SMB_FS=m +# CONFIG_SMB_NLS_DEFAULT is not set +CONFIG_CIFS=m +# CONFIG_CIFS_STATS is not set +# CONFIG_CIFS_WEAK_PW_HASH is not set +# CONFIG_CIFS_XATTR is not set +# CONFIG_CIFS_DEBUG2 is not set +# CONFIG_CIFS_EXPERIMENTAL is not set +# CONFIG_NCP_FS is not set +# CONFIG_CODA_FS is not set +# CONFIG_AFS_FS is not set + +# +# Partition Types +# +# CONFIG_PARTITION_ADVANCED is not set +CONFIG_MSDOS_PARTITION=y +CONFIG_NLS=m +CONFIG_NLS_DEFAULT="iso8859-1" +CONFIG_NLS_CODEPAGE_437=m +# CONFIG_NLS_CODEPAGE_737 is not set +# CONFIG_NLS_CODEPAGE_775 is not set +CONFIG_NLS_CODEPAGE_850=m +# CONFIG_NLS_CODEPAGE_852 is not set +# CONFIG_NLS_CODEPAGE_855 is not set +# CONFIG_NLS_CODEPAGE_857 is 
not set +# CONFIG_NLS_CODEPAGE_860 is not set +# CONFIG_NLS_CODEPAGE_861 is not set +# CONFIG_NLS_CODEPAGE_862 is not set +# CONFIG_NLS_CODEPAGE_863 is not set +# CONFIG_NLS_CODEPAGE_864 is not set +# CONFIG_NLS_CODEPAGE_865 is not set +# CONFIG_NLS_CODEPAGE_866 is not set +# CONFIG_NLS_CODEPAGE_869 is not set +# CONFIG_NLS_CODEPAGE_936 is not set +# CONFIG_NLS_CODEPAGE_950 is not set +# CONFIG_NLS_CODEPAGE_932 is not set +# CONFIG_NLS_CODEPAGE_949 is not set +# CONFIG_NLS_CODEPAGE_874 is not set +# CONFIG_NLS_ISO8859_8 is not set +# CONFIG_NLS_CODEPAGE_1250 is not set +# CONFIG_NLS_CODEPAGE_1251 is not set +# CONFIG_NLS_ASCII is not set +CONFIG_NLS_ISO8859_1=m +# CONFIG_NLS_ISO8859_2 is not set +# CONFIG_NLS_ISO8859_3 is not set +# CONFIG_NLS_ISO8859_4 is not set +# CONFIG_NLS_ISO8859_5 is not set +# CONFIG_NLS_ISO8859_6 is not set +# CONFIG_NLS_ISO8859_7 is not set +# CONFIG_NLS_ISO8859_9 is not set +# CONFIG_NLS_ISO8859_13 is not set +# CONFIG_NLS_ISO8859_14 is not set +# CONFIG_NLS_ISO8859_15 is not set +# CONFIG_NLS_KOI8_R is not set +# CONFIG_NLS_KOI8_U is not set +CONFIG_NLS_UTF8=m +# CONFIG_DLM is not set + +# +# Kernel hacking +# +# CONFIG_PRINTK_TIME is not set +CONFIG_ENABLE_WARN_DEPRECATED=y +CONFIG_ENABLE_MUST_CHECK=y +CONFIG_FRAME_WARN=1024 +CONFIG_MAGIC_SYSRQ=y +# CONFIG_STRIP_ASM_SYMS is not set +# CONFIG_UNUSED_SYMBOLS is not set +CONFIG_DEBUG_FS=y +# CONFIG_HEADERS_CHECK is not set +CONFIG_DEBUG_KERNEL=y +# CONFIG_DEBUG_SHIRQ is not set +CONFIG_DETECT_SOFTLOCKUP=y +# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set +CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0 +CONFIG_DETECT_HUNG_TASK=y +# CONFIG_BOOTPARAM_HUNG_TASK_PANIC is not set +CONFIG_BOOTPARAM_HUNG_TASK_PANIC_VALUE=0 +CONFIG_SCHED_DEBUG=y +# CONFIG_SCHEDSTATS is not set +# CONFIG_TIMER_STATS is not set +# CONFIG_DEBUG_OBJECTS is not set +# CONFIG_SLUB_DEBUG_ON is not set +# CONFIG_SLUB_STATS is not set +# CONFIG_DEBUG_RT_MUTEXES is not set +# CONFIG_RT_MUTEX_TESTER is not set +# CONFIG_DEBUG_SPINLOCK is not set +# CONFIG_DEBUG_MUTEXES is not set +# CONFIG_DEBUG_LOCK_ALLOC is not set +# CONFIG_PROVE_LOCKING is not set +# CONFIG_LOCK_STAT is not set +# CONFIG_DEBUG_SPINLOCK_SLEEP is not set +# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set +CONFIG_STACKTRACE=y +# CONFIG_DEBUG_KOBJECT is not set +CONFIG_DEBUG_BUGVERBOSE=y +# CONFIG_DEBUG_INFO is not set +# CONFIG_DEBUG_VM is not set +# CONFIG_DEBUG_WRITECOUNT is not set +# CONFIG_DEBUG_MEMORY_INIT is not set +# CONFIG_DEBUG_LIST is not set +# CONFIG_DEBUG_SG is not set +# CONFIG_DEBUG_NOTIFIERS is not set +# CONFIG_DEBUG_CREDENTIALS is not set +CONFIG_FRAME_POINTER=y +# CONFIG_BOOT_PRINTK_DELAY is not set +# CONFIG_RCU_TORTURE_TEST is not set +# CONFIG_RCU_CPU_STALL_DETECTOR is not set +# CONFIG_KPROBES_SANITY_TEST is not set +# CONFIG_BACKTRACE_SELF_TEST is not set +# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set +# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set +# CONFIG_LKDTM is not set +# CONFIG_FAULT_INJECTION is not set +# CONFIG_PAGE_POISONING is not set +CONFIG_NOP_TRACER=y +CONFIG_RING_BUFFER=y +CONFIG_EVENT_TRACING=y +CONFIG_CONTEXT_SWITCH_TRACER=y +CONFIG_RING_BUFFER_ALLOW_SWAP=y +CONFIG_TRACING=y +CONFIG_TRACING_SUPPORT=y +CONFIG_FTRACE=y +# CONFIG_IRQSOFF_TRACER is not set +# CONFIG_SCHED_TRACER is not set +# CONFIG_ENABLE_DEFAULT_TRACERS is not set +# CONFIG_BOOT_TRACER is not set +CONFIG_BRANCH_PROFILE_NONE=y +# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set +# CONFIG_PROFILE_ALL_BRANCHES is not set +# CONFIG_KMEMTRACE is not set +# CONFIG_WORKQUEUE_TRACER is not set +# 
CONFIG_BLK_DEV_IO_TRACE is not set +# CONFIG_RING_BUFFER_BENCHMARK is not set +# CONFIG_DYNAMIC_DEBUG is not set +# CONFIG_SAMPLES is not set + +# +# Security options +# +# CONFIG_KEYS is not set +# CONFIG_SECURITY is not set +# CONFIG_SECURITYFS is not set +# CONFIG_SECURITY_FILE_CAPABILITIES is not set +CONFIG_CRYPTO=y + +# +# Crypto core or helper +# +# CONFIG_CRYPTO_FIPS is not set +CONFIG_CRYPTO_ALGAPI=y +CONFIG_CRYPTO_ALGAPI2=y +CONFIG_CRYPTO_AEAD=y +CONFIG_CRYPTO_AEAD2=y +CONFIG_CRYPTO_BLKCIPHER=y +CONFIG_CRYPTO_BLKCIPHER2=y +CONFIG_CRYPTO_HASH=y +CONFIG_CRYPTO_HASH2=y +CONFIG_CRYPTO_RNG=m +CONFIG_CRYPTO_RNG2=y +CONFIG_CRYPTO_PCOMP=y +CONFIG_CRYPTO_MANAGER=y +CONFIG_CRYPTO_MANAGER2=y +# CONFIG_CRYPTO_GF128MUL is not set +# CONFIG_CRYPTO_NULL is not set +CONFIG_CRYPTO_WORKQUEUE=y +# CONFIG_CRYPTO_CRYPTD is not set +CONFIG_CRYPTO_AUTHENC=y +# CONFIG_CRYPTO_TEST is not set + +# +# Authenticated Encryption with Associated Data +# +# CONFIG_CRYPTO_CCM is not set +# CONFIG_CRYPTO_GCM is not set +# CONFIG_CRYPTO_SEQIV is not set + +# +# Block modes +# +CONFIG_CRYPTO_CBC=y +# CONFIG_CRYPTO_CTR is not set +# CONFIG_CRYPTO_CTS is not set +CONFIG_CRYPTO_ECB=m +# CONFIG_CRYPTO_LRW is not set +# CONFIG_CRYPTO_PCBC is not set +# CONFIG_CRYPTO_XTS is not set + +# +# Hash modes +# +CONFIG_CRYPTO_HMAC=y +# CONFIG_CRYPTO_XCBC is not set +# CONFIG_CRYPTO_VMAC is not set + +# +# Digest +# +# CONFIG_CRYPTO_CRC32C is not set +# CONFIG_CRYPTO_GHASH is not set +# CONFIG_CRYPTO_MD4 is not set +CONFIG_CRYPTO_MD5=y +# CONFIG_CRYPTO_MICHAEL_MIC is not set +# CONFIG_CRYPTO_RMD128 is not set +# CONFIG_CRYPTO_RMD160 is not set +# CONFIG_CRYPTO_RMD256 is not set +# CONFIG_CRYPTO_RMD320 is not set +CONFIG_CRYPTO_SHA1=y +# CONFIG_CRYPTO_SHA256 is not set +# CONFIG_CRYPTO_SHA512 is not set +# CONFIG_CRYPTO_TGR192 is not set +# CONFIG_CRYPTO_WP512 is not set + +# +# Ciphers +# +CONFIG_CRYPTO_AES=m +# CONFIG_CRYPTO_ANUBIS is not set +CONFIG_CRYPTO_ARC4=m +# CONFIG_CRYPTO_BLOWFISH is not set +# CONFIG_CRYPTO_CAMELLIA is not set +# CONFIG_CRYPTO_CAST5 is not set +# CONFIG_CRYPTO_CAST6 is not set +CONFIG_CRYPTO_DES=y +# CONFIG_CRYPTO_FCRYPT is not set +# CONFIG_CRYPTO_KHAZAD is not set +# CONFIG_CRYPTO_SALSA20 is not set +# CONFIG_CRYPTO_SEED is not set +# CONFIG_CRYPTO_SERPENT is not set +# CONFIG_CRYPTO_TEA is not set +# CONFIG_CRYPTO_TWOFISH is not set + +# +# Compression +# +CONFIG_CRYPTO_DEFLATE=y +# CONFIG_CRYPTO_ZLIB is not set +CONFIG_CRYPTO_LZO=y + +# +# Random Number Generation +# +CONFIG_CRYPTO_ANSI_CPRNG=m +CONFIG_CRYPTO_HW=y +CONFIG_BINARY_PRINTF=y + +# +# Library routines +# +CONFIG_BITREVERSE=y +CONFIG_GENERIC_FIND_LAST_BIT=y +CONFIG_CRC_CCITT=m +CONFIG_CRC16=y +# CONFIG_CRC_T10DIF is not set +# CONFIG_CRC_ITU_T is not set +CONFIG_CRC32=y +# CONFIG_CRC7 is not set +# CONFIG_LIBCRC32C is not set +CONFIG_ZLIB_INFLATE=y +CONFIG_ZLIB_DEFLATE=y +CONFIG_LZO_COMPRESS=y +CONFIG_LZO_DECOMPRESS=y +CONFIG_DECOMPRESS_GZIP=y +CONFIG_GENERIC_ALLOCATOR=y +CONFIG_HAS_IOMEM=y +CONFIG_HAS_IOPORT=y +CONFIG_HAS_DMA=y +CONFIG_NLATTR=y diff --git a/arch/avr32/configs/atngw100mkii_evklcd101_defconfig b/arch/avr32/configs/atngw100mkii_evklcd101_defconfig new file mode 100644 index 00000000000..bbf6bc316ec --- /dev/null +++ b/arch/avr32/configs/atngw100mkii_evklcd101_defconfig @@ -0,0 +1,1549 @@ +# +# Automatically generated make config: don't edit +# Linux kernel version: 2.6.32-rc5 +# Thu Nov 5 15:33:32 2009 +# +CONFIG_AVR32=y +CONFIG_GENERIC_GPIO=y +CONFIG_GENERIC_HARDIRQS=y +CONFIG_STACKTRACE_SUPPORT=y +CONFIG_LOCKDEP_SUPPORT=y 
+CONFIG_TRACE_IRQFLAGS_SUPPORT=y +CONFIG_HARDIRQS_SW_RESEND=y +CONFIG_GENERIC_IRQ_PROBE=y +CONFIG_RWSEM_GENERIC_SPINLOCK=y +CONFIG_GENERIC_TIME=y +CONFIG_GENERIC_CLOCKEVENTS=y +# CONFIG_RWSEM_XCHGADD_ALGORITHM is not set +# CONFIG_ARCH_HAS_ILOG2_U32 is not set +# CONFIG_ARCH_HAS_ILOG2_U64 is not set +CONFIG_GENERIC_HWEIGHT=y +CONFIG_GENERIC_CALIBRATE_DELAY=y +CONFIG_GENERIC_BUG=y +CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config" +CONFIG_CONSTRUCTORS=y + +# +# General setup +# +CONFIG_EXPERIMENTAL=y +CONFIG_BROKEN_ON_SMP=y +CONFIG_INIT_ENV_ARG_LIMIT=32 +CONFIG_LOCALVERSION="" +# CONFIG_LOCALVERSION_AUTO is not set +CONFIG_SWAP=y +CONFIG_SYSVIPC=y +CONFIG_SYSVIPC_SYSCTL=y +CONFIG_POSIX_MQUEUE=y +CONFIG_POSIX_MQUEUE_SYSCTL=y +CONFIG_BSD_PROCESS_ACCT=y +CONFIG_BSD_PROCESS_ACCT_V3=y +# CONFIG_TASKSTATS is not set +# CONFIG_AUDIT is not set + +# +# RCU Subsystem +# +CONFIG_TREE_RCU=y +# CONFIG_TREE_PREEMPT_RCU is not set +# CONFIG_RCU_TRACE is not set +CONFIG_RCU_FANOUT=32 +# CONFIG_RCU_FANOUT_EXACT is not set +# CONFIG_TREE_RCU_TRACE is not set +# CONFIG_IKCONFIG is not set +CONFIG_LOG_BUF_SHIFT=14 +# CONFIG_GROUP_SCHED is not set +# CONFIG_CGROUPS is not set +CONFIG_SYSFS_DEPRECATED=y +CONFIG_SYSFS_DEPRECATED_V2=y +# CONFIG_RELAY is not set +# CONFIG_NAMESPACES is not set +CONFIG_BLK_DEV_INITRD=y +CONFIG_INITRAMFS_SOURCE="" +CONFIG_RD_GZIP=y +# CONFIG_RD_BZIP2 is not set +# CONFIG_RD_LZMA is not set +CONFIG_CC_OPTIMIZE_FOR_SIZE=y +CONFIG_SYSCTL=y +CONFIG_ANON_INODES=y +CONFIG_EMBEDDED=y +# CONFIG_SYSCTL_SYSCALL is not set +CONFIG_KALLSYMS=y +# CONFIG_KALLSYMS_ALL is not set +# CONFIG_KALLSYMS_EXTRA_PASS is not set +CONFIG_HOTPLUG=y +CONFIG_PRINTK=y +CONFIG_BUG=y +CONFIG_ELF_CORE=y +# CONFIG_BASE_FULL is not set +CONFIG_FUTEX=y +CONFIG_EPOLL=y +CONFIG_SIGNALFD=y +CONFIG_TIMERFD=y +CONFIG_EVENTFD=y +CONFIG_SHMEM=y +CONFIG_AIO=y + +# +# Kernel Performance Events And Counters +# +CONFIG_VM_EVENT_COUNTERS=y +CONFIG_SLUB_DEBUG=y +# CONFIG_COMPAT_BRK is not set +# CONFIG_SLAB is not set +CONFIG_SLUB=y +# CONFIG_SLOB is not set +CONFIG_PROFILING=y +CONFIG_TRACEPOINTS=y +CONFIG_OPROFILE=m +CONFIG_HAVE_OPROFILE=y +CONFIG_KPROBES=y +CONFIG_HAVE_KPROBES=y +CONFIG_HAVE_CLK=y + +# +# GCOV-based kernel profiling +# +# CONFIG_GCOV_KERNEL is not set +CONFIG_SLOW_WORK=y +# CONFIG_HAVE_GENERIC_DMA_COHERENT is not set +CONFIG_SLABINFO=y +CONFIG_RT_MUTEXES=y +CONFIG_BASE_SMALL=1 +CONFIG_MODULES=y +# CONFIG_MODULE_FORCE_LOAD is not set +CONFIG_MODULE_UNLOAD=y +CONFIG_MODULE_FORCE_UNLOAD=y +# CONFIG_MODVERSIONS is not set +# CONFIG_MODULE_SRCVERSION_ALL is not set +CONFIG_BLOCK=y +CONFIG_LBDAF=y +# CONFIG_BLK_DEV_BSG is not set +# CONFIG_BLK_DEV_INTEGRITY is not set + +# +# IO Schedulers +# +CONFIG_IOSCHED_NOOP=y +# CONFIG_IOSCHED_AS is not set +# CONFIG_IOSCHED_DEADLINE is not set +CONFIG_IOSCHED_CFQ=y +# CONFIG_DEFAULT_AS is not set +# CONFIG_DEFAULT_DEADLINE is not set +CONFIG_DEFAULT_CFQ=y +# CONFIG_DEFAULT_NOOP is not set +CONFIG_DEFAULT_IOSCHED="cfq" +CONFIG_FREEZER=y + +# +# System Type and features +# +CONFIG_TICK_ONESHOT=y +CONFIG_NO_HZ=y +CONFIG_HIGH_RES_TIMERS=y +CONFIG_GENERIC_CLOCKEVENTS_BUILD=y +CONFIG_SUBARCH_AVR32B=y +CONFIG_MMU=y +CONFIG_PERFORMANCE_COUNTERS=y +CONFIG_PLATFORM_AT32AP=y +CONFIG_CPU_AT32AP700X=y +CONFIG_CPU_AT32AP7000=y +CONFIG_BOARD_ATNGW100_COMMON=y +# CONFIG_BOARD_ATSTK1000 is not set +# CONFIG_BOARD_ATNGW100_MKI is not set +CONFIG_BOARD_ATNGW100_MKII=y +# CONFIG_BOARD_HAMMERHEAD is not set +# CONFIG_BOARD_FAVR_32 is not set +# CONFIG_BOARD_MERISC is not set +# 
CONFIG_BOARD_MIMC200 is not set +CONFIG_BOARD_ATNGW100_MKII_LCD=y +# CONFIG_BOARD_ATNGW100_ADDON_NONE is not set +CONFIG_BOARD_ATNGW100_EVKLCD10X=y +# CONFIG_BOARD_ATNGW100_MRMT is not set +# CONFIG_BOARD_ATNGW100_EVKLCD10X_QVGA is not set +CONFIG_BOARD_ATNGW100_EVKLCD10X_VGA=y +# CONFIG_BOARD_ATNGW100_EVKLCD10X_POW_QVGA is not set +CONFIG_LOADER_U_BOOT=y + +# +# Atmel AVR32 AP options +# +# CONFIG_AP700X_32_BIT_SMC is not set +CONFIG_AP700X_16_BIT_SMC=y +# CONFIG_AP700X_8_BIT_SMC is not set +CONFIG_LOAD_ADDRESS=0x10000000 +CONFIG_ENTRY_ADDRESS=0x90000000 +CONFIG_PHYS_OFFSET=0x10000000 +CONFIG_PREEMPT_NONE=y +# CONFIG_PREEMPT_VOLUNTARY is not set +# CONFIG_PREEMPT is not set +CONFIG_QUICKLIST=y +# CONFIG_HAVE_ARCH_BOOTMEM is not set +# CONFIG_ARCH_HAVE_MEMORY_PRESENT is not set +# CONFIG_NEED_NODE_MEMMAP_SIZE is not set +CONFIG_ARCH_FLATMEM_ENABLE=y +# CONFIG_ARCH_DISCONTIGMEM_ENABLE is not set +# CONFIG_ARCH_SPARSEMEM_ENABLE is not set +CONFIG_SELECT_MEMORY_MODEL=y +CONFIG_FLATMEM_MANUAL=y +# CONFIG_DISCONTIGMEM_MANUAL is not set +# CONFIG_SPARSEMEM_MANUAL is not set +CONFIG_FLATMEM=y +CONFIG_FLAT_NODE_MEM_MAP=y +CONFIG_PAGEFLAGS_EXTENDED=y +CONFIG_SPLIT_PTLOCK_CPUS=4 +# CONFIG_PHYS_ADDR_T_64BIT is not set +CONFIG_ZONE_DMA_FLAG=0 +CONFIG_NR_QUICK=2 +CONFIG_VIRT_TO_BUS=y +CONFIG_HAVE_MLOCK=y +CONFIG_HAVE_MLOCKED_PAGE_BIT=y +# CONFIG_KSM is not set +CONFIG_DEFAULT_MMAP_MIN_ADDR=4096 +# CONFIG_OWNERSHIP_TRACE is not set +CONFIG_NMI_DEBUGGING=y +# CONFIG_HZ_100 is not set +CONFIG_HZ_250=y +# CONFIG_HZ_300 is not set +# CONFIG_HZ_1000 is not set +CONFIG_HZ=250 +CONFIG_SCHED_HRTICK=y +CONFIG_CMDLINE="" + +# +# Power management options +# +CONFIG_PM=y +# CONFIG_PM_DEBUG is not set +CONFIG_PM_SLEEP=y +CONFIG_SUSPEND=y +CONFIG_SUSPEND_FREEZER=y +# CONFIG_PM_RUNTIME is not set +CONFIG_ARCH_SUSPEND_POSSIBLE=y + +# +# CPU Frequency scaling +# +CONFIG_CPU_FREQ=y +CONFIG_CPU_FREQ_TABLE=y +# CONFIG_CPU_FREQ_DEBUG is not set +# CONFIG_CPU_FREQ_STAT is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_POWERSAVE is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE is not set +CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y +# CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE is not set +CONFIG_CPU_FREQ_GOV_PERFORMANCE=y +# CONFIG_CPU_FREQ_GOV_POWERSAVE is not set +CONFIG_CPU_FREQ_GOV_USERSPACE=y +CONFIG_CPU_FREQ_GOV_ONDEMAND=y +# CONFIG_CPU_FREQ_GOV_CONSERVATIVE is not set +CONFIG_CPU_FREQ_AT32AP=y + +# +# Bus options +# +# CONFIG_ARCH_SUPPORTS_MSI is not set +# CONFIG_PCCARD is not set + +# +# Executable file formats +# +CONFIG_BINFMT_ELF=y +# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set +# CONFIG_HAVE_AOUT is not set +# CONFIG_BINFMT_MISC is not set +CONFIG_NET=y + +# +# Networking options +# +CONFIG_PACKET=y +CONFIG_PACKET_MMAP=y +CONFIG_UNIX=y +CONFIG_XFRM=y +CONFIG_XFRM_USER=y +# CONFIG_XFRM_SUB_POLICY is not set +# CONFIG_XFRM_MIGRATE is not set +# CONFIG_XFRM_STATISTICS is not set +CONFIG_XFRM_IPCOMP=y +CONFIG_NET_KEY=y +# CONFIG_NET_KEY_MIGRATE is not set +CONFIG_INET=y +CONFIG_IP_MULTICAST=y +CONFIG_IP_ADVANCED_ROUTER=y +CONFIG_ASK_IP_FIB_HASH=y +# CONFIG_IP_FIB_TRIE is not set +CONFIG_IP_FIB_HASH=y +# CONFIG_IP_MULTIPLE_TABLES is not set +# CONFIG_IP_ROUTE_MULTIPATH is not set +# CONFIG_IP_ROUTE_VERBOSE is not set +CONFIG_IP_PNP=y +CONFIG_IP_PNP_DHCP=y +# CONFIG_IP_PNP_BOOTP is not set +# CONFIG_IP_PNP_RARP is not set +# CONFIG_NET_IPIP is not set +# CONFIG_NET_IPGRE is not set +CONFIG_IP_MROUTE=y +CONFIG_IP_PIMSM_V1=y +# CONFIG_IP_PIMSM_V2 is not set +# CONFIG_ARPD is 
not set +CONFIG_SYN_COOKIES=y +CONFIG_INET_AH=y +CONFIG_INET_ESP=y +CONFIG_INET_IPCOMP=y +CONFIG_INET_XFRM_TUNNEL=y +CONFIG_INET_TUNNEL=y +CONFIG_INET_XFRM_MODE_TRANSPORT=y +CONFIG_INET_XFRM_MODE_TUNNEL=y +CONFIG_INET_XFRM_MODE_BEET=y +# CONFIG_INET_LRO is not set +CONFIG_INET_DIAG=y +CONFIG_INET_TCP_DIAG=y +# CONFIG_TCP_CONG_ADVANCED is not set +CONFIG_TCP_CONG_CUBIC=y +CONFIG_DEFAULT_TCP_CONG="cubic" +# CONFIG_TCP_MD5SIG is not set +CONFIG_IPV6=y +# CONFIG_IPV6_PRIVACY is not set +# CONFIG_IPV6_ROUTER_PREF is not set +# CONFIG_IPV6_OPTIMISTIC_DAD is not set +CONFIG_INET6_AH=y +CONFIG_INET6_ESP=y +CONFIG_INET6_IPCOMP=y +# CONFIG_IPV6_MIP6 is not set +CONFIG_INET6_XFRM_TUNNEL=y +CONFIG_INET6_TUNNEL=y +CONFIG_INET6_XFRM_MODE_TRANSPORT=y +CONFIG_INET6_XFRM_MODE_TUNNEL=y +CONFIG_INET6_XFRM_MODE_BEET=y +# CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION is not set +CONFIG_IPV6_SIT=y +CONFIG_IPV6_NDISC_NODETYPE=y +# CONFIG_IPV6_TUNNEL is not set +# CONFIG_IPV6_MULTIPLE_TABLES is not set +# CONFIG_IPV6_MROUTE is not set +# CONFIG_NETWORK_SECMARK is not set +CONFIG_NETFILTER=y +# CONFIG_NETFILTER_DEBUG is not set +# CONFIG_NETFILTER_ADVANCED is not set + +# +# Core Netfilter Configuration +# +CONFIG_NETFILTER_NETLINK=m +CONFIG_NETFILTER_NETLINK_LOG=m +CONFIG_NF_CONNTRACK=m +CONFIG_NF_CONNTRACK_FTP=m +CONFIG_NF_CONNTRACK_IRC=m +CONFIG_NF_CONNTRACK_SIP=m +CONFIG_NF_CT_NETLINK=m +CONFIG_NETFILTER_XTABLES=y +CONFIG_NETFILTER_XT_TARGET_MARK=m +CONFIG_NETFILTER_XT_TARGET_NFLOG=m +CONFIG_NETFILTER_XT_TARGET_TCPMSS=m +CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m +CONFIG_NETFILTER_XT_MATCH_MARK=m +CONFIG_NETFILTER_XT_MATCH_POLICY=m +CONFIG_NETFILTER_XT_MATCH_STATE=m +# CONFIG_IP_VS is not set + +# +# IP: Netfilter Configuration +# +CONFIG_NF_DEFRAG_IPV4=m +CONFIG_NF_CONNTRACK_IPV4=m +CONFIG_NF_CONNTRACK_PROC_COMPAT=y +CONFIG_IP_NF_IPTABLES=m +CONFIG_IP_NF_FILTER=m +CONFIG_IP_NF_TARGET_REJECT=m +CONFIG_IP_NF_TARGET_LOG=m +# CONFIG_IP_NF_TARGET_ULOG is not set +CONFIG_NF_NAT=m +CONFIG_NF_NAT_NEEDED=y +CONFIG_IP_NF_TARGET_MASQUERADE=m +CONFIG_NF_NAT_FTP=m +CONFIG_NF_NAT_IRC=m +# CONFIG_NF_NAT_TFTP is not set +# CONFIG_NF_NAT_AMANDA is not set +# CONFIG_NF_NAT_PPTP is not set +# CONFIG_NF_NAT_H323 is not set +CONFIG_NF_NAT_SIP=m +CONFIG_IP_NF_MANGLE=m + +# +# IPv6: Netfilter Configuration +# +CONFIG_NF_CONNTRACK_IPV6=m +CONFIG_IP6_NF_IPTABLES=m +CONFIG_IP6_NF_MATCH_IPV6HEADER=m +CONFIG_IP6_NF_TARGET_LOG=m +CONFIG_IP6_NF_FILTER=m +CONFIG_IP6_NF_TARGET_REJECT=m +CONFIG_IP6_NF_MANGLE=m +# CONFIG_IP_DCCP is not set +# CONFIG_IP_SCTP is not set +# CONFIG_RDS is not set +# CONFIG_TIPC is not set +# CONFIG_ATM is not set +CONFIG_STP=m +CONFIG_BRIDGE=m +# CONFIG_NET_DSA is not set +CONFIG_VLAN_8021Q=m +# CONFIG_VLAN_8021Q_GVRP is not set +# CONFIG_DECNET is not set +CONFIG_LLC=m +# CONFIG_LLC2 is not set +# CONFIG_IPX is not set +# CONFIG_ATALK is not set +# CONFIG_X25 is not set +# CONFIG_LAPB is not set +# CONFIG_ECONET is not set +# CONFIG_WAN_ROUTER is not set +# CONFIG_PHONET is not set +# CONFIG_IEEE802154 is not set +# CONFIG_NET_SCHED is not set +# CONFIG_DCB is not set + +# +# Network testing +# +# CONFIG_NET_PKTGEN is not set +# CONFIG_NET_TCPPROBE is not set +# CONFIG_NET_DROP_MONITOR is not set +# CONFIG_HAMRADIO is not set +# CONFIG_CAN is not set +# CONFIG_IRDA is not set +# CONFIG_BT is not set +# CONFIG_AF_RXRPC is not set +CONFIG_WIRELESS=y +# CONFIG_CFG80211 is not set +CONFIG_CFG80211_DEFAULT_PS_VALUE=0 +# CONFIG_WIRELESS_OLD_REGULATORY is not set +# CONFIG_WIRELESS_EXT is not set +# CONFIG_LIB80211 is not set + 
+# +# CFG80211 needs to be enabled for MAC80211 +# +# CONFIG_WIMAX is not set +# CONFIG_RFKILL is not set +# CONFIG_NET_9P is not set + +# +# Device Drivers +# + +# +# Generic Driver Options +# +CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" +# CONFIG_DEVTMPFS is not set +CONFIG_STANDALONE=y +# CONFIG_PREVENT_FIRMWARE_BUILD is not set +# CONFIG_FW_LOADER is not set +# CONFIG_DEBUG_DRIVER is not set +# CONFIG_DEBUG_DEVRES is not set +# CONFIG_SYS_HYPERVISOR is not set +# CONFIG_CONNECTOR is not set +CONFIG_MTD=y +# CONFIG_MTD_DEBUG is not set +# CONFIG_MTD_TESTS is not set +# CONFIG_MTD_CONCAT is not set +CONFIG_MTD_PARTITIONS=y +# CONFIG_MTD_REDBOOT_PARTS is not set +CONFIG_MTD_CMDLINE_PARTS=y +# CONFIG_MTD_AR7_PARTS is not set + +# +# User Modules And Translation Layers +# +CONFIG_MTD_CHAR=y +CONFIG_MTD_BLKDEVS=y +CONFIG_MTD_BLOCK=y +# CONFIG_FTL is not set +# CONFIG_NFTL is not set +# CONFIG_INFTL is not set +# CONFIG_RFD_FTL is not set +# CONFIG_SSFDC is not set +# CONFIG_MTD_OOPS is not set + +# +# RAM/ROM/Flash chip drivers +# +CONFIG_MTD_CFI=y +# CONFIG_MTD_JEDECPROBE is not set +CONFIG_MTD_GEN_PROBE=y +# CONFIG_MTD_CFI_ADV_OPTIONS is not set +CONFIG_MTD_MAP_BANK_WIDTH_1=y +CONFIG_MTD_MAP_BANK_WIDTH_2=y +CONFIG_MTD_MAP_BANK_WIDTH_4=y +# CONFIG_MTD_MAP_BANK_WIDTH_8 is not set +# CONFIG_MTD_MAP_BANK_WIDTH_16 is not set +# CONFIG_MTD_MAP_BANK_WIDTH_32 is not set +CONFIG_MTD_CFI_I1=y +CONFIG_MTD_CFI_I2=y +# CONFIG_MTD_CFI_I4 is not set +# CONFIG_MTD_CFI_I8 is not set +CONFIG_MTD_CFI_INTELEXT=y +# CONFIG_MTD_CFI_AMDSTD is not set +# CONFIG_MTD_CFI_STAA is not set +CONFIG_MTD_CFI_UTIL=y +# CONFIG_MTD_RAM is not set +# CONFIG_MTD_ROM is not set +# CONFIG_MTD_ABSENT is not set + +# +# Mapping drivers for chip access +# +# CONFIG_MTD_COMPLEX_MAPPINGS is not set +CONFIG_MTD_PHYSMAP=y +# CONFIG_MTD_PHYSMAP_COMPAT is not set +# CONFIG_MTD_PLATRAM is not set + +# +# Self-contained MTD device drivers +# +CONFIG_MTD_DATAFLASH=y +# CONFIG_MTD_DATAFLASH_WRITE_VERIFY is not set +# CONFIG_MTD_DATAFLASH_OTP is not set +# CONFIG_MTD_M25P80 is not set +# CONFIG_MTD_SST25L is not set +# CONFIG_MTD_SLRAM is not set +# CONFIG_MTD_PHRAM is not set +# CONFIG_MTD_MTDRAM is not set +# CONFIG_MTD_BLOCK2MTD is not set + +# +# Disk-On-Chip Device Drivers +# +# CONFIG_MTD_DOC2000 is not set +# CONFIG_MTD_DOC2001 is not set +# CONFIG_MTD_DOC2001PLUS is not set +CONFIG_MTD_NAND=y +# CONFIG_MTD_NAND_VERIFY_WRITE is not set +# CONFIG_MTD_NAND_ECC_SMC is not set +# CONFIG_MTD_NAND_MUSEUM_IDS is not set +CONFIG_MTD_NAND_IDS=y +# CONFIG_MTD_NAND_DISKONCHIP is not set +CONFIG_MTD_NAND_ATMEL=y +CONFIG_MTD_NAND_ATMEL_ECC_HW=y +# CONFIG_MTD_NAND_ATMEL_ECC_SOFT is not set +# CONFIG_MTD_NAND_ATMEL_ECC_NONE is not set +# CONFIG_MTD_NAND_NANDSIM is not set +# CONFIG_MTD_NAND_PLATFORM is not set +# CONFIG_MTD_ONENAND is not set + +# +# LPDDR flash memory drivers +# +# CONFIG_MTD_LPDDR is not set + +# +# UBI - Unsorted block images +# +CONFIG_MTD_UBI=y +CONFIG_MTD_UBI_WL_THRESHOLD=4096 +CONFIG_MTD_UBI_BEB_RESERVE=1 +# CONFIG_MTD_UBI_GLUEBI is not set + +# +# UBI debugging options +# +# CONFIG_MTD_UBI_DEBUG is not set +# CONFIG_PARPORT is not set +CONFIG_BLK_DEV=y +# CONFIG_BLK_DEV_COW_COMMON is not set +CONFIG_BLK_DEV_LOOP=m +# CONFIG_BLK_DEV_CRYPTOLOOP is not set +CONFIG_BLK_DEV_NBD=m +CONFIG_BLK_DEV_RAM=m +CONFIG_BLK_DEV_RAM_COUNT=16 +CONFIG_BLK_DEV_RAM_SIZE=4096 +# CONFIG_BLK_DEV_XIP is not set +# CONFIG_CDROM_PKTCDVD is not set +# CONFIG_ATA_OVER_ETH is not set +CONFIG_MISC_DEVICES=y +# CONFIG_ATMEL_PWM is not set 
+CONFIG_ATMEL_TCLIB=y +CONFIG_ATMEL_TCB_CLKSRC=y +CONFIG_ATMEL_TCB_CLKSRC_BLOCK=0 +# CONFIG_ICS932S401 is not set +# CONFIG_ATMEL_SSC is not set +# CONFIG_ENCLOSURE_SERVICES is not set +# CONFIG_ISL29003 is not set +# CONFIG_C2PORT is not set + +# +# EEPROM support +# +# CONFIG_EEPROM_AT24 is not set +# CONFIG_EEPROM_AT25 is not set +# CONFIG_EEPROM_LEGACY is not set +# CONFIG_EEPROM_MAX6875 is not set +# CONFIG_EEPROM_93CX6 is not set + +# +# SCSI device support +# +# CONFIG_RAID_ATTRS is not set +# CONFIG_SCSI is not set +# CONFIG_SCSI_DMA is not set +# CONFIG_SCSI_NETLINK is not set +# CONFIG_ATA is not set +# CONFIG_MD is not set +CONFIG_NETDEVICES=y +# CONFIG_DUMMY is not set +# CONFIG_BONDING is not set +# CONFIG_MACVLAN is not set +# CONFIG_EQUALIZER is not set +# CONFIG_TUN is not set +# CONFIG_VETH is not set +CONFIG_PHYLIB=y + +# +# MII PHY device drivers +# +# CONFIG_MARVELL_PHY is not set +# CONFIG_DAVICOM_PHY is not set +# CONFIG_QSEMI_PHY is not set +# CONFIG_LXT_PHY is not set +# CONFIG_CICADA_PHY is not set +# CONFIG_VITESSE_PHY is not set +# CONFIG_SMSC_PHY is not set +# CONFIG_BROADCOM_PHY is not set +# CONFIG_ICPLUS_PHY is not set +# CONFIG_REALTEK_PHY is not set +# CONFIG_NATIONAL_PHY is not set +# CONFIG_STE10XP is not set +# CONFIG_LSI_ET1011C_PHY is not set +# CONFIG_FIXED_PHY is not set +# CONFIG_MDIO_BITBANG is not set +CONFIG_NET_ETHERNET=y +# CONFIG_MII is not set +CONFIG_MACB=y +# CONFIG_ENC28J60 is not set +# CONFIG_ETHOC is not set +# CONFIG_DNET is not set +# CONFIG_IBM_NEW_EMAC_ZMII is not set +# CONFIG_IBM_NEW_EMAC_RGMII is not set +# CONFIG_IBM_NEW_EMAC_TAH is not set +# CONFIG_IBM_NEW_EMAC_EMAC4 is not set +# CONFIG_IBM_NEW_EMAC_NO_FLOW_CTRL is not set +# CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set +# CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set +# CONFIG_B44 is not set +# CONFIG_KS8842 is not set +# CONFIG_KS8851 is not set +# CONFIG_KS8851_MLL is not set +# CONFIG_NETDEV_1000 is not set +# CONFIG_NETDEV_10000 is not set +CONFIG_WLAN=y +# CONFIG_WLAN_PRE80211 is not set +# CONFIG_WLAN_80211 is not set + +# +# Enable WiMAX (Networking options) to see the WiMAX drivers +# +# CONFIG_WAN is not set +CONFIG_PPP=m +# CONFIG_PPP_MULTILINK is not set +CONFIG_PPP_FILTER=y +CONFIG_PPP_ASYNC=m +# CONFIG_PPP_SYNC_TTY is not set +CONFIG_PPP_DEFLATE=m +CONFIG_PPP_BSDCOMP=m +CONFIG_PPP_MPPE=m +CONFIG_PPPOE=m +# CONFIG_PPPOL2TP is not set +# CONFIG_SLIP is not set +CONFIG_SLHC=m +# CONFIG_NETCONSOLE is not set +# CONFIG_NETPOLL is not set +# CONFIG_NET_POLL_CONTROLLER is not set +# CONFIG_ISDN is not set +# CONFIG_PHONE is not set + +# +# Input device support +# +CONFIG_INPUT=y +# CONFIG_INPUT_FF_MEMLESS is not set +# CONFIG_INPUT_POLLDEV is not set + +# +# Userland interfaces +# +# CONFIG_INPUT_MOUSEDEV is not set +# CONFIG_INPUT_JOYDEV is not set +CONFIG_INPUT_EVDEV=m +# CONFIG_INPUT_EVBUG is not set + +# +# Input Device Drivers +# +# CONFIG_INPUT_KEYBOARD is not set +# CONFIG_INPUT_MOUSE is not set +# CONFIG_INPUT_JOYSTICK is not set +# CONFIG_INPUT_TABLET is not set +CONFIG_INPUT_TOUCHSCREEN=y +# CONFIG_TOUCHSCREEN_ADS7846 is not set +# CONFIG_TOUCHSCREEN_AD7877 is not set +# CONFIG_TOUCHSCREEN_AD7879_I2C is not set +# CONFIG_TOUCHSCREEN_AD7879_SPI is not set +# CONFIG_TOUCHSCREEN_AD7879 is not set +# CONFIG_TOUCHSCREEN_EETI is not set +# CONFIG_TOUCHSCREEN_FUJITSU is not set +# CONFIG_TOUCHSCREEN_GUNZE is not set +# CONFIG_TOUCHSCREEN_ELO is not set +# CONFIG_TOUCHSCREEN_WACOM_W8001 is not set +# CONFIG_TOUCHSCREEN_MCS5000 is not set +# 
CONFIG_TOUCHSCREEN_MTOUCH is not set +# CONFIG_TOUCHSCREEN_INEXIO is not set +# CONFIG_TOUCHSCREEN_MK712 is not set +# CONFIG_TOUCHSCREEN_PENMOUNT is not set +# CONFIG_TOUCHSCREEN_TOUCHRIGHT is not set +# CONFIG_TOUCHSCREEN_TOUCHWIN is not set +CONFIG_TOUCHSCREEN_WM97XX=m +CONFIG_TOUCHSCREEN_WM9705=y +CONFIG_TOUCHSCREEN_WM9712=y +CONFIG_TOUCHSCREEN_WM9713=y +# CONFIG_TOUCHSCREEN_WM97XX_ATMEL is not set +# CONFIG_TOUCHSCREEN_TOUCHIT213 is not set +# CONFIG_TOUCHSCREEN_TSC2007 is not set +# CONFIG_TOUCHSCREEN_W90X900 is not set +# CONFIG_INPUT_MISC is not set + +# +# Hardware I/O ports +# +# CONFIG_SERIO is not set +# CONFIG_GAMEPORT is not set + +# +# Character devices +# +CONFIG_VT=y +CONFIG_CONSOLE_TRANSLATIONS=y +CONFIG_VT_CONSOLE=y +CONFIG_HW_CONSOLE=y +# CONFIG_VT_HW_CONSOLE_BINDING is not set +CONFIG_DEVKMEM=y +# CONFIG_SERIAL_NONSTANDARD is not set + +# +# Serial drivers +# +# CONFIG_SERIAL_8250 is not set + +# +# Non-8250 serial port support +# +CONFIG_SERIAL_ATMEL=y +CONFIG_SERIAL_ATMEL_CONSOLE=y +CONFIG_SERIAL_ATMEL_PDC=y +# CONFIG_SERIAL_ATMEL_TTYAT is not set +# CONFIG_SERIAL_MAX3100 is not set +CONFIG_SERIAL_CORE=y +CONFIG_SERIAL_CORE_CONSOLE=y +CONFIG_UNIX98_PTYS=y +# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set +# CONFIG_LEGACY_PTYS is not set +# CONFIG_IPMI_HANDLER is not set +# CONFIG_HW_RANDOM is not set +# CONFIG_R3964 is not set +# CONFIG_RAW_DRIVER is not set +# CONFIG_TCG_TPM is not set +CONFIG_I2C=m +CONFIG_I2C_BOARDINFO=y +CONFIG_I2C_COMPAT=y +CONFIG_I2C_CHARDEV=m +CONFIG_I2C_HELPER_AUTO=y +CONFIG_I2C_ALGOBIT=m + +# +# I2C Hardware Bus support +# + +# +# I2C system bus drivers (mostly embedded / system-on-chip) +# +# CONFIG_I2C_DESIGNWARE is not set +CONFIG_I2C_GPIO=m +# CONFIG_I2C_OCORES is not set +# CONFIG_I2C_SIMTEC is not set + +# +# External I2C/SMBus adapter drivers +# +# CONFIG_I2C_PARPORT_LIGHT is not set +# CONFIG_I2C_TAOS_EVM is not set + +# +# Other I2C/SMBus bus drivers +# +# CONFIG_I2C_PCA_PLATFORM is not set +# CONFIG_I2C_STUB is not set + +# +# Miscellaneous I2C Chip support +# +# CONFIG_DS1682 is not set +# CONFIG_SENSORS_TSL2550 is not set +# CONFIG_I2C_DEBUG_CORE is not set +# CONFIG_I2C_DEBUG_ALGO is not set +# CONFIG_I2C_DEBUG_BUS is not set +# CONFIG_I2C_DEBUG_CHIP is not set +CONFIG_SPI=y +# CONFIG_SPI_DEBUG is not set +CONFIG_SPI_MASTER=y + +# +# SPI Master Controller Drivers +# +CONFIG_SPI_ATMEL=y +# CONFIG_SPI_BITBANG is not set +# CONFIG_SPI_GPIO is not set + +# +# SPI Protocol Masters +# +CONFIG_SPI_SPIDEV=m +# CONFIG_SPI_TLE62X0 is not set + +# +# PPS support +# +# CONFIG_PPS is not set +CONFIG_ARCH_REQUIRE_GPIOLIB=y +CONFIG_GPIOLIB=y +# CONFIG_DEBUG_GPIO is not set +# CONFIG_GPIO_SYSFS is not set + +# +# Memory mapped GPIO expanders: +# + +# +# I2C GPIO expanders: +# +# CONFIG_GPIO_MAX732X is not set +# CONFIG_GPIO_PCA953X is not set +# CONFIG_GPIO_PCF857X is not set + +# +# PCI GPIO expanders: +# + +# +# SPI GPIO expanders: +# +# CONFIG_GPIO_MAX7301 is not set +# CONFIG_GPIO_MCP23S08 is not set +# CONFIG_GPIO_MC33880 is not set + +# +# AC97 GPIO expanders: +# +# CONFIG_W1 is not set +# CONFIG_POWER_SUPPLY is not set +# CONFIG_HWMON is not set +# CONFIG_THERMAL is not set +CONFIG_WATCHDOG=y +# CONFIG_WATCHDOG_NOWAYOUT is not set + +# +# Watchdog Device Drivers +# +# CONFIG_SOFT_WATCHDOG is not set +CONFIG_AT32AP700X_WDT=y +CONFIG_SSB_POSSIBLE=y + +# +# Sonics Silicon Backplane +# +# CONFIG_SSB is not set + +# +# Multifunction device drivers +# +# CONFIG_MFD_CORE is not set +# CONFIG_MFD_SM501 is not set +# CONFIG_HTC_PASIC3 is not set +# 
CONFIG_UCB1400_CORE is not set +# CONFIG_TPS65010 is not set +# CONFIG_MFD_TMIO is not set +# CONFIG_MFD_WM8400 is not set +# CONFIG_MFD_WM831X is not set +# CONFIG_MFD_WM8350_I2C is not set +# CONFIG_MFD_PCF50633 is not set +# CONFIG_MFD_MC13783 is not set +# CONFIG_AB3100_CORE is not set +# CONFIG_EZX_PCAP is not set +# CONFIG_REGULATOR is not set +# CONFIG_MEDIA_SUPPORT is not set + +# +# Graphics support +# +# CONFIG_VGASTATE is not set +# CONFIG_VIDEO_OUTPUT_CONTROL is not set +CONFIG_FB=y +# CONFIG_FIRMWARE_EDID is not set +# CONFIG_FB_DDC is not set +# CONFIG_FB_BOOT_VESA_SUPPORT is not set +CONFIG_FB_CFB_FILLRECT=y +CONFIG_FB_CFB_COPYAREA=y +CONFIG_FB_CFB_IMAGEBLIT=y +# CONFIG_FB_CFB_REV_PIXELS_IN_BYTE is not set +# CONFIG_FB_SYS_FILLRECT is not set +# CONFIG_FB_SYS_COPYAREA is not set +# CONFIG_FB_SYS_IMAGEBLIT is not set +# CONFIG_FB_FOREIGN_ENDIAN is not set +# CONFIG_FB_SYS_FOPS is not set +# CONFIG_FB_SVGALIB is not set +# CONFIG_FB_MACMODES is not set +# CONFIG_FB_BACKLIGHT is not set +# CONFIG_FB_MODE_HELPERS is not set +# CONFIG_FB_TILEBLITTING is not set + +# +# Frame buffer hardware drivers +# +# CONFIG_FB_S1D13XXX is not set +CONFIG_FB_ATMEL=y +# CONFIG_FB_VIRTUAL is not set +# CONFIG_FB_METRONOME is not set +# CONFIG_FB_MB862XX is not set +# CONFIG_FB_BROADSHEET is not set +# CONFIG_BACKLIGHT_LCD_SUPPORT is not set + +# +# Display device support +# +# CONFIG_DISPLAY_SUPPORT is not set + +# +# Console display driver support +# +CONFIG_DUMMY_CONSOLE=y +# CONFIG_FRAMEBUFFER_CONSOLE is not set +# CONFIG_LOGO is not set +CONFIG_SOUND=y +CONFIG_SOUND_OSS_CORE=y +CONFIG_SOUND_OSS_CORE_PRECLAIM=y +CONFIG_SND=y +CONFIG_SND_TIMER=y +CONFIG_SND_PCM=m +# CONFIG_SND_SEQUENCER is not set +CONFIG_SND_OSSEMUL=y +CONFIG_SND_MIXER_OSS=m +CONFIG_SND_PCM_OSS=m +CONFIG_SND_PCM_OSS_PLUGINS=y +CONFIG_SND_HRTIMER=y +# CONFIG_SND_DYNAMIC_MINORS is not set +# CONFIG_SND_SUPPORT_OLD_API is not set +CONFIG_SND_VERBOSE_PROCFS=y +# CONFIG_SND_VERBOSE_PRINTK is not set +# CONFIG_SND_DEBUG is not set +CONFIG_SND_VMASTER=y +# CONFIG_SND_RAWMIDI_SEQ is not set +# CONFIG_SND_OPL3_LIB_SEQ is not set +# CONFIG_SND_OPL4_LIB_SEQ is not set +# CONFIG_SND_SBAWE_SEQ is not set +# CONFIG_SND_EMU10K1_SEQ is not set +CONFIG_SND_AC97_CODEC=m +# CONFIG_SND_DRIVERS is not set + +# +# Atmel devices (AVR32 and AT91) +# +# CONFIG_SND_ATMEL_ABDAC is not set +CONFIG_SND_ATMEL_AC97C=m +# CONFIG_SND_SPI is not set +# CONFIG_SND_SOC is not set +# CONFIG_SOUND_PRIME is not set +CONFIG_AC97_BUS=m +CONFIG_HID_SUPPORT=y +CONFIG_HID=y +# CONFIG_HIDRAW is not set +# CONFIG_HID_PID is not set + +# +# Special HID drivers +# +CONFIG_USB_SUPPORT=y +# CONFIG_USB_ARCH_HAS_HCD is not set +# CONFIG_USB_ARCH_HAS_OHCI is not set +# CONFIG_USB_ARCH_HAS_EHCI is not set +# CONFIG_USB_OTG_WHITELIST is not set +# CONFIG_USB_OTG_BLACKLIST_HUB is not set +# CONFIG_USB_GADGET_MUSB_HDRC is not set + +# +# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may +# +CONFIG_USB_GADGET=y +# CONFIG_USB_GADGET_DEBUG is not set +# CONFIG_USB_GADGET_DEBUG_FILES is not set +# CONFIG_USB_GADGET_DEBUG_FS is not set +CONFIG_USB_GADGET_VBUS_DRAW=350 +CONFIG_USB_GADGET_SELECTED=y +# CONFIG_USB_GADGET_AT91 is not set +CONFIG_USB_GADGET_ATMEL_USBA=y +CONFIG_USB_ATMEL_USBA=y +# CONFIG_USB_GADGET_FSL_USB2 is not set +# CONFIG_USB_GADGET_LH7A40X is not set +# CONFIG_USB_GADGET_OMAP is not set +# CONFIG_USB_GADGET_PXA25X is not set +# CONFIG_USB_GADGET_R8A66597 is not set +# CONFIG_USB_GADGET_PXA27X is not set +# CONFIG_USB_GADGET_S3C_HSOTG is not set +# 
CONFIG_USB_GADGET_IMX is not set +# CONFIG_USB_GADGET_S3C2410 is not set +# CONFIG_USB_GADGET_M66592 is not set +# CONFIG_USB_GADGET_AMD5536UDC is not set +# CONFIG_USB_GADGET_FSL_QE is not set +# CONFIG_USB_GADGET_CI13XXX is not set +# CONFIG_USB_GADGET_NET2280 is not set +# CONFIG_USB_GADGET_GOKU is not set +# CONFIG_USB_GADGET_LANGWELL is not set +# CONFIG_USB_GADGET_DUMMY_HCD is not set +CONFIG_USB_GADGET_DUALSPEED=y +CONFIG_USB_ZERO=m +# CONFIG_USB_AUDIO is not set +CONFIG_USB_ETH=m +CONFIG_USB_ETH_RNDIS=y +# CONFIG_USB_ETH_EEM is not set +CONFIG_USB_GADGETFS=m +CONFIG_USB_FILE_STORAGE=m +# CONFIG_USB_FILE_STORAGE_TEST is not set +CONFIG_USB_G_SERIAL=m +# CONFIG_USB_MIDI_GADGET is not set +# CONFIG_USB_G_PRINTER is not set +CONFIG_USB_CDC_COMPOSITE=m + +# +# OTG and related infrastructure +# +# CONFIG_USB_GPIO_VBUS is not set +# CONFIG_NOP_USB_XCEIV is not set +CONFIG_MMC=y +# CONFIG_MMC_DEBUG is not set +# CONFIG_MMC_UNSAFE_RESUME is not set + +# +# MMC/SD/SDIO Card Drivers +# +CONFIG_MMC_BLOCK=y +CONFIG_MMC_BLOCK_BOUNCE=y +# CONFIG_SDIO_UART is not set +# CONFIG_MMC_TEST is not set + +# +# MMC/SD/SDIO Host Controller Drivers +# +# CONFIG_MMC_SDHCI is not set +# CONFIG_MMC_AT91 is not set +CONFIG_MMC_ATMELMCI=y +# CONFIG_MMC_ATMELMCI_DMA is not set +# CONFIG_MMC_SPI is not set +# CONFIG_MEMSTICK is not set +CONFIG_NEW_LEDS=y +CONFIG_LEDS_CLASS=y + +# +# LED drivers +# +# CONFIG_LEDS_PCA9532 is not set +CONFIG_LEDS_GPIO=y +CONFIG_LEDS_GPIO_PLATFORM=y +# CONFIG_LEDS_LP3944 is not set +# CONFIG_LEDS_PCA955X is not set +# CONFIG_LEDS_DAC124S085 is not set +# CONFIG_LEDS_BD2802 is not set + +# +# LED Triggers +# +CONFIG_LEDS_TRIGGERS=y +CONFIG_LEDS_TRIGGER_TIMER=y +CONFIG_LEDS_TRIGGER_HEARTBEAT=y +# CONFIG_LEDS_TRIGGER_BACKLIGHT is not set +# CONFIG_LEDS_TRIGGER_GPIO is not set +# CONFIG_LEDS_TRIGGER_DEFAULT_ON is not set + +# +# iptables trigger is under Netfilter config (LED target) +# +# CONFIG_ACCESSIBILITY is not set +CONFIG_RTC_LIB=y +CONFIG_RTC_CLASS=y +CONFIG_RTC_HCTOSYS=y +CONFIG_RTC_HCTOSYS_DEVICE="rtc0" +# CONFIG_RTC_DEBUG is not set + +# +# RTC interfaces +# +CONFIG_RTC_INTF_SYSFS=y +CONFIG_RTC_INTF_PROC=y +CONFIG_RTC_INTF_DEV=y +# CONFIG_RTC_INTF_DEV_UIE_EMUL is not set +# CONFIG_RTC_DRV_TEST is not set + +# +# I2C RTC drivers +# +# CONFIG_RTC_DRV_DS1307 is not set +# CONFIG_RTC_DRV_DS1374 is not set +# CONFIG_RTC_DRV_DS1672 is not set +# CONFIG_RTC_DRV_MAX6900 is not set +# CONFIG_RTC_DRV_RS5C372 is not set +# CONFIG_RTC_DRV_ISL1208 is not set +# CONFIG_RTC_DRV_X1205 is not set +# CONFIG_RTC_DRV_PCF8563 is not set +# CONFIG_RTC_DRV_PCF8583 is not set +# CONFIG_RTC_DRV_M41T80 is not set +# CONFIG_RTC_DRV_S35390A is not set +# CONFIG_RTC_DRV_FM3130 is not set +# CONFIG_RTC_DRV_RX8581 is not set +# CONFIG_RTC_DRV_RX8025 is not set + +# +# SPI RTC drivers +# +# CONFIG_RTC_DRV_M41T94 is not set +# CONFIG_RTC_DRV_DS1305 is not set +# CONFIG_RTC_DRV_DS1390 is not set +# CONFIG_RTC_DRV_MAX6902 is not set +# CONFIG_RTC_DRV_R9701 is not set +# CONFIG_RTC_DRV_RS5C348 is not set +# CONFIG_RTC_DRV_DS3234 is not set +# CONFIG_RTC_DRV_PCF2123 is not set + +# +# Platform RTC drivers +# +# CONFIG_RTC_DRV_DS1286 is not set +# CONFIG_RTC_DRV_DS1511 is not set +# CONFIG_RTC_DRV_DS1553 is not set +# CONFIG_RTC_DRV_DS1742 is not set +# CONFIG_RTC_DRV_STK17TA8 is not set +# CONFIG_RTC_DRV_M48T86 is not set +# CONFIG_RTC_DRV_M48T35 is not set +# CONFIG_RTC_DRV_M48T59 is not set +# CONFIG_RTC_DRV_BQ4802 is not set +# CONFIG_RTC_DRV_V3020 is not set + +# +# on-CPU RTC drivers +# 
+CONFIG_RTC_DRV_AT32AP700X=y +CONFIG_DMADEVICES=y + +# +# DMA Devices +# +CONFIG_DW_DMAC=y +CONFIG_DMA_ENGINE=y + +# +# DMA Clients +# +# CONFIG_NET_DMA is not set +# CONFIG_ASYNC_TX_DMA is not set +# CONFIG_DMATEST is not set +# CONFIG_AUXDISPLAY is not set +# CONFIG_UIO is not set + +# +# TI VLYNQ +# +# CONFIG_STAGING is not set + +# +# File systems +# +CONFIG_EXT2_FS=y +# CONFIG_EXT2_FS_XATTR is not set +# CONFIG_EXT2_FS_XIP is not set +CONFIG_EXT3_FS=y +# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set +# CONFIG_EXT3_FS_XATTR is not set +# CONFIG_EXT4_FS is not set +CONFIG_JBD=y +# CONFIG_JBD_DEBUG is not set +# CONFIG_REISERFS_FS is not set +# CONFIG_JFS_FS is not set +# CONFIG_FS_POSIX_ACL is not set +# CONFIG_XFS_FS is not set +# CONFIG_GFS2_FS is not set +# CONFIG_OCFS2_FS is not set +# CONFIG_BTRFS_FS is not set +# CONFIG_NILFS2_FS is not set +CONFIG_FILE_LOCKING=y +CONFIG_FSNOTIFY=y +# CONFIG_DNOTIFY is not set +CONFIG_INOTIFY=y +CONFIG_INOTIFY_USER=y +# CONFIG_QUOTA is not set +# CONFIG_AUTOFS_FS is not set +# CONFIG_AUTOFS4_FS is not set +CONFIG_FUSE_FS=m +# CONFIG_CUSE is not set + +# +# Caches +# +# CONFIG_FSCACHE is not set + +# +# CD-ROM/DVD Filesystems +# +# CONFIG_ISO9660_FS is not set +# CONFIG_UDF_FS is not set + +# +# DOS/FAT/NT Filesystems +# +CONFIG_FAT_FS=m +CONFIG_MSDOS_FS=m +CONFIG_VFAT_FS=m +CONFIG_FAT_DEFAULT_CODEPAGE=850 +CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1" +# CONFIG_NTFS_FS is not set + +# +# Pseudo filesystems +# +CONFIG_PROC_FS=y +# CONFIG_PROC_KCORE is not set +CONFIG_PROC_SYSCTL=y +CONFIG_PROC_PAGE_MONITOR=y +CONFIG_SYSFS=y +CONFIG_TMPFS=y +# CONFIG_TMPFS_POSIX_ACL is not set +# CONFIG_HUGETLB_PAGE is not set +CONFIG_CONFIGFS_FS=y +CONFIG_MISC_FILESYSTEMS=y +# CONFIG_ADFS_FS is not set +# CONFIG_AFFS_FS is not set +# CONFIG_HFS_FS is not set +# CONFIG_HFSPLUS_FS is not set +# CONFIG_BEFS_FS is not set +# CONFIG_BFS_FS is not set +# CONFIG_EFS_FS is not set +CONFIG_JFFS2_FS=y +CONFIG_JFFS2_FS_DEBUG=0 +CONFIG_JFFS2_FS_WRITEBUFFER=y +# CONFIG_JFFS2_FS_WBUF_VERIFY is not set +# CONFIG_JFFS2_SUMMARY is not set +# CONFIG_JFFS2_FS_XATTR is not set +# CONFIG_JFFS2_COMPRESSION_OPTIONS is not set +CONFIG_JFFS2_ZLIB=y +# CONFIG_JFFS2_LZO is not set +CONFIG_JFFS2_RTIME=y +# CONFIG_JFFS2_RUBIN is not set +CONFIG_UBIFS_FS=y +# CONFIG_UBIFS_FS_XATTR is not set +# CONFIG_UBIFS_FS_ADVANCED_COMPR is not set +CONFIG_UBIFS_FS_LZO=y +CONFIG_UBIFS_FS_ZLIB=y +# CONFIG_UBIFS_FS_DEBUG is not set +# CONFIG_CRAMFS is not set +# CONFIG_SQUASHFS is not set +# CONFIG_VXFS_FS is not set +# CONFIG_MINIX_FS is not set +# CONFIG_OMFS_FS is not set +# CONFIG_HPFS_FS is not set +# CONFIG_QNX4FS_FS is not set +# CONFIG_ROMFS_FS is not set +# CONFIG_SYSV_FS is not set +# CONFIG_UFS_FS is not set +CONFIG_NETWORK_FILESYSTEMS=y +CONFIG_NFS_FS=y +CONFIG_NFS_V3=y +# CONFIG_NFS_V3_ACL is not set +# CONFIG_NFS_V4 is not set +CONFIG_ROOT_NFS=y +CONFIG_NFSD=m +CONFIG_NFSD_V3=y +# CONFIG_NFSD_V3_ACL is not set +# CONFIG_NFSD_V4 is not set +CONFIG_LOCKD=y +CONFIG_LOCKD_V4=y +CONFIG_EXPORTFS=m +CONFIG_NFS_COMMON=y +CONFIG_SUNRPC=y +# CONFIG_RPCSEC_GSS_KRB5 is not set +# CONFIG_RPCSEC_GSS_SPKM3 is not set +CONFIG_SMB_FS=m +# CONFIG_SMB_NLS_DEFAULT is not set +CONFIG_CIFS=m +# CONFIG_CIFS_STATS is not set +# CONFIG_CIFS_WEAK_PW_HASH is not set +# CONFIG_CIFS_XATTR is not set +# CONFIG_CIFS_DEBUG2 is not set +# CONFIG_CIFS_EXPERIMENTAL is not set +# CONFIG_NCP_FS is not set +# CONFIG_CODA_FS is not set +# CONFIG_AFS_FS is not set + +# +# Partition Types +# +# CONFIG_PARTITION_ADVANCED is not set 
+CONFIG_MSDOS_PARTITION=y +CONFIG_NLS=m +CONFIG_NLS_DEFAULT="iso8859-1" +CONFIG_NLS_CODEPAGE_437=m +# CONFIG_NLS_CODEPAGE_737 is not set +# CONFIG_NLS_CODEPAGE_775 is not set +CONFIG_NLS_CODEPAGE_850=m +# CONFIG_NLS_CODEPAGE_852 is not set +# CONFIG_NLS_CODEPAGE_855 is not set +# CONFIG_NLS_CODEPAGE_857 is not set +# CONFIG_NLS_CODEPAGE_860 is not set +# CONFIG_NLS_CODEPAGE_861 is not set +# CONFIG_NLS_CODEPAGE_862 is not set +# CONFIG_NLS_CODEPAGE_863 is not set +# CONFIG_NLS_CODEPAGE_864 is not set +# CONFIG_NLS_CODEPAGE_865 is not set +# CONFIG_NLS_CODEPAGE_866 is not set +# CONFIG_NLS_CODEPAGE_869 is not set +# CONFIG_NLS_CODEPAGE_936 is not set +# CONFIG_NLS_CODEPAGE_950 is not set +# CONFIG_NLS_CODEPAGE_932 is not set +# CONFIG_NLS_CODEPAGE_949 is not set +# CONFIG_NLS_CODEPAGE_874 is not set +# CONFIG_NLS_ISO8859_8 is not set +# CONFIG_NLS_CODEPAGE_1250 is not set +# CONFIG_NLS_CODEPAGE_1251 is not set +# CONFIG_NLS_ASCII is not set +CONFIG_NLS_ISO8859_1=m +# CONFIG_NLS_ISO8859_2 is not set +# CONFIG_NLS_ISO8859_3 is not set +# CONFIG_NLS_ISO8859_4 is not set +# CONFIG_NLS_ISO8859_5 is not set +# CONFIG_NLS_ISO8859_6 is not set +# CONFIG_NLS_ISO8859_7 is not set +# CONFIG_NLS_ISO8859_9 is not set +# CONFIG_NLS_ISO8859_13 is not set +# CONFIG_NLS_ISO8859_14 is not set +# CONFIG_NLS_ISO8859_15 is not set +# CONFIG_NLS_KOI8_R is not set +# CONFIG_NLS_KOI8_U is not set +CONFIG_NLS_UTF8=m +# CONFIG_DLM is not set + +# +# Kernel hacking +# +# CONFIG_PRINTK_TIME is not set +CONFIG_ENABLE_WARN_DEPRECATED=y +CONFIG_ENABLE_MUST_CHECK=y +CONFIG_FRAME_WARN=1024 +CONFIG_MAGIC_SYSRQ=y +# CONFIG_STRIP_ASM_SYMS is not set +# CONFIG_UNUSED_SYMBOLS is not set +CONFIG_DEBUG_FS=y +# CONFIG_HEADERS_CHECK is not set +CONFIG_DEBUG_KERNEL=y +# CONFIG_DEBUG_SHIRQ is not set +CONFIG_DETECT_SOFTLOCKUP=y +# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set +CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0 +CONFIG_DETECT_HUNG_TASK=y +# CONFIG_BOOTPARAM_HUNG_TASK_PANIC is not set +CONFIG_BOOTPARAM_HUNG_TASK_PANIC_VALUE=0 +CONFIG_SCHED_DEBUG=y +# CONFIG_SCHEDSTATS is not set +# CONFIG_TIMER_STATS is not set +# CONFIG_DEBUG_OBJECTS is not set +# CONFIG_SLUB_DEBUG_ON is not set +# CONFIG_SLUB_STATS is not set +# CONFIG_DEBUG_RT_MUTEXES is not set +# CONFIG_RT_MUTEX_TESTER is not set +# CONFIG_DEBUG_SPINLOCK is not set +# CONFIG_DEBUG_MUTEXES is not set +# CONFIG_DEBUG_LOCK_ALLOC is not set +# CONFIG_PROVE_LOCKING is not set +# CONFIG_LOCK_STAT is not set +# CONFIG_DEBUG_SPINLOCK_SLEEP is not set +# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set +CONFIG_STACKTRACE=y +# CONFIG_DEBUG_KOBJECT is not set +CONFIG_DEBUG_BUGVERBOSE=y +# CONFIG_DEBUG_INFO is not set +# CONFIG_DEBUG_VM is not set +# CONFIG_DEBUG_WRITECOUNT is not set +# CONFIG_DEBUG_MEMORY_INIT is not set +# CONFIG_DEBUG_LIST is not set +# CONFIG_DEBUG_SG is not set +# CONFIG_DEBUG_NOTIFIERS is not set +# CONFIG_DEBUG_CREDENTIALS is not set +CONFIG_FRAME_POINTER=y +# CONFIG_BOOT_PRINTK_DELAY is not set +# CONFIG_RCU_TORTURE_TEST is not set +# CONFIG_RCU_CPU_STALL_DETECTOR is not set +# CONFIG_KPROBES_SANITY_TEST is not set +# CONFIG_BACKTRACE_SELF_TEST is not set +# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set +# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set +# CONFIG_LKDTM is not set +# CONFIG_FAULT_INJECTION is not set +# CONFIG_PAGE_POISONING is not set +CONFIG_NOP_TRACER=y +CONFIG_RING_BUFFER=y +CONFIG_EVENT_TRACING=y +CONFIG_CONTEXT_SWITCH_TRACER=y +CONFIG_RING_BUFFER_ALLOW_SWAP=y +CONFIG_TRACING=y +CONFIG_TRACING_SUPPORT=y +CONFIG_FTRACE=y +# CONFIG_IRQSOFF_TRACER is not 
set +# CONFIG_SCHED_TRACER is not set +# CONFIG_ENABLE_DEFAULT_TRACERS is not set +# CONFIG_BOOT_TRACER is not set +CONFIG_BRANCH_PROFILE_NONE=y +# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set +# CONFIG_PROFILE_ALL_BRANCHES is not set +# CONFIG_KMEMTRACE is not set +# CONFIG_WORKQUEUE_TRACER is not set +# CONFIG_BLK_DEV_IO_TRACE is not set +# CONFIG_RING_BUFFER_BENCHMARK is not set +# CONFIG_DYNAMIC_DEBUG is not set +# CONFIG_SAMPLES is not set + +# +# Security options +# +# CONFIG_KEYS is not set +# CONFIG_SECURITY is not set +# CONFIG_SECURITYFS is not set +# CONFIG_SECURITY_FILE_CAPABILITIES is not set +CONFIG_CRYPTO=y + +# +# Crypto core or helper +# +# CONFIG_CRYPTO_FIPS is not set +CONFIG_CRYPTO_ALGAPI=y +CONFIG_CRYPTO_ALGAPI2=y +CONFIG_CRYPTO_AEAD=y +CONFIG_CRYPTO_AEAD2=y +CONFIG_CRYPTO_BLKCIPHER=y +CONFIG_CRYPTO_BLKCIPHER2=y +CONFIG_CRYPTO_HASH=y +CONFIG_CRYPTO_HASH2=y +CONFIG_CRYPTO_RNG=m +CONFIG_CRYPTO_RNG2=y +CONFIG_CRYPTO_PCOMP=y +CONFIG_CRYPTO_MANAGER=y +CONFIG_CRYPTO_MANAGER2=y +# CONFIG_CRYPTO_GF128MUL is not set +# CONFIG_CRYPTO_NULL is not set +CONFIG_CRYPTO_WORKQUEUE=y +# CONFIG_CRYPTO_CRYPTD is not set +CONFIG_CRYPTO_AUTHENC=y +# CONFIG_CRYPTO_TEST is not set + +# +# Authenticated Encryption with Associated Data +# +# CONFIG_CRYPTO_CCM is not set +# CONFIG_CRYPTO_GCM is not set +# CONFIG_CRYPTO_SEQIV is not set + +# +# Block modes +# +CONFIG_CRYPTO_CBC=y +# CONFIG_CRYPTO_CTR is not set +# CONFIG_CRYPTO_CTS is not set +CONFIG_CRYPTO_ECB=m +# CONFIG_CRYPTO_LRW is not set +# CONFIG_CRYPTO_PCBC is not set +# CONFIG_CRYPTO_XTS is not set + +# +# Hash modes +# +CONFIG_CRYPTO_HMAC=y +# CONFIG_CRYPTO_XCBC is not set +# CONFIG_CRYPTO_VMAC is not set + +# +# Digest +# +# CONFIG_CRYPTO_CRC32C is not set +# CONFIG_CRYPTO_GHASH is not set +# CONFIG_CRYPTO_MD4 is not set +CONFIG_CRYPTO_MD5=y +# CONFIG_CRYPTO_MICHAEL_MIC is not set +# CONFIG_CRYPTO_RMD128 is not set +# CONFIG_CRYPTO_RMD160 is not set +# CONFIG_CRYPTO_RMD256 is not set +# CONFIG_CRYPTO_RMD320 is not set +CONFIG_CRYPTO_SHA1=y +# CONFIG_CRYPTO_SHA256 is not set +# CONFIG_CRYPTO_SHA512 is not set +# CONFIG_CRYPTO_TGR192 is not set +# CONFIG_CRYPTO_WP512 is not set + +# +# Ciphers +# +CONFIG_CRYPTO_AES=m +# CONFIG_CRYPTO_ANUBIS is not set +CONFIG_CRYPTO_ARC4=m +# CONFIG_CRYPTO_BLOWFISH is not set +# CONFIG_CRYPTO_CAMELLIA is not set +# CONFIG_CRYPTO_CAST5 is not set +# CONFIG_CRYPTO_CAST6 is not set +CONFIG_CRYPTO_DES=y +# CONFIG_CRYPTO_FCRYPT is not set +# CONFIG_CRYPTO_KHAZAD is not set +# CONFIG_CRYPTO_SALSA20 is not set +# CONFIG_CRYPTO_SEED is not set +# CONFIG_CRYPTO_SERPENT is not set +# CONFIG_CRYPTO_TEA is not set +# CONFIG_CRYPTO_TWOFISH is not set + +# +# Compression +# +CONFIG_CRYPTO_DEFLATE=y +# CONFIG_CRYPTO_ZLIB is not set +CONFIG_CRYPTO_LZO=y + +# +# Random Number Generation +# +CONFIG_CRYPTO_ANSI_CPRNG=m +CONFIG_CRYPTO_HW=y +CONFIG_BINARY_PRINTF=y + +# +# Library routines +# +CONFIG_BITREVERSE=y +CONFIG_GENERIC_FIND_LAST_BIT=y +CONFIG_CRC_CCITT=m +CONFIG_CRC16=y +# CONFIG_CRC_T10DIF is not set +# CONFIG_CRC_ITU_T is not set +CONFIG_CRC32=y +# CONFIG_CRC7 is not set +# CONFIG_LIBCRC32C is not set +CONFIG_ZLIB_INFLATE=y +CONFIG_ZLIB_DEFLATE=y +CONFIG_LZO_COMPRESS=y +CONFIG_LZO_DECOMPRESS=y +CONFIG_DECOMPRESS_GZIP=y +CONFIG_GENERIC_ALLOCATOR=y +CONFIG_HAS_IOMEM=y +CONFIG_HAS_IOPORT=y +CONFIG_HAS_DMA=y +CONFIG_NLATTR=y diff --git a/arch/avr32/configs/atstk1002_defconfig b/arch/avr32/configs/atstk1002_defconfig index 0abe90adb1a..42dafce0238 100644 --- a/arch/avr32/configs/atstk1002_defconfig +++ 
b/arch/avr32/configs/atstk1002_defconfig @@ -1,7 +1,7 @@ # # Automatically generated make config: don't edit -# Linux kernel version: 2.6.27-rc1 -# Mon Aug 4 16:02:27 2008 +# Linux kernel version: 2.6.32-rc5 +# Thu Oct 29 13:00:55 2009 # CONFIG_AVR32=y CONFIG_GENERIC_GPIO=y @@ -21,6 +21,7 @@ CONFIG_GENERIC_HWEIGHT=y CONFIG_GENERIC_CALIBRATE_DELAY=y CONFIG_GENERIC_BUG=y CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config" +CONFIG_CONSTRUCTORS=y # # General setup @@ -34,21 +35,36 @@ CONFIG_SWAP=y CONFIG_SYSVIPC=y CONFIG_SYSVIPC_SYSCTL=y CONFIG_POSIX_MQUEUE=y +CONFIG_POSIX_MQUEUE_SYSCTL=y # CONFIG_BSD_PROCESS_ACCT is not set # CONFIG_TASKSTATS is not set # CONFIG_AUDIT is not set + +# +# RCU Subsystem +# +CONFIG_TREE_RCU=y +# CONFIG_TREE_PREEMPT_RCU is not set +# CONFIG_RCU_TRACE is not set +CONFIG_RCU_FANOUT=32 +# CONFIG_RCU_FANOUT_EXACT is not set +# CONFIG_TREE_RCU_TRACE is not set # CONFIG_IKCONFIG is not set CONFIG_LOG_BUF_SHIFT=14 -# CONFIG_CGROUPS is not set # CONFIG_GROUP_SCHED is not set +# CONFIG_CGROUPS is not set CONFIG_SYSFS_DEPRECATED=y CONFIG_SYSFS_DEPRECATED_V2=y CONFIG_RELAY=y # CONFIG_NAMESPACES is not set CONFIG_BLK_DEV_INITRD=y CONFIG_INITRAMFS_SOURCE="" +CONFIG_RD_GZIP=y +# CONFIG_RD_BZIP2 is not set +# CONFIG_RD_LZMA is not set CONFIG_CC_OPTIMIZE_FOR_SIZE=y CONFIG_SYSCTL=y +CONFIG_ANON_INODES=y CONFIG_EMBEDDED=y # CONFIG_SYSCTL_SYSCALL is not set CONFIG_KALLSYMS=y @@ -58,38 +74,40 @@ CONFIG_HOTPLUG=y CONFIG_PRINTK=y CONFIG_BUG=y CONFIG_ELF_CORE=y -# CONFIG_COMPAT_BRK is not set # CONFIG_BASE_FULL is not set CONFIG_FUTEX=y -CONFIG_ANON_INODES=y CONFIG_EPOLL=y CONFIG_SIGNALFD=y CONFIG_TIMERFD=y CONFIG_EVENTFD=y CONFIG_SHMEM=y +CONFIG_AIO=y + +# +# Kernel Performance Events And Counters +# CONFIG_VM_EVENT_COUNTERS=y CONFIG_SLUB_DEBUG=y +# CONFIG_COMPAT_BRK is not set # CONFIG_SLAB is not set CONFIG_SLUB=y # CONFIG_SLOB is not set CONFIG_PROFILING=y -# CONFIG_MARKERS is not set +CONFIG_TRACEPOINTS=y CONFIG_OPROFILE=m CONFIG_HAVE_OPROFILE=y CONFIG_KPROBES=y -# CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS is not set -# CONFIG_HAVE_IOREMAP_PROT is not set CONFIG_HAVE_KPROBES=y -# CONFIG_HAVE_KRETPROBES is not set -# CONFIG_HAVE_ARCH_TRACEHOOK is not set -# CONFIG_HAVE_DMA_ATTRS is not set -# CONFIG_USE_GENERIC_SMP_HELPERS is not set CONFIG_HAVE_CLK=y -CONFIG_PROC_PAGE_MONITOR=y + +# +# GCOV-based kernel profiling +# +# CONFIG_GCOV_KERNEL is not set +# CONFIG_SLOW_WORK is not set # CONFIG_HAVE_GENERIC_DMA_COHERENT is not set CONFIG_SLABINFO=y CONFIG_RT_MUTEXES=y -# CONFIG_TINY_SHMEM is not set CONFIG_BASE_SMALL=1 CONFIG_MODULES=y # CONFIG_MODULE_FORCE_LOAD is not set @@ -97,11 +115,8 @@ CONFIG_MODULE_UNLOAD=y # CONFIG_MODULE_FORCE_UNLOAD is not set # CONFIG_MODVERSIONS is not set # CONFIG_MODULE_SRCVERSION_ALL is not set -CONFIG_KMOD=y CONFIG_BLOCK=y -# CONFIG_LBD is not set -# CONFIG_BLK_DEV_IO_TRACE is not set -# CONFIG_LSF is not set +CONFIG_LBDAF=y # CONFIG_BLK_DEV_BSG is not set # CONFIG_BLK_DEV_INTEGRITY is not set @@ -117,7 +132,7 @@ CONFIG_IOSCHED_CFQ=y CONFIG_DEFAULT_CFQ=y # CONFIG_DEFAULT_NOOP is not set CONFIG_DEFAULT_IOSCHED="cfq" -CONFIG_CLASSIC_RCU=y +CONFIG_FREEZER=y # # System Type and features @@ -133,7 +148,12 @@ CONFIG_PLATFORM_AT32AP=y CONFIG_CPU_AT32AP700X=y CONFIG_CPU_AT32AP7000=y CONFIG_BOARD_ATSTK1000=y -# CONFIG_BOARD_ATNGW100 is not set +# CONFIG_BOARD_ATNGW100_MKI is not set +# CONFIG_BOARD_ATNGW100_MKII is not set +# CONFIG_BOARD_HAMMERHEAD is not set +# CONFIG_BOARD_FAVR_32 is not set +# CONFIG_BOARD_MERISC is not set +# CONFIG_BOARD_MIMC200 is not 
set CONFIG_BOARD_ATSTK1002=y # CONFIG_BOARD_ATSTK1003 is not set # CONFIG_BOARD_ATSTK1004 is not set @@ -159,7 +179,7 @@ CONFIG_PREEMPT_NONE=y # CONFIG_PREEMPT_VOLUNTARY is not set # CONFIG_PREEMPT is not set CONFIG_QUICKLIST=y -# CONFIG_HAVE_ARCH_BOOTMEM_NODE is not set +# CONFIG_HAVE_ARCH_BOOTMEM is not set # CONFIG_ARCH_HAVE_MEMORY_PRESENT is not set # CONFIG_NEED_NODE_MEMMAP_SIZE is not set CONFIG_ARCH_FLATMEM_ENABLE=y @@ -171,14 +191,16 @@ CONFIG_FLATMEM_MANUAL=y # CONFIG_SPARSEMEM_MANUAL is not set CONFIG_FLATMEM=y CONFIG_FLAT_NODE_MEM_MAP=y -# CONFIG_SPARSEMEM_STATIC is not set -# CONFIG_SPARSEMEM_VMEMMAP_ENABLE is not set CONFIG_PAGEFLAGS_EXTENDED=y CONFIG_SPLIT_PTLOCK_CPUS=4 -# CONFIG_RESOURCES_64BIT is not set +# CONFIG_PHYS_ADDR_T_64BIT is not set CONFIG_ZONE_DMA_FLAG=0 CONFIG_NR_QUICK=2 CONFIG_VIRT_TO_BUS=y +CONFIG_HAVE_MLOCK=y +CONFIG_HAVE_MLOCKED_PAGE_BIT=y +# CONFIG_KSM is not set +CONFIG_DEFAULT_MMAP_MIN_ADDR=4096 # CONFIG_OWNERSHIP_TRACE is not set CONFIG_NMI_DEBUGGING=y # CONFIG_HZ_100 is not set @@ -186,7 +208,7 @@ CONFIG_HZ_250=y # CONFIG_HZ_300 is not set # CONFIG_HZ_1000 is not set CONFIG_HZ=250 -# CONFIG_SCHED_HRTICK is not set +CONFIG_SCHED_HRTICK=y CONFIG_CMDLINE="" # @@ -197,6 +219,7 @@ CONFIG_PM=y CONFIG_PM_SLEEP=y CONFIG_SUSPEND=y CONFIG_SUSPEND_FREEZER=y +# CONFIG_PM_RUNTIME is not set CONFIG_ARCH_SUSPEND_POSSIBLE=y # @@ -228,6 +251,8 @@ CONFIG_CPU_FREQ_AT32AP=y # Executable file formats # CONFIG_BINFMT_ELF=y +# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set +# CONFIG_HAVE_AOUT is not set # CONFIG_BINFMT_MISC is not set CONFIG_NET=y @@ -295,10 +320,12 @@ CONFIG_IPV6_TUNNEL=m # CONFIG_NETFILTER is not set # CONFIG_IP_DCCP is not set # CONFIG_IP_SCTP is not set +# CONFIG_RDS is not set # CONFIG_TIPC is not set # CONFIG_ATM is not set CONFIG_STP=m CONFIG_BRIDGE=m +# CONFIG_NET_DSA is not set # CONFIG_VLAN_8021Q is not set # CONFIG_DECNET is not set CONFIG_LLC=m @@ -309,26 +336,33 @@ CONFIG_LLC=m # CONFIG_LAPB is not set # CONFIG_ECONET is not set # CONFIG_WAN_ROUTER is not set +# CONFIG_PHONET is not set +# CONFIG_IEEE802154 is not set # CONFIG_NET_SCHED is not set +# CONFIG_DCB is not set # # Network testing # # CONFIG_NET_PKTGEN is not set # CONFIG_NET_TCPPROBE is not set +# CONFIG_NET_DROP_MONITOR is not set # CONFIG_HAMRADIO is not set # CONFIG_CAN is not set # CONFIG_IRDA is not set # CONFIG_BT is not set # CONFIG_AF_RXRPC is not set +CONFIG_WIRELESS=y +# CONFIG_CFG80211 is not set +CONFIG_CFG80211_DEFAULT_PS_VALUE=0 +# CONFIG_WIRELESS_OLD_REGULATORY is not set +# CONFIG_WIRELESS_EXT is not set +# CONFIG_LIB80211 is not set # -# Wireless +# CFG80211 needs to be enabled for MAC80211 # -# CONFIG_CFG80211 is not set -# CONFIG_WIRELESS_EXT is not set -# CONFIG_MAC80211 is not set -# CONFIG_IEEE80211 is not set +# CONFIG_WIMAX is not set # CONFIG_RFKILL is not set # CONFIG_NET_9P is not set @@ -340,6 +374,7 @@ CONFIG_LLC=m # Generic Driver Options # CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" +# CONFIG_DEVTMPFS is not set CONFIG_STANDALONE=y # CONFIG_PREVENT_FIRMWARE_BUILD is not set # CONFIG_FW_LOADER is not set @@ -349,6 +384,7 @@ CONFIG_STANDALONE=y # CONFIG_CONNECTOR is not set CONFIG_MTD=y # CONFIG_MTD_DEBUG is not set +# CONFIG_MTD_TESTS is not set # CONFIG_MTD_CONCAT is not set CONFIG_MTD_PARTITIONS=y # CONFIG_MTD_REDBOOT_PARTS is not set @@ -398,17 +434,18 @@ CONFIG_MTD_CFI_UTIL=y # # CONFIG_MTD_COMPLEX_MAPPINGS is not set CONFIG_MTD_PHYSMAP=y -CONFIG_MTD_PHYSMAP_START=0x8000000 -CONFIG_MTD_PHYSMAP_LEN=0x0 -CONFIG_MTD_PHYSMAP_BANKWIDTH=2 +# 
CONFIG_MTD_PHYSMAP_COMPAT is not set # CONFIG_MTD_PLATRAM is not set # # Self-contained MTD device drivers # CONFIG_MTD_DATAFLASH=m +# CONFIG_MTD_DATAFLASH_WRITE_VERIFY is not set +# CONFIG_MTD_DATAFLASH_OTP is not set CONFIG_MTD_M25P80=m CONFIG_M25PXX_USE_FAST_READ=y +# CONFIG_MTD_SST25L is not set # CONFIG_MTD_SLRAM is not set # CONFIG_MTD_PHRAM is not set # CONFIG_MTD_MTDRAM is not set @@ -424,9 +461,22 @@ CONFIG_M25PXX_USE_FAST_READ=y # CONFIG_MTD_ONENAND is not set # +# LPDDR flash memory drivers +# +# CONFIG_MTD_LPDDR is not set + +# # UBI - Unsorted block images # -# CONFIG_MTD_UBI is not set +CONFIG_MTD_UBI=y +CONFIG_MTD_UBI_WL_THRESHOLD=4096 +CONFIG_MTD_UBI_BEB_RESERVE=1 +# CONFIG_MTD_UBI_GLUEBI is not set + +# +# UBI debugging options +# +# CONFIG_MTD_UBI_DEBUG is not set # CONFIG_PARPORT is not set CONFIG_BLK_DEV=y # CONFIG_BLK_DEV_COW_COMMON is not set @@ -444,10 +494,20 @@ CONFIG_ATMEL_PWM=m CONFIG_ATMEL_TCLIB=y CONFIG_ATMEL_TCB_CLKSRC=y CONFIG_ATMEL_TCB_CLKSRC_BLOCK=0 -# CONFIG_EEPROM_93CX6 is not set +# CONFIG_ICS932S401 is not set CONFIG_ATMEL_SSC=m # CONFIG_ENCLOSURE_SERVICES is not set -# CONFIG_HAVE_IDE is not set +# CONFIG_ISL29003 is not set +# CONFIG_C2PORT is not set + +# +# EEPROM support +# +CONFIG_EEPROM_AT24=m +# CONFIG_EEPROM_AT25 is not set +# CONFIG_EEPROM_LEGACY is not set +# CONFIG_EEPROM_MAX6875 is not set +# CONFIG_EEPROM_93CX6 is not set # # SCSI device support @@ -469,10 +529,6 @@ CONFIG_BLK_DEV_SR=m # CONFIG_BLK_DEV_SR_VENDOR is not set # CONFIG_CHR_DEV_SG is not set # CONFIG_CHR_DEV_SCH is not set - -# -# Some SCSI devices (e.g. CD jukebox) support multiple LUNs -# # CONFIG_SCSI_MULTI_LUN is not set # CONFIG_SCSI_CONSTANTS is not set # CONFIG_SCSI_LOGGING is not set @@ -489,8 +545,10 @@ CONFIG_SCSI_WAIT_SCAN=m # CONFIG_SCSI_SRP_ATTRS is not set # CONFIG_SCSI_LOWLEVEL is not set # CONFIG_SCSI_DH is not set +# CONFIG_SCSI_OSD_INITIATOR is not set CONFIG_ATA=m # CONFIG_ATA_NONSTANDARD is not set +CONFIG_ATA_VERBOSE_ERROR=y # CONFIG_SATA_PMP is not set CONFIG_ATA_SFF=y # CONFIG_SATA_MV is not set @@ -519,26 +577,37 @@ CONFIG_PHYLIB=y # CONFIG_BROADCOM_PHY is not set # CONFIG_ICPLUS_PHY is not set # CONFIG_REALTEK_PHY is not set +# CONFIG_NATIONAL_PHY is not set +# CONFIG_STE10XP is not set +# CONFIG_LSI_ET1011C_PHY is not set # CONFIG_FIXED_PHY is not set # CONFIG_MDIO_BITBANG is not set CONFIG_NET_ETHERNET=y # CONFIG_MII is not set CONFIG_MACB=y # CONFIG_ENC28J60 is not set +# CONFIG_ETHOC is not set +# CONFIG_DNET is not set # CONFIG_IBM_NEW_EMAC_ZMII is not set # CONFIG_IBM_NEW_EMAC_RGMII is not set # CONFIG_IBM_NEW_EMAC_TAH is not set # CONFIG_IBM_NEW_EMAC_EMAC4 is not set +# CONFIG_IBM_NEW_EMAC_NO_FLOW_CTRL is not set +# CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set +# CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set # CONFIG_B44 is not set +# CONFIG_KS8842 is not set +# CONFIG_KS8851 is not set +# CONFIG_KS8851_MLL is not set # CONFIG_NETDEV_1000 is not set # CONFIG_NETDEV_10000 is not set +CONFIG_WLAN=y +# CONFIG_WLAN_PRE80211 is not set +# CONFIG_WLAN_80211 is not set # -# Wireless LAN +# Enable WiMAX (Networking options) to see the WiMAX drivers # -# CONFIG_WLAN_PRE80211 is not set -# CONFIG_WLAN_80211 is not set -# CONFIG_IWLWIFI_LEDS is not set # CONFIG_WAN is not set CONFIG_PPP=m # CONFIG_PPP_MULTILINK is not set @@ -580,18 +649,25 @@ CONFIG_INPUT_EVDEV=m # Input Device Drivers # CONFIG_INPUT_KEYBOARD=y +# CONFIG_KEYBOARD_ADP5588 is not set # CONFIG_KEYBOARD_ATKBD is not set -# CONFIG_KEYBOARD_SUNKBD is not set +# CONFIG_QT2160 is not set # 
CONFIG_KEYBOARD_LKKBD is not set -# CONFIG_KEYBOARD_XTKBD is not set +CONFIG_KEYBOARD_GPIO=m +# CONFIG_KEYBOARD_MATRIX is not set +# CONFIG_KEYBOARD_LM8323 is not set +# CONFIG_KEYBOARD_MAX7359 is not set # CONFIG_KEYBOARD_NEWTON is not set +# CONFIG_KEYBOARD_OPENCORES is not set # CONFIG_KEYBOARD_STOWAWAY is not set -CONFIG_KEYBOARD_GPIO=m +# CONFIG_KEYBOARD_SUNKBD is not set +# CONFIG_KEYBOARD_XTKBD is not set CONFIG_INPUT_MOUSE=y # CONFIG_MOUSE_PS2 is not set # CONFIG_MOUSE_SERIAL is not set # CONFIG_MOUSE_VSXXXAA is not set CONFIG_MOUSE_GPIO=m +# CONFIG_MOUSE_SYNAPTICS_I2C is not set # CONFIG_INPUT_JOYSTICK is not set # CONFIG_INPUT_TABLET is not set # CONFIG_INPUT_TOUCHSCREEN is not set @@ -622,9 +698,11 @@ CONFIG_SERIAL_ATMEL=y CONFIG_SERIAL_ATMEL_CONSOLE=y CONFIG_SERIAL_ATMEL_PDC=y # CONFIG_SERIAL_ATMEL_TTYAT is not set +# CONFIG_SERIAL_MAX3100 is not set CONFIG_SERIAL_CORE=y CONFIG_SERIAL_CORE_CONSOLE=y CONFIG_UNIX98_PTYS=y +# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set # CONFIG_LEGACY_PTYS is not set # CONFIG_IPMI_HANDLER is not set # CONFIG_HW_RANDOM is not set @@ -633,7 +711,9 @@ CONFIG_UNIX98_PTYS=y # CONFIG_TCG_TPM is not set CONFIG_I2C=m CONFIG_I2C_BOARDINFO=y +CONFIG_I2C_COMPAT=y CONFIG_I2C_CHARDEV=m +CONFIG_I2C_HELPER_AUTO=y CONFIG_I2C_ALGOBIT=m # @@ -643,6 +723,7 @@ CONFIG_I2C_ALGOBIT=m # # I2C system bus drivers (mostly embedded / system-on-chip) # +# CONFIG_I2C_DESIGNWARE is not set CONFIG_I2C_GPIO=m # CONFIG_I2C_OCORES is not set # CONFIG_I2C_SIMTEC is not set @@ -663,14 +744,6 @@ CONFIG_I2C_GPIO=m # Miscellaneous I2C Chip support # # CONFIG_DS1682 is not set -CONFIG_EEPROM_AT24=m -# CONFIG_EEPROM_LEGACY is not set -# CONFIG_SENSORS_PCF8574 is not set -# CONFIG_PCF8575 is not set -# CONFIG_SENSORS_PCA9539 is not set -# CONFIG_SENSORS_PCF8591 is not set -# CONFIG_TPS65010 is not set -# CONFIG_SENSORS_MAX6875 is not set # CONFIG_SENSORS_TSL2550 is not set # CONFIG_I2C_DEBUG_CORE is not set # CONFIG_I2C_DEBUG_ALGO is not set @@ -685,19 +758,28 @@ CONFIG_SPI_MASTER=y # CONFIG_SPI_ATMEL=y # CONFIG_SPI_BITBANG is not set +# CONFIG_SPI_GPIO is not set # # SPI Protocol Masters # -# CONFIG_EEPROM_AT25 is not set CONFIG_SPI_SPIDEV=m # CONFIG_SPI_TLE62X0 is not set + +# +# PPS support +# +# CONFIG_PPS is not set CONFIG_ARCH_REQUIRE_GPIOLIB=y CONFIG_GPIOLIB=y # CONFIG_DEBUG_GPIO is not set CONFIG_GPIO_SYSFS=y # +# Memory mapped GPIO expanders: +# + +# # I2C GPIO expanders: # # CONFIG_GPIO_MAX732X is not set @@ -713,11 +795,15 @@ CONFIG_GPIO_SYSFS=y # # CONFIG_GPIO_MAX7301 is not set # CONFIG_GPIO_MCP23S08 is not set +# CONFIG_GPIO_MC33880 is not set + +# +# AC97 GPIO expanders: +# # CONFIG_W1 is not set # CONFIG_POWER_SUPPLY is not set # CONFIG_HWMON is not set # CONFIG_THERMAL is not set -# CONFIG_THERMAL_HWMON is not set CONFIG_WATCHDOG=y # CONFIG_WATCHDOG_NOWAYOUT is not set @@ -726,11 +812,11 @@ CONFIG_WATCHDOG=y # # CONFIG_SOFT_WATCHDOG is not set CONFIG_AT32AP700X_WDT=y +CONFIG_SSB_POSSIBLE=y # # Sonics Silicon Backplane # -CONFIG_SSB_POSSIBLE=y # CONFIG_SSB is not set # @@ -739,22 +825,17 @@ CONFIG_SSB_POSSIBLE=y # CONFIG_MFD_CORE is not set # CONFIG_MFD_SM501 is not set # CONFIG_HTC_PASIC3 is not set - -# -# Multimedia devices -# - -# -# Multimedia core support -# -# CONFIG_VIDEO_DEV is not set -# CONFIG_DVB_CORE is not set -# CONFIG_VIDEO_MEDIA is not set - -# -# Multimedia drivers -# -# CONFIG_DAB is not set +# CONFIG_TPS65010 is not set +# CONFIG_MFD_TMIO is not set +# CONFIG_MFD_WM8400 is not set +# CONFIG_MFD_WM831X is not set +# CONFIG_MFD_WM8350_I2C is not set +# 
CONFIG_MFD_PCF50633 is not set +# CONFIG_MFD_MC13783 is not set +# CONFIG_AB3100_CORE is not set +# CONFIG_EZX_PCAP is not set +# CONFIG_REGULATOR is not set +# CONFIG_MEDIA_SUPPORT is not set # # Graphics support @@ -764,6 +845,7 @@ CONFIG_SSB_POSSIBLE=y CONFIG_FB=y # CONFIG_FIRMWARE_EDID is not set # CONFIG_FB_DDC is not set +# CONFIG_FB_BOOT_VESA_SUPPORT is not set CONFIG_FB_CFB_FILLRECT=y CONFIG_FB_CFB_COPYAREA=y CONFIG_FB_CFB_IMAGEBLIT=y @@ -785,10 +867,15 @@ CONFIG_FB_CFB_IMAGEBLIT=y # CONFIG_FB_S1D13XXX is not set CONFIG_FB_ATMEL=y # CONFIG_FB_VIRTUAL is not set +# CONFIG_FB_METRONOME is not set +# CONFIG_FB_MB862XX is not set +# CONFIG_FB_BROADSHEET is not set CONFIG_BACKLIGHT_LCD_SUPPORT=y CONFIG_LCD_CLASS_DEVICE=y +# CONFIG_LCD_LMS283GF05 is not set CONFIG_LCD_LTV350QV=y # CONFIG_LCD_ILI9320 is not set +# CONFIG_LCD_TDO24M is not set # CONFIG_LCD_VGG2432A4 is not set # CONFIG_LCD_PLATFORM is not set # CONFIG_BACKLIGHT_CLASS_DEVICE is not set @@ -799,6 +886,8 @@ CONFIG_LCD_LTV350QV=y # CONFIG_DISPLAY_SUPPORT is not set # CONFIG_LOGO is not set CONFIG_SOUND=m +CONFIG_SOUND_OSS_CORE=y +CONFIG_SOUND_OSS_CORE_PRECLAIM=y CONFIG_SND=m CONFIG_SND_TIMER=m CONFIG_SND_PCM=m @@ -807,12 +896,24 @@ CONFIG_SND_OSSEMUL=y CONFIG_SND_MIXER_OSS=m CONFIG_SND_PCM_OSS=m CONFIG_SND_PCM_OSS_PLUGINS=y +# CONFIG_SND_HRTIMER is not set # CONFIG_SND_DYNAMIC_MINORS is not set # CONFIG_SND_SUPPORT_OLD_API is not set # CONFIG_SND_VERBOSE_PROCFS is not set # CONFIG_SND_VERBOSE_PRINTK is not set # CONFIG_SND_DEBUG is not set +# CONFIG_SND_RAWMIDI_SEQ is not set +# CONFIG_SND_OPL3_LIB_SEQ is not set +# CONFIG_SND_OPL4_LIB_SEQ is not set +# CONFIG_SND_SBAWE_SEQ is not set +# CONFIG_SND_EMU10K1_SEQ is not set # CONFIG_SND_DRIVERS is not set + +# +# Atmel devices (AVR32 and AT91) +# +# CONFIG_SND_ATMEL_ABDAC is not set +# CONFIG_SND_ATMEL_AC97C is not set CONFIG_SND_SPI=y CONFIG_SND_AT73C213=m CONFIG_SND_AT73C213_TARGET_BITRATE=48000 @@ -825,33 +926,43 @@ CONFIG_USB_SUPPORT=y # CONFIG_USB_ARCH_HAS_EHCI is not set # CONFIG_USB_OTG_WHITELIST is not set # CONFIG_USB_OTG_BLACKLIST_HUB is not set +# CONFIG_USB_GADGET_MUSB_HDRC is not set # -# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support' +# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may # CONFIG_USB_GADGET=y # CONFIG_USB_GADGET_DEBUG is not set # CONFIG_USB_GADGET_DEBUG_FILES is not set # CONFIG_USB_GADGET_DEBUG_FS is not set +CONFIG_USB_GADGET_VBUS_DRAW=2 CONFIG_USB_GADGET_SELECTED=y -# CONFIG_USB_GADGET_AMD5536UDC is not set +# CONFIG_USB_GADGET_AT91 is not set CONFIG_USB_GADGET_ATMEL_USBA=y CONFIG_USB_ATMEL_USBA=y # CONFIG_USB_GADGET_FSL_USB2 is not set -# CONFIG_USB_GADGET_NET2280 is not set -# CONFIG_USB_GADGET_PXA25X is not set -# CONFIG_USB_GADGET_M66592 is not set -# CONFIG_USB_GADGET_PXA27X is not set -# CONFIG_USB_GADGET_GOKU is not set # CONFIG_USB_GADGET_LH7A40X is not set # CONFIG_USB_GADGET_OMAP is not set +# CONFIG_USB_GADGET_PXA25X is not set +# CONFIG_USB_GADGET_R8A66597 is not set +# CONFIG_USB_GADGET_PXA27X is not set +# CONFIG_USB_GADGET_S3C_HSOTG is not set +# CONFIG_USB_GADGET_IMX is not set # CONFIG_USB_GADGET_S3C2410 is not set -# CONFIG_USB_GADGET_AT91 is not set +# CONFIG_USB_GADGET_M66592 is not set +# CONFIG_USB_GADGET_AMD5536UDC is not set +# CONFIG_USB_GADGET_FSL_QE is not set +# CONFIG_USB_GADGET_CI13XXX is not set +# CONFIG_USB_GADGET_NET2280 is not set +# CONFIG_USB_GADGET_GOKU is not set +# CONFIG_USB_GADGET_LANGWELL is not set # CONFIG_USB_GADGET_DUMMY_HCD is not set CONFIG_USB_GADGET_DUALSPEED=y CONFIG_USB_ZERO=m +# 
CONFIG_USB_AUDIO is not set CONFIG_USB_ETH=m CONFIG_USB_ETH_RNDIS=y +# CONFIG_USB_ETH_EEM is not set CONFIG_USB_GADGETFS=m CONFIG_USB_FILE_STORAGE=m # CONFIG_USB_FILE_STORAGE_TEST is not set @@ -859,12 +970,18 @@ CONFIG_USB_G_SERIAL=m # CONFIG_USB_MIDI_GADGET is not set # CONFIG_USB_G_PRINTER is not set CONFIG_USB_CDC_COMPOSITE=m + +# +# OTG and related infrastructure +# +# CONFIG_USB_GPIO_VBUS is not set +# CONFIG_NOP_USB_XCEIV is not set CONFIG_MMC=y # CONFIG_MMC_DEBUG is not set # CONFIG_MMC_UNSAFE_RESUME is not set # -# MMC/SD Card Drivers +# MMC/SD/SDIO Card Drivers # CONFIG_MMC_BLOCK=y CONFIG_MMC_BLOCK_BOUNCE=y @@ -872,10 +989,12 @@ CONFIG_MMC_BLOCK_BOUNCE=y # CONFIG_MMC_TEST is not set # -# MMC/SD Host Controller Drivers +# MMC/SD/SDIO Host Controller Drivers # # CONFIG_MMC_SDHCI is not set +# CONFIG_MMC_AT91 is not set CONFIG_MMC_ATMELMCI=y +# CONFIG_MMC_ATMELMCI_DMA is not set CONFIG_MMC_SPI=m # CONFIG_MEMSTICK is not set CONFIG_NEW_LEDS=y @@ -887,7 +1006,11 @@ CONFIG_LEDS_CLASS=m CONFIG_LEDS_ATMEL_PWM=m # CONFIG_LEDS_PCA9532 is not set CONFIG_LEDS_GPIO=m +CONFIG_LEDS_GPIO_PLATFORM=y +# CONFIG_LEDS_LP3944 is not set # CONFIG_LEDS_PCA955X is not set +# CONFIG_LEDS_DAC124S085 is not set +# CONFIG_LEDS_BD2802 is not set # # LED Triggers @@ -895,7 +1018,13 @@ CONFIG_LEDS_GPIO=m CONFIG_LEDS_TRIGGERS=y CONFIG_LEDS_TRIGGER_TIMER=m CONFIG_LEDS_TRIGGER_HEARTBEAT=m +# CONFIG_LEDS_TRIGGER_BACKLIGHT is not set +# CONFIG_LEDS_TRIGGER_GPIO is not set CONFIG_LEDS_TRIGGER_DEFAULT_ON=m + +# +# iptables trigger is under Netfilter config (LED target) +# # CONFIG_ACCESSIBILITY is not set CONFIG_RTC_LIB=y CONFIG_RTC_CLASS=y @@ -927,25 +1056,33 @@ CONFIG_RTC_INTF_DEV=y # CONFIG_RTC_DRV_M41T80 is not set # CONFIG_RTC_DRV_S35390A is not set # CONFIG_RTC_DRV_FM3130 is not set +# CONFIG_RTC_DRV_RX8581 is not set +# CONFIG_RTC_DRV_RX8025 is not set # # SPI RTC drivers # # CONFIG_RTC_DRV_M41T94 is not set # CONFIG_RTC_DRV_DS1305 is not set +# CONFIG_RTC_DRV_DS1390 is not set # CONFIG_RTC_DRV_MAX6902 is not set # CONFIG_RTC_DRV_R9701 is not set # CONFIG_RTC_DRV_RS5C348 is not set +# CONFIG_RTC_DRV_DS3234 is not set +# CONFIG_RTC_DRV_PCF2123 is not set # # Platform RTC drivers # +# CONFIG_RTC_DRV_DS1286 is not set # CONFIG_RTC_DRV_DS1511 is not set # CONFIG_RTC_DRV_DS1553 is not set # CONFIG_RTC_DRV_DS1742 is not set # CONFIG_RTC_DRV_STK17TA8 is not set # CONFIG_RTC_DRV_M48T86 is not set +# CONFIG_RTC_DRV_M48T35 is not set # CONFIG_RTC_DRV_M48T59 is not set +# CONFIG_RTC_DRV_BQ4802 is not set # CONFIG_RTC_DRV_V3020 is not set # @@ -964,25 +1101,45 @@ CONFIG_DMA_ENGINE=y # DMA Clients # # CONFIG_NET_DMA is not set +# CONFIG_ASYNC_TX_DMA is not set # CONFIG_DMATEST is not set +# CONFIG_AUXDISPLAY is not set # CONFIG_UIO is not set # +# TI VLYNQ +# +# CONFIG_STAGING is not set + +# # File systems # CONFIG_EXT2_FS=y # CONFIG_EXT2_FS_XATTR is not set # CONFIG_EXT2_FS_XIP is not set CONFIG_EXT3_FS=y +# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set # CONFIG_EXT3_FS_XATTR is not set -# CONFIG_EXT4DEV_FS is not set +CONFIG_EXT4_FS=y +CONFIG_EXT4_FS_XATTR=y +# CONFIG_EXT4_FS_POSIX_ACL is not set +# CONFIG_EXT4_FS_SECURITY is not set +# CONFIG_EXT4_DEBUG is not set CONFIG_JBD=y # CONFIG_JBD_DEBUG is not set +CONFIG_JBD2=y +# CONFIG_JBD2_DEBUG is not set +CONFIG_FS_MBCACHE=y # CONFIG_REISERFS_FS is not set # CONFIG_JFS_FS is not set # CONFIG_FS_POSIX_ACL is not set # CONFIG_XFS_FS is not set +# CONFIG_GFS2_FS is not set # CONFIG_OCFS2_FS is not set +# CONFIG_BTRFS_FS is not set +# CONFIG_NILFS2_FS is not set 
+CONFIG_FILE_LOCKING=y +CONFIG_FSNOTIFY=y # CONFIG_DNOTIFY is not set CONFIG_INOTIFY=y CONFIG_INOTIFY_USER=y @@ -990,6 +1147,12 @@ CONFIG_INOTIFY_USER=y # CONFIG_AUTOFS_FS is not set # CONFIG_AUTOFS4_FS is not set CONFIG_FUSE_FS=m +# CONFIG_CUSE is not set + +# +# Caches +# +# CONFIG_FSCACHE is not set # # CD-ROM/DVD Filesystems @@ -1013,15 +1176,13 @@ CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1" CONFIG_PROC_FS=y CONFIG_PROC_KCORE=y CONFIG_PROC_SYSCTL=y +CONFIG_PROC_PAGE_MONITOR=y CONFIG_SYSFS=y CONFIG_TMPFS=y # CONFIG_TMPFS_POSIX_ACL is not set # CONFIG_HUGETLB_PAGE is not set # CONFIG_CONFIGFS_FS is not set - -# -# Miscellaneous filesystems -# +CONFIG_MISC_FILESYSTEMS=y # CONFIG_ADFS_FS is not set # CONFIG_AFFS_FS is not set # CONFIG_HFS_FS is not set @@ -1039,7 +1200,14 @@ CONFIG_JFFS2_ZLIB=y # CONFIG_JFFS2_LZO is not set CONFIG_JFFS2_RTIME=y # CONFIG_JFFS2_RUBIN is not set +CONFIG_UBIFS_FS=y +# CONFIG_UBIFS_FS_XATTR is not set +# CONFIG_UBIFS_FS_ADVANCED_COMPR is not set +CONFIG_UBIFS_FS_LZO=y +CONFIG_UBIFS_FS_ZLIB=y +# CONFIG_UBIFS_FS_DEBUG is not set # CONFIG_CRAMFS is not set +# CONFIG_SQUASHFS is not set # CONFIG_VXFS_FS is not set CONFIG_MINIX_FS=m # CONFIG_OMFS_FS is not set @@ -1122,6 +1290,7 @@ CONFIG_ENABLE_WARN_DEPRECATED=y CONFIG_ENABLE_MUST_CHECK=y CONFIG_FRAME_WARN=1024 CONFIG_MAGIC_SYSRQ=y +# CONFIG_STRIP_ASM_SYMS is not set # CONFIG_UNUSED_SYMBOLS is not set CONFIG_DEBUG_FS=y # CONFIG_HEADERS_CHECK is not set @@ -1130,6 +1299,9 @@ CONFIG_DEBUG_KERNEL=y CONFIG_DETECT_SOFTLOCKUP=y # CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0 +CONFIG_DETECT_HUNG_TASK=y +# CONFIG_BOOTPARAM_HUNG_TASK_PANIC is not set +CONFIG_BOOTPARAM_HUNG_TASK_PANIC_VALUE=0 CONFIG_SCHED_DEBUG=y # CONFIG_SCHEDSTATS is not set # CONFIG_TIMER_STATS is not set @@ -1145,6 +1317,7 @@ CONFIG_SCHED_DEBUG=y # CONFIG_LOCK_STAT is not set # CONFIG_DEBUG_SPINLOCK_SLEEP is not set # CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set +CONFIG_STACKTRACE=y # CONFIG_DEBUG_KOBJECT is not set CONFIG_DEBUG_BUGVERBOSE=y # CONFIG_DEBUG_INFO is not set @@ -1153,13 +1326,39 @@ CONFIG_DEBUG_BUGVERBOSE=y # CONFIG_DEBUG_MEMORY_INIT is not set # CONFIG_DEBUG_LIST is not set # CONFIG_DEBUG_SG is not set +# CONFIG_DEBUG_NOTIFIERS is not set +# CONFIG_DEBUG_CREDENTIALS is not set CONFIG_FRAME_POINTER=y # CONFIG_BOOT_PRINTK_DELAY is not set # CONFIG_RCU_TORTURE_TEST is not set +# CONFIG_RCU_CPU_STALL_DETECTOR is not set # CONFIG_KPROBES_SANITY_TEST is not set # CONFIG_BACKTRACE_SELF_TEST is not set +# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set +# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set # CONFIG_LKDTM is not set # CONFIG_FAULT_INJECTION is not set +# CONFIG_PAGE_POISONING is not set +CONFIG_NOP_TRACER=y +CONFIG_RING_BUFFER=y +CONFIG_EVENT_TRACING=y +CONFIG_CONTEXT_SWITCH_TRACER=y +CONFIG_RING_BUFFER_ALLOW_SWAP=y +CONFIG_TRACING=y +CONFIG_TRACING_SUPPORT=y +CONFIG_FTRACE=y +# CONFIG_IRQSOFF_TRACER is not set +# CONFIG_SCHED_TRACER is not set +# CONFIG_ENABLE_DEFAULT_TRACERS is not set +# CONFIG_BOOT_TRACER is not set +CONFIG_BRANCH_PROFILE_NONE=y +# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set +# CONFIG_PROFILE_ALL_BRANCHES is not set +# CONFIG_KMEMTRACE is not set +# CONFIG_WORKQUEUE_TRACER is not set +# CONFIG_BLK_DEV_IO_TRACE is not set +# CONFIG_RING_BUFFER_BENCHMARK is not set +# CONFIG_DYNAMIC_DEBUG is not set # CONFIG_SAMPLES is not set # @@ -1167,19 +1366,30 @@ CONFIG_FRAME_POINTER=y # # CONFIG_KEYS is not set # CONFIG_SECURITY is not set +# CONFIG_SECURITYFS is not set # 
CONFIG_SECURITY_FILE_CAPABILITIES is not set CONFIG_CRYPTO=y # # Crypto core or helper # -CONFIG_CRYPTO_ALGAPI=m +# CONFIG_CRYPTO_FIPS is not set +CONFIG_CRYPTO_ALGAPI=y +CONFIG_CRYPTO_ALGAPI2=y CONFIG_CRYPTO_AEAD=m +CONFIG_CRYPTO_AEAD2=y CONFIG_CRYPTO_BLKCIPHER=m +CONFIG_CRYPTO_BLKCIPHER2=y CONFIG_CRYPTO_HASH=m +CONFIG_CRYPTO_HASH2=y +CONFIG_CRYPTO_RNG=m +CONFIG_CRYPTO_RNG2=y +CONFIG_CRYPTO_PCOMP=y CONFIG_CRYPTO_MANAGER=m +CONFIG_CRYPTO_MANAGER2=y # CONFIG_CRYPTO_GF128MUL is not set # CONFIG_CRYPTO_NULL is not set +CONFIG_CRYPTO_WORKQUEUE=y # CONFIG_CRYPTO_CRYPTD is not set CONFIG_CRYPTO_AUTHENC=m # CONFIG_CRYPTO_TEST is not set @@ -1207,11 +1417,13 @@ CONFIG_CRYPTO_CBC=m # CONFIG_CRYPTO_HMAC=m # CONFIG_CRYPTO_XCBC is not set +# CONFIG_CRYPTO_VMAC is not set # # Digest # # CONFIG_CRYPTO_CRC32C is not set +# CONFIG_CRYPTO_GHASH is not set # CONFIG_CRYPTO_MD4 is not set CONFIG_CRYPTO_MD5=m # CONFIG_CRYPTO_MICHAEL_MIC is not set @@ -1228,7 +1440,7 @@ CONFIG_CRYPTO_SHA1=m # # Ciphers # -# CONFIG_CRYPTO_AES is not set +CONFIG_CRYPTO_AES=m # CONFIG_CRYPTO_ANUBIS is not set # CONFIG_CRYPTO_ARC4 is not set # CONFIG_CRYPTO_BLOWFISH is not set @@ -1247,18 +1459,24 @@ CONFIG_CRYPTO_DES=m # # Compression # -CONFIG_CRYPTO_DEFLATE=m -# CONFIG_CRYPTO_LZO is not set +CONFIG_CRYPTO_DEFLATE=y +# CONFIG_CRYPTO_ZLIB is not set +CONFIG_CRYPTO_LZO=y + +# +# Random Number Generation +# +CONFIG_CRYPTO_ANSI_CPRNG=m # CONFIG_CRYPTO_HW is not set +CONFIG_BINARY_PRINTF=y # # Library routines # CONFIG_BITREVERSE=y -# CONFIG_GENERIC_FIND_FIRST_BIT is not set -# CONFIG_GENERIC_FIND_NEXT_BIT is not set +CONFIG_GENERIC_FIND_LAST_BIT=y CONFIG_CRC_CCITT=m -# CONFIG_CRC16 is not set +CONFIG_CRC16=y CONFIG_CRC_T10DIF=m CONFIG_CRC_ITU_T=m CONFIG_CRC32=y @@ -1266,8 +1484,11 @@ CONFIG_CRC7=m # CONFIG_LIBCRC32C is not set CONFIG_ZLIB_INFLATE=y CONFIG_ZLIB_DEFLATE=y +CONFIG_LZO_COMPRESS=y +CONFIG_LZO_DECOMPRESS=y +CONFIG_DECOMPRESS_GZIP=y CONFIG_GENERIC_ALLOCATOR=y -CONFIG_PLIST=y CONFIG_HAS_IOMEM=y CONFIG_HAS_IOPORT=y CONFIG_HAS_DMA=y +CONFIG_NLATTR=y diff --git a/arch/avr32/configs/atstk1006_defconfig b/arch/avr32/configs/atstk1006_defconfig index c1603c4860e..363e2381f32 100644 --- a/arch/avr32/configs/atstk1006_defconfig +++ b/arch/avr32/configs/atstk1006_defconfig @@ -1,7 +1,7 @@ # # Automatically generated make config: don't edit -# Linux kernel version: 2.6.28-rc8 -# Thu Dec 18 11:22:23 2008 +# Linux kernel version: 2.6.32-rc5 +# Thu Oct 29 13:00:25 2009 # CONFIG_AVR32=y CONFIG_GENERIC_GPIO=y @@ -21,6 +21,7 @@ CONFIG_GENERIC_HWEIGHT=y CONFIG_GENERIC_CALIBRATE_DELAY=y CONFIG_GENERIC_BUG=y CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config" +CONFIG_CONSTRUCTORS=y # # General setup @@ -34,21 +35,36 @@ CONFIG_SWAP=y CONFIG_SYSVIPC=y CONFIG_SYSVIPC_SYSCTL=y CONFIG_POSIX_MQUEUE=y +CONFIG_POSIX_MQUEUE_SYSCTL=y # CONFIG_BSD_PROCESS_ACCT is not set # CONFIG_TASKSTATS is not set # CONFIG_AUDIT is not set + +# +# RCU Subsystem +# +CONFIG_TREE_RCU=y +# CONFIG_TREE_PREEMPT_RCU is not set +# CONFIG_RCU_TRACE is not set +CONFIG_RCU_FANOUT=32 +# CONFIG_RCU_FANOUT_EXACT is not set +# CONFIG_TREE_RCU_TRACE is not set # CONFIG_IKCONFIG is not set CONFIG_LOG_BUF_SHIFT=14 -# CONFIG_CGROUPS is not set # CONFIG_GROUP_SCHED is not set +# CONFIG_CGROUPS is not set CONFIG_SYSFS_DEPRECATED=y CONFIG_SYSFS_DEPRECATED_V2=y CONFIG_RELAY=y # CONFIG_NAMESPACES is not set CONFIG_BLK_DEV_INITRD=y CONFIG_INITRAMFS_SOURCE="" +CONFIG_RD_GZIP=y +# CONFIG_RD_BZIP2 is not set +# CONFIG_RD_LZMA is not set CONFIG_CC_OPTIMIZE_FOR_SIZE=y CONFIG_SYSCTL=y 
+CONFIG_ANON_INODES=y CONFIG_EMBEDDED=y # CONFIG_SYSCTL_SYSCALL is not set CONFIG_KALLSYMS=y @@ -58,32 +74,40 @@ CONFIG_HOTPLUG=y CONFIG_PRINTK=y CONFIG_BUG=y CONFIG_ELF_CORE=y -# CONFIG_COMPAT_BRK is not set # CONFIG_BASE_FULL is not set CONFIG_FUTEX=y -CONFIG_ANON_INODES=y CONFIG_EPOLL=y CONFIG_SIGNALFD=y CONFIG_TIMERFD=y CONFIG_EVENTFD=y CONFIG_SHMEM=y CONFIG_AIO=y + +# +# Kernel Performance Events And Counters +# CONFIG_VM_EVENT_COUNTERS=y CONFIG_SLUB_DEBUG=y +# CONFIG_COMPAT_BRK is not set # CONFIG_SLAB is not set CONFIG_SLUB=y # CONFIG_SLOB is not set CONFIG_PROFILING=y -# CONFIG_MARKERS is not set +CONFIG_TRACEPOINTS=y CONFIG_OPROFILE=m CONFIG_HAVE_OPROFILE=y CONFIG_KPROBES=y CONFIG_HAVE_KPROBES=y CONFIG_HAVE_CLK=y + +# +# GCOV-based kernel profiling +# +# CONFIG_GCOV_KERNEL is not set +# CONFIG_SLOW_WORK is not set # CONFIG_HAVE_GENERIC_DMA_COHERENT is not set CONFIG_SLABINFO=y CONFIG_RT_MUTEXES=y -# CONFIG_TINY_SHMEM is not set CONFIG_BASE_SMALL=1 CONFIG_MODULES=y # CONFIG_MODULE_FORCE_LOAD is not set @@ -91,11 +115,8 @@ CONFIG_MODULE_UNLOAD=y # CONFIG_MODULE_FORCE_UNLOAD is not set # CONFIG_MODVERSIONS is not set # CONFIG_MODULE_SRCVERSION_ALL is not set -CONFIG_KMOD=y CONFIG_BLOCK=y -# CONFIG_LBD is not set -# CONFIG_BLK_DEV_IO_TRACE is not set -# CONFIG_LSF is not set +CONFIG_LBDAF=y # CONFIG_BLK_DEV_BSG is not set # CONFIG_BLK_DEV_INTEGRITY is not set @@ -111,7 +132,6 @@ CONFIG_IOSCHED_CFQ=y CONFIG_DEFAULT_CFQ=y # CONFIG_DEFAULT_NOOP is not set CONFIG_DEFAULT_IOSCHED="cfq" -CONFIG_CLASSIC_RCU=y CONFIG_FREEZER=y # @@ -128,8 +148,11 @@ CONFIG_PLATFORM_AT32AP=y CONFIG_CPU_AT32AP700X=y CONFIG_CPU_AT32AP7000=y CONFIG_BOARD_ATSTK1000=y -# CONFIG_BOARD_ATNGW100 is not set +# CONFIG_BOARD_ATNGW100_MKI is not set +# CONFIG_BOARD_ATNGW100_MKII is not set +# CONFIG_BOARD_HAMMERHEAD is not set # CONFIG_BOARD_FAVR_32 is not set +# CONFIG_BOARD_MERISC is not set # CONFIG_BOARD_MIMC200 is not set # CONFIG_BOARD_ATSTK1002 is not set # CONFIG_BOARD_ATSTK1003 is not set @@ -156,7 +179,7 @@ CONFIG_PREEMPT_NONE=y # CONFIG_PREEMPT_VOLUNTARY is not set # CONFIG_PREEMPT is not set CONFIG_QUICKLIST=y -# CONFIG_HAVE_ARCH_BOOTMEM_NODE is not set +# CONFIG_HAVE_ARCH_BOOTMEM is not set # CONFIG_ARCH_HAVE_MEMORY_PRESENT is not set # CONFIG_NEED_NODE_MEMMAP_SIZE is not set CONFIG_ARCH_FLATMEM_ENABLE=y @@ -170,12 +193,14 @@ CONFIG_FLATMEM=y CONFIG_FLAT_NODE_MEM_MAP=y CONFIG_PAGEFLAGS_EXTENDED=y CONFIG_SPLIT_PTLOCK_CPUS=4 -# CONFIG_RESOURCES_64BIT is not set # CONFIG_PHYS_ADDR_T_64BIT is not set CONFIG_ZONE_DMA_FLAG=0 CONFIG_NR_QUICK=2 CONFIG_VIRT_TO_BUS=y -CONFIG_UNEVICTABLE_LRU=y +CONFIG_HAVE_MLOCK=y +CONFIG_HAVE_MLOCKED_PAGE_BIT=y +# CONFIG_KSM is not set +CONFIG_DEFAULT_MMAP_MIN_ADDR=4096 # CONFIG_OWNERSHIP_TRACE is not set CONFIG_NMI_DEBUGGING=y # CONFIG_HZ_100 is not set @@ -194,6 +219,7 @@ CONFIG_PM=y CONFIG_PM_SLEEP=y CONFIG_SUSPEND=y CONFIG_SUSPEND_FREEZER=y +# CONFIG_PM_RUNTIME is not set CONFIG_ARCH_SUSPEND_POSSIBLE=y # @@ -294,6 +320,7 @@ CONFIG_IPV6_TUNNEL=m # CONFIG_NETFILTER is not set # CONFIG_IP_DCCP is not set # CONFIG_IP_SCTP is not set +# CONFIG_RDS is not set # CONFIG_TIPC is not set # CONFIG_ATM is not set CONFIG_STP=m @@ -309,20 +336,24 @@ CONFIG_LLC=m # CONFIG_LAPB is not set # CONFIG_ECONET is not set # CONFIG_WAN_ROUTER is not set +# CONFIG_PHONET is not set +# CONFIG_IEEE802154 is not set # CONFIG_NET_SCHED is not set +# CONFIG_DCB is not set # # Network testing # # CONFIG_NET_PKTGEN is not set # CONFIG_NET_TCPPROBE is not set +# CONFIG_NET_DROP_MONITOR is not set # CONFIG_HAMRADIO 
is not set # CONFIG_CAN is not set # CONFIG_IRDA is not set # CONFIG_BT is not set # CONFIG_AF_RXRPC is not set -# CONFIG_PHONET is not set # CONFIG_WIRELESS is not set +# CONFIG_WIMAX is not set # CONFIG_RFKILL is not set # CONFIG_NET_9P is not set @@ -334,6 +365,7 @@ CONFIG_LLC=m # Generic Driver Options # CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" +# CONFIG_DEVTMPFS is not set CONFIG_STANDALONE=y # CONFIG_PREVENT_FIRMWARE_BUILD is not set # CONFIG_FW_LOADER is not set @@ -343,6 +375,7 @@ CONFIG_STANDALONE=y # CONFIG_CONNECTOR is not set CONFIG_MTD=y # CONFIG_MTD_DEBUG is not set +# CONFIG_MTD_TESTS is not set # CONFIG_MTD_CONCAT is not set CONFIG_MTD_PARTITIONS=y # CONFIG_MTD_REDBOOT_PARTS is not set @@ -393,9 +426,7 @@ CONFIG_MTD_CFI_UTIL=y # # CONFIG_MTD_COMPLEX_MAPPINGS is not set CONFIG_MTD_PHYSMAP=y -CONFIG_MTD_PHYSMAP_START=0x8000000 -CONFIG_MTD_PHYSMAP_LEN=0x0 -CONFIG_MTD_PHYSMAP_BANKWIDTH=2 +# CONFIG_MTD_PHYSMAP_COMPAT is not set # CONFIG_MTD_PLATRAM is not set # @@ -406,6 +437,7 @@ CONFIG_MTD_DATAFLASH=m CONFIG_MTD_DATAFLASH_OTP=y CONFIG_MTD_M25P80=m CONFIG_M25PXX_USE_FAST_READ=y +# CONFIG_MTD_SST25L is not set # CONFIG_MTD_SLRAM is not set # CONFIG_MTD_PHRAM is not set # CONFIG_MTD_MTDRAM is not set @@ -432,6 +464,11 @@ CONFIG_MTD_NAND_ATMEL_ECC_HW=y # CONFIG_MTD_ONENAND is not set # +# LPDDR flash memory drivers +# +# CONFIG_MTD_LPDDR is not set + +# # UBI - Unsorted block images # CONFIG_MTD_UBI=y @@ -460,13 +497,22 @@ CONFIG_ATMEL_PWM=m CONFIG_ATMEL_TCLIB=y CONFIG_ATMEL_TCB_CLKSRC=y CONFIG_ATMEL_TCB_CLKSRC_BLOCK=0 -# CONFIG_EEPROM_93CX6 is not set # CONFIG_ICS932S401 is not set CONFIG_ATMEL_SSC=m # CONFIG_ENCLOSURE_SERVICES is not set +# CONFIG_ISL29003 is not set # CONFIG_C2PORT is not set # +# EEPROM support +# +# CONFIG_EEPROM_AT24 is not set +# CONFIG_EEPROM_AT25 is not set +# CONFIG_EEPROM_LEGACY is not set +# CONFIG_EEPROM_MAX6875 is not set +# CONFIG_EEPROM_93CX6 is not set + +# # SCSI device support # # CONFIG_RAID_ATTRS is not set @@ -486,10 +532,6 @@ CONFIG_BLK_DEV_SR=m # CONFIG_BLK_DEV_SR_VENDOR is not set # CONFIG_CHR_DEV_SG is not set # CONFIG_CHR_DEV_SCH is not set - -# -# Some SCSI devices (e.g. 
CD jukebox) support multiple LUNs -# # CONFIG_SCSI_MULTI_LUN is not set # CONFIG_SCSI_CONSTANTS is not set # CONFIG_SCSI_LOGGING is not set @@ -506,8 +548,10 @@ CONFIG_SCSI_WAIT_SCAN=m # CONFIG_SCSI_SRP_ATTRS is not set # CONFIG_SCSI_LOWLEVEL is not set # CONFIG_SCSI_DH is not set +# CONFIG_SCSI_OSD_INITIATOR is not set CONFIG_ATA=m # CONFIG_ATA_NONSTANDARD is not set +CONFIG_ATA_VERBOSE_ERROR=y # CONFIG_SATA_PMP is not set CONFIG_ATA_SFF=y # CONFIG_SATA_MV is not set @@ -536,12 +580,17 @@ CONFIG_PHYLIB=y # CONFIG_BROADCOM_PHY is not set # CONFIG_ICPLUS_PHY is not set # CONFIG_REALTEK_PHY is not set +# CONFIG_NATIONAL_PHY is not set +# CONFIG_STE10XP is not set +# CONFIG_LSI_ET1011C_PHY is not set # CONFIG_FIXED_PHY is not set # CONFIG_MDIO_BITBANG is not set CONFIG_NET_ETHERNET=y # CONFIG_MII is not set CONFIG_MACB=y # CONFIG_ENC28J60 is not set +# CONFIG_ETHOC is not set +# CONFIG_DNET is not set # CONFIG_IBM_NEW_EMAC_ZMII is not set # CONFIG_IBM_NEW_EMAC_RGMII is not set # CONFIG_IBM_NEW_EMAC_TAH is not set @@ -550,15 +599,18 @@ CONFIG_MACB=y # CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set # CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set # CONFIG_B44 is not set +# CONFIG_KS8842 is not set +# CONFIG_KS8851 is not set +# CONFIG_KS8851_MLL is not set # CONFIG_NETDEV_1000 is not set # CONFIG_NETDEV_10000 is not set +CONFIG_WLAN=y +# CONFIG_WLAN_PRE80211 is not set +# CONFIG_WLAN_80211 is not set # -# Wireless LAN +# Enable WiMAX (Networking options) to see the WiMAX drivers # -# CONFIG_WLAN_PRE80211 is not set -# CONFIG_WLAN_80211 is not set -# CONFIG_IWLWIFI_LEDS is not set # CONFIG_WAN is not set CONFIG_PPP=m # CONFIG_PPP_MULTILINK is not set @@ -600,18 +652,25 @@ CONFIG_INPUT_EVDEV=m # Input Device Drivers # CONFIG_INPUT_KEYBOARD=y +# CONFIG_KEYBOARD_ADP5588 is not set # CONFIG_KEYBOARD_ATKBD is not set -# CONFIG_KEYBOARD_SUNKBD is not set +# CONFIG_QT2160 is not set # CONFIG_KEYBOARD_LKKBD is not set -# CONFIG_KEYBOARD_XTKBD is not set +CONFIG_KEYBOARD_GPIO=m +# CONFIG_KEYBOARD_MATRIX is not set +# CONFIG_KEYBOARD_LM8323 is not set +# CONFIG_KEYBOARD_MAX7359 is not set # CONFIG_KEYBOARD_NEWTON is not set +# CONFIG_KEYBOARD_OPENCORES is not set # CONFIG_KEYBOARD_STOWAWAY is not set -CONFIG_KEYBOARD_GPIO=m +# CONFIG_KEYBOARD_SUNKBD is not set +# CONFIG_KEYBOARD_XTKBD is not set CONFIG_INPUT_MOUSE=y # CONFIG_MOUSE_PS2 is not set # CONFIG_MOUSE_SERIAL is not set # CONFIG_MOUSE_VSXXXAA is not set CONFIG_MOUSE_GPIO=m +# CONFIG_MOUSE_SYNAPTICS_I2C is not set # CONFIG_INPUT_JOYSTICK is not set # CONFIG_INPUT_TABLET is not set # CONFIG_INPUT_TOUCHSCREEN is not set @@ -642,9 +701,11 @@ CONFIG_SERIAL_ATMEL=y CONFIG_SERIAL_ATMEL_CONSOLE=y CONFIG_SERIAL_ATMEL_PDC=y # CONFIG_SERIAL_ATMEL_TTYAT is not set +# CONFIG_SERIAL_MAX3100 is not set CONFIG_SERIAL_CORE=y CONFIG_SERIAL_CORE_CONSOLE=y CONFIG_UNIX98_PTYS=y +# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set # CONFIG_LEGACY_PTYS is not set # CONFIG_IPMI_HANDLER is not set # CONFIG_HW_RANDOM is not set @@ -653,6 +714,7 @@ CONFIG_UNIX98_PTYS=y # CONFIG_TCG_TPM is not set CONFIG_I2C=m CONFIG_I2C_BOARDINFO=y +CONFIG_I2C_COMPAT=y CONFIG_I2C_CHARDEV=m CONFIG_I2C_HELPER_AUTO=y CONFIG_I2C_ALGOBIT=m @@ -664,6 +726,7 @@ CONFIG_I2C_ALGOBIT=m # # I2C system bus drivers (mostly embedded / system-on-chip) # +# CONFIG_I2C_DESIGNWARE is not set CONFIG_I2C_GPIO=m # CONFIG_I2C_OCORES is not set # CONFIG_I2C_SIMTEC is not set @@ -684,14 +747,6 @@ CONFIG_I2C_GPIO=m # Miscellaneous I2C Chip support # # CONFIG_DS1682 is not set -# CONFIG_EEPROM_AT24 is not set -# 
CONFIG_EEPROM_LEGACY is not set -# CONFIG_SENSORS_PCF8574 is not set -# CONFIG_PCF8575 is not set -# CONFIG_SENSORS_PCA9539 is not set -# CONFIG_SENSORS_PCF8591 is not set -# CONFIG_TPS65010 is not set -# CONFIG_SENSORS_MAX6875 is not set # CONFIG_SENSORS_TSL2550 is not set # CONFIG_I2C_DEBUG_CORE is not set # CONFIG_I2C_DEBUG_ALGO is not set @@ -706,13 +761,18 @@ CONFIG_SPI_MASTER=y # CONFIG_SPI_ATMEL=y # CONFIG_SPI_BITBANG is not set +# CONFIG_SPI_GPIO is not set # # SPI Protocol Masters # -# CONFIG_EEPROM_AT25 is not set CONFIG_SPI_SPIDEV=m # CONFIG_SPI_TLE62X0 is not set + +# +# PPS support +# +# CONFIG_PPS is not set CONFIG_ARCH_REQUIRE_GPIOLIB=y CONFIG_GPIOLIB=y # CONFIG_DEBUG_GPIO is not set @@ -738,11 +798,15 @@ CONFIG_GPIO_SYSFS=y # # CONFIG_GPIO_MAX7301 is not set # CONFIG_GPIO_MCP23S08 is not set +# CONFIG_GPIO_MC33880 is not set + +# +# AC97 GPIO expanders: +# # CONFIG_W1 is not set # CONFIG_POWER_SUPPLY is not set # CONFIG_HWMON is not set # CONFIG_THERMAL is not set -# CONFIG_THERMAL_HWMON is not set CONFIG_WATCHDOG=y # CONFIG_WATCHDOG_NOWAYOUT is not set @@ -764,26 +828,17 @@ CONFIG_SSB_POSSIBLE=y # CONFIG_MFD_CORE is not set # CONFIG_MFD_SM501 is not set # CONFIG_HTC_PASIC3 is not set +# CONFIG_TPS65010 is not set # CONFIG_MFD_TMIO is not set # CONFIG_MFD_WM8400 is not set +# CONFIG_MFD_WM831X is not set # CONFIG_MFD_WM8350_I2C is not set +# CONFIG_MFD_PCF50633 is not set +# CONFIG_MFD_MC13783 is not set +# CONFIG_AB3100_CORE is not set +# CONFIG_EZX_PCAP is not set # CONFIG_REGULATOR is not set - -# -# Multimedia devices -# - -# -# Multimedia core support -# -# CONFIG_VIDEO_DEV is not set -# CONFIG_DVB_CORE is not set -# CONFIG_VIDEO_MEDIA is not set - -# -# Multimedia drivers -# -# CONFIG_DAB is not set +# CONFIG_MEDIA_SUPPORT is not set # # Graphics support @@ -817,8 +872,10 @@ CONFIG_FB_ATMEL=y # CONFIG_FB_VIRTUAL is not set # CONFIG_FB_METRONOME is not set # CONFIG_FB_MB862XX is not set +# CONFIG_FB_BROADSHEET is not set CONFIG_BACKLIGHT_LCD_SUPPORT=y CONFIG_LCD_CLASS_DEVICE=y +# CONFIG_LCD_LMS283GF05 is not set CONFIG_LCD_LTV350QV=y # CONFIG_LCD_ILI9320 is not set # CONFIG_LCD_TDO24M is not set @@ -833,6 +890,7 @@ CONFIG_LCD_LTV350QV=y # CONFIG_LOGO is not set CONFIG_SOUND=m CONFIG_SOUND_OSS_CORE=y +CONFIG_SOUND_OSS_CORE_PRECLAIM=y CONFIG_SND=m CONFIG_SND_TIMER=m CONFIG_SND_PCM=m @@ -841,16 +899,28 @@ CONFIG_SND_OSSEMUL=y CONFIG_SND_MIXER_OSS=m CONFIG_SND_PCM_OSS=m CONFIG_SND_PCM_OSS_PLUGINS=y +# CONFIG_SND_HRTIMER is not set # CONFIG_SND_DYNAMIC_MINORS is not set # CONFIG_SND_SUPPORT_OLD_API is not set # CONFIG_SND_VERBOSE_PROCFS is not set # CONFIG_SND_VERBOSE_PRINTK is not set # CONFIG_SND_DEBUG is not set +# CONFIG_SND_RAWMIDI_SEQ is not set +# CONFIG_SND_OPL3_LIB_SEQ is not set +# CONFIG_SND_OPL4_LIB_SEQ is not set +# CONFIG_SND_SBAWE_SEQ is not set +# CONFIG_SND_EMU10K1_SEQ is not set CONFIG_SND_DRIVERS=y # CONFIG_SND_DUMMY is not set # CONFIG_SND_MTPAV is not set # CONFIG_SND_SERIAL_U16550 is not set # CONFIG_SND_MPU401 is not set + +# +# Atmel devices (AVR32 and AT91) +# +# CONFIG_SND_ATMEL_ABDAC is not set +# CONFIG_SND_ATMEL_AC97C is not set CONFIG_SND_SPI=y CONFIG_SND_AT73C213=m CONFIG_SND_AT73C213_TARGET_BITRATE=48000 @@ -863,11 +933,10 @@ CONFIG_USB_SUPPORT=y # CONFIG_USB_ARCH_HAS_EHCI is not set # CONFIG_USB_OTG_WHITELIST is not set # CONFIG_USB_OTG_BLACKLIST_HUB is not set -# CONFIG_USB_MUSB_HDRC is not set # CONFIG_USB_GADGET_MUSB_HDRC is not set # -# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may also be needed; +# NOTE: USB_STORAGE depends on 
SCSI but BLK_DEV_SD may # CONFIG_USB_GADGET=y # CONFIG_USB_GADGET_DEBUG is not set @@ -882,18 +951,25 @@ CONFIG_USB_ATMEL_USBA=y # CONFIG_USB_GADGET_LH7A40X is not set # CONFIG_USB_GADGET_OMAP is not set # CONFIG_USB_GADGET_PXA25X is not set +# CONFIG_USB_GADGET_R8A66597 is not set # CONFIG_USB_GADGET_PXA27X is not set +# CONFIG_USB_GADGET_S3C_HSOTG is not set +# CONFIG_USB_GADGET_IMX is not set # CONFIG_USB_GADGET_S3C2410 is not set # CONFIG_USB_GADGET_M66592 is not set # CONFIG_USB_GADGET_AMD5536UDC is not set # CONFIG_USB_GADGET_FSL_QE is not set +# CONFIG_USB_GADGET_CI13XXX is not set # CONFIG_USB_GADGET_NET2280 is not set # CONFIG_USB_GADGET_GOKU is not set +# CONFIG_USB_GADGET_LANGWELL is not set # CONFIG_USB_GADGET_DUMMY_HCD is not set CONFIG_USB_GADGET_DUALSPEED=y CONFIG_USB_ZERO=m +# CONFIG_USB_AUDIO is not set CONFIG_USB_ETH=m CONFIG_USB_ETH_RNDIS=y +# CONFIG_USB_ETH_EEM is not set CONFIG_USB_GADGETFS=m CONFIG_USB_FILE_STORAGE=m # CONFIG_USB_FILE_STORAGE_TEST is not set @@ -901,6 +977,12 @@ CONFIG_USB_G_SERIAL=m # CONFIG_USB_MIDI_GADGET is not set # CONFIG_USB_G_PRINTER is not set # CONFIG_USB_CDC_COMPOSITE is not set + +# +# OTG and related infrastructure +# +# CONFIG_USB_GPIO_VBUS is not set +# CONFIG_NOP_USB_XCEIV is not set CONFIG_MMC=y # CONFIG_MMC_DEBUG is not set # CONFIG_MMC_UNSAFE_RESUME is not set @@ -917,6 +999,7 @@ CONFIG_MMC_BLOCK_BOUNCE=y # MMC/SD/SDIO Host Controller Drivers # # CONFIG_MMC_SDHCI is not set +# CONFIG_MMC_AT91 is not set CONFIG_MMC_ATMELMCI=y # CONFIG_MMC_ATMELMCI_DMA is not set CONFIG_MMC_SPI=m @@ -930,7 +1013,11 @@ CONFIG_LEDS_CLASS=m CONFIG_LEDS_ATMEL_PWM=m # CONFIG_LEDS_PCA9532 is not set CONFIG_LEDS_GPIO=m +CONFIG_LEDS_GPIO_PLATFORM=y +# CONFIG_LEDS_LP3944 is not set # CONFIG_LEDS_PCA955X is not set +# CONFIG_LEDS_DAC124S085 is not set +# CONFIG_LEDS_BD2802 is not set # # LED Triggers @@ -939,7 +1026,12 @@ CONFIG_LEDS_TRIGGERS=y CONFIG_LEDS_TRIGGER_TIMER=m CONFIG_LEDS_TRIGGER_HEARTBEAT=m # CONFIG_LEDS_TRIGGER_BACKLIGHT is not set +# CONFIG_LEDS_TRIGGER_GPIO is not set CONFIG_LEDS_TRIGGER_DEFAULT_ON=m + +# +# iptables trigger is under Netfilter config (LED target) +# # CONFIG_ACCESSIBILITY is not set CONFIG_RTC_LIB=y CONFIG_RTC_CLASS=y @@ -972,6 +1064,7 @@ CONFIG_RTC_INTF_DEV=y # CONFIG_RTC_DRV_S35390A is not set # CONFIG_RTC_DRV_FM3130 is not set # CONFIG_RTC_DRV_RX8581 is not set +# CONFIG_RTC_DRV_RX8025 is not set # # SPI RTC drivers @@ -983,6 +1076,7 @@ CONFIG_RTC_INTF_DEV=y # CONFIG_RTC_DRV_R9701 is not set # CONFIG_RTC_DRV_RS5C348 is not set # CONFIG_RTC_DRV_DS3234 is not set +# CONFIG_RTC_DRV_PCF2123 is not set # # Platform RTC drivers @@ -1014,32 +1108,42 @@ CONFIG_DMA_ENGINE=y # DMA Clients # # CONFIG_NET_DMA is not set +# CONFIG_ASYNC_TX_DMA is not set # CONFIG_DMATEST is not set +# CONFIG_AUXDISPLAY is not set # CONFIG_UIO is not set + +# +# TI VLYNQ +# # CONFIG_STAGING is not set -CONFIG_STAGING_EXCLUDE_BUILD=y # # File systems # -CONFIG_EXT2_FS=m +CONFIG_EXT2_FS=y # CONFIG_EXT2_FS_XATTR is not set # CONFIG_EXT2_FS_XIP is not set -CONFIG_EXT3_FS=m +CONFIG_EXT3_FS=y +# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set # CONFIG_EXT3_FS_XATTR is not set -CONFIG_EXT4_FS=m -CONFIG_EXT4DEV_COMPAT=y +CONFIG_EXT4_FS=y # CONFIG_EXT4_FS_XATTR is not set -CONFIG_JBD=m +# CONFIG_EXT4_DEBUG is not set +CONFIG_JBD=y # CONFIG_JBD_DEBUG is not set -CONFIG_JBD2=m +CONFIG_JBD2=y # CONFIG_JBD2_DEBUG is not set # CONFIG_REISERFS_FS is not set # CONFIG_JFS_FS is not set # CONFIG_FS_POSIX_ACL is not set -CONFIG_FILE_LOCKING=y # CONFIG_XFS_FS is not set +# 
CONFIG_GFS2_FS is not set # CONFIG_OCFS2_FS is not set +# CONFIG_BTRFS_FS is not set +# CONFIG_NILFS2_FS is not set +CONFIG_FILE_LOCKING=y +CONFIG_FSNOTIFY=y # CONFIG_DNOTIFY is not set CONFIG_INOTIFY=y CONFIG_INOTIFY_USER=y @@ -1047,6 +1151,12 @@ CONFIG_INOTIFY_USER=y # CONFIG_AUTOFS_FS is not set # CONFIG_AUTOFS4_FS is not set CONFIG_FUSE_FS=m +# CONFIG_CUSE is not set + +# +# Caches +# +# CONFIG_FSCACHE is not set # # CD-ROM/DVD Filesystems @@ -1076,10 +1186,7 @@ CONFIG_TMPFS=y # CONFIG_TMPFS_POSIX_ACL is not set # CONFIG_HUGETLB_PAGE is not set # CONFIG_CONFIGFS_FS is not set - -# -# Miscellaneous filesystems -# +CONFIG_MISC_FILESYSTEMS=y # CONFIG_ADFS_FS is not set # CONFIG_AFFS_FS is not set # CONFIG_HFS_FS is not set @@ -1099,12 +1206,13 @@ CONFIG_JFFS2_ZLIB=y CONFIG_JFFS2_RTIME=y # CONFIG_JFFS2_RUBIN is not set CONFIG_UBIFS_FS=y -CONFIG_UBIFS_FS_XATTR=y +# CONFIG_UBIFS_FS_XATTR is not set # CONFIG_UBIFS_FS_ADVANCED_COMPR is not set CONFIG_UBIFS_FS_LZO=y CONFIG_UBIFS_FS_ZLIB=y # CONFIG_UBIFS_FS_DEBUG is not set # CONFIG_CRAMFS is not set +# CONFIG_SQUASHFS is not set # CONFIG_VXFS_FS is not set CONFIG_MINIX_FS=m # CONFIG_OMFS_FS is not set @@ -1124,7 +1232,6 @@ CONFIG_LOCKD=y CONFIG_LOCKD_V4=y CONFIG_NFS_COMMON=y CONFIG_SUNRPC=y -# CONFIG_SUNRPC_REGISTER_V4 is not set # CONFIG_RPCSEC_GSS_KRB5 is not set # CONFIG_RPCSEC_GSS_SPKM3 is not set # CONFIG_SMB_FS is not set @@ -1188,6 +1295,7 @@ CONFIG_ENABLE_WARN_DEPRECATED=y CONFIG_ENABLE_MUST_CHECK=y CONFIG_FRAME_WARN=1024 CONFIG_MAGIC_SYSRQ=y +# CONFIG_STRIP_ASM_SYMS is not set # CONFIG_UNUSED_SYMBOLS is not set CONFIG_DEBUG_FS=y # CONFIG_HEADERS_CHECK is not set @@ -1196,6 +1304,9 @@ CONFIG_DEBUG_KERNEL=y CONFIG_DETECT_SOFTLOCKUP=y # CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0 +CONFIG_DETECT_HUNG_TASK=y +# CONFIG_BOOTPARAM_HUNG_TASK_PANIC is not set +CONFIG_BOOTPARAM_HUNG_TASK_PANIC_VALUE=0 CONFIG_SCHED_DEBUG=y # CONFIG_SCHEDSTATS is not set # CONFIG_TIMER_STATS is not set @@ -1211,6 +1322,7 @@ CONFIG_SCHED_DEBUG=y # CONFIG_LOCK_STAT is not set # CONFIG_DEBUG_SPINLOCK_SLEEP is not set # CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set +CONFIG_STACKTRACE=y # CONFIG_DEBUG_KOBJECT is not set CONFIG_DEBUG_BUGVERBOSE=y # CONFIG_DEBUG_INFO is not set @@ -1219,6 +1331,8 @@ CONFIG_DEBUG_BUGVERBOSE=y # CONFIG_DEBUG_MEMORY_INIT is not set # CONFIG_DEBUG_LIST is not set # CONFIG_DEBUG_SG is not set +# CONFIG_DEBUG_NOTIFIERS is not set +# CONFIG_DEBUG_CREDENTIALS is not set CONFIG_FRAME_POINTER=y # CONFIG_BOOT_PRINTK_DELAY is not set # CONFIG_RCU_TORTURE_TEST is not set @@ -1226,17 +1340,30 @@ CONFIG_FRAME_POINTER=y # CONFIG_KPROBES_SANITY_TEST is not set # CONFIG_BACKTRACE_SELF_TEST is not set # CONFIG_DEBUG_BLOCK_EXT_DEVT is not set +# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set # CONFIG_LKDTM is not set # CONFIG_FAULT_INJECTION is not set - -# -# Tracers -# +# CONFIG_PAGE_POISONING is not set +CONFIG_NOP_TRACER=y +CONFIG_RING_BUFFER=y +CONFIG_EVENT_TRACING=y +CONFIG_CONTEXT_SWITCH_TRACER=y +CONFIG_RING_BUFFER_ALLOW_SWAP=y +CONFIG_TRACING=y +CONFIG_TRACING_SUPPORT=y +CONFIG_FTRACE=y # CONFIG_IRQSOFF_TRACER is not set # CONFIG_SCHED_TRACER is not set -# CONFIG_CONTEXT_SWITCH_TRACER is not set +# CONFIG_ENABLE_DEFAULT_TRACERS is not set # CONFIG_BOOT_TRACER is not set -# CONFIG_DYNAMIC_PRINTK_DEBUG is not set +CONFIG_BRANCH_PROFILE_NONE=y +# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set +# CONFIG_PROFILE_ALL_BRANCHES is not set +# CONFIG_KMEMTRACE is not set +# CONFIG_WORKQUEUE_TRACER is not set +# 
CONFIG_BLK_DEV_IO_TRACE is not set +# CONFIG_RING_BUFFER_BENCHMARK is not set +# CONFIG_DYNAMIC_DEBUG is not set # CONFIG_SAMPLES is not set # @@ -1262,10 +1389,12 @@ CONFIG_CRYPTO_HASH=m CONFIG_CRYPTO_HASH2=y CONFIG_CRYPTO_RNG=m CONFIG_CRYPTO_RNG2=y +CONFIG_CRYPTO_PCOMP=y CONFIG_CRYPTO_MANAGER=m CONFIG_CRYPTO_MANAGER2=y # CONFIG_CRYPTO_GF128MUL is not set # CONFIG_CRYPTO_NULL is not set +CONFIG_CRYPTO_WORKQUEUE=y # CONFIG_CRYPTO_CRYPTD is not set CONFIG_CRYPTO_AUTHENC=m # CONFIG_CRYPTO_TEST is not set @@ -1293,11 +1422,13 @@ CONFIG_CRYPTO_CBC=m # CONFIG_CRYPTO_HMAC=m # CONFIG_CRYPTO_XCBC is not set +# CONFIG_CRYPTO_VMAC is not set # # Digest # # CONFIG_CRYPTO_CRC32C is not set +# CONFIG_CRYPTO_GHASH is not set # CONFIG_CRYPTO_MD4 is not set CONFIG_CRYPTO_MD5=m # CONFIG_CRYPTO_MICHAEL_MIC is not set @@ -1334,6 +1465,7 @@ CONFIG_CRYPTO_DES=m # Compression # CONFIG_CRYPTO_DEFLATE=y +# CONFIG_CRYPTO_ZLIB is not set CONFIG_CRYPTO_LZO=y # @@ -1341,11 +1473,13 @@ CONFIG_CRYPTO_LZO=y # CONFIG_CRYPTO_ANSI_CPRNG=m # CONFIG_CRYPTO_HW is not set +CONFIG_BINARY_PRINTF=y # # Library routines # CONFIG_BITREVERSE=y +CONFIG_GENERIC_FIND_LAST_BIT=y CONFIG_CRC_CCITT=m CONFIG_CRC16=y CONFIG_CRC_T10DIF=m @@ -1357,8 +1491,9 @@ CONFIG_ZLIB_INFLATE=y CONFIG_ZLIB_DEFLATE=y CONFIG_LZO_COMPRESS=y CONFIG_LZO_DECOMPRESS=y +CONFIG_DECOMPRESS_GZIP=y CONFIG_GENERIC_ALLOCATOR=y -CONFIG_PLIST=y CONFIG_HAS_IOMEM=y CONFIG_HAS_IOPORT=y CONFIG_HAS_DMA=y +CONFIG_NLATTR=y diff --git a/arch/avr32/include/asm/hardirq.h b/arch/avr32/include/asm/hardirq.h index 015bc75ea79..9e36e3ff77d 100644 --- a/arch/avr32/include/asm/hardirq.h +++ b/arch/avr32/include/asm/hardirq.h @@ -1,23 +1,6 @@ #ifndef __ASM_AVR32_HARDIRQ_H #define __ASM_AVR32_HARDIRQ_H - -#include <linux/threads.h> -#include <asm/irq.h> - #ifndef __ASSEMBLY__ - -#include <linux/cache.h> - -/* entry.S is sensitive to the offsets of these fields */ -typedef struct { - unsigned int __softirq_pending; -} ____cacheline_aligned irq_cpustat_t; - -void ack_bad_irq(unsigned int irq); - -/* Standard mappings for irq_cpustat_t above */ -#include <linux/irq_cpustat.h> - +#include <asm-generic/hardirq.h> #endif /* __ASSEMBLY__ */ - #endif /* __ASM_AVR32_HARDIRQ_H */ diff --git a/arch/avr32/kernel/irq.c b/arch/avr32/kernel/irq.c index 9f572229d31..9604f7758f9 100644 --- a/arch/avr32/kernel/irq.c +++ b/arch/avr32/kernel/irq.c @@ -16,15 +16,6 @@ #include <linux/seq_file.h> #include <linux/sysdev.h> -/* - * 'what should we do if we get a hw irq event on an illegal vector'. - * each architecture has to answer this themselves. - */ -void ack_bad_irq(unsigned int irq) -{ - printk("unexpected IRQ %u\n", irq); -} - /* May be overridden by platform code */ int __weak nmi_enable(void) { @@ -51,7 +42,7 @@ int show_interrupts(struct seq_file *p, void *v) } if (i < NR_IRQS) { - spin_lock_irqsave(&irq_desc[i].lock, flags); + raw_spin_lock_irqsave(&irq_desc[i].lock, flags); action = irq_desc[i].action; if (!action) goto unlock; @@ -66,7 +57,7 @@ int show_interrupts(struct seq_file *p, void *v) seq_putc(p, '\n'); unlock: - spin_unlock_irqrestore(&irq_desc[i].lock, flags); + raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags); } return 0; diff --git a/arch/avr32/kernel/vmlinux.lds.S b/arch/avr32/kernel/vmlinux.lds.S index c4b56654349..9cd2bd91d64 100644 --- a/arch/avr32/kernel/vmlinux.lds.S +++ b/arch/avr32/kernel/vmlinux.lds.S @@ -39,30 +39,10 @@ SECTIONS __tagtable_begin = .; *(.taglist.init) __tagtable_end = .; - INIT_DATA - . = ALIGN(16); - __setup_start = .; - *(.init.setup) - __setup_end = .; - . 
= ALIGN(4); - __initcall_start = .; - INITCALLS - __initcall_end = .; - __con_initcall_start = .; - *(.con_initcall.init) - __con_initcall_end = .; - __security_initcall_start = .; - *(.security_initcall.init) - __security_initcall_end = .; -#ifdef CONFIG_BLK_DEV_INITRD - . = ALIGN(32); - __initramfs_start = .; - *(.init.ramfs) - __initramfs_end = .; -#endif - . = ALIGN(PAGE_SIZE); - __init_end = .; } + INIT_DATA_SECTION(16) + . = ALIGN(PAGE_SIZE); + __init_end = .; .text : AT(ADDR(.text) - LOAD_OFFSET) { _evba = .; @@ -78,34 +58,16 @@ SECTIONS _etext = .; } = 0xd703d703 - . = ALIGN(4); - __ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) { - __start___ex_table = .; - *(__ex_table) - __stop___ex_table = .; - } - + EXCEPTION_TABLE(4) RODATA - . = ALIGN(THREAD_SIZE); - .data : AT(ADDR(.data) - LOAD_OFFSET) { _data = .; _sdata = .; - /* - * First, the init task union, aligned to an 8K boundary. - */ - *(.data.init_task) - /* Then, the page-aligned data */ - . = ALIGN(PAGE_SIZE); - *(.data.page_aligned) - - /* Then, the cacheline aligned data */ - . = ALIGN(L1_CACHE_BYTES); - *(.data.cacheline_aligned) - - /* And the rest... */ + INIT_TASK_DATA(THREAD_SIZE) + PAGE_ALIGNED_DATA(PAGE_SIZE); + CACHELINE_ALIGNED_DATA(L1_CACHE_BYTES) *(.data.rel*) DATA_DATA CONSTRUCTORS @@ -113,16 +75,8 @@ SECTIONS _edata = .; } - - . = ALIGN(8); - .bss : AT(ADDR(.bss) - LOAD_OFFSET) { - __bss_start = .; - *(.bss) - *(COMMON) - . = ALIGN(8); - __bss_stop = .; - _end = .; - } + BSS_SECTION(0, 8, 8) + _end = .; DWARF_DEBUG diff --git a/arch/avr32/mach-at32ap/at32ap700x.c b/arch/avr32/mach-at32ap/at32ap700x.c index eb9d4dc2e86..1aa1ea5e921 100644 --- a/arch/avr32/mach-at32ap/at32ap700x.c +++ b/arch/avr32/mach-at32ap/at32ap700x.c @@ -15,6 +15,8 @@ #include <linux/gpio.h> #include <linux/spi/spi.h> #include <linux/usb/atmel_usba_udc.h> + +#include <mach/atmel-mci.h> #include <linux/atmel-mci.h> #include <asm/io.h> @@ -1181,19 +1183,32 @@ static struct resource atmel_spi1_resource[] = { DEFINE_DEV(atmel_spi, 1); DEV_CLK(spi_clk, atmel_spi1, pba, 1); -static void __init -at32_spi_setup_slaves(unsigned int bus_num, struct spi_board_info *b, - unsigned int n, const u8 *pins) +void __init +at32_spi_setup_slaves(unsigned int bus_num, struct spi_board_info *b, unsigned int n) { + /* + * Manage the chipselects as GPIOs, normally using the same pins + * the SPI controller expects; but boards can use other pins. + */ + static u8 __initdata spi_pins[][4] = { + { GPIO_PIN_PA(3), GPIO_PIN_PA(4), + GPIO_PIN_PA(5), GPIO_PIN_PA(20) }, + { GPIO_PIN_PB(2), GPIO_PIN_PB(3), + GPIO_PIN_PB(4), GPIO_PIN_PA(27) }, + }; unsigned int pin, mode; + /* There are only 2 SPI controllers */ + if (bus_num > 1) + return; + for (; n; n--, b++) { b->bus_num = bus_num; if (b->chip_select >= 4) continue; pin = (unsigned)b->controller_data; if (!pin) { - pin = pins[b->chip_select]; + pin = spi_pins[bus_num][b->chip_select]; b->controller_data = (void *)pin; } mode = AT32_GPIOF_OUTPUT; @@ -1206,16 +1221,6 @@ at32_spi_setup_slaves(unsigned int bus_num, struct spi_board_info *b, struct platform_device *__init at32_add_device_spi(unsigned int id, struct spi_board_info *b, unsigned int n) { - /* - * Manage the chipselects as GPIOs, normally using the same pins - * the SPI controller expects; but boards can use other pins. 
- */ - static u8 __initdata spi0_pins[] = - { GPIO_PIN_PA(3), GPIO_PIN_PA(4), - GPIO_PIN_PA(5), GPIO_PIN_PA(20), }; - static u8 __initdata spi1_pins[] = - { GPIO_PIN_PB(2), GPIO_PIN_PB(3), - GPIO_PIN_PB(4), GPIO_PIN_PA(27), }; struct platform_device *pdev; u32 pin_mask; @@ -1228,7 +1233,7 @@ at32_add_device_spi(unsigned int id, struct spi_board_info *b, unsigned int n) select_peripheral(PIOA, (1 << 0), PERIPH_A, AT32_GPIOF_PULLUP); select_peripheral(PIOA, pin_mask, PERIPH_A, 0); - at32_spi_setup_slaves(0, b, n, spi0_pins); + at32_spi_setup_slaves(0, b, n); break; case 1: @@ -1239,7 +1244,7 @@ at32_add_device_spi(unsigned int id, struct spi_board_info *b, unsigned int n) select_peripheral(PIOB, (1 << 0), PERIPH_B, AT32_GPIOF_PULLUP); select_peripheral(PIOB, pin_mask, PERIPH_B, 0); - at32_spi_setup_slaves(1, b, n, spi1_pins); + at32_spi_setup_slaves(1, b, n); break; default: @@ -1320,7 +1325,7 @@ struct platform_device *__init at32_add_device_mci(unsigned int id, struct mci_platform_data *data) { struct platform_device *pdev; - struct dw_dma_slave *dws = &data->dma_slave; + struct mci_dma_slave *slave; u32 pioa_mask; u32 piob_mask; @@ -1339,13 +1344,17 @@ at32_add_device_mci(unsigned int id, struct mci_platform_data *data) ARRAY_SIZE(atmel_mci0_resource))) goto fail; - dws->dma_dev = &dw_dmac0_device.dev; - dws->reg_width = DW_DMA_SLAVE_WIDTH_32BIT; - dws->cfg_hi = (DWC_CFGH_SRC_PER(0) + slave = kzalloc(sizeof(struct mci_dma_slave), GFP_KERNEL); + + slave->sdata.dma_dev = &dw_dmac0_device.dev; + slave->sdata.reg_width = DW_DMA_SLAVE_WIDTH_32BIT; + slave->sdata.cfg_hi = (DWC_CFGH_SRC_PER(0) | DWC_CFGH_DST_PER(1)); - dws->cfg_lo &= ~(DWC_CFGL_HS_DST_POL + slave->sdata.cfg_lo &= ~(DWC_CFGL_HS_DST_POL | DWC_CFGL_HS_SRC_POL); + data->dma_slave = slave; + if (platform_device_add_data(pdev, data, sizeof(struct mci_platform_data))) goto fail; @@ -1411,6 +1420,8 @@ at32_add_device_mci(unsigned int id, struct mci_platform_data *data) return pdev; fail: + data->dma_slave = NULL; + kfree(slave); platform_device_put(pdev); return NULL; } diff --git a/arch/avr32/mach-at32ap/include/mach/atmel-mci.h b/arch/avr32/mach-at32ap/include/mach/atmel-mci.h new file mode 100644 index 00000000000..a9b38967f70 --- /dev/null +++ b/arch/avr32/mach-at32ap/include/mach/atmel-mci.h @@ -0,0 +1,24 @@ +#ifndef __MACH_ATMEL_MCI_H +#define __MACH_ATMEL_MCI_H + +#include <linux/dw_dmac.h> + +/** + * struct mci_dma_data - DMA data for MCI interface + */ +struct mci_dma_data { + struct dw_dma_slave sdata; +}; + +/* accessor macros */ +#define slave_data_ptr(s) (&(s)->sdata) +#define find_slave_dev(s) ((s)->sdata.dma_dev) + +#define setup_dma_addr(s, t, r) do { \ + if (s) { \ + (s)->sdata.tx_reg = (t); \ + (s)->sdata.rx_reg = (r); \ + } \ +} while (0) + +#endif /* __MACH_ATMEL_MCI_H */ diff --git a/arch/avr32/mach-at32ap/include/mach/board.h b/arch/avr32/mach-at32ap/include/mach/board.h index ddedb471f33..c7f25bb1d06 100644 --- a/arch/avr32/mach-at32ap/include/mach/board.h +++ b/arch/avr32/mach-at32ap/include/mach/board.h @@ -49,6 +49,7 @@ at32_add_device_eth(unsigned int id, struct eth_platform_data *data); struct spi_board_info; struct platform_device * at32_add_device_spi(unsigned int id, struct spi_board_info *b, unsigned int n); +void at32_spi_setup_slaves(unsigned int bus_num, struct spi_board_info *b, unsigned int n); struct atmel_lcdfb_info; struct platform_device * diff --git a/arch/blackfin/include/asm/spinlock.h b/arch/blackfin/include/asm/spinlock.h index b0c7f0ee4b0..1942ccfedbe 100644 --- 
a/arch/blackfin/include/asm/spinlock.h +++ b/arch/blackfin/include/asm/spinlock.h @@ -17,84 +17,84 @@ asmlinkage int __raw_spin_is_locked_asm(volatile int *ptr); asmlinkage void __raw_spin_lock_asm(volatile int *ptr); asmlinkage int __raw_spin_trylock_asm(volatile int *ptr); asmlinkage void __raw_spin_unlock_asm(volatile int *ptr); -asmlinkage void __raw_read_lock_asm(volatile int *ptr); -asmlinkage int __raw_read_trylock_asm(volatile int *ptr); -asmlinkage void __raw_read_unlock_asm(volatile int *ptr); -asmlinkage void __raw_write_lock_asm(volatile int *ptr); -asmlinkage int __raw_write_trylock_asm(volatile int *ptr); -asmlinkage void __raw_write_unlock_asm(volatile int *ptr); - -static inline int __raw_spin_is_locked(raw_spinlock_t *lock) +asmlinkage void arch_read_lock_asm(volatile int *ptr); +asmlinkage int arch_read_trylock_asm(volatile int *ptr); +asmlinkage void arch_read_unlock_asm(volatile int *ptr); +asmlinkage void arch_write_lock_asm(volatile int *ptr); +asmlinkage int arch_write_trylock_asm(volatile int *ptr); +asmlinkage void arch_write_unlock_asm(volatile int *ptr); + +static inline int arch_spin_is_locked(arch_spinlock_t *lock) { return __raw_spin_is_locked_asm(&lock->lock); } -static inline void __raw_spin_lock(raw_spinlock_t *lock) +static inline void arch_spin_lock(arch_spinlock_t *lock) { __raw_spin_lock_asm(&lock->lock); } -#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock) +#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock) -static inline int __raw_spin_trylock(raw_spinlock_t *lock) +static inline int arch_spin_trylock(arch_spinlock_t *lock) { return __raw_spin_trylock_asm(&lock->lock); } -static inline void __raw_spin_unlock(raw_spinlock_t *lock) +static inline void arch_spin_unlock(arch_spinlock_t *lock) { __raw_spin_unlock_asm(&lock->lock); } -static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock) +static inline void arch_spin_unlock_wait(arch_spinlock_t *lock) { - while (__raw_spin_is_locked(lock)) + while (arch_spin_is_locked(lock)) cpu_relax(); } -static inline int __raw_read_can_lock(raw_rwlock_t *rw) +static inline int arch_read_can_lock(arch_rwlock_t *rw) { return __raw_uncached_fetch_asm(&rw->lock) > 0; } -static inline int __raw_write_can_lock(raw_rwlock_t *rw) +static inline int arch_write_can_lock(arch_rwlock_t *rw) { return __raw_uncached_fetch_asm(&rw->lock) == RW_LOCK_BIAS; } -static inline void __raw_read_lock(raw_rwlock_t *rw) +static inline void arch_read_lock(arch_rwlock_t *rw) { - __raw_read_lock_asm(&rw->lock); + arch_read_lock_asm(&rw->lock); } -static inline int __raw_read_trylock(raw_rwlock_t *rw) +static inline int arch_read_trylock(arch_rwlock_t *rw) { - return __raw_read_trylock_asm(&rw->lock); + return arch_read_trylock_asm(&rw->lock); } -static inline void __raw_read_unlock(raw_rwlock_t *rw) +static inline void arch_read_unlock(arch_rwlock_t *rw) { - __raw_read_unlock_asm(&rw->lock); + arch_read_unlock_asm(&rw->lock); } -static inline void __raw_write_lock(raw_rwlock_t *rw) +static inline void arch_write_lock(arch_rwlock_t *rw) { - __raw_write_lock_asm(&rw->lock); + arch_write_lock_asm(&rw->lock); } -static inline int __raw_write_trylock(raw_rwlock_t *rw) +static inline int arch_write_trylock(arch_rwlock_t *rw) { - return __raw_write_trylock_asm(&rw->lock); + return arch_write_trylock_asm(&rw->lock); } -static inline void __raw_write_unlock(raw_rwlock_t *rw) +static inline void arch_write_unlock(arch_rwlock_t *rw) { - __raw_write_unlock_asm(&rw->lock); + arch_write_unlock_asm(&rw->lock); } -#define 
_raw_spin_relax(lock) cpu_relax() -#define _raw_read_relax(lock) cpu_relax() -#define _raw_write_relax(lock) cpu_relax() +#define arch_spin_relax(lock) cpu_relax() +#define arch_read_relax(lock) cpu_relax() +#define arch_write_relax(lock) cpu_relax() #endif diff --git a/arch/blackfin/include/asm/spinlock_types.h b/arch/blackfin/include/asm/spinlock_types.h index be75762c061..1a33608c958 100644 --- a/arch/blackfin/include/asm/spinlock_types.h +++ b/arch/blackfin/include/asm/spinlock_types.h @@ -15,14 +15,14 @@ typedef struct { volatile unsigned int lock; -} raw_spinlock_t; +} arch_spinlock_t; -#define __RAW_SPIN_LOCK_UNLOCKED { 0 } +#define __ARCH_SPIN_LOCK_UNLOCKED { 0 } typedef struct { volatile unsigned int lock; -} raw_rwlock_t; +} arch_rwlock_t; -#define __RAW_RW_LOCK_UNLOCKED { RW_LOCK_BIAS } +#define __ARCH_RW_LOCK_UNLOCKED { RW_LOCK_BIAS } #endif diff --git a/arch/blackfin/kernel/irqchip.c b/arch/blackfin/kernel/irqchip.c index db9f9c91f11..64cff54a8a5 100644 --- a/arch/blackfin/kernel/irqchip.c +++ b/arch/blackfin/kernel/irqchip.c @@ -23,7 +23,7 @@ void ack_bad_irq(unsigned int irq) static struct irq_desc bad_irq_desc = { .handle_irq = handle_bad_irq, - .lock = __SPIN_LOCK_UNLOCKED(irq_desc->lock), + .lock = __RAW_SPIN_LOCK_UNLOCKED(bad_irq_desc.lock), }; #ifdef CONFIG_CPUMASK_OFFSTACK @@ -39,7 +39,7 @@ int show_interrupts(struct seq_file *p, void *v) unsigned long flags; if (i < NR_IRQS) { - spin_lock_irqsave(&irq_desc[i].lock, flags); + raw_spin_lock_irqsave(&irq_desc[i].lock, flags); action = irq_desc[i].action; if (!action) goto skip; @@ -53,7 +53,7 @@ int show_interrupts(struct seq_file *p, void *v) seq_putc(p, '\n'); skip: - spin_unlock_irqrestore(&irq_desc[i].lock, flags); + raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags); } else if (i == NR_IRQS) { seq_printf(p, "NMI: "); for_each_online_cpu(j) diff --git a/arch/blackfin/kernel/traps.c b/arch/blackfin/kernel/traps.c index 78cb3d38f89..9636bace00e 100644 --- a/arch/blackfin/kernel/traps.c +++ b/arch/blackfin/kernel/traps.c @@ -1140,7 +1140,7 @@ void show_regs(struct pt_regs *fp) if (fp->ipend & ~0x3F) { for (i = 0; i < (NR_IRQS - 1); i++) { if (!in_atomic) - spin_lock_irqsave(&irq_desc[i].lock, flags); + raw_spin_lock_irqsave(&irq_desc[i].lock, flags); action = irq_desc[i].action; if (!action) @@ -1155,7 +1155,7 @@ void show_regs(struct pt_regs *fp) verbose_printk("\n"); unlock: if (!in_atomic) - spin_unlock_irqrestore(&irq_desc[i].lock, flags); + raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags); } } diff --git a/arch/cris/include/arch-v32/arch/spinlock.h b/arch/cris/include/arch-v32/arch/spinlock.h index 367a53ea10c..f171a6600fb 100644 --- a/arch/cris/include/arch-v32/arch/spinlock.h +++ b/arch/cris/include/arch-v32/arch/spinlock.h @@ -9,12 +9,12 @@ extern void cris_spin_unlock(void *l, int val); extern void cris_spin_lock(void *l); extern int cris_spin_trylock(void *l); -static inline int __raw_spin_is_locked(raw_spinlock_t *x) +static inline int arch_spin_is_locked(arch_spinlock_t *x) { return *(volatile signed char *)(&(x)->slock) <= 0; } -static inline void __raw_spin_unlock(raw_spinlock_t *lock) +static inline void arch_spin_unlock(arch_spinlock_t *lock) { __asm__ volatile ("move.d %1,%0" \ : "=m" (lock->slock) \ @@ -22,26 +22,26 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lock) : "memory"); } -static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock) +static inline void arch_spin_unlock_wait(arch_spinlock_t *lock) { - while (__raw_spin_is_locked(lock)) + while (arch_spin_is_locked(lock)) 
cpu_relax(); } -static inline int __raw_spin_trylock(raw_spinlock_t *lock) +static inline int arch_spin_trylock(arch_spinlock_t *lock) { return cris_spin_trylock((void *)&lock->slock); } -static inline void __raw_spin_lock(raw_spinlock_t *lock) +static inline void arch_spin_lock(arch_spinlock_t *lock) { cris_spin_lock((void *)&lock->slock); } static inline void -__raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags) +arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags) { - __raw_spin_lock(lock); + arch_spin_lock(lock); } /* @@ -56,76 +56,76 @@ __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags) * */ -static inline int __raw_read_can_lock(raw_rwlock_t *x) +static inline int arch_read_can_lock(arch_rwlock_t *x) { return (int)(x)->lock > 0; } -static inline int __raw_write_can_lock(raw_rwlock_t *x) +static inline int arch_write_can_lock(arch_rwlock_t *x) { return (x)->lock == RW_LOCK_BIAS; } -static inline void __raw_read_lock(raw_rwlock_t *rw) +static inline void arch_read_lock(arch_rwlock_t *rw) { - __raw_spin_lock(&rw->slock); + arch_spin_lock(&rw->slock); while (rw->lock == 0); rw->lock--; - __raw_spin_unlock(&rw->slock); + arch_spin_unlock(&rw->slock); } -static inline void __raw_write_lock(raw_rwlock_t *rw) +static inline void arch_write_lock(arch_rwlock_t *rw) { - __raw_spin_lock(&rw->slock); + arch_spin_lock(&rw->slock); while (rw->lock != RW_LOCK_BIAS); rw->lock = 0; - __raw_spin_unlock(&rw->slock); + arch_spin_unlock(&rw->slock); } -static inline void __raw_read_unlock(raw_rwlock_t *rw) +static inline void arch_read_unlock(arch_rwlock_t *rw) { - __raw_spin_lock(&rw->slock); + arch_spin_lock(&rw->slock); rw->lock++; - __raw_spin_unlock(&rw->slock); + arch_spin_unlock(&rw->slock); } -static inline void __raw_write_unlock(raw_rwlock_t *rw) +static inline void arch_write_unlock(arch_rwlock_t *rw) { - __raw_spin_lock(&rw->slock); + arch_spin_lock(&rw->slock); while (rw->lock != RW_LOCK_BIAS); rw->lock = RW_LOCK_BIAS; - __raw_spin_unlock(&rw->slock); + arch_spin_unlock(&rw->slock); } -static inline int __raw_read_trylock(raw_rwlock_t *rw) +static inline int arch_read_trylock(arch_rwlock_t *rw) { int ret = 0; - __raw_spin_lock(&rw->slock); + arch_spin_lock(&rw->slock); if (rw->lock != 0) { rw->lock--; ret = 1; } - __raw_spin_unlock(&rw->slock); + arch_spin_unlock(&rw->slock); return ret; } -static inline int __raw_write_trylock(raw_rwlock_t *rw) +static inline int arch_write_trylock(arch_rwlock_t *rw) { int ret = 0; - __raw_spin_lock(&rw->slock); + arch_spin_lock(&rw->slock); if (rw->lock == RW_LOCK_BIAS) { rw->lock = 0; ret = 1; } - __raw_spin_unlock(&rw->slock); + arch_spin_unlock(&rw->slock); return 1; } #define _raw_read_lock_flags(lock, flags) _raw_read_lock(lock) #define _raw_write_lock_flags(lock, flags) _raw_write_lock(lock) -#define _raw_spin_relax(lock) cpu_relax() -#define _raw_read_relax(lock) cpu_relax() -#define _raw_write_relax(lock) cpu_relax() +#define arch_spin_relax(lock) cpu_relax() +#define arch_read_relax(lock) cpu_relax() +#define arch_write_relax(lock) cpu_relax() #endif /* __ASM_ARCH_SPINLOCK_H */ diff --git a/arch/cris/kernel/irq.c b/arch/cris/kernel/irq.c index 0ca7d9892cc..b5ce0724a88 100644 --- a/arch/cris/kernel/irq.c +++ b/arch/cris/kernel/irq.c @@ -52,7 +52,7 @@ int show_interrupts(struct seq_file *p, void *v) } if (i < NR_IRQS) { - spin_lock_irqsave(&irq_desc[i].lock, flags); + raw_spin_lock_irqsave(&irq_desc[i].lock, flags); action = irq_desc[i].action; if (!action) goto skip; @@ -71,7 +71,7 @@ int show_interrupts(struct 
seq_file *p, void *v) seq_putc(p, '\n'); skip: - spin_unlock_irqrestore(&irq_desc[i].lock, flags); + raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags); } return 0; } diff --git a/arch/frv/kernel/irq.c b/arch/frv/kernel/irq.c index af3e824b91b..62d1aba615d 100644 --- a/arch/frv/kernel/irq.c +++ b/arch/frv/kernel/irq.c @@ -69,7 +69,7 @@ int show_interrupts(struct seq_file *p, void *v) } if (i < NR_IRQS) { - spin_lock_irqsave(&irq_desc[i].lock, flags); + raw_spin_lock_irqsave(&irq_desc[i].lock, flags); action = irq_desc[i].action; if (action) { seq_printf(p, "%3d: ", i); @@ -85,7 +85,7 @@ int show_interrupts(struct seq_file *p, void *v) seq_putc(p, '\n'); } - spin_unlock_irqrestore(&irq_desc[i].lock, flags); + raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags); } else if (i == NR_IRQS) { seq_printf(p, "Err: %10u\n", atomic_read(&irq_err_count)); } diff --git a/arch/h8300/kernel/irq.c b/arch/h8300/kernel/irq.c index 5c913d47211..c25dc2c2b1d 100644 --- a/arch/h8300/kernel/irq.c +++ b/arch/h8300/kernel/irq.c @@ -186,7 +186,7 @@ int show_interrupts(struct seq_file *p, void *v) seq_puts(p, " CPU0"); if (i < NR_IRQS) { - spin_lock_irqsave(&irq_desc[i].lock, flags); + raw_spin_lock_irqsave(&irq_desc[i].lock, flags); action = irq_desc[i].action; if (!action) goto unlock; @@ -200,7 +200,7 @@ int show_interrupts(struct seq_file *p, void *v) seq_printf(p, ", %s", action->name); seq_putc(p, '\n'); unlock: - spin_unlock_irqrestore(&irq_desc[i].lock, flags); + raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags); } return 0; } diff --git a/arch/ia64/include/asm/bitops.h b/arch/ia64/include/asm/bitops.h index 57a2787bc9f..6ebc229a1c5 100644 --- a/arch/ia64/include/asm/bitops.h +++ b/arch/ia64/include/asm/bitops.h @@ -127,7 +127,7 @@ clear_bit_unlock (int nr, volatile void *addr) * @addr: Address to start counting from * * Similarly to clear_bit_unlock, the implementation uses a store - * with release semantics. See also __raw_spin_unlock(). + * with release semantics. See also arch_spin_unlock(). 
*/ static __inline__ void __clear_bit_unlock(int nr, void *addr) diff --git a/arch/ia64/include/asm/numa.h b/arch/ia64/include/asm/numa.h index 3499ff57bf4..6a8a27cfae3 100644 --- a/arch/ia64/include/asm/numa.h +++ b/arch/ia64/include/asm/numa.h @@ -22,8 +22,6 @@ #include <asm/mmzone.h> -#define NUMA_NO_NODE -1 - extern u16 cpu_to_node_map[NR_CPUS] __cacheline_aligned; extern cpumask_t node_to_cpu_mask[MAX_NUMNODES] __cacheline_aligned; extern pg_data_t *pgdat_list[MAX_NUMNODES]; diff --git a/arch/ia64/include/asm/spinlock.h b/arch/ia64/include/asm/spinlock.h index 239ecdc9516..1a91c9121d1 100644 --- a/arch/ia64/include/asm/spinlock.h +++ b/arch/ia64/include/asm/spinlock.h @@ -17,7 +17,7 @@ #include <asm/intrinsics.h> #include <asm/system.h> -#define __raw_spin_lock_init(x) ((x)->lock = 0) +#define arch_spin_lock_init(x) ((x)->lock = 0) /* * Ticket locks are conceptually two parts, one indicating the current head of @@ -38,7 +38,7 @@ #define TICKET_BITS 15 #define TICKET_MASK ((1 << TICKET_BITS) - 1) -static __always_inline void __ticket_spin_lock(raw_spinlock_t *lock) +static __always_inline void __ticket_spin_lock(arch_spinlock_t *lock) { int *p = (int *)&lock->lock, ticket, serve; @@ -58,7 +58,7 @@ static __always_inline void __ticket_spin_lock(raw_spinlock_t *lock) } } -static __always_inline int __ticket_spin_trylock(raw_spinlock_t *lock) +static __always_inline int __ticket_spin_trylock(arch_spinlock_t *lock) { int tmp = ACCESS_ONCE(lock->lock); @@ -67,7 +67,7 @@ static __always_inline int __ticket_spin_trylock(raw_spinlock_t *lock) return 0; } -static __always_inline void __ticket_spin_unlock(raw_spinlock_t *lock) +static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock) { unsigned short *p = (unsigned short *)&lock->lock + 1, tmp; @@ -75,7 +75,7 @@ static __always_inline void __ticket_spin_unlock(raw_spinlock_t *lock) ACCESS_ONCE(*p) = (tmp + 2) & ~1; } -static __always_inline void __ticket_spin_unlock_wait(raw_spinlock_t *lock) +static __always_inline void __ticket_spin_unlock_wait(arch_spinlock_t *lock) { int *p = (int *)&lock->lock, ticket; @@ -89,64 +89,64 @@ static __always_inline void __ticket_spin_unlock_wait(raw_spinlock_t *lock) } } -static inline int __ticket_spin_is_locked(raw_spinlock_t *lock) +static inline int __ticket_spin_is_locked(arch_spinlock_t *lock) { long tmp = ACCESS_ONCE(lock->lock); return !!(((tmp >> TICKET_SHIFT) ^ tmp) & TICKET_MASK); } -static inline int __ticket_spin_is_contended(raw_spinlock_t *lock) +static inline int __ticket_spin_is_contended(arch_spinlock_t *lock) { long tmp = ACCESS_ONCE(lock->lock); return ((tmp - (tmp >> TICKET_SHIFT)) & TICKET_MASK) > 1; } -static inline int __raw_spin_is_locked(raw_spinlock_t *lock) +static inline int arch_spin_is_locked(arch_spinlock_t *lock) { return __ticket_spin_is_locked(lock); } -static inline int __raw_spin_is_contended(raw_spinlock_t *lock) +static inline int arch_spin_is_contended(arch_spinlock_t *lock) { return __ticket_spin_is_contended(lock); } -#define __raw_spin_is_contended __raw_spin_is_contended +#define arch_spin_is_contended arch_spin_is_contended -static __always_inline void __raw_spin_lock(raw_spinlock_t *lock) +static __always_inline void arch_spin_lock(arch_spinlock_t *lock) { __ticket_spin_lock(lock); } -static __always_inline int __raw_spin_trylock(raw_spinlock_t *lock) +static __always_inline int arch_spin_trylock(arch_spinlock_t *lock) { return __ticket_spin_trylock(lock); } -static __always_inline void __raw_spin_unlock(raw_spinlock_t *lock) +static __always_inline 
void arch_spin_unlock(arch_spinlock_t *lock) { __ticket_spin_unlock(lock); } -static __always_inline void __raw_spin_lock_flags(raw_spinlock_t *lock, +static __always_inline void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags) { - __raw_spin_lock(lock); + arch_spin_lock(lock); } -static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock) +static inline void arch_spin_unlock_wait(arch_spinlock_t *lock) { __ticket_spin_unlock_wait(lock); } -#define __raw_read_can_lock(rw) (*(volatile int *)(rw) >= 0) -#define __raw_write_can_lock(rw) (*(volatile int *)(rw) == 0) +#define arch_read_can_lock(rw) (*(volatile int *)(rw) >= 0) +#define arch_write_can_lock(rw) (*(volatile int *)(rw) == 0) #ifdef ASM_SUPPORTED static __always_inline void -__raw_read_lock_flags(raw_rwlock_t *lock, unsigned long flags) +arch_read_lock_flags(arch_rwlock_t *lock, unsigned long flags) { __asm__ __volatile__ ( "tbit.nz p6, p0 = %1,%2\n" @@ -169,15 +169,15 @@ __raw_read_lock_flags(raw_rwlock_t *lock, unsigned long flags) : "p6", "p7", "r2", "memory"); } -#define __raw_read_lock(lock) __raw_read_lock_flags(lock, 0) +#define arch_read_lock(lock) arch_read_lock_flags(lock, 0) #else /* !ASM_SUPPORTED */ -#define __raw_read_lock_flags(rw, flags) __raw_read_lock(rw) +#define arch_read_lock_flags(rw, flags) arch_read_lock(rw) -#define __raw_read_lock(rw) \ +#define arch_read_lock(rw) \ do { \ - raw_rwlock_t *__read_lock_ptr = (rw); \ + arch_rwlock_t *__read_lock_ptr = (rw); \ \ while (unlikely(ia64_fetchadd(1, (int *) __read_lock_ptr, acq) < 0)) { \ ia64_fetchadd(-1, (int *) __read_lock_ptr, rel); \ @@ -188,16 +188,16 @@ do { \ #endif /* !ASM_SUPPORTED */ -#define __raw_read_unlock(rw) \ +#define arch_read_unlock(rw) \ do { \ - raw_rwlock_t *__read_lock_ptr = (rw); \ + arch_rwlock_t *__read_lock_ptr = (rw); \ ia64_fetchadd(-1, (int *) __read_lock_ptr, rel); \ } while (0) #ifdef ASM_SUPPORTED static __always_inline void -__raw_write_lock_flags(raw_rwlock_t *lock, unsigned long flags) +arch_write_lock_flags(arch_rwlock_t *lock, unsigned long flags) { __asm__ __volatile__ ( "tbit.nz p6, p0 = %1, %2\n" @@ -221,9 +221,9 @@ __raw_write_lock_flags(raw_rwlock_t *lock, unsigned long flags) : "ar.ccv", "p6", "p7", "r2", "r29", "memory"); } -#define __raw_write_lock(rw) __raw_write_lock_flags(rw, 0) +#define arch_write_lock(rw) arch_write_lock_flags(rw, 0) -#define __raw_write_trylock(rw) \ +#define arch_write_trylock(rw) \ ({ \ register long result; \ \ @@ -235,7 +235,7 @@ __raw_write_lock_flags(raw_rwlock_t *lock, unsigned long flags) (result == 0); \ }) -static inline void __raw_write_unlock(raw_rwlock_t *x) +static inline void arch_write_unlock(arch_rwlock_t *x) { u8 *y = (u8 *)x; barrier(); @@ -244,9 +244,9 @@ static inline void __raw_write_unlock(raw_rwlock_t *x) #else /* !ASM_SUPPORTED */ -#define __raw_write_lock_flags(l, flags) __raw_write_lock(l) +#define arch_write_lock_flags(l, flags) arch_write_lock(l) -#define __raw_write_lock(l) \ +#define arch_write_lock(l) \ ({ \ __u64 ia64_val, ia64_set_val = ia64_dep_mi(-1, 0, 31, 1); \ __u32 *ia64_write_lock_ptr = (__u32 *) (l); \ @@ -257,7 +257,7 @@ static inline void __raw_write_unlock(raw_rwlock_t *x) } while (ia64_val); \ }) -#define __raw_write_trylock(rw) \ +#define arch_write_trylock(rw) \ ({ \ __u64 ia64_val; \ __u64 ia64_set_val = ia64_dep_mi(-1, 0, 31,1); \ @@ -265,7 +265,7 @@ static inline void __raw_write_unlock(raw_rwlock_t *x) (ia64_val == 0); \ }) -static inline void __raw_write_unlock(raw_rwlock_t *x) +static inline void 
arch_write_unlock(arch_rwlock_t *x) { barrier(); x->write_lock = 0; @@ -273,10 +273,10 @@ static inline void __raw_write_unlock(raw_rwlock_t *x) #endif /* !ASM_SUPPORTED */ -static inline int __raw_read_trylock(raw_rwlock_t *x) +static inline int arch_read_trylock(arch_rwlock_t *x) { union { - raw_rwlock_t lock; + arch_rwlock_t lock; __u32 word; } old, new; old.lock = new.lock = *x; @@ -285,8 +285,8 @@ static inline int __raw_read_trylock(raw_rwlock_t *x) return (u32)ia64_cmpxchg4_acq((__u32 *)(x), new.word, old.word) == old.word; } -#define _raw_spin_relax(lock) cpu_relax() -#define _raw_read_relax(lock) cpu_relax() -#define _raw_write_relax(lock) cpu_relax() +#define arch_spin_relax(lock) cpu_relax() +#define arch_read_relax(lock) cpu_relax() +#define arch_write_relax(lock) cpu_relax() #endif /* _ASM_IA64_SPINLOCK_H */ diff --git a/arch/ia64/include/asm/spinlock_types.h b/arch/ia64/include/asm/spinlock_types.h index 474e46f1ab4..e2b42a52a6d 100644 --- a/arch/ia64/include/asm/spinlock_types.h +++ b/arch/ia64/include/asm/spinlock_types.h @@ -7,15 +7,15 @@ typedef struct { volatile unsigned int lock; -} raw_spinlock_t; +} arch_spinlock_t; -#define __RAW_SPIN_LOCK_UNLOCKED { 0 } +#define __ARCH_SPIN_LOCK_UNLOCKED { 0 } typedef struct { volatile unsigned int read_counter : 31; volatile unsigned int write_lock : 1; -} raw_rwlock_t; +} arch_rwlock_t; -#define __RAW_RW_LOCK_UNLOCKED { 0, 0 } +#define __ARCH_RW_LOCK_UNLOCKED { 0, 0 } #endif diff --git a/arch/ia64/kernel/iosapic.c b/arch/ia64/kernel/iosapic.c index dab4d393908..95ac77aeae9 100644 --- a/arch/ia64/kernel/iosapic.c +++ b/arch/ia64/kernel/iosapic.c @@ -793,12 +793,12 @@ iosapic_register_intr (unsigned int gsi, goto unlock_iosapic_lock; } - spin_lock(&irq_desc[irq].lock); + raw_spin_lock(&irq_desc[irq].lock); dest = get_target_cpu(gsi, irq); dmode = choose_dmode(); err = register_intr(gsi, irq, dmode, polarity, trigger); if (err < 0) { - spin_unlock(&irq_desc[irq].lock); + raw_spin_unlock(&irq_desc[irq].lock); irq = err; goto unlock_iosapic_lock; } @@ -817,7 +817,7 @@ iosapic_register_intr (unsigned int gsi, (polarity == IOSAPIC_POL_HIGH ? 
"high" : "low"), cpu_logical_id(dest), dest, irq_to_vector(irq)); - spin_unlock(&irq_desc[irq].lock); + raw_spin_unlock(&irq_desc[irq].lock); unlock_iosapic_lock: spin_unlock_irqrestore(&iosapic_lock, flags); return irq; diff --git a/arch/ia64/kernel/irq.c b/arch/ia64/kernel/irq.c index 7d8951229e7..94ee9d067cb 100644 --- a/arch/ia64/kernel/irq.c +++ b/arch/ia64/kernel/irq.c @@ -71,7 +71,7 @@ int show_interrupts(struct seq_file *p, void *v) } if (i < NR_IRQS) { - spin_lock_irqsave(&irq_desc[i].lock, flags); + raw_spin_lock_irqsave(&irq_desc[i].lock, flags); action = irq_desc[i].action; if (!action) goto skip; @@ -91,7 +91,7 @@ int show_interrupts(struct seq_file *p, void *v) seq_putc(p, '\n'); skip: - spin_unlock_irqrestore(&irq_desc[i].lock, flags); + raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags); } else if (i == NR_IRQS) seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count)); return 0; diff --git a/arch/ia64/kernel/irq_ia64.c b/arch/ia64/kernel/irq_ia64.c index dd9d7b54f1a..70e4bad2343 100644 --- a/arch/ia64/kernel/irq_ia64.c +++ b/arch/ia64/kernel/irq_ia64.c @@ -345,7 +345,7 @@ static irqreturn_t smp_irq_move_cleanup_interrupt(int irq, void *dev_id) desc = irq_desc + irq; cfg = irq_cfg + irq; - spin_lock(&desc->lock); + raw_spin_lock(&desc->lock); if (!cfg->move_cleanup_count) goto unlock; @@ -358,7 +358,7 @@ static irqreturn_t smp_irq_move_cleanup_interrupt(int irq, void *dev_id) spin_unlock_irqrestore(&vector_lock, flags); cfg->move_cleanup_count--; unlock: - spin_unlock(&desc->lock); + raw_spin_unlock(&desc->lock); } return IRQ_HANDLED; } diff --git a/arch/m32r/include/asm/spinlock.h b/arch/m32r/include/asm/spinlock.h index dded923883b..179a06489b1 100644 --- a/arch/m32r/include/asm/spinlock.h +++ b/arch/m32r/include/asm/spinlock.h @@ -24,19 +24,19 @@ * We make no fairness assumptions. They have a cost. */ -#define __raw_spin_is_locked(x) (*(volatile int *)(&(x)->slock) <= 0) -#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock) -#define __raw_spin_unlock_wait(x) \ - do { cpu_relax(); } while (__raw_spin_is_locked(x)) +#define arch_spin_is_locked(x) (*(volatile int *)(&(x)->slock) <= 0) +#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock) +#define arch_spin_unlock_wait(x) \ + do { cpu_relax(); } while (arch_spin_is_locked(x)) /** - * __raw_spin_trylock - Try spin lock and return a result + * arch_spin_trylock - Try spin lock and return a result * @lock: Pointer to the lock variable * - * __raw_spin_trylock() tries to get the lock and returns a result. + * arch_spin_trylock() tries to get the lock and returns a result. * On the m32r, the result value is 1 (= Success) or 0 (= Failure). 
*/ -static inline int __raw_spin_trylock(raw_spinlock_t *lock) +static inline int arch_spin_trylock(arch_spinlock_t *lock) { int oldval; unsigned long tmp1, tmp2; @@ -50,7 +50,7 @@ static inline int __raw_spin_trylock(raw_spinlock_t *lock) * } */ __asm__ __volatile__ ( - "# __raw_spin_trylock \n\t" + "# arch_spin_trylock \n\t" "ldi %1, #0; \n\t" "mvfc %2, psw; \n\t" "clrpsw #0x40 -> nop; \n\t" @@ -69,7 +69,7 @@ static inline int __raw_spin_trylock(raw_spinlock_t *lock) return (oldval > 0); } -static inline void __raw_spin_lock(raw_spinlock_t *lock) +static inline void arch_spin_lock(arch_spinlock_t *lock) { unsigned long tmp0, tmp1; @@ -84,7 +84,7 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock) * } */ __asm__ __volatile__ ( - "# __raw_spin_lock \n\t" + "# arch_spin_lock \n\t" ".fillinsn \n" "1: \n\t" "mvfc %1, psw; \n\t" @@ -111,7 +111,7 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock) ); } -static inline void __raw_spin_unlock(raw_spinlock_t *lock) +static inline void arch_spin_unlock(arch_spinlock_t *lock) { mb(); lock->slock = 1; @@ -140,15 +140,15 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lock) * read_can_lock - would read_trylock() succeed? * @lock: the rwlock in question. */ -#define __raw_read_can_lock(x) ((int)(x)->lock > 0) +#define arch_read_can_lock(x) ((int)(x)->lock > 0) /** * write_can_lock - would write_trylock() succeed? * @lock: the rwlock in question. */ -#define __raw_write_can_lock(x) ((x)->lock == RW_LOCK_BIAS) +#define arch_write_can_lock(x) ((x)->lock == RW_LOCK_BIAS) -static inline void __raw_read_lock(raw_rwlock_t *rw) +static inline void arch_read_lock(arch_rwlock_t *rw) { unsigned long tmp0, tmp1; @@ -199,7 +199,7 @@ static inline void __raw_read_lock(raw_rwlock_t *rw) ); } -static inline void __raw_write_lock(raw_rwlock_t *rw) +static inline void arch_write_lock(arch_rwlock_t *rw) { unsigned long tmp0, tmp1, tmp2; @@ -252,7 +252,7 @@ static inline void __raw_write_lock(raw_rwlock_t *rw) ); } -static inline void __raw_read_unlock(raw_rwlock_t *rw) +static inline void arch_read_unlock(arch_rwlock_t *rw) { unsigned long tmp0, tmp1; @@ -274,7 +274,7 @@ static inline void __raw_read_unlock(raw_rwlock_t *rw) ); } -static inline void __raw_write_unlock(raw_rwlock_t *rw) +static inline void arch_write_unlock(arch_rwlock_t *rw) { unsigned long tmp0, tmp1, tmp2; @@ -298,7 +298,7 @@ static inline void __raw_write_unlock(raw_rwlock_t *rw) ); } -static inline int __raw_read_trylock(raw_rwlock_t *lock) +static inline int arch_read_trylock(arch_rwlock_t *lock) { atomic_t *count = (atomic_t*)lock; if (atomic_dec_return(count) >= 0) @@ -307,7 +307,7 @@ static inline int __raw_read_trylock(raw_rwlock_t *lock) return 0; } -static inline int __raw_write_trylock(raw_rwlock_t *lock) +static inline int arch_write_trylock(arch_rwlock_t *lock) { atomic_t *count = (atomic_t *)lock; if (atomic_sub_and_test(RW_LOCK_BIAS, count)) @@ -316,11 +316,11 @@ static inline int __raw_write_trylock(raw_rwlock_t *lock) return 0; } -#define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock) -#define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock) +#define arch_read_lock_flags(lock, flags) arch_read_lock(lock) +#define arch_write_lock_flags(lock, flags) arch_write_lock(lock) -#define _raw_spin_relax(lock) cpu_relax() -#define _raw_read_relax(lock) cpu_relax() -#define _raw_write_relax(lock) cpu_relax() +#define arch_spin_relax(lock) cpu_relax() +#define arch_read_relax(lock) cpu_relax() +#define arch_write_relax(lock) cpu_relax() #endif /* 
_ASM_M32R_SPINLOCK_H */ diff --git a/arch/m32r/include/asm/spinlock_types.h b/arch/m32r/include/asm/spinlock_types.h index 83f52105c0e..92e27672661 100644 --- a/arch/m32r/include/asm/spinlock_types.h +++ b/arch/m32r/include/asm/spinlock_types.h @@ -7,17 +7,17 @@ typedef struct { volatile int slock; -} raw_spinlock_t; +} arch_spinlock_t; -#define __RAW_SPIN_LOCK_UNLOCKED { 1 } +#define __ARCH_SPIN_LOCK_UNLOCKED { 1 } typedef struct { volatile int lock; -} raw_rwlock_t; +} arch_rwlock_t; #define RW_LOCK_BIAS 0x01000000 #define RW_LOCK_BIAS_STR "0x01000000" -#define __RAW_RW_LOCK_UNLOCKED { RW_LOCK_BIAS } +#define __ARCH_RW_LOCK_UNLOCKED { RW_LOCK_BIAS } #endif /* _ASM_M32R_SPINLOCK_TYPES_H */ diff --git a/arch/m32r/kernel/irq.c b/arch/m32r/kernel/irq.c index 8dfd31e87c4..3c71f776872 100644 --- a/arch/m32r/kernel/irq.c +++ b/arch/m32r/kernel/irq.c @@ -40,7 +40,7 @@ int show_interrupts(struct seq_file *p, void *v) } if (i < NR_IRQS) { - spin_lock_irqsave(&irq_desc[i].lock, flags); + raw_spin_lock_irqsave(&irq_desc[i].lock, flags); action = irq_desc[i].action; if (!action) goto skip; @@ -59,7 +59,7 @@ int show_interrupts(struct seq_file *p, void *v) seq_putc(p, '\n'); skip: - spin_unlock_irqrestore(&irq_desc[i].lock, flags); + raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags); } return 0; } diff --git a/arch/microblaze/kernel/irq.c b/arch/microblaze/kernel/irq.c index 7d5ddd62d4d..0f06034d1fe 100644 --- a/arch/microblaze/kernel/irq.c +++ b/arch/microblaze/kernel/irq.c @@ -68,7 +68,7 @@ int show_interrupts(struct seq_file *p, void *v) } if (i < nr_irq) { - spin_lock_irqsave(&irq_desc[i].lock, flags); + raw_spin_lock_irqsave(&irq_desc[i].lock, flags); action = irq_desc[i].action; if (!action) goto skip; @@ -89,7 +89,7 @@ int show_interrupts(struct seq_file *p, void *v) seq_putc(p, '\n'); skip: - spin_unlock_irqrestore(&irq_desc[i].lock, flags); + raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags); } return 0; } diff --git a/arch/mips/include/asm/spinlock.h b/arch/mips/include/asm/spinlock.h index 5b60a09a0f0..21ef9efbde4 100644 --- a/arch/mips/include/asm/spinlock.h +++ b/arch/mips/include/asm/spinlock.h @@ -34,33 +34,33 @@ * becomes equal to the the initial value of the tail. 
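The ticket scheme described above (ia64 and MIPS use the same idea) hands every arrival a ticket taken from the tail counter and serves tickets in order from the head counter, which makes the lock FIFO-fair. A compact user-space sketch follows, with the two counters kept as separate words for clarity; the kernel packs them into a single word, and the demo_* names are illustrative only.

#include <stdatomic.h>

typedef struct {
	atomic_uint next;	/* "tail": ticket handed to the next arrival */
	atomic_uint serving;	/* "head": ticket currently being served */
} demo_ticket_lock_t;

static void demo_ticket_lock(demo_ticket_lock_t *l)
{
	unsigned int me = atomic_fetch_add_explicit(&l->next, 1, memory_order_relaxed);

	while (atomic_load_explicit(&l->serving, memory_order_acquire) != me)
		;	/* spin until our ticket comes up */
}

static void demo_ticket_unlock(demo_ticket_lock_t *l)
{
	atomic_fetch_add_explicit(&l->serving, 1, memory_order_release);
}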
*/ -static inline int __raw_spin_is_locked(raw_spinlock_t *lock) +static inline int arch_spin_is_locked(arch_spinlock_t *lock) { unsigned int counters = ACCESS_ONCE(lock->lock); return ((counters >> 14) ^ counters) & 0x1fff; } -#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock) -#define __raw_spin_unlock_wait(x) \ - while (__raw_spin_is_locked(x)) { cpu_relax(); } +#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock) +#define arch_spin_unlock_wait(x) \ + while (arch_spin_is_locked(x)) { cpu_relax(); } -static inline int __raw_spin_is_contended(raw_spinlock_t *lock) +static inline int arch_spin_is_contended(arch_spinlock_t *lock) { unsigned int counters = ACCESS_ONCE(lock->lock); return (((counters >> 14) - counters) & 0x1fff) > 1; } -#define __raw_spin_is_contended __raw_spin_is_contended +#define arch_spin_is_contended arch_spin_is_contended -static inline void __raw_spin_lock(raw_spinlock_t *lock) +static inline void arch_spin_lock(arch_spinlock_t *lock) { int my_ticket; int tmp; if (R10000_LLSC_WAR) { __asm__ __volatile__ ( - " .set push # __raw_spin_lock \n" + " .set push # arch_spin_lock \n" " .set noreorder \n" " \n" "1: ll %[ticket], %[ticket_ptr] \n" @@ -94,7 +94,7 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock) [my_ticket] "=&r" (my_ticket)); } else { __asm__ __volatile__ ( - " .set push # __raw_spin_lock \n" + " .set push # arch_spin_lock \n" " .set noreorder \n" " \n" " ll %[ticket], %[ticket_ptr] \n" @@ -134,7 +134,7 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock) smp_llsc_mb(); } -static inline void __raw_spin_unlock(raw_spinlock_t *lock) +static inline void arch_spin_unlock(arch_spinlock_t *lock) { int tmp; @@ -142,7 +142,7 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lock) if (R10000_LLSC_WAR) { __asm__ __volatile__ ( - " # __raw_spin_unlock \n" + " # arch_spin_unlock \n" "1: ll %[ticket], %[ticket_ptr] \n" " addiu %[ticket], %[ticket], 1 \n" " ori %[ticket], %[ticket], 0x2000 \n" @@ -153,7 +153,7 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lock) [ticket] "=&r" (tmp)); } else { __asm__ __volatile__ ( - " .set push # __raw_spin_unlock \n" + " .set push # arch_spin_unlock \n" " .set noreorder \n" " \n" " ll %[ticket], %[ticket_ptr] \n" @@ -174,13 +174,13 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lock) } } -static inline unsigned int __raw_spin_trylock(raw_spinlock_t *lock) +static inline unsigned int arch_spin_trylock(arch_spinlock_t *lock) { int tmp, tmp2, tmp3; if (R10000_LLSC_WAR) { __asm__ __volatile__ ( - " .set push # __raw_spin_trylock \n" + " .set push # arch_spin_trylock \n" " .set noreorder \n" " \n" "1: ll %[ticket], %[ticket_ptr] \n" @@ -204,7 +204,7 @@ static inline unsigned int __raw_spin_trylock(raw_spinlock_t *lock) [now_serving] "=&r" (tmp3)); } else { __asm__ __volatile__ ( - " .set push # __raw_spin_trylock \n" + " .set push # arch_spin_trylock \n" " .set noreorder \n" " \n" " ll %[ticket], %[ticket_ptr] \n" @@ -248,21 +248,21 @@ static inline unsigned int __raw_spin_trylock(raw_spinlock_t *lock) * read_can_lock - would read_trylock() succeed? * @lock: the rwlock in question. */ -#define __raw_read_can_lock(rw) ((rw)->lock >= 0) +#define arch_read_can_lock(rw) ((rw)->lock >= 0) /* * write_can_lock - would write_trylock() succeed? * @lock: the rwlock in question. 
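The read_can_lock()/write_can_lock() tests in this hunk encode the MIPS rwlock layout: a non-negative value counts readers and the sign bit marks a writer. Below is a user-space sketch of trylock operations on that layout; it uses C11 atomics and illustrative demo_* names, and leaves out the LL/SC retry details the kernel versions carry.

#include <limits.h>
#include <stdatomic.h>

static atomic_int demo_rwlock;		/* >= 0: reader count, sign bit: writer */

static int demo_read_trylock(void)	/* 1 = success, 0 = failure */
{
	int old = atomic_load_explicit(&demo_rwlock, memory_order_relaxed);

	while (old >= 0) {		/* readers may enter while no writer holds it */
		if (atomic_compare_exchange_weak_explicit(&demo_rwlock, &old, old + 1,
				memory_order_acquire, memory_order_relaxed))
			return 1;
	}
	return 0;
}

static void demo_read_unlock(void)
{
	atomic_fetch_sub_explicit(&demo_rwlock, 1, memory_order_release);
}

static int demo_write_trylock(void)
{
	int expected = 0;		/* a writer needs the lock completely idle */

	return atomic_compare_exchange_strong_explicit(&demo_rwlock, &expected,
			INT_MIN, memory_order_acquire, memory_order_relaxed);
}

static void demo_write_unlock(void)
{
	atomic_store_explicit(&demo_rwlock, 0, memory_order_release);
}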
*/ -#define __raw_write_can_lock(rw) (!(rw)->lock) +#define arch_write_can_lock(rw) (!(rw)->lock) -static inline void __raw_read_lock(raw_rwlock_t *rw) +static inline void arch_read_lock(arch_rwlock_t *rw) { unsigned int tmp; if (R10000_LLSC_WAR) { __asm__ __volatile__( - " .set noreorder # __raw_read_lock \n" + " .set noreorder # arch_read_lock \n" "1: ll %1, %2 \n" " bltz %1, 1b \n" " addu %1, 1 \n" @@ -275,7 +275,7 @@ static inline void __raw_read_lock(raw_rwlock_t *rw) : "memory"); } else { __asm__ __volatile__( - " .set noreorder # __raw_read_lock \n" + " .set noreorder # arch_read_lock \n" "1: ll %1, %2 \n" " bltz %1, 2f \n" " addu %1, 1 \n" @@ -301,7 +301,7 @@ static inline void __raw_read_lock(raw_rwlock_t *rw) /* Note the use of sub, not subu which will make the kernel die with an overflow exception if we ever try to unlock an rwlock that is already unlocked or is being held by a writer. */ -static inline void __raw_read_unlock(raw_rwlock_t *rw) +static inline void arch_read_unlock(arch_rwlock_t *rw) { unsigned int tmp; @@ -309,7 +309,7 @@ static inline void __raw_read_unlock(raw_rwlock_t *rw) if (R10000_LLSC_WAR) { __asm__ __volatile__( - "1: ll %1, %2 # __raw_read_unlock \n" + "1: ll %1, %2 # arch_read_unlock \n" " sub %1, 1 \n" " sc %1, %0 \n" " beqzl %1, 1b \n" @@ -318,7 +318,7 @@ static inline void __raw_read_unlock(raw_rwlock_t *rw) : "memory"); } else { __asm__ __volatile__( - " .set noreorder # __raw_read_unlock \n" + " .set noreorder # arch_read_unlock \n" "1: ll %1, %2 \n" " sub %1, 1 \n" " sc %1, %0 \n" @@ -335,13 +335,13 @@ static inline void __raw_read_unlock(raw_rwlock_t *rw) } } -static inline void __raw_write_lock(raw_rwlock_t *rw) +static inline void arch_write_lock(arch_rwlock_t *rw) { unsigned int tmp; if (R10000_LLSC_WAR) { __asm__ __volatile__( - " .set noreorder # __raw_write_lock \n" + " .set noreorder # arch_write_lock \n" "1: ll %1, %2 \n" " bnez %1, 1b \n" " lui %1, 0x8000 \n" @@ -354,7 +354,7 @@ static inline void __raw_write_lock(raw_rwlock_t *rw) : "memory"); } else { __asm__ __volatile__( - " .set noreorder # __raw_write_lock \n" + " .set noreorder # arch_write_lock \n" "1: ll %1, %2 \n" " bnez %1, 2f \n" " lui %1, 0x8000 \n" @@ -377,26 +377,26 @@ static inline void __raw_write_lock(raw_rwlock_t *rw) smp_llsc_mb(); } -static inline void __raw_write_unlock(raw_rwlock_t *rw) +static inline void arch_write_unlock(arch_rwlock_t *rw) { smp_mb(); __asm__ __volatile__( - " # __raw_write_unlock \n" + " # arch_write_unlock \n" " sw $0, %0 \n" : "=m" (rw->lock) : "m" (rw->lock) : "memory"); } -static inline int __raw_read_trylock(raw_rwlock_t *rw) +static inline int arch_read_trylock(arch_rwlock_t *rw) { unsigned int tmp; int ret; if (R10000_LLSC_WAR) { __asm__ __volatile__( - " .set noreorder # __raw_read_trylock \n" + " .set noreorder # arch_read_trylock \n" " li %2, 0 \n" "1: ll %1, %3 \n" " bltz %1, 2f \n" @@ -413,7 +413,7 @@ static inline int __raw_read_trylock(raw_rwlock_t *rw) : "memory"); } else { __asm__ __volatile__( - " .set noreorder # __raw_read_trylock \n" + " .set noreorder # arch_read_trylock \n" " li %2, 0 \n" "1: ll %1, %3 \n" " bltz %1, 2f \n" @@ -433,14 +433,14 @@ static inline int __raw_read_trylock(raw_rwlock_t *rw) return ret; } -static inline int __raw_write_trylock(raw_rwlock_t *rw) +static inline int arch_write_trylock(arch_rwlock_t *rw) { unsigned int tmp; int ret; if (R10000_LLSC_WAR) { __asm__ __volatile__( - " .set noreorder # __raw_write_trylock \n" + " .set noreorder # arch_write_trylock \n" " li %2, 0 \n" "1: ll %1, %3 \n" " 
bnez %1, 2f \n" @@ -457,7 +457,7 @@ static inline int __raw_write_trylock(raw_rwlock_t *rw) : "memory"); } else { __asm__ __volatile__( - " .set noreorder # __raw_write_trylock \n" + " .set noreorder # arch_write_trylock \n" " li %2, 0 \n" "1: ll %1, %3 \n" " bnez %1, 2f \n" @@ -480,11 +480,11 @@ static inline int __raw_write_trylock(raw_rwlock_t *rw) return ret; } -#define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock) -#define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock) +#define arch_read_lock_flags(lock, flags) arch_read_lock(lock) +#define arch_write_lock_flags(lock, flags) arch_write_lock(lock) -#define _raw_spin_relax(lock) cpu_relax() -#define _raw_read_relax(lock) cpu_relax() -#define _raw_write_relax(lock) cpu_relax() +#define arch_spin_relax(lock) cpu_relax() +#define arch_read_relax(lock) cpu_relax() +#define arch_write_relax(lock) cpu_relax() #endif /* _ASM_SPINLOCK_H */ diff --git a/arch/mips/include/asm/spinlock_types.h b/arch/mips/include/asm/spinlock_types.h index adeedaa116c..ee197c2f9c9 100644 --- a/arch/mips/include/asm/spinlock_types.h +++ b/arch/mips/include/asm/spinlock_types.h @@ -12,14 +12,14 @@ typedef struct { * bits 15..28: ticket */ unsigned int lock; -} raw_spinlock_t; +} arch_spinlock_t; -#define __RAW_SPIN_LOCK_UNLOCKED { 0 } +#define __ARCH_SPIN_LOCK_UNLOCKED { 0 } typedef struct { volatile unsigned int lock; -} raw_rwlock_t; +} arch_rwlock_t; -#define __RAW_RW_LOCK_UNLOCKED { 0 } +#define __ARCH_RW_LOCK_UNLOCKED { 0 } #endif diff --git a/arch/mips/kernel/irq.c b/arch/mips/kernel/irq.c index 7b845ba9dff..8b0b4181219 100644 --- a/arch/mips/kernel/irq.c +++ b/arch/mips/kernel/irq.c @@ -99,7 +99,7 @@ int show_interrupts(struct seq_file *p, void *v) } if (i < NR_IRQS) { - spin_lock_irqsave(&irq_desc[i].lock, flags); + raw_spin_lock_irqsave(&irq_desc[i].lock, flags); action = irq_desc[i].action; if (!action) goto skip; @@ -118,7 +118,7 @@ int show_interrupts(struct seq_file *p, void *v) seq_putc(p, '\n'); skip: - spin_unlock_irqrestore(&irq_desc[i].lock, flags); + raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags); } else if (i == NR_IRQS) { seq_putc(p, '\n'); seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count)); diff --git a/arch/mips/vr41xx/common/icu.c b/arch/mips/vr41xx/common/icu.c index 6d39e222b17..6153b6a05cc 100644 --- a/arch/mips/vr41xx/common/icu.c +++ b/arch/mips/vr41xx/common/icu.c @@ -159,9 +159,9 @@ void vr41xx_enable_piuint(uint16_t mask) if (current_cpu_type() == CPU_VR4111 || current_cpu_type() == CPU_VR4121) { - spin_lock_irqsave(&desc->lock, flags); + raw_spin_lock_irqsave(&desc->lock, flags); icu1_set(MPIUINTREG, mask); - spin_unlock_irqrestore(&desc->lock, flags); + raw_spin_unlock_irqrestore(&desc->lock, flags); } } @@ -174,9 +174,9 @@ void vr41xx_disable_piuint(uint16_t mask) if (current_cpu_type() == CPU_VR4111 || current_cpu_type() == CPU_VR4121) { - spin_lock_irqsave(&desc->lock, flags); + raw_spin_lock_irqsave(&desc->lock, flags); icu1_clear(MPIUINTREG, mask); - spin_unlock_irqrestore(&desc->lock, flags); + raw_spin_unlock_irqrestore(&desc->lock, flags); } } @@ -189,9 +189,9 @@ void vr41xx_enable_aiuint(uint16_t mask) if (current_cpu_type() == CPU_VR4111 || current_cpu_type() == CPU_VR4121) { - spin_lock_irqsave(&desc->lock, flags); + raw_spin_lock_irqsave(&desc->lock, flags); icu1_set(MAIUINTREG, mask); - spin_unlock_irqrestore(&desc->lock, flags); + raw_spin_unlock_irqrestore(&desc->lock, flags); } } @@ -204,9 +204,9 @@ void vr41xx_disable_aiuint(uint16_t mask) if (current_cpu_type() == CPU_VR4111 || 
current_cpu_type() == CPU_VR4121) { - spin_lock_irqsave(&desc->lock, flags); + raw_spin_lock_irqsave(&desc->lock, flags); icu1_clear(MAIUINTREG, mask); - spin_unlock_irqrestore(&desc->lock, flags); + raw_spin_unlock_irqrestore(&desc->lock, flags); } } @@ -219,9 +219,9 @@ void vr41xx_enable_kiuint(uint16_t mask) if (current_cpu_type() == CPU_VR4111 || current_cpu_type() == CPU_VR4121) { - spin_lock_irqsave(&desc->lock, flags); + raw_spin_lock_irqsave(&desc->lock, flags); icu1_set(MKIUINTREG, mask); - spin_unlock_irqrestore(&desc->lock, flags); + raw_spin_unlock_irqrestore(&desc->lock, flags); } } @@ -234,9 +234,9 @@ void vr41xx_disable_kiuint(uint16_t mask) if (current_cpu_type() == CPU_VR4111 || current_cpu_type() == CPU_VR4121) { - spin_lock_irqsave(&desc->lock, flags); + raw_spin_lock_irqsave(&desc->lock, flags); icu1_clear(MKIUINTREG, mask); - spin_unlock_irqrestore(&desc->lock, flags); + raw_spin_unlock_irqrestore(&desc->lock, flags); } } @@ -247,9 +247,9 @@ void vr41xx_enable_macint(uint16_t mask) struct irq_desc *desc = irq_desc + ETHERNET_IRQ; unsigned long flags; - spin_lock_irqsave(&desc->lock, flags); + raw_spin_lock_irqsave(&desc->lock, flags); icu1_set(MMACINTREG, mask); - spin_unlock_irqrestore(&desc->lock, flags); + raw_spin_unlock_irqrestore(&desc->lock, flags); } EXPORT_SYMBOL(vr41xx_enable_macint); @@ -259,9 +259,9 @@ void vr41xx_disable_macint(uint16_t mask) struct irq_desc *desc = irq_desc + ETHERNET_IRQ; unsigned long flags; - spin_lock_irqsave(&desc->lock, flags); + raw_spin_lock_irqsave(&desc->lock, flags); icu1_clear(MMACINTREG, mask); - spin_unlock_irqrestore(&desc->lock, flags); + raw_spin_unlock_irqrestore(&desc->lock, flags); } EXPORT_SYMBOL(vr41xx_disable_macint); @@ -271,9 +271,9 @@ void vr41xx_enable_dsiuint(uint16_t mask) struct irq_desc *desc = irq_desc + DSIU_IRQ; unsigned long flags; - spin_lock_irqsave(&desc->lock, flags); + raw_spin_lock_irqsave(&desc->lock, flags); icu1_set(MDSIUINTREG, mask); - spin_unlock_irqrestore(&desc->lock, flags); + raw_spin_unlock_irqrestore(&desc->lock, flags); } EXPORT_SYMBOL(vr41xx_enable_dsiuint); @@ -283,9 +283,9 @@ void vr41xx_disable_dsiuint(uint16_t mask) struct irq_desc *desc = irq_desc + DSIU_IRQ; unsigned long flags; - spin_lock_irqsave(&desc->lock, flags); + raw_spin_lock_irqsave(&desc->lock, flags); icu1_clear(MDSIUINTREG, mask); - spin_unlock_irqrestore(&desc->lock, flags); + raw_spin_unlock_irqrestore(&desc->lock, flags); } EXPORT_SYMBOL(vr41xx_disable_dsiuint); @@ -295,9 +295,9 @@ void vr41xx_enable_firint(uint16_t mask) struct irq_desc *desc = irq_desc + FIR_IRQ; unsigned long flags; - spin_lock_irqsave(&desc->lock, flags); + raw_spin_lock_irqsave(&desc->lock, flags); icu2_set(MFIRINTREG, mask); - spin_unlock_irqrestore(&desc->lock, flags); + raw_spin_unlock_irqrestore(&desc->lock, flags); } EXPORT_SYMBOL(vr41xx_enable_firint); @@ -307,9 +307,9 @@ void vr41xx_disable_firint(uint16_t mask) struct irq_desc *desc = irq_desc + FIR_IRQ; unsigned long flags; - spin_lock_irqsave(&desc->lock, flags); + raw_spin_lock_irqsave(&desc->lock, flags); icu2_clear(MFIRINTREG, mask); - spin_unlock_irqrestore(&desc->lock, flags); + raw_spin_unlock_irqrestore(&desc->lock, flags); } EXPORT_SYMBOL(vr41xx_disable_firint); @@ -322,9 +322,9 @@ void vr41xx_enable_pciint(void) if (current_cpu_type() == CPU_VR4122 || current_cpu_type() == CPU_VR4131 || current_cpu_type() == CPU_VR4133) { - spin_lock_irqsave(&desc->lock, flags); + raw_spin_lock_irqsave(&desc->lock, flags); icu2_write(MPCIINTREG, PCIINT0); - 
spin_unlock_irqrestore(&desc->lock, flags); + raw_spin_unlock_irqrestore(&desc->lock, flags); } } @@ -338,9 +338,9 @@ void vr41xx_disable_pciint(void) if (current_cpu_type() == CPU_VR4122 || current_cpu_type() == CPU_VR4131 || current_cpu_type() == CPU_VR4133) { - spin_lock_irqsave(&desc->lock, flags); + raw_spin_lock_irqsave(&desc->lock, flags); icu2_write(MPCIINTREG, 0); - spin_unlock_irqrestore(&desc->lock, flags); + raw_spin_unlock_irqrestore(&desc->lock, flags); } } @@ -354,9 +354,9 @@ void vr41xx_enable_scuint(void) if (current_cpu_type() == CPU_VR4122 || current_cpu_type() == CPU_VR4131 || current_cpu_type() == CPU_VR4133) { - spin_lock_irqsave(&desc->lock, flags); + raw_spin_lock_irqsave(&desc->lock, flags); icu2_write(MSCUINTREG, SCUINT0); - spin_unlock_irqrestore(&desc->lock, flags); + raw_spin_unlock_irqrestore(&desc->lock, flags); } } @@ -370,9 +370,9 @@ void vr41xx_disable_scuint(void) if (current_cpu_type() == CPU_VR4122 || current_cpu_type() == CPU_VR4131 || current_cpu_type() == CPU_VR4133) { - spin_lock_irqsave(&desc->lock, flags); + raw_spin_lock_irqsave(&desc->lock, flags); icu2_write(MSCUINTREG, 0); - spin_unlock_irqrestore(&desc->lock, flags); + raw_spin_unlock_irqrestore(&desc->lock, flags); } } @@ -386,9 +386,9 @@ void vr41xx_enable_csiint(uint16_t mask) if (current_cpu_type() == CPU_VR4122 || current_cpu_type() == CPU_VR4131 || current_cpu_type() == CPU_VR4133) { - spin_lock_irqsave(&desc->lock, flags); + raw_spin_lock_irqsave(&desc->lock, flags); icu2_set(MCSIINTREG, mask); - spin_unlock_irqrestore(&desc->lock, flags); + raw_spin_unlock_irqrestore(&desc->lock, flags); } } @@ -402,9 +402,9 @@ void vr41xx_disable_csiint(uint16_t mask) if (current_cpu_type() == CPU_VR4122 || current_cpu_type() == CPU_VR4131 || current_cpu_type() == CPU_VR4133) { - spin_lock_irqsave(&desc->lock, flags); + raw_spin_lock_irqsave(&desc->lock, flags); icu2_clear(MCSIINTREG, mask); - spin_unlock_irqrestore(&desc->lock, flags); + raw_spin_unlock_irqrestore(&desc->lock, flags); } } @@ -418,9 +418,9 @@ void vr41xx_enable_bcuint(void) if (current_cpu_type() == CPU_VR4122 || current_cpu_type() == CPU_VR4131 || current_cpu_type() == CPU_VR4133) { - spin_lock_irqsave(&desc->lock, flags); + raw_spin_lock_irqsave(&desc->lock, flags); icu2_write(MBCUINTREG, BCUINTR); - spin_unlock_irqrestore(&desc->lock, flags); + raw_spin_unlock_irqrestore(&desc->lock, flags); } } @@ -434,9 +434,9 @@ void vr41xx_disable_bcuint(void) if (current_cpu_type() == CPU_VR4122 || current_cpu_type() == CPU_VR4131 || current_cpu_type() == CPU_VR4133) { - spin_lock_irqsave(&desc->lock, flags); + raw_spin_lock_irqsave(&desc->lock, flags); icu2_write(MBCUINTREG, 0); - spin_unlock_irqrestore(&desc->lock, flags); + raw_spin_unlock_irqrestore(&desc->lock, flags); } } @@ -486,7 +486,7 @@ static inline int set_sysint1_assign(unsigned int irq, unsigned char assign) pin = SYSINT1_IRQ_TO_PIN(irq); - spin_lock_irq(&desc->lock); + raw_spin_lock_irq(&desc->lock); intassign0 = icu1_read(INTASSIGN0); intassign1 = icu1_read(INTASSIGN1); @@ -525,7 +525,7 @@ static inline int set_sysint1_assign(unsigned int irq, unsigned char assign) intassign1 |= (uint16_t)assign << 9; break; default: - spin_unlock_irq(&desc->lock); + raw_spin_unlock_irq(&desc->lock); return -EINVAL; } @@ -533,7 +533,7 @@ static inline int set_sysint1_assign(unsigned int irq, unsigned char assign) icu1_write(INTASSIGN0, intassign0); icu1_write(INTASSIGN1, intassign1); - spin_unlock_irq(&desc->lock); + raw_spin_unlock_irq(&desc->lock); return 0; } @@ -546,7 +546,7 @@ static 
inline int set_sysint2_assign(unsigned int irq, unsigned char assign) pin = SYSINT2_IRQ_TO_PIN(irq); - spin_lock_irq(&desc->lock); + raw_spin_lock_irq(&desc->lock); intassign2 = icu1_read(INTASSIGN2); intassign3 = icu1_read(INTASSIGN3); @@ -593,7 +593,7 @@ static inline int set_sysint2_assign(unsigned int irq, unsigned char assign) intassign3 |= (uint16_t)assign << 12; break; default: - spin_unlock_irq(&desc->lock); + raw_spin_unlock_irq(&desc->lock); return -EINVAL; } @@ -601,7 +601,7 @@ static inline int set_sysint2_assign(unsigned int irq, unsigned char assign) icu1_write(INTASSIGN2, intassign2); icu1_write(INTASSIGN3, intassign3); - spin_unlock_irq(&desc->lock); + raw_spin_unlock_irq(&desc->lock); return 0; } diff --git a/arch/mn10300/kernel/irq.c b/arch/mn10300/kernel/irq.c index 4c3c58ef5cd..e2d5ed891f3 100644 --- a/arch/mn10300/kernel/irq.c +++ b/arch/mn10300/kernel/irq.c @@ -215,7 +215,7 @@ int show_interrupts(struct seq_file *p, void *v) /* display information rows, one per active CPU */ case 1 ... NR_IRQS - 1: - spin_lock_irqsave(&irq_desc[i].lock, flags); + raw_spin_lock_irqsave(&irq_desc[i].lock, flags); action = irq_desc[i].action; if (action) { @@ -235,7 +235,7 @@ int show_interrupts(struct seq_file *p, void *v) seq_putc(p, '\n'); } - spin_unlock_irqrestore(&irq_desc[i].lock, flags); + raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags); break; /* polish off with NMI and error counters */ diff --git a/arch/parisc/include/asm/atomic.h b/arch/parisc/include/asm/atomic.h index 8bc9e96699b..716634d1f54 100644 --- a/arch/parisc/include/asm/atomic.h +++ b/arch/parisc/include/asm/atomic.h @@ -27,19 +27,19 @@ # define ATOMIC_HASH_SIZE 4 # define ATOMIC_HASH(a) (&(__atomic_hash[ (((unsigned long) (a))/L1_CACHE_BYTES) & (ATOMIC_HASH_SIZE-1) ])) -extern raw_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned; +extern arch_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned; /* Can't use raw_spin_lock_irq because of #include problems, so * this is the substitute */ #define _atomic_spin_lock_irqsave(l,f) do { \ - raw_spinlock_t *s = ATOMIC_HASH(l); \ + arch_spinlock_t *s = ATOMIC_HASH(l); \ local_irq_save(f); \ - __raw_spin_lock(s); \ + arch_spin_lock(s); \ } while(0) #define _atomic_spin_unlock_irqrestore(l,f) do { \ - raw_spinlock_t *s = ATOMIC_HASH(l); \ - __raw_spin_unlock(s); \ + arch_spinlock_t *s = ATOMIC_HASH(l); \ + arch_spin_unlock(s); \ local_irq_restore(f); \ } while(0) diff --git a/arch/parisc/include/asm/spinlock.h b/arch/parisc/include/asm/spinlock.h index fae03e136fa..74036f436a3 100644 --- a/arch/parisc/include/asm/spinlock.h +++ b/arch/parisc/include/asm/spinlock.h @@ -5,17 +5,17 @@ #include <asm/processor.h> #include <asm/spinlock_types.h> -static inline int __raw_spin_is_locked(raw_spinlock_t *x) +static inline int arch_spin_is_locked(arch_spinlock_t *x) { volatile unsigned int *a = __ldcw_align(x); return *a == 0; } -#define __raw_spin_lock(lock) __raw_spin_lock_flags(lock, 0) -#define __raw_spin_unlock_wait(x) \ - do { cpu_relax(); } while (__raw_spin_is_locked(x)) +#define arch_spin_lock(lock) arch_spin_lock_flags(lock, 0) +#define arch_spin_unlock_wait(x) \ + do { cpu_relax(); } while (arch_spin_is_locked(x)) -static inline void __raw_spin_lock_flags(raw_spinlock_t *x, +static inline void arch_spin_lock_flags(arch_spinlock_t *x, unsigned long flags) { volatile unsigned int *a; @@ -33,7 +33,7 @@ static inline void __raw_spin_lock_flags(raw_spinlock_t *x, mb(); } -static inline void __raw_spin_unlock(raw_spinlock_t *x) +static inline void 
arch_spin_unlock(arch_spinlock_t *x) { volatile unsigned int *a; mb(); @@ -42,7 +42,7 @@ static inline void __raw_spin_unlock(raw_spinlock_t *x) mb(); } -static inline int __raw_spin_trylock(raw_spinlock_t *x) +static inline int arch_spin_trylock(arch_spinlock_t *x) { volatile unsigned int *a; int ret; @@ -69,38 +69,38 @@ static inline int __raw_spin_trylock(raw_spinlock_t *x) /* Note that we have to ensure interrupts are disabled in case we're * interrupted by some other code that wants to grab the same read lock */ -static __inline__ void __raw_read_lock(raw_rwlock_t *rw) +static __inline__ void arch_read_lock(arch_rwlock_t *rw) { unsigned long flags; local_irq_save(flags); - __raw_spin_lock_flags(&rw->lock, flags); + arch_spin_lock_flags(&rw->lock, flags); rw->counter++; - __raw_spin_unlock(&rw->lock); + arch_spin_unlock(&rw->lock); local_irq_restore(flags); } /* Note that we have to ensure interrupts are disabled in case we're * interrupted by some other code that wants to grab the same read lock */ -static __inline__ void __raw_read_unlock(raw_rwlock_t *rw) +static __inline__ void arch_read_unlock(arch_rwlock_t *rw) { unsigned long flags; local_irq_save(flags); - __raw_spin_lock_flags(&rw->lock, flags); + arch_spin_lock_flags(&rw->lock, flags); rw->counter--; - __raw_spin_unlock(&rw->lock); + arch_spin_unlock(&rw->lock); local_irq_restore(flags); } /* Note that we have to ensure interrupts are disabled in case we're * interrupted by some other code that wants to grab the same read lock */ -static __inline__ int __raw_read_trylock(raw_rwlock_t *rw) +static __inline__ int arch_read_trylock(arch_rwlock_t *rw) { unsigned long flags; retry: local_irq_save(flags); - if (__raw_spin_trylock(&rw->lock)) { + if (arch_spin_trylock(&rw->lock)) { rw->counter++; - __raw_spin_unlock(&rw->lock); + arch_spin_unlock(&rw->lock); local_irq_restore(flags); return 1; } @@ -111,7 +111,7 @@ static __inline__ int __raw_read_trylock(raw_rwlock_t *rw) return 0; /* Wait until we have a realistic chance at the lock */ - while (__raw_spin_is_locked(&rw->lock) && rw->counter >= 0) + while (arch_spin_is_locked(&rw->lock) && rw->counter >= 0) cpu_relax(); goto retry; @@ -119,15 +119,15 @@ static __inline__ int __raw_read_trylock(raw_rwlock_t *rw) /* Note that we have to ensure interrupts are disabled in case we're * interrupted by some other code that wants to read_trylock() this lock */ -static __inline__ void __raw_write_lock(raw_rwlock_t *rw) +static __inline__ void arch_write_lock(arch_rwlock_t *rw) { unsigned long flags; retry: local_irq_save(flags); - __raw_spin_lock_flags(&rw->lock, flags); + arch_spin_lock_flags(&rw->lock, flags); if (rw->counter != 0) { - __raw_spin_unlock(&rw->lock); + arch_spin_unlock(&rw->lock); local_irq_restore(flags); while (rw->counter != 0) @@ -141,27 +141,27 @@ retry: local_irq_restore(flags); } -static __inline__ void __raw_write_unlock(raw_rwlock_t *rw) +static __inline__ void arch_write_unlock(arch_rwlock_t *rw) { rw->counter = 0; - __raw_spin_unlock(&rw->lock); + arch_spin_unlock(&rw->lock); } /* Note that we have to ensure interrupts are disabled in case we're * interrupted by some other code that wants to read_trylock() this lock */ -static __inline__ int __raw_write_trylock(raw_rwlock_t *rw) +static __inline__ int arch_write_trylock(arch_rwlock_t *rw) { unsigned long flags; int result = 0; local_irq_save(flags); - if (__raw_spin_trylock(&rw->lock)) { + if (arch_spin_trylock(&rw->lock)) { if (rw->counter == 0) { rw->counter = -1; result = 1; } else { /* Read-locked. Oh well. 
*/ - __raw_spin_unlock(&rw->lock); + arch_spin_unlock(&rw->lock); } } local_irq_restore(flags); @@ -173,7 +173,7 @@ static __inline__ int __raw_write_trylock(raw_rwlock_t *rw) * read_can_lock - would read_trylock() succeed? * @lock: the rwlock in question. */ -static __inline__ int __raw_read_can_lock(raw_rwlock_t *rw) +static __inline__ int arch_read_can_lock(arch_rwlock_t *rw) { return rw->counter >= 0; } @@ -182,16 +182,16 @@ static __inline__ int __raw_read_can_lock(raw_rwlock_t *rw) * write_can_lock - would write_trylock() succeed? * @lock: the rwlock in question. */ -static __inline__ int __raw_write_can_lock(raw_rwlock_t *rw) +static __inline__ int arch_write_can_lock(arch_rwlock_t *rw) { return !rw->counter; } -#define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock) -#define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock) +#define arch_read_lock_flags(lock, flags) arch_read_lock(lock) +#define arch_write_lock_flags(lock, flags) arch_write_lock(lock) -#define _raw_spin_relax(lock) cpu_relax() -#define _raw_read_relax(lock) cpu_relax() -#define _raw_write_relax(lock) cpu_relax() +#define arch_spin_relax(lock) cpu_relax() +#define arch_read_relax(lock) cpu_relax() +#define arch_write_relax(lock) cpu_relax() #endif /* __ASM_SPINLOCK_H */ diff --git a/arch/parisc/include/asm/spinlock_types.h b/arch/parisc/include/asm/spinlock_types.h index 3f72f47cf4b..8c373aa28a8 100644 --- a/arch/parisc/include/asm/spinlock_types.h +++ b/arch/parisc/include/asm/spinlock_types.h @@ -4,18 +4,18 @@ typedef struct { #ifdef CONFIG_PA20 volatile unsigned int slock; -# define __RAW_SPIN_LOCK_UNLOCKED { 1 } +# define __ARCH_SPIN_LOCK_UNLOCKED { 1 } #else volatile unsigned int lock[4]; -# define __RAW_SPIN_LOCK_UNLOCKED { { 1, 1, 1, 1 } } +# define __ARCH_SPIN_LOCK_UNLOCKED { { 1, 1, 1, 1 } } #endif -} raw_spinlock_t; +} arch_spinlock_t; typedef struct { - raw_spinlock_t lock; + arch_spinlock_t lock; volatile int counter; -} raw_rwlock_t; +} arch_rwlock_t; -#define __RAW_RW_LOCK_UNLOCKED { __RAW_SPIN_LOCK_UNLOCKED, 0 } +#define __ARCH_RW_LOCK_UNLOCKED { __ARCH_SPIN_LOCK_UNLOCKED, 0 } #endif diff --git a/arch/parisc/kernel/irq.c b/arch/parisc/kernel/irq.c index 2e7610cb33d..f47465e8d04 100644 --- a/arch/parisc/kernel/irq.c +++ b/arch/parisc/kernel/irq.c @@ -180,7 +180,7 @@ int show_interrupts(struct seq_file *p, void *v) if (i < NR_IRQS) { struct irqaction *action; - spin_lock_irqsave(&irq_desc[i].lock, flags); + raw_spin_lock_irqsave(&irq_desc[i].lock, flags); action = irq_desc[i].action; if (!action) goto skip; @@ -224,7 +224,7 @@ int show_interrupts(struct seq_file *p, void *v) seq_putc(p, '\n'); skip: - spin_unlock_irqrestore(&irq_desc[i].lock, flags); + raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags); } return 0; diff --git a/arch/parisc/lib/bitops.c b/arch/parisc/lib/bitops.c index e3eb739fab1..353963d4205 100644 --- a/arch/parisc/lib/bitops.c +++ b/arch/parisc/lib/bitops.c @@ -12,8 +12,8 @@ #include <asm/atomic.h> #ifdef CONFIG_SMP -raw_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned = { - [0 ... (ATOMIC_HASH_SIZE-1)] = __RAW_SPIN_LOCK_UNLOCKED +arch_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned = { + [0 ... 
(ATOMIC_HASH_SIZE-1)] = __ARCH_SPIN_LOCK_UNLOCKED }; #endif diff --git a/arch/powerpc/include/asm/rtas.h b/arch/powerpc/include/asm/rtas.h index 168fce72620..20de73c3668 100644 --- a/arch/powerpc/include/asm/rtas.h +++ b/arch/powerpc/include/asm/rtas.h @@ -58,7 +58,7 @@ struct rtas_t { unsigned long entry; /* physical address pointer */ unsigned long base; /* physical address pointer */ unsigned long size; - raw_spinlock_t lock; + arch_spinlock_t lock; struct rtas_args args; struct device_node *dev; /* virtual address pointer */ }; diff --git a/arch/powerpc/include/asm/spinlock.h b/arch/powerpc/include/asm/spinlock.h index 198266cf9e2..764094cff68 100644 --- a/arch/powerpc/include/asm/spinlock.h +++ b/arch/powerpc/include/asm/spinlock.h @@ -28,7 +28,7 @@ #include <asm/asm-compat.h> #include <asm/synch.h> -#define __raw_spin_is_locked(x) ((x)->slock != 0) +#define arch_spin_is_locked(x) ((x)->slock != 0) #ifdef CONFIG_PPC64 /* use 0x800000yy when locked, where yy == CPU number */ @@ -54,7 +54,7 @@ * This returns the old value in the lock, so we succeeded * in getting the lock if the return value is 0. */ -static inline unsigned long arch_spin_trylock(raw_spinlock_t *lock) +static inline unsigned long __arch_spin_trylock(arch_spinlock_t *lock) { unsigned long tmp, token; @@ -73,10 +73,10 @@ static inline unsigned long arch_spin_trylock(raw_spinlock_t *lock) return tmp; } -static inline int __raw_spin_trylock(raw_spinlock_t *lock) +static inline int arch_spin_trylock(arch_spinlock_t *lock) { CLEAR_IO_SYNC; - return arch_spin_trylock(lock) == 0; + return __arch_spin_trylock(lock) == 0; } /* @@ -96,19 +96,19 @@ static inline int __raw_spin_trylock(raw_spinlock_t *lock) #if defined(CONFIG_PPC_SPLPAR) || defined(CONFIG_PPC_ISERIES) /* We only yield to the hypervisor if we are in shared processor mode */ #define SHARED_PROCESSOR (get_lppaca()->shared_proc) -extern void __spin_yield(raw_spinlock_t *lock); -extern void __rw_yield(raw_rwlock_t *lock); +extern void __spin_yield(arch_spinlock_t *lock); +extern void __rw_yield(arch_rwlock_t *lock); #else /* SPLPAR || ISERIES */ #define __spin_yield(x) barrier() #define __rw_yield(x) barrier() #define SHARED_PROCESSOR 0 #endif -static inline void __raw_spin_lock(raw_spinlock_t *lock) +static inline void arch_spin_lock(arch_spinlock_t *lock) { CLEAR_IO_SYNC; while (1) { - if (likely(arch_spin_trylock(lock) == 0)) + if (likely(__arch_spin_trylock(lock) == 0)) break; do { HMT_low(); @@ -120,13 +120,13 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock) } static inline -void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags) +void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags) { unsigned long flags_dis; CLEAR_IO_SYNC; while (1) { - if (likely(arch_spin_trylock(lock) == 0)) + if (likely(__arch_spin_trylock(lock) == 0)) break; local_save_flags(flags_dis); local_irq_restore(flags); @@ -140,19 +140,19 @@ void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags) } } -static inline void __raw_spin_unlock(raw_spinlock_t *lock) +static inline void arch_spin_unlock(arch_spinlock_t *lock) { SYNC_IO; - __asm__ __volatile__("# __raw_spin_unlock\n\t" + __asm__ __volatile__("# arch_spin_unlock\n\t" LWSYNC_ON_SMP: : :"memory"); lock->slock = 0; } #ifdef CONFIG_PPC64 -extern void __raw_spin_unlock_wait(raw_spinlock_t *lock); +extern void arch_spin_unlock_wait(arch_spinlock_t *lock); #else -#define __raw_spin_unlock_wait(lock) \ - do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0) +#define 
arch_spin_unlock_wait(lock) \ + do { while (arch_spin_is_locked(lock)) cpu_relax(); } while (0) #endif /* @@ -166,8 +166,8 @@ extern void __raw_spin_unlock_wait(raw_spinlock_t *lock); * read-locks. */ -#define __raw_read_can_lock(rw) ((rw)->lock >= 0) -#define __raw_write_can_lock(rw) (!(rw)->lock) +#define arch_read_can_lock(rw) ((rw)->lock >= 0) +#define arch_write_can_lock(rw) (!(rw)->lock) #ifdef CONFIG_PPC64 #define __DO_SIGN_EXTEND "extsw %0,%0\n" @@ -181,7 +181,7 @@ extern void __raw_spin_unlock_wait(raw_spinlock_t *lock); * This returns the old value in the lock + 1, * so we got a read lock if the return value is > 0. */ -static inline long arch_read_trylock(raw_rwlock_t *rw) +static inline long __arch_read_trylock(arch_rwlock_t *rw) { long tmp; @@ -205,7 +205,7 @@ static inline long arch_read_trylock(raw_rwlock_t *rw) * This returns the old value in the lock, * so we got the write lock if the return value is 0. */ -static inline long arch_write_trylock(raw_rwlock_t *rw) +static inline long __arch_write_trylock(arch_rwlock_t *rw) { long tmp, token; @@ -225,10 +225,10 @@ static inline long arch_write_trylock(raw_rwlock_t *rw) return tmp; } -static inline void __raw_read_lock(raw_rwlock_t *rw) +static inline void arch_read_lock(arch_rwlock_t *rw) { while (1) { - if (likely(arch_read_trylock(rw) > 0)) + if (likely(__arch_read_trylock(rw) > 0)) break; do { HMT_low(); @@ -239,10 +239,10 @@ static inline void __raw_read_lock(raw_rwlock_t *rw) } } -static inline void __raw_write_lock(raw_rwlock_t *rw) +static inline void arch_write_lock(arch_rwlock_t *rw) { while (1) { - if (likely(arch_write_trylock(rw) == 0)) + if (likely(__arch_write_trylock(rw) == 0)) break; do { HMT_low(); @@ -253,17 +253,17 @@ static inline void __raw_write_lock(raw_rwlock_t *rw) } } -static inline int __raw_read_trylock(raw_rwlock_t *rw) +static inline int arch_read_trylock(arch_rwlock_t *rw) { - return arch_read_trylock(rw) > 0; + return __arch_read_trylock(rw) > 0; } -static inline int __raw_write_trylock(raw_rwlock_t *rw) +static inline int arch_write_trylock(arch_rwlock_t *rw) { - return arch_write_trylock(rw) == 0; + return __arch_write_trylock(rw) == 0; } -static inline void __raw_read_unlock(raw_rwlock_t *rw) +static inline void arch_read_unlock(arch_rwlock_t *rw) { long tmp; @@ -280,19 +280,19 @@ static inline void __raw_read_unlock(raw_rwlock_t *rw) : "cr0", "xer", "memory"); } -static inline void __raw_write_unlock(raw_rwlock_t *rw) +static inline void arch_write_unlock(arch_rwlock_t *rw) { __asm__ __volatile__("# write_unlock\n\t" LWSYNC_ON_SMP: : :"memory"); rw->lock = 0; } -#define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock) -#define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock) +#define arch_read_lock_flags(lock, flags) arch_read_lock(lock) +#define arch_write_lock_flags(lock, flags) arch_write_lock(lock) -#define _raw_spin_relax(lock) __spin_yield(lock) -#define _raw_read_relax(lock) __rw_yield(lock) -#define _raw_write_relax(lock) __rw_yield(lock) +#define arch_spin_relax(lock) __spin_yield(lock) +#define arch_read_relax(lock) __rw_yield(lock) +#define arch_write_relax(lock) __rw_yield(lock) #endif /* __KERNEL__ */ #endif /* __ASM_SPINLOCK_H */ diff --git a/arch/powerpc/include/asm/spinlock_types.h b/arch/powerpc/include/asm/spinlock_types.h index 74236c9f05b..2351adc4fdc 100644 --- a/arch/powerpc/include/asm/spinlock_types.h +++ b/arch/powerpc/include/asm/spinlock_types.h @@ -7,14 +7,14 @@ typedef struct { volatile unsigned int slock; -} raw_spinlock_t; +} arch_spinlock_t; 
+} arch_spinlock_t;
-#define __RAW_SPIN_LOCK_UNLOCKED { 0 } +#define __ARCH_SPIN_LOCK_UNLOCKED { 0 } typedef struct { volatile signed int lock; -} raw_rwlock_t; +} arch_rwlock_t; -#define __RAW_RW_LOCK_UNLOCKED { 0 } +#define __ARCH_RW_LOCK_UNLOCKED { 0 } #endif diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c index f6dca4f4b29..9040330b053 100644 --- a/arch/powerpc/kernel/irq.c +++ b/arch/powerpc/kernel/irq.c @@ -210,7 +210,7 @@ int show_interrupts(struct seq_file *p, void *v) if (!desc) return 0; - spin_lock_irqsave(&desc->lock, flags); + raw_spin_lock_irqsave(&desc->lock, flags); action = desc->action; if (!action || !action->handler) @@ -237,7 +237,7 @@ int show_interrupts(struct seq_file *p, void *v) seq_putc(p, '\n'); skip: - spin_unlock_irqrestore(&desc->lock, flags); + raw_spin_unlock_irqrestore(&desc->lock, flags); return 0; } @@ -1112,7 +1112,7 @@ static int virq_debug_show(struct seq_file *m, void *private) if (!desc) continue; - spin_lock_irqsave(&desc->lock, flags); + raw_spin_lock_irqsave(&desc->lock, flags); if (desc->action && desc->action->handler) { seq_printf(m, "%5d ", i); @@ -1131,7 +1131,7 @@ static int virq_debug_show(struct seq_file *m, void *private) seq_printf(m, "%s\n", p); } - spin_unlock_irqrestore(&desc->lock, flags); + raw_spin_unlock_irqrestore(&desc->lock, flags); } return 0; diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c index bf90361bb70..fd0d29493fd 100644 --- a/arch/powerpc/kernel/rtas.c +++ b/arch/powerpc/kernel/rtas.c @@ -42,7 +42,7 @@ #include <asm/mmu.h> struct rtas_t rtas = { - .lock = __RAW_SPIN_LOCK_UNLOCKED + .lock = __ARCH_SPIN_LOCK_UNLOCKED }; EXPORT_SYMBOL(rtas); @@ -80,13 +80,13 @@ static unsigned long lock_rtas(void) local_irq_save(flags); preempt_disable(); - __raw_spin_lock_flags(&rtas.lock, flags); + arch_spin_lock_flags(&rtas.lock, flags); return flags; } static void unlock_rtas(unsigned long flags) { - __raw_spin_unlock(&rtas.lock); + arch_spin_unlock(&rtas.lock); local_irq_restore(flags); preempt_enable(); } @@ -978,7 +978,7 @@ int __init early_init_dt_scan_rtas(unsigned long node, return 1; } -static raw_spinlock_t timebase_lock; +static arch_spinlock_t timebase_lock; static u64 timebase = 0; void __cpuinit rtas_give_timebase(void) @@ -987,10 +987,10 @@ void __cpuinit rtas_give_timebase(void) local_irq_save(flags); hard_irq_disable(); - __raw_spin_lock(&timebase_lock); + arch_spin_lock(&timebase_lock); rtas_call(rtas_token("freeze-time-base"), 0, 1, NULL); timebase = get_tb(); - __raw_spin_unlock(&timebase_lock); + arch_spin_unlock(&timebase_lock); while (timebase) barrier(); @@ -1002,8 +1002,8 @@ void __cpuinit rtas_take_timebase(void) { while (!timebase) barrier(); - __raw_spin_lock(&timebase_lock); + arch_spin_lock(&timebase_lock); set_tb(timebase >> 32, timebase & 0xffffffff); timebase = 0; - __raw_spin_unlock(&timebase_lock); + arch_spin_unlock(&timebase_lock); } diff --git a/arch/powerpc/lib/locks.c b/arch/powerpc/lib/locks.c index 79d0fa3a470..58e14fba11b 100644 --- a/arch/powerpc/lib/locks.c +++ b/arch/powerpc/lib/locks.c @@ -25,7 +25,7 @@ #include <asm/smp.h> #include <asm/firmware.h> -void __spin_yield(raw_spinlock_t *lock) +void __spin_yield(arch_spinlock_t *lock) { unsigned int lock_value, holder_cpu, yield_count; @@ -55,7 +55,7 @@ void __spin_yield(raw_spinlock_t *lock) * This turns out to be the same for read and write locks, since * we only know the holder if it is write-locked. 
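These yield helpers exist because on shared-processor LPARs a virtual CPU spinning on a lock only burns cycles the lock holder could be using, so arch_spin_relax() maps to __spin_yield() there and to cpu_relax() elsewhere. A rough user-space sketch of the slow-path shape follows, with sched_yield() standing in for the directed hypervisor yield; the demo_* names are illustrative and this is not the powerpc code.

#include <sched.h>
#include <stdatomic.h>

static atomic_int demo_slock;		/* 0 = unlocked, non-zero = locked */

static void demo_spin_relax(void)
{
	sched_yield();			/* stand-in for HMT_low()/__spin_yield() */
}

static void demo_spin_lock(void)
{
	while (atomic_exchange_explicit(&demo_slock, 1, memory_order_acquire)) {
		do {
			demo_spin_relax();
		} while (atomic_load_explicit(&demo_slock, memory_order_relaxed));
	}
}

static void demo_spin_unlock(void)
{
	atomic_store_explicit(&demo_slock, 0, memory_order_release);
}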
*/ -void __rw_yield(raw_rwlock_t *rw) +void __rw_yield(arch_rwlock_t *rw) { int lock_value; unsigned int holder_cpu, yield_count; @@ -82,7 +82,7 @@ void __rw_yield(raw_rwlock_t *rw) } #endif -void __raw_spin_unlock_wait(raw_spinlock_t *lock) +void arch_spin_unlock_wait(arch_spinlock_t *lock) { while (lock->slock) { HMT_low(); @@ -92,4 +92,4 @@ void __raw_spin_unlock_wait(raw_spinlock_t *lock) HMT_medium(); } -EXPORT_SYMBOL(__raw_spin_unlock_wait); +EXPORT_SYMBOL(arch_spin_unlock_wait); diff --git a/arch/powerpc/platforms/52xx/media5200.c b/arch/powerpc/platforms/52xx/media5200.c index cc0c854291d..0bac3a3dbec 100644 --- a/arch/powerpc/platforms/52xx/media5200.c +++ b/arch/powerpc/platforms/52xx/media5200.c @@ -86,9 +86,9 @@ void media5200_irq_cascade(unsigned int virq, struct irq_desc *desc) u32 status, enable; /* Mask off the cascaded IRQ */ - spin_lock(&desc->lock); + raw_spin_lock(&desc->lock); desc->chip->mask(virq); - spin_unlock(&desc->lock); + raw_spin_unlock(&desc->lock); /* Ask the FPGA for IRQ status. If 'val' is 0, then no irqs * are pending. 'ffs()' is 1 based */ @@ -104,11 +104,11 @@ void media5200_irq_cascade(unsigned int virq, struct irq_desc *desc) } /* Processing done; can reenable the cascade now */ - spin_lock(&desc->lock); + raw_spin_lock(&desc->lock); desc->chip->ack(virq); if (!(desc->status & IRQ_DISABLED)) desc->chip->unmask(virq); - spin_unlock(&desc->lock); + raw_spin_unlock(&desc->lock); } static int media5200_irq_map(struct irq_host *h, unsigned int virq, diff --git a/arch/powerpc/platforms/cell/interrupt.c b/arch/powerpc/platforms/cell/interrupt.c index 7267effc807..6829cf7e2bd 100644 --- a/arch/powerpc/platforms/cell/interrupt.c +++ b/arch/powerpc/platforms/cell/interrupt.c @@ -237,7 +237,7 @@ extern int noirqdebug; static void handle_iic_irq(unsigned int irq, struct irq_desc *desc) { - spin_lock(&desc->lock); + raw_spin_lock(&desc->lock); desc->status &= ~(IRQ_REPLAY | IRQ_WAITING); @@ -265,18 +265,18 @@ static void handle_iic_irq(unsigned int irq, struct irq_desc *desc) goto out_eoi; desc->status &= ~IRQ_PENDING; - spin_unlock(&desc->lock); + raw_spin_unlock(&desc->lock); action_ret = handle_IRQ_event(irq, action); if (!noirqdebug) note_interrupt(irq, desc, action_ret); - spin_lock(&desc->lock); + raw_spin_lock(&desc->lock); } while ((desc->status & (IRQ_PENDING | IRQ_DISABLED)) == IRQ_PENDING); desc->status &= ~IRQ_INPROGRESS; out_eoi: desc->chip->eoi(irq); - spin_unlock(&desc->lock); + raw_spin_unlock(&desc->lock); } static int iic_host_map(struct irq_host *h, unsigned int virq, diff --git a/arch/powerpc/platforms/iseries/irq.c b/arch/powerpc/platforms/iseries/irq.c index 07762259c60..86c4b29eea8 100644 --- a/arch/powerpc/platforms/iseries/irq.c +++ b/arch/powerpc/platforms/iseries/irq.c @@ -217,9 +217,9 @@ void __init iSeries_activate_IRQs() struct irq_desc *desc = irq_to_desc(irq); if (desc && desc->chip && desc->chip->startup) { - spin_lock_irqsave(&desc->lock, flags); + raw_spin_lock_irqsave(&desc->lock, flags); desc->chip->startup(irq); - spin_unlock_irqrestore(&desc->lock, flags); + raw_spin_unlock_irqrestore(&desc->lock, flags); } } } diff --git a/arch/powerpc/platforms/pasemi/setup.c b/arch/powerpc/platforms/pasemi/setup.c index a4619347aa7..242f8095c2d 100644 --- a/arch/powerpc/platforms/pasemi/setup.c +++ b/arch/powerpc/platforms/pasemi/setup.c @@ -71,7 +71,7 @@ static void pas_restart(char *cmd) } #ifdef CONFIG_SMP -static raw_spinlock_t timebase_lock; +static arch_spinlock_t timebase_lock; static unsigned long timebase; static void __devinit 
pas_give_timebase(void) @@ -80,11 +80,11 @@ static void __devinit pas_give_timebase(void) local_irq_save(flags); hard_irq_disable(); - __raw_spin_lock(&timebase_lock); + arch_spin_lock(&timebase_lock); mtspr(SPRN_TBCTL, TBCTL_FREEZE); isync(); timebase = get_tb(); - __raw_spin_unlock(&timebase_lock); + arch_spin_unlock(&timebase_lock); while (timebase) barrier(); @@ -97,10 +97,10 @@ static void __devinit pas_take_timebase(void) while (!timebase) smp_rmb(); - __raw_spin_lock(&timebase_lock); + arch_spin_lock(&timebase_lock); set_tb(timebase >> 32, timebase & 0xffffffff); timebase = 0; - __raw_spin_unlock(&timebase_lock); + arch_spin_unlock(&timebase_lock); } struct smp_ops_t pas_smp_ops = { diff --git a/arch/powerpc/platforms/pseries/xics.c b/arch/powerpc/platforms/pseries/xics.c index 7d01b58f398..b9b9e11609e 100644 --- a/arch/powerpc/platforms/pseries/xics.c +++ b/arch/powerpc/platforms/pseries/xics.c @@ -906,7 +906,7 @@ void xics_migrate_irqs_away(void) || desc->chip->set_affinity == NULL) continue; - spin_lock_irqsave(&desc->lock, flags); + raw_spin_lock_irqsave(&desc->lock, flags); status = rtas_call(ibm_get_xive, 1, 3, xics_status, irq); if (status) { @@ -930,7 +930,7 @@ void xics_migrate_irqs_away(void) cpumask_setall(irq_to_desc(virq)->affinity); desc->chip->set_affinity(virq, cpu_all_mask); unlock: - spin_unlock_irqrestore(&desc->lock, flags); + raw_spin_unlock_irqrestore(&desc->lock, flags); } } #endif diff --git a/arch/powerpc/sysdev/fsl_msi.c b/arch/powerpc/sysdev/fsl_msi.c index 62e50258cde..c6e11b07710 100644 --- a/arch/powerpc/sysdev/fsl_msi.c +++ b/arch/powerpc/sysdev/fsl_msi.c @@ -173,7 +173,7 @@ static void fsl_msi_cascade(unsigned int irq, struct irq_desc *desc) u32 intr_index; u32 have_shift = 0; - spin_lock(&desc->lock); + raw_spin_lock(&desc->lock); if ((msi_data->feature & FSL_PIC_IP_MASK) == FSL_PIC_IP_IPIC) { if (desc->chip->mask_ack) desc->chip->mask_ack(irq); @@ -225,7 +225,7 @@ static void fsl_msi_cascade(unsigned int irq, struct irq_desc *desc) break; } unlock: - spin_unlock(&desc->lock); + raw_spin_unlock(&desc->lock); } static int __devinit fsl_of_msi_probe(struct of_device *dev, diff --git a/arch/powerpc/sysdev/uic.c b/arch/powerpc/sysdev/uic.c index 7d10074b330..6f220a913e4 100644 --- a/arch/powerpc/sysdev/uic.c +++ b/arch/powerpc/sysdev/uic.c @@ -225,12 +225,12 @@ void uic_irq_cascade(unsigned int virq, struct irq_desc *desc) int src; int subvirq; - spin_lock(&desc->lock); + raw_spin_lock(&desc->lock); if (desc->status & IRQ_LEVEL) desc->chip->mask(virq); else desc->chip->mask_ack(virq); - spin_unlock(&desc->lock); + raw_spin_unlock(&desc->lock); msr = mfdcr(uic->dcrbase + UIC_MSR); if (!msr) /* spurious interrupt */ @@ -242,12 +242,12 @@ void uic_irq_cascade(unsigned int virq, struct irq_desc *desc) generic_handle_irq(subvirq); uic_irq_ret: - spin_lock(&desc->lock); + raw_spin_lock(&desc->lock); if (desc->status & IRQ_LEVEL) desc->chip->ack(virq); if (!(desc->status & IRQ_DISABLED) && desc->chip->unmask) desc->chip->unmask(virq); - spin_unlock(&desc->lock); + raw_spin_unlock(&desc->lock); } static struct uic * __init uic_init_one(struct device_node *node) diff --git a/arch/s390/appldata/appldata_base.c b/arch/s390/appldata/appldata_base.c index 495589950dc..5c91995b74e 100644 --- a/arch/s390/appldata/appldata_base.c +++ b/arch/s390/appldata/appldata_base.c @@ -551,7 +551,7 @@ static int appldata_thaw(struct device *dev) return appldata_restore(dev); } -static struct dev_pm_ops appldata_pm_ops = { +static const struct dev_pm_ops appldata_pm_ops = { .freeze 
= appldata_freeze, .thaw = appldata_thaw, .restore = appldata_restore, diff --git a/arch/s390/include/asm/spinlock.h b/arch/s390/include/asm/spinlock.h index c9af0d19c7a..a587907d77f 100644 --- a/arch/s390/include/asm/spinlock.h +++ b/arch/s390/include/asm/spinlock.h @@ -52,27 +52,27 @@ _raw_compare_and_swap(volatile unsigned int *lock, * (the type definitions are in asm/spinlock_types.h) */ -#define __raw_spin_is_locked(x) ((x)->owner_cpu != 0) -#define __raw_spin_unlock_wait(lock) \ - do { while (__raw_spin_is_locked(lock)) \ - _raw_spin_relax(lock); } while (0) +#define arch_spin_is_locked(x) ((x)->owner_cpu != 0) +#define arch_spin_unlock_wait(lock) \ + do { while (arch_spin_is_locked(lock)) \ + arch_spin_relax(lock); } while (0) -extern void _raw_spin_lock_wait(raw_spinlock_t *); -extern void _raw_spin_lock_wait_flags(raw_spinlock_t *, unsigned long flags); -extern int _raw_spin_trylock_retry(raw_spinlock_t *); -extern void _raw_spin_relax(raw_spinlock_t *lock); +extern void arch_spin_lock_wait(arch_spinlock_t *); +extern void arch_spin_lock_wait_flags(arch_spinlock_t *, unsigned long flags); +extern int arch_spin_trylock_retry(arch_spinlock_t *); +extern void arch_spin_relax(arch_spinlock_t *lock); -static inline void __raw_spin_lock(raw_spinlock_t *lp) +static inline void arch_spin_lock(arch_spinlock_t *lp) { int old; old = _raw_compare_and_swap(&lp->owner_cpu, 0, ~smp_processor_id()); if (likely(old == 0)) return; - _raw_spin_lock_wait(lp); + arch_spin_lock_wait(lp); } -static inline void __raw_spin_lock_flags(raw_spinlock_t *lp, +static inline void arch_spin_lock_flags(arch_spinlock_t *lp, unsigned long flags) { int old; @@ -80,20 +80,20 @@ static inline void __raw_spin_lock_flags(raw_spinlock_t *lp, old = _raw_compare_and_swap(&lp->owner_cpu, 0, ~smp_processor_id()); if (likely(old == 0)) return; - _raw_spin_lock_wait_flags(lp, flags); + arch_spin_lock_wait_flags(lp, flags); } -static inline int __raw_spin_trylock(raw_spinlock_t *lp) +static inline int arch_spin_trylock(arch_spinlock_t *lp) { int old; old = _raw_compare_and_swap(&lp->owner_cpu, 0, ~smp_processor_id()); if (likely(old == 0)) return 1; - return _raw_spin_trylock_retry(lp); + return arch_spin_trylock_retry(lp); } -static inline void __raw_spin_unlock(raw_spinlock_t *lp) +static inline void arch_spin_unlock(arch_spinlock_t *lp) { _raw_compare_and_swap(&lp->owner_cpu, lp->owner_cpu, 0); } @@ -113,22 +113,22 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lp) * read_can_lock - would read_trylock() succeed? * @lock: the rwlock in question. */ -#define __raw_read_can_lock(x) ((int)(x)->lock >= 0) +#define arch_read_can_lock(x) ((int)(x)->lock >= 0) /** * write_can_lock - would write_trylock() succeed? * @lock: the rwlock in question. 
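[Illustrative aside, not part of the patch: the s390 arch_spin_lock() above is a compare-and-swap loop on lock->owner_cpu that stores a non-zero per-CPU tag on success and falls back to arch_spin_lock_wait() when the lock is already held. Below is a minimal user-space sketch of the same shape, using the generic GCC __sync builtins in place of the private _raw_compare_and_swap() helper; every demo_* name is invented for the example.]

	#include <stdio.h>

	typedef struct {
		volatile unsigned int owner_cpu;	/* 0 == unlocked, else holder's tag */
	} demo_spinlock_t;

	/* Try to move the lock word from 0 to our tag; succeeds iff it was 0. */
	static inline int demo_spin_trylock(demo_spinlock_t *lp, unsigned int cpu_tag)
	{
		return __sync_val_compare_and_swap(&lp->owner_cpu, 0U, cpu_tag) == 0U;
	}

	static inline void demo_spin_lock(demo_spinlock_t *lp, unsigned int cpu_tag)
	{
		while (!demo_spin_trylock(lp, cpu_tag))
			;	/* the kernel version spins with a retry budget and may yield to the owner */
	}

	static inline void demo_spin_unlock(demo_spinlock_t *lp)
	{
		__sync_lock_release(&lp->owner_cpu);	/* store 0 with release semantics */
	}

	int main(void)
	{
		demo_spinlock_t lock = { 0 };

		demo_spin_lock(&lock, ~0U);	/* the kernel tags the owner with ~smp_processor_id() */
		printf("held by %#x\n", lock.owner_cpu);
		demo_spin_unlock(&lock);
		return 0;
	}

[End of aside.]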
*/ -#define __raw_write_can_lock(x) ((x)->lock == 0) +#define arch_write_can_lock(x) ((x)->lock == 0) -extern void _raw_read_lock_wait(raw_rwlock_t *lp); -extern void _raw_read_lock_wait_flags(raw_rwlock_t *lp, unsigned long flags); -extern int _raw_read_trylock_retry(raw_rwlock_t *lp); -extern void _raw_write_lock_wait(raw_rwlock_t *lp); -extern void _raw_write_lock_wait_flags(raw_rwlock_t *lp, unsigned long flags); -extern int _raw_write_trylock_retry(raw_rwlock_t *lp); +extern void _raw_read_lock_wait(arch_rwlock_t *lp); +extern void _raw_read_lock_wait_flags(arch_rwlock_t *lp, unsigned long flags); +extern int _raw_read_trylock_retry(arch_rwlock_t *lp); +extern void _raw_write_lock_wait(arch_rwlock_t *lp); +extern void _raw_write_lock_wait_flags(arch_rwlock_t *lp, unsigned long flags); +extern int _raw_write_trylock_retry(arch_rwlock_t *lp); -static inline void __raw_read_lock(raw_rwlock_t *rw) +static inline void arch_read_lock(arch_rwlock_t *rw) { unsigned int old; old = rw->lock & 0x7fffffffU; @@ -136,7 +136,7 @@ static inline void __raw_read_lock(raw_rwlock_t *rw) _raw_read_lock_wait(rw); } -static inline void __raw_read_lock_flags(raw_rwlock_t *rw, unsigned long flags) +static inline void arch_read_lock_flags(arch_rwlock_t *rw, unsigned long flags) { unsigned int old; old = rw->lock & 0x7fffffffU; @@ -144,7 +144,7 @@ static inline void __raw_read_lock_flags(raw_rwlock_t *rw, unsigned long flags) _raw_read_lock_wait_flags(rw, flags); } -static inline void __raw_read_unlock(raw_rwlock_t *rw) +static inline void arch_read_unlock(arch_rwlock_t *rw) { unsigned int old, cmp; @@ -155,24 +155,24 @@ static inline void __raw_read_unlock(raw_rwlock_t *rw) } while (cmp != old); } -static inline void __raw_write_lock(raw_rwlock_t *rw) +static inline void arch_write_lock(arch_rwlock_t *rw) { if (unlikely(_raw_compare_and_swap(&rw->lock, 0, 0x80000000) != 0)) _raw_write_lock_wait(rw); } -static inline void __raw_write_lock_flags(raw_rwlock_t *rw, unsigned long flags) +static inline void arch_write_lock_flags(arch_rwlock_t *rw, unsigned long flags) { if (unlikely(_raw_compare_and_swap(&rw->lock, 0, 0x80000000) != 0)) _raw_write_lock_wait_flags(rw, flags); } -static inline void __raw_write_unlock(raw_rwlock_t *rw) +static inline void arch_write_unlock(arch_rwlock_t *rw) { _raw_compare_and_swap(&rw->lock, 0x80000000, 0); } -static inline int __raw_read_trylock(raw_rwlock_t *rw) +static inline int arch_read_trylock(arch_rwlock_t *rw) { unsigned int old; old = rw->lock & 0x7fffffffU; @@ -181,14 +181,14 @@ static inline int __raw_read_trylock(raw_rwlock_t *rw) return _raw_read_trylock_retry(rw); } -static inline int __raw_write_trylock(raw_rwlock_t *rw) +static inline int arch_write_trylock(arch_rwlock_t *rw) { if (likely(_raw_compare_and_swap(&rw->lock, 0, 0x80000000) == 0)) return 1; return _raw_write_trylock_retry(rw); } -#define _raw_read_relax(lock) cpu_relax() -#define _raw_write_relax(lock) cpu_relax() +#define arch_read_relax(lock) cpu_relax() +#define arch_write_relax(lock) cpu_relax() #endif /* __ASM_SPINLOCK_H */ diff --git a/arch/s390/include/asm/spinlock_types.h b/arch/s390/include/asm/spinlock_types.h index 654abc40de0..9c76656a0af 100644 --- a/arch/s390/include/asm/spinlock_types.h +++ b/arch/s390/include/asm/spinlock_types.h @@ -7,14 +7,14 @@ typedef struct { volatile unsigned int owner_cpu; -} __attribute__ ((aligned (4))) raw_spinlock_t; +} __attribute__ ((aligned (4))) arch_spinlock_t; -#define __RAW_SPIN_LOCK_UNLOCKED { 0 } +#define __ARCH_SPIN_LOCK_UNLOCKED { 0 } typedef struct { 
volatile unsigned int lock; -} raw_rwlock_t; +} arch_rwlock_t; -#define __RAW_RW_LOCK_UNLOCKED { 0 } +#define __ARCH_RW_LOCK_UNLOCKED { 0 } #endif diff --git a/arch/s390/kernel/debug.c b/arch/s390/kernel/debug.c index 071c81f179e..0168472b2fd 100644 --- a/arch/s390/kernel/debug.c +++ b/arch/s390/kernel/debug.c @@ -18,6 +18,7 @@ #include <linux/errno.h> #include <linux/slab.h> #include <linux/ctype.h> +#include <linux/string.h> #include <linux/sysctl.h> #include <asm/uaccess.h> #include <linux/module.h> @@ -1178,7 +1179,7 @@ debug_get_uint(char *buf) { int rc; - for(; isspace(*buf); buf++); + buf = skip_spaces(buf); rc = simple_strtoul(buf, &buf, 10); if(*buf){ rc = -EINVAL; diff --git a/arch/s390/lib/spinlock.c b/arch/s390/lib/spinlock.c index f7e0d30250b..10754a37566 100644 --- a/arch/s390/lib/spinlock.c +++ b/arch/s390/lib/spinlock.c @@ -39,7 +39,7 @@ static inline void _raw_yield_cpu(int cpu) _raw_yield(); } -void _raw_spin_lock_wait(raw_spinlock_t *lp) +void arch_spin_lock_wait(arch_spinlock_t *lp) { int count = spin_retry; unsigned int cpu = ~smp_processor_id(); @@ -51,15 +51,15 @@ void _raw_spin_lock_wait(raw_spinlock_t *lp) _raw_yield_cpu(~owner); count = spin_retry; } - if (__raw_spin_is_locked(lp)) + if (arch_spin_is_locked(lp)) continue; if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0) return; } } -EXPORT_SYMBOL(_raw_spin_lock_wait); +EXPORT_SYMBOL(arch_spin_lock_wait); -void _raw_spin_lock_wait_flags(raw_spinlock_t *lp, unsigned long flags) +void arch_spin_lock_wait_flags(arch_spinlock_t *lp, unsigned long flags) { int count = spin_retry; unsigned int cpu = ~smp_processor_id(); @@ -72,7 +72,7 @@ void _raw_spin_lock_wait_flags(raw_spinlock_t *lp, unsigned long flags) _raw_yield_cpu(~owner); count = spin_retry; } - if (__raw_spin_is_locked(lp)) + if (arch_spin_is_locked(lp)) continue; local_irq_disable(); if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0) @@ -80,32 +80,32 @@ void _raw_spin_lock_wait_flags(raw_spinlock_t *lp, unsigned long flags) local_irq_restore(flags); } } -EXPORT_SYMBOL(_raw_spin_lock_wait_flags); +EXPORT_SYMBOL(arch_spin_lock_wait_flags); -int _raw_spin_trylock_retry(raw_spinlock_t *lp) +int arch_spin_trylock_retry(arch_spinlock_t *lp) { unsigned int cpu = ~smp_processor_id(); int count; for (count = spin_retry; count > 0; count--) { - if (__raw_spin_is_locked(lp)) + if (arch_spin_is_locked(lp)) continue; if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0) return 1; } return 0; } -EXPORT_SYMBOL(_raw_spin_trylock_retry); +EXPORT_SYMBOL(arch_spin_trylock_retry); -void _raw_spin_relax(raw_spinlock_t *lock) +void arch_spin_relax(arch_spinlock_t *lock) { unsigned int cpu = lock->owner_cpu; if (cpu != 0) _raw_yield_cpu(~cpu); } -EXPORT_SYMBOL(_raw_spin_relax); +EXPORT_SYMBOL(arch_spin_relax); -void _raw_read_lock_wait(raw_rwlock_t *rw) +void _raw_read_lock_wait(arch_rwlock_t *rw) { unsigned int old; int count = spin_retry; @@ -115,7 +115,7 @@ void _raw_read_lock_wait(raw_rwlock_t *rw) _raw_yield(); count = spin_retry; } - if (!__raw_read_can_lock(rw)) + if (!arch_read_can_lock(rw)) continue; old = rw->lock & 0x7fffffffU; if (_raw_compare_and_swap(&rw->lock, old, old + 1) == old) @@ -124,7 +124,7 @@ void _raw_read_lock_wait(raw_rwlock_t *rw) } EXPORT_SYMBOL(_raw_read_lock_wait); -void _raw_read_lock_wait_flags(raw_rwlock_t *rw, unsigned long flags) +void _raw_read_lock_wait_flags(arch_rwlock_t *rw, unsigned long flags) { unsigned int old; int count = spin_retry; @@ -135,7 +135,7 @@ void _raw_read_lock_wait_flags(raw_rwlock_t *rw, unsigned long flags) 
_raw_yield(); count = spin_retry; } - if (!__raw_read_can_lock(rw)) + if (!arch_read_can_lock(rw)) continue; old = rw->lock & 0x7fffffffU; local_irq_disable(); @@ -145,13 +145,13 @@ void _raw_read_lock_wait_flags(raw_rwlock_t *rw, unsigned long flags) } EXPORT_SYMBOL(_raw_read_lock_wait_flags); -int _raw_read_trylock_retry(raw_rwlock_t *rw) +int _raw_read_trylock_retry(arch_rwlock_t *rw) { unsigned int old; int count = spin_retry; while (count-- > 0) { - if (!__raw_read_can_lock(rw)) + if (!arch_read_can_lock(rw)) continue; old = rw->lock & 0x7fffffffU; if (_raw_compare_and_swap(&rw->lock, old, old + 1) == old) @@ -161,7 +161,7 @@ int _raw_read_trylock_retry(raw_rwlock_t *rw) } EXPORT_SYMBOL(_raw_read_trylock_retry); -void _raw_write_lock_wait(raw_rwlock_t *rw) +void _raw_write_lock_wait(arch_rwlock_t *rw) { int count = spin_retry; @@ -170,7 +170,7 @@ void _raw_write_lock_wait(raw_rwlock_t *rw) _raw_yield(); count = spin_retry; } - if (!__raw_write_can_lock(rw)) + if (!arch_write_can_lock(rw)) continue; if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000) == 0) return; @@ -178,7 +178,7 @@ void _raw_write_lock_wait(raw_rwlock_t *rw) } EXPORT_SYMBOL(_raw_write_lock_wait); -void _raw_write_lock_wait_flags(raw_rwlock_t *rw, unsigned long flags) +void _raw_write_lock_wait_flags(arch_rwlock_t *rw, unsigned long flags) { int count = spin_retry; @@ -188,7 +188,7 @@ void _raw_write_lock_wait_flags(raw_rwlock_t *rw, unsigned long flags) _raw_yield(); count = spin_retry; } - if (!__raw_write_can_lock(rw)) + if (!arch_write_can_lock(rw)) continue; local_irq_disable(); if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000) == 0) @@ -197,12 +197,12 @@ void _raw_write_lock_wait_flags(raw_rwlock_t *rw, unsigned long flags) } EXPORT_SYMBOL(_raw_write_lock_wait_flags); -int _raw_write_trylock_retry(raw_rwlock_t *rw) +int _raw_write_trylock_retry(arch_rwlock_t *rw) { int count = spin_retry; while (count-- > 0) { - if (!__raw_write_can_lock(rw)) + if (!arch_write_can_lock(rw)) continue; if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000) == 0) return 1; diff --git a/arch/sh/include/asm/spinlock.h b/arch/sh/include/asm/spinlock.h index a28c9f0053f..bdc0f3b6c56 100644 --- a/arch/sh/include/asm/spinlock.h +++ b/arch/sh/include/asm/spinlock.h @@ -23,10 +23,10 @@ * Your basic SMP spinlocks, allowing only a single CPU anywhere */ -#define __raw_spin_is_locked(x) ((x)->lock <= 0) -#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock) -#define __raw_spin_unlock_wait(x) \ - do { while (__raw_spin_is_locked(x)) cpu_relax(); } while (0) +#define arch_spin_is_locked(x) ((x)->lock <= 0) +#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock) +#define arch_spin_unlock_wait(x) \ + do { while (arch_spin_is_locked(x)) cpu_relax(); } while (0) /* * Simple spin lock operations. There are two variants, one clears IRQ's @@ -34,14 +34,14 @@ * * We make no fairness assumptions. They have a cost. */ -static inline void __raw_spin_lock(raw_spinlock_t *lock) +static inline void arch_spin_lock(arch_spinlock_t *lock) { unsigned long tmp; unsigned long oldval; __asm__ __volatile__ ( "1: \n\t" - "movli.l @%2, %0 ! __raw_spin_lock \n\t" + "movli.l @%2, %0 ! arch_spin_lock \n\t" "mov %0, %1 \n\t" "mov #0, %0 \n\t" "movco.l %0, @%2 \n\t" @@ -54,12 +54,12 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock) ); } -static inline void __raw_spin_unlock(raw_spinlock_t *lock) +static inline void arch_spin_unlock(arch_spinlock_t *lock) { unsigned long tmp; __asm__ __volatile__ ( - "mov #1, %0 ! 
__raw_spin_unlock \n\t" + "mov #1, %0 ! arch_spin_unlock \n\t" "mov.l %0, @%1 \n\t" : "=&z" (tmp) : "r" (&lock->lock) @@ -67,13 +67,13 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lock) ); } -static inline int __raw_spin_trylock(raw_spinlock_t *lock) +static inline int arch_spin_trylock(arch_spinlock_t *lock) { unsigned long tmp, oldval; __asm__ __volatile__ ( "1: \n\t" - "movli.l @%2, %0 ! __raw_spin_trylock \n\t" + "movli.l @%2, %0 ! arch_spin_trylock \n\t" "mov %0, %1 \n\t" "mov #0, %0 \n\t" "movco.l %0, @%2 \n\t" @@ -100,21 +100,21 @@ static inline int __raw_spin_trylock(raw_spinlock_t *lock) * read_can_lock - would read_trylock() succeed? * @lock: the rwlock in question. */ -#define __raw_read_can_lock(x) ((x)->lock > 0) +#define arch_read_can_lock(x) ((x)->lock > 0) /** * write_can_lock - would write_trylock() succeed? * @lock: the rwlock in question. */ -#define __raw_write_can_lock(x) ((x)->lock == RW_LOCK_BIAS) +#define arch_write_can_lock(x) ((x)->lock == RW_LOCK_BIAS) -static inline void __raw_read_lock(raw_rwlock_t *rw) +static inline void arch_read_lock(arch_rwlock_t *rw) { unsigned long tmp; __asm__ __volatile__ ( "1: \n\t" - "movli.l @%1, %0 ! __raw_read_lock \n\t" + "movli.l @%1, %0 ! arch_read_lock \n\t" "cmp/pl %0 \n\t" "bf 1b \n\t" "add #-1, %0 \n\t" @@ -126,13 +126,13 @@ static inline void __raw_read_lock(raw_rwlock_t *rw) ); } -static inline void __raw_read_unlock(raw_rwlock_t *rw) +static inline void arch_read_unlock(arch_rwlock_t *rw) { unsigned long tmp; __asm__ __volatile__ ( "1: \n\t" - "movli.l @%1, %0 ! __raw_read_unlock \n\t" + "movli.l @%1, %0 ! arch_read_unlock \n\t" "add #1, %0 \n\t" "movco.l %0, @%1 \n\t" "bf 1b \n\t" @@ -142,13 +142,13 @@ static inline void __raw_read_unlock(raw_rwlock_t *rw) ); } -static inline void __raw_write_lock(raw_rwlock_t *rw) +static inline void arch_write_lock(arch_rwlock_t *rw) { unsigned long tmp; __asm__ __volatile__ ( "1: \n\t" - "movli.l @%1, %0 ! __raw_write_lock \n\t" + "movli.l @%1, %0 ! arch_write_lock \n\t" "cmp/hs %2, %0 \n\t" "bf 1b \n\t" "sub %2, %0 \n\t" @@ -160,23 +160,23 @@ static inline void __raw_write_lock(raw_rwlock_t *rw) ); } -static inline void __raw_write_unlock(raw_rwlock_t *rw) +static inline void arch_write_unlock(arch_rwlock_t *rw) { __asm__ __volatile__ ( - "mov.l %1, @%0 ! __raw_write_unlock \n\t" + "mov.l %1, @%0 ! arch_write_unlock \n\t" : : "r" (&rw->lock), "r" (RW_LOCK_BIAS) : "t", "memory" ); } -static inline int __raw_read_trylock(raw_rwlock_t *rw) +static inline int arch_read_trylock(arch_rwlock_t *rw) { unsigned long tmp, oldval; __asm__ __volatile__ ( "1: \n\t" - "movli.l @%2, %0 ! __raw_read_trylock \n\t" + "movli.l @%2, %0 ! arch_read_trylock \n\t" "mov %0, %1 \n\t" "cmp/pl %0 \n\t" "bf 2f \n\t" @@ -193,13 +193,13 @@ static inline int __raw_read_trylock(raw_rwlock_t *rw) return (oldval > 0); } -static inline int __raw_write_trylock(raw_rwlock_t *rw) +static inline int arch_write_trylock(arch_rwlock_t *rw) { unsigned long tmp, oldval; __asm__ __volatile__ ( "1: \n\t" - "movli.l @%2, %0 ! __raw_write_trylock \n\t" + "movli.l @%2, %0 ! 
arch_write_trylock \n\t" "mov %0, %1 \n\t" "cmp/hs %3, %0 \n\t" "bf 2f \n\t" @@ -216,11 +216,11 @@ static inline int __raw_write_trylock(raw_rwlock_t *rw) return (oldval > (RW_LOCK_BIAS - 1)); } -#define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock) -#define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock) +#define arch_read_lock_flags(lock, flags) arch_read_lock(lock) +#define arch_write_lock_flags(lock, flags) arch_write_lock(lock) -#define _raw_spin_relax(lock) cpu_relax() -#define _raw_read_relax(lock) cpu_relax() -#define _raw_write_relax(lock) cpu_relax() +#define arch_spin_relax(lock) cpu_relax() +#define arch_read_relax(lock) cpu_relax() +#define arch_write_relax(lock) cpu_relax() #endif /* __ASM_SH_SPINLOCK_H */ diff --git a/arch/sh/include/asm/spinlock_types.h b/arch/sh/include/asm/spinlock_types.h index b4d244e7b60..9b7560db06c 100644 --- a/arch/sh/include/asm/spinlock_types.h +++ b/arch/sh/include/asm/spinlock_types.h @@ -7,15 +7,15 @@ typedef struct { volatile unsigned int lock; -} raw_spinlock_t; +} arch_spinlock_t; -#define __RAW_SPIN_LOCK_UNLOCKED { 1 } +#define __ARCH_SPIN_LOCK_UNLOCKED { 1 } typedef struct { volatile unsigned int lock; -} raw_rwlock_t; +} arch_rwlock_t; #define RW_LOCK_BIAS 0x01000000 -#define __RAW_RW_LOCK_UNLOCKED { RW_LOCK_BIAS } +#define __ARCH_RW_LOCK_UNLOCKED { RW_LOCK_BIAS } #endif diff --git a/arch/sh/kernel/irq.c b/arch/sh/kernel/irq.c index e1913f28f41..d2d41d04665 100644 --- a/arch/sh/kernel/irq.c +++ b/arch/sh/kernel/irq.c @@ -76,7 +76,7 @@ int show_interrupts(struct seq_file *p, void *v) if (!desc) return 0; - spin_lock_irqsave(&desc->lock, flags); + raw_spin_lock_irqsave(&desc->lock, flags); for_each_online_cpu(j) any_count |= kstat_irqs_cpu(i, j); action = desc->action; @@ -97,7 +97,7 @@ int show_interrupts(struct seq_file *p, void *v) seq_putc(p, '\n'); out: - spin_unlock_irqrestore(&desc->lock, flags); + raw_spin_unlock_irqrestore(&desc->lock, flags); return 0; } #endif diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig index 33ac1a9ac88..108197ac0d5 100644 --- a/arch/sparc/Kconfig +++ b/arch/sparc/Kconfig @@ -43,6 +43,7 @@ config SPARC64 select HAVE_SYSCALL_WRAPPERS select HAVE_DYNAMIC_FTRACE select HAVE_FTRACE_MCOUNT_RECORD + select HAVE_SYSCALL_TRACEPOINTS select USE_GENERIC_SMP_HELPERS if SMP select RTC_DRV_CMOS select RTC_DRV_BQ4802 diff --git a/arch/sparc/Kconfig.debug b/arch/sparc/Kconfig.debug index 90d5fe223a7..9d3c889718a 100644 --- a/arch/sparc/Kconfig.debug +++ b/arch/sparc/Kconfig.debug @@ -33,4 +33,18 @@ config FRAME_POINTER depends on MCOUNT default y +config DEBUG_STRICT_USER_COPY_CHECKS + bool "Strict copy size checks" + depends on DEBUG_KERNEL && !TRACE_BRANCH_PROFILING + ---help--- + Enabling this option turns a certain set of sanity checks for user + copy operations into compile time failures. + + The copy_from_user() etc checks are there to help test if there + are sufficient security checks on the length argument of + the copy operation, by having gcc prove that the argument is + within bounds. + + If unsure, or if you run an older (pre 4.4) gcc, say N. 
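[Illustrative aside, not part of the patch: the Kconfig help text above depends on gcc proving the destination buffer size at compile time. A rough user-space sketch of that mechanism follows, assuming only the standard __builtin_object_size() builtin and the gcc error function attribute that the kernel's __compiletime_error() wraps; it only behaves as intended with optimisation enabled, which is why the help text ties the option to newer compilers. The demo_* names are invented.]

	#include <string.h>

	/* If gcc cannot optimise this call away, the build fails (gcc >= 4.3). */
	extern void demo_copy_overflow(void)
		__attribute__((error("copy size is not provably correct")));

	static inline unsigned long demo_copy_from_user(void *to, const void *from,
							unsigned long n)
	{
		unsigned long sz = __builtin_object_size(to, 0);	/* (size_t)-1 if unknown */

		if (sz != (unsigned long)-1 && sz < n) {
			demo_copy_overflow();
			return n;		/* nothing copied */
		}
		memcpy(to, from, n);		/* stands in for the real uaccess copy routine */
		return 0;
	}

[End of aside; the sparc uaccess_32.h/uaccess_64.h hunks later in this diff apply the same pattern with __compiletime_object_size().]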
+ endmenu diff --git a/arch/sparc/include/asm/spinlock_32.h b/arch/sparc/include/asm/spinlock_32.h index 857630cff63..7f9b9dba38a 100644 --- a/arch/sparc/include/asm/spinlock_32.h +++ b/arch/sparc/include/asm/spinlock_32.h @@ -10,12 +10,12 @@ #include <asm/psr.h> -#define __raw_spin_is_locked(lock) (*((volatile unsigned char *)(lock)) != 0) +#define arch_spin_is_locked(lock) (*((volatile unsigned char *)(lock)) != 0) -#define __raw_spin_unlock_wait(lock) \ - do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0) +#define arch_spin_unlock_wait(lock) \ + do { while (arch_spin_is_locked(lock)) cpu_relax(); } while (0) -static inline void __raw_spin_lock(raw_spinlock_t *lock) +static inline void arch_spin_lock(arch_spinlock_t *lock) { __asm__ __volatile__( "\n1:\n\t" @@ -35,7 +35,7 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock) : "g2", "memory", "cc"); } -static inline int __raw_spin_trylock(raw_spinlock_t *lock) +static inline int arch_spin_trylock(arch_spinlock_t *lock) { unsigned int result; __asm__ __volatile__("ldstub [%1], %0" @@ -45,7 +45,7 @@ static inline int __raw_spin_trylock(raw_spinlock_t *lock) return (result == 0); } -static inline void __raw_spin_unlock(raw_spinlock_t *lock) +static inline void arch_spin_unlock(arch_spinlock_t *lock) { __asm__ __volatile__("stb %%g0, [%0]" : : "r" (lock) : "memory"); } @@ -65,7 +65,7 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lock) * Sort of like atomic_t's on Sparc, but even more clever. * * ------------------------------------ - * | 24-bit counter | wlock | raw_rwlock_t + * | 24-bit counter | wlock | arch_rwlock_t * ------------------------------------ * 31 8 7 0 * @@ -76,9 +76,9 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lock) * * Unfortunately this scheme limits us to ~16,000,000 cpus. 
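[Illustrative aside, not part of the patch: reading the layout comment above, the sparc32 rwlock word keeps the write-lock flag in the low byte and the reader count in the upper 24 bits, which is what arch_read_can_lock()/arch_write_can_lock() below test. A small sketch of that interpretation; the demo_* names are invented and the field placement is taken purely from the comment.]

	#define DEMO_WLOCK_MASK		0x000000ffU	/* bits 7..0: write-lock byte */
	#define DEMO_READER_SHIFT	8		/* bits 31..8: reader count */

	/* A reader may take the lock as long as no writer holds it. */
	static inline int demo_read_can_lock(unsigned int word)
	{
		return (word & DEMO_WLOCK_MASK) == 0;
	}

	/* A writer needs the whole word clear: no writer and no readers. */
	static inline int demo_write_can_lock(unsigned int word)
	{
		return word == 0;
	}

	static inline unsigned int demo_reader_count(unsigned int word)
	{
		return word >> DEMO_READER_SHIFT;
	}

[End of aside.]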
*/ -static inline void arch_read_lock(raw_rwlock_t *rw) +static inline void __arch_read_lock(arch_rwlock_t *rw) { - register raw_rwlock_t *lp asm("g1"); + register arch_rwlock_t *lp asm("g1"); lp = rw; __asm__ __volatile__( "mov %%o7, %%g4\n\t" @@ -89,16 +89,16 @@ static inline void arch_read_lock(raw_rwlock_t *rw) : "g2", "g4", "memory", "cc"); } -#define __raw_read_lock(lock) \ +#define arch_read_lock(lock) \ do { unsigned long flags; \ local_irq_save(flags); \ - arch_read_lock(lock); \ + __arch_read_lock(lock); \ local_irq_restore(flags); \ } while(0) -static inline void arch_read_unlock(raw_rwlock_t *rw) +static inline void __arch_read_unlock(arch_rwlock_t *rw) { - register raw_rwlock_t *lp asm("g1"); + register arch_rwlock_t *lp asm("g1"); lp = rw; __asm__ __volatile__( "mov %%o7, %%g4\n\t" @@ -109,16 +109,16 @@ static inline void arch_read_unlock(raw_rwlock_t *rw) : "g2", "g4", "memory", "cc"); } -#define __raw_read_unlock(lock) \ +#define arch_read_unlock(lock) \ do { unsigned long flags; \ local_irq_save(flags); \ - arch_read_unlock(lock); \ + __arch_read_unlock(lock); \ local_irq_restore(flags); \ } while(0) -static inline void __raw_write_lock(raw_rwlock_t *rw) +static inline void arch_write_lock(arch_rwlock_t *rw) { - register raw_rwlock_t *lp asm("g1"); + register arch_rwlock_t *lp asm("g1"); lp = rw; __asm__ __volatile__( "mov %%o7, %%g4\n\t" @@ -130,7 +130,7 @@ static inline void __raw_write_lock(raw_rwlock_t *rw) *(volatile __u32 *)&lp->lock = ~0U; } -static inline int __raw_write_trylock(raw_rwlock_t *rw) +static inline int arch_write_trylock(arch_rwlock_t *rw) { unsigned int val; @@ -150,9 +150,9 @@ static inline int __raw_write_trylock(raw_rwlock_t *rw) return (val == 0); } -static inline int arch_read_trylock(raw_rwlock_t *rw) +static inline int __arch_read_trylock(arch_rwlock_t *rw) { - register raw_rwlock_t *lp asm("g1"); + register arch_rwlock_t *lp asm("g1"); register int res asm("o0"); lp = rw; __asm__ __volatile__( @@ -165,27 +165,27 @@ static inline int arch_read_trylock(raw_rwlock_t *rw) return res; } -#define __raw_read_trylock(lock) \ +#define arch_read_trylock(lock) \ ({ unsigned long flags; \ int res; \ local_irq_save(flags); \ - res = arch_read_trylock(lock); \ + res = __arch_read_trylock(lock); \ local_irq_restore(flags); \ res; \ }) -#define __raw_write_unlock(rw) do { (rw)->lock = 0; } while(0) +#define arch_write_unlock(rw) do { (rw)->lock = 0; } while(0) -#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock) -#define __raw_read_lock_flags(rw, flags) __raw_read_lock(rw) -#define __raw_write_lock_flags(rw, flags) __raw_write_lock(rw) +#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock) +#define arch_read_lock_flags(rw, flags) arch_read_lock(rw) +#define arch_write_lock_flags(rw, flags) arch_write_lock(rw) -#define _raw_spin_relax(lock) cpu_relax() -#define _raw_read_relax(lock) cpu_relax() -#define _raw_write_relax(lock) cpu_relax() +#define arch_spin_relax(lock) cpu_relax() +#define arch_read_relax(lock) cpu_relax() +#define arch_write_relax(lock) cpu_relax() -#define __raw_read_can_lock(rw) (!((rw)->lock & 0xff)) -#define __raw_write_can_lock(rw) (!(rw)->lock) +#define arch_read_can_lock(rw) (!((rw)->lock & 0xff)) +#define arch_write_can_lock(rw) (!(rw)->lock) #endif /* !(__ASSEMBLY__) */ diff --git a/arch/sparc/include/asm/spinlock_64.h b/arch/sparc/include/asm/spinlock_64.h index 43e51478358..073936a8b27 100644 --- a/arch/sparc/include/asm/spinlock_64.h +++ b/arch/sparc/include/asm/spinlock_64.h @@ -21,13 +21,13 @@ * the spinner 
sections must be pre-V9 branches. */ -#define __raw_spin_is_locked(lp) ((lp)->lock != 0) +#define arch_spin_is_locked(lp) ((lp)->lock != 0) -#define __raw_spin_unlock_wait(lp) \ +#define arch_spin_unlock_wait(lp) \ do { rmb(); \ } while((lp)->lock) -static inline void __raw_spin_lock(raw_spinlock_t *lock) +static inline void arch_spin_lock(arch_spinlock_t *lock) { unsigned long tmp; @@ -46,7 +46,7 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock) : "memory"); } -static inline int __raw_spin_trylock(raw_spinlock_t *lock) +static inline int arch_spin_trylock(arch_spinlock_t *lock) { unsigned long result; @@ -59,7 +59,7 @@ static inline int __raw_spin_trylock(raw_spinlock_t *lock) return (result == 0UL); } -static inline void __raw_spin_unlock(raw_spinlock_t *lock) +static inline void arch_spin_unlock(arch_spinlock_t *lock) { __asm__ __volatile__( " stb %%g0, [%0]" @@ -68,7 +68,7 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lock) : "memory"); } -static inline void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags) +static inline void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags) { unsigned long tmp1, tmp2; @@ -92,7 +92,7 @@ static inline void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long fla /* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */ -static void inline arch_read_lock(raw_rwlock_t *lock) +static void inline arch_read_lock(arch_rwlock_t *lock) { unsigned long tmp1, tmp2; @@ -115,7 +115,7 @@ static void inline arch_read_lock(raw_rwlock_t *lock) : "memory"); } -static int inline arch_read_trylock(raw_rwlock_t *lock) +static int inline arch_read_trylock(arch_rwlock_t *lock) { int tmp1, tmp2; @@ -136,7 +136,7 @@ static int inline arch_read_trylock(raw_rwlock_t *lock) return tmp1; } -static void inline arch_read_unlock(raw_rwlock_t *lock) +static void inline arch_read_unlock(arch_rwlock_t *lock) { unsigned long tmp1, tmp2; @@ -152,7 +152,7 @@ static void inline arch_read_unlock(raw_rwlock_t *lock) : "memory"); } -static void inline arch_write_lock(raw_rwlock_t *lock) +static void inline arch_write_lock(arch_rwlock_t *lock) { unsigned long mask, tmp1, tmp2; @@ -177,7 +177,7 @@ static void inline arch_write_lock(raw_rwlock_t *lock) : "memory"); } -static void inline arch_write_unlock(raw_rwlock_t *lock) +static void inline arch_write_unlock(arch_rwlock_t *lock) { __asm__ __volatile__( " stw %%g0, [%0]" @@ -186,7 +186,7 @@ static void inline arch_write_unlock(raw_rwlock_t *lock) : "memory"); } -static int inline arch_write_trylock(raw_rwlock_t *lock) +static int inline arch_write_trylock(arch_rwlock_t *lock) { unsigned long mask, tmp1, tmp2, result; @@ -210,21 +210,21 @@ static int inline arch_write_trylock(raw_rwlock_t *lock) return result; } -#define __raw_read_lock(p) arch_read_lock(p) -#define __raw_read_lock_flags(p, f) arch_read_lock(p) -#define __raw_read_trylock(p) arch_read_trylock(p) -#define __raw_read_unlock(p) arch_read_unlock(p) -#define __raw_write_lock(p) arch_write_lock(p) -#define __raw_write_lock_flags(p, f) arch_write_lock(p) -#define __raw_write_unlock(p) arch_write_unlock(p) -#define __raw_write_trylock(p) arch_write_trylock(p) - -#define __raw_read_can_lock(rw) (!((rw)->lock & 0x80000000UL)) -#define __raw_write_can_lock(rw) (!(rw)->lock) - -#define _raw_spin_relax(lock) cpu_relax() -#define _raw_read_relax(lock) cpu_relax() -#define _raw_write_relax(lock) cpu_relax() +#define arch_read_lock(p) arch_read_lock(p) +#define arch_read_lock_flags(p, f) arch_read_lock(p) +#define 
arch_read_trylock(p) arch_read_trylock(p) +#define arch_read_unlock(p) arch_read_unlock(p) +#define arch_write_lock(p) arch_write_lock(p) +#define arch_write_lock_flags(p, f) arch_write_lock(p) +#define arch_write_unlock(p) arch_write_unlock(p) +#define arch_write_trylock(p) arch_write_trylock(p) + +#define arch_read_can_lock(rw) (!((rw)->lock & 0x80000000UL)) +#define arch_write_can_lock(rw) (!(rw)->lock) + +#define arch_spin_relax(lock) cpu_relax() +#define arch_read_relax(lock) cpu_relax() +#define arch_write_relax(lock) cpu_relax() #endif /* !(__ASSEMBLY__) */ diff --git a/arch/sparc/include/asm/spinlock_types.h b/arch/sparc/include/asm/spinlock_types.h index 37cbe01c585..9c454fdeaad 100644 --- a/arch/sparc/include/asm/spinlock_types.h +++ b/arch/sparc/include/asm/spinlock_types.h @@ -7,14 +7,14 @@ typedef struct { volatile unsigned char lock; -} raw_spinlock_t; +} arch_spinlock_t; -#define __RAW_SPIN_LOCK_UNLOCKED { 0 } +#define __ARCH_SPIN_LOCK_UNLOCKED { 0 } typedef struct { volatile unsigned int lock; -} raw_rwlock_t; +} arch_rwlock_t; -#define __RAW_RW_LOCK_UNLOCKED { 0 } +#define __ARCH_RW_LOCK_UNLOCKED { 0 } #endif diff --git a/arch/sparc/include/asm/string_32.h b/arch/sparc/include/asm/string_32.h index 6c5fddb7e6b..edf196ee4ef 100644 --- a/arch/sparc/include/asm/string_32.h +++ b/arch/sparc/include/asm/string_32.h @@ -16,8 +16,6 @@ #ifdef __KERNEL__ extern void __memmove(void *,const void *,__kernel_size_t); -extern __kernel_size_t __memcpy(void *,const void *,__kernel_size_t); -extern __kernel_size_t __memset(void *,int,__kernel_size_t); #ifndef EXPORT_SYMTAB_STROPS @@ -32,82 +30,10 @@ extern __kernel_size_t __memset(void *,int,__kernel_size_t); }) #define __HAVE_ARCH_MEMCPY - -static inline void *__constant_memcpy(void *to, const void *from, __kernel_size_t n) -{ - extern void __copy_1page(void *, const void *); - - if(n <= 32) { - __builtin_memcpy(to, from, n); - } else if (((unsigned int) to & 7) != 0) { - /* Destination is not aligned on the double-word boundary */ - __memcpy(to, from, n); - } else { - switch(n) { - case PAGE_SIZE: - __copy_1page(to, from); - break; - default: - __memcpy(to, from, n); - break; - } - } - return to; -} - -static inline void *__nonconstant_memcpy(void *to, const void *from, __kernel_size_t n) -{ - __memcpy(to, from, n); - return to; -} - -#undef memcpy -#define memcpy(t, f, n) \ -(__builtin_constant_p(n) ? \ - __constant_memcpy((t),(f),(n)) : \ - __nonconstant_memcpy((t),(f),(n))) +#define memcpy(t, f, n) __builtin_memcpy(t, f, n) #define __HAVE_ARCH_MEMSET - -static inline void *__constant_c_and_count_memset(void *s, char c, __kernel_size_t count) -{ - extern void bzero_1page(void *); - extern __kernel_size_t __bzero(void *, __kernel_size_t); - - if(!c) { - if(count == PAGE_SIZE) - bzero_1page(s); - else - __bzero(s, count); - } else { - __memset(s, c, count); - } - return s; -} - -static inline void *__constant_c_memset(void *s, char c, __kernel_size_t count) -{ - extern __kernel_size_t __bzero(void *, __kernel_size_t); - - if(!c) - __bzero(s, count); - else - __memset(s, c, count); - return s; -} - -static inline void *__nonconstant_memset(void *s, char c, __kernel_size_t count) -{ - __memset(s, c, count); - return s; -} - -#undef memset -#define memset(s, c, count) \ -(__builtin_constant_p(c) ? (__builtin_constant_p(count) ? 
\ - __constant_c_and_count_memset((s), (c), (count)) : \ - __constant_c_memset((s), (c), (count))) \ - : __nonconstant_memset((s), (c), (count))) +#define memset(s, c, count) __builtin_memset(s, c, count) #define __HAVE_ARCH_MEMSCAN diff --git a/arch/sparc/include/asm/string_64.h b/arch/sparc/include/asm/string_64.h index 43161f2d17e..9623bc21315 100644 --- a/arch/sparc/include/asm/string_64.h +++ b/arch/sparc/include/asm/string_64.h @@ -15,8 +15,6 @@ #include <asm/asi.h> -extern void *__memset(void *,int,__kernel_size_t); - #ifndef EXPORT_SYMTAB_STROPS /* First the mem*() things. */ @@ -24,29 +22,10 @@ extern void *__memset(void *,int,__kernel_size_t); extern void *memmove(void *, const void *, __kernel_size_t); #define __HAVE_ARCH_MEMCPY -extern void *memcpy(void *, const void *, __kernel_size_t); +#define memcpy(t, f, n) __builtin_memcpy(t, f, n) #define __HAVE_ARCH_MEMSET -extern void *__builtin_memset(void *,int,__kernel_size_t); - -static inline void *__constant_memset(void *s, int c, __kernel_size_t count) -{ - extern __kernel_size_t __bzero(void *, __kernel_size_t); - - if (!c) { - __bzero(s, count); - return s; - } else - return __memset(s, c, count); -} - -#undef memset -#define memset(s, c, count) \ -((__builtin_constant_p(count) && (count) <= 32) ? \ - __builtin_memset((s), (c), (count)) : \ - (__builtin_constant_p(c) ? \ - __constant_memset((s), (c), (count)) : \ - __memset((s), (c), (count)))) +#define memset(s, c, count) __builtin_memset(s, c, count) #define __HAVE_ARCH_MEMSCAN diff --git a/arch/sparc/include/asm/thread_info_64.h b/arch/sparc/include/asm/thread_info_64.h index 1b45a7bbe40..7257ebb8f39 100644 --- a/arch/sparc/include/asm/thread_info_64.h +++ b/arch/sparc/include/asm/thread_info_64.h @@ -227,6 +227,7 @@ register struct thread_info *current_thread_info_reg asm("g6"); /* flag bit 8 is available */ #define TIF_SECCOMP 9 /* secure computing */ #define TIF_SYSCALL_AUDIT 10 /* syscall auditing active */ +#define TIF_SYSCALL_TRACEPOINT 11 /* syscall tracepoint instrumentation */ /* flag bit 11 is available */ /* NOTE: Thread flags >= 12 should be ones we have no interest * in using in assembly, else we can't use the mask as @@ -246,6 +247,7 @@ register struct thread_info *current_thread_info_reg asm("g6"); #define _TIF_32BIT (1<<TIF_32BIT) #define _TIF_SECCOMP (1<<TIF_SECCOMP) #define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT) +#define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT) #define _TIF_ABI_PENDING (1<<TIF_ABI_PENDING) #define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG) #define _TIF_FREEZE (1<<TIF_FREEZE) diff --git a/arch/sparc/include/asm/uaccess_32.h b/arch/sparc/include/asm/uaccess_32.h index 8303ac48103..489d2ba92bc 100644 --- a/arch/sparc/include/asm/uaccess_32.h +++ b/arch/sparc/include/asm/uaccess_32.h @@ -260,8 +260,23 @@ static inline unsigned long __copy_to_user(void __user *to, const void *from, un return __copy_user(to, (__force void __user *) from, n); } +extern void copy_from_user_overflow(void) +#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS + __compiletime_error("copy_from_user() buffer size is not provably correct") +#else + __compiletime_warning("copy_from_user() buffer size is not provably correct") +#endif +; + static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n) { + int sz = __compiletime_object_size(to); + + if (unlikely(sz != -1 && sz < n)) { + copy_from_user_overflow(); + return -EFAULT; + } + if (n && __access_ok((unsigned long) from, n)) return __copy_user((__force void __user *) to, from, 
n); else diff --git a/arch/sparc/include/asm/uaccess_64.h b/arch/sparc/include/asm/uaccess_64.h index 9ea271e19c7..dbc14166099 100644 --- a/arch/sparc/include/asm/uaccess_64.h +++ b/arch/sparc/include/asm/uaccess_64.h @@ -6,6 +6,7 @@ */ #ifdef __KERNEL__ +#include <linux/errno.h> #include <linux/compiler.h> #include <linux/string.h> #include <linux/thread_info.h> @@ -204,6 +205,14 @@ __asm__ __volatile__( \ extern int __get_user_bad(void); +extern void copy_from_user_overflow(void) +#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS + __compiletime_error("copy_from_user() buffer size is not provably correct") +#else + __compiletime_warning("copy_from_user() buffer size is not provably correct") +#endif +; + extern unsigned long __must_check ___copy_from_user(void *to, const void __user *from, unsigned long size); @@ -212,10 +221,16 @@ extern unsigned long copy_from_user_fixup(void *to, const void __user *from, static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long size) { - unsigned long ret = ___copy_from_user(to, from, size); - - if (unlikely(ret)) - ret = copy_from_user_fixup(to, from, size); + unsigned long ret = (unsigned long) -EFAULT; + int sz = __compiletime_object_size(to); + + if (likely(sz == -1 || sz >= size)) { + ret = ___copy_from_user(to, from, size); + if (unlikely(ret)) + ret = copy_from_user_fixup(to, from, size); + } else { + copy_from_user_overflow(); + } return ret; } #define __copy_from_user copy_from_user diff --git a/arch/sparc/include/asm/unistd.h b/arch/sparc/include/asm/unistd.h index d8d25bd9712..cb4b9bfd0d8 100644 --- a/arch/sparc/include/asm/unistd.h +++ b/arch/sparc/include/asm/unistd.h @@ -398,7 +398,7 @@ #define __NR_perf_event_open 327 #define __NR_recvmmsg 328 -#define NR_SYSCALLS 329 +#define NR_syscalls 329 #ifdef __32bit_syscall_numbers__ /* Sparc 32-bit only has the "setresuid32", "getresuid32" variants, diff --git a/arch/sparc/kernel/entry.S b/arch/sparc/kernel/entry.S index ec9c7bc67d2..1504df8ddf7 100644 --- a/arch/sparc/kernel/entry.S +++ b/arch/sparc/kernel/entry.S @@ -1294,7 +1294,7 @@ linux_sparc_syscall: sethi %hi(PSR_SYSCALL), %l4 or %l0, %l4, %l0 /* Direct access to user regs, must faster. 
*/ - cmp %g1, NR_SYSCALLS + cmp %g1, NR_syscalls bgeu linux_sparc_ni_syscall sll %g1, 2, %l4 ld [%l7 + %l4], %l7 diff --git a/arch/sparc/kernel/ftrace.c b/arch/sparc/kernel/ftrace.c index d3b1a307656..29973daa993 100644 --- a/arch/sparc/kernel/ftrace.c +++ b/arch/sparc/kernel/ftrace.c @@ -4,6 +4,7 @@ #include <linux/percpu.h> #include <linux/init.h> #include <linux/list.h> +#include <trace/syscall.h> #include <asm/ftrace.h> @@ -91,3 +92,13 @@ int __init ftrace_dyn_arch_init(void *data) } #endif +#ifdef CONFIG_FTRACE_SYSCALLS + +extern unsigned int sys_call_table[]; + +unsigned long __init arch_syscall_addr(int nr) +{ + return (unsigned long)sys_call_table[nr]; +} + +#endif diff --git a/arch/sparc/kernel/irq_64.c b/arch/sparc/kernel/irq_64.c index ce996f97855..8d6882bb480 100644 --- a/arch/sparc/kernel/irq_64.c +++ b/arch/sparc/kernel/irq_64.c @@ -176,7 +176,7 @@ int show_interrupts(struct seq_file *p, void *v) } if (i < NR_IRQS) { - spin_lock_irqsave(&irq_desc[i].lock, flags); + raw_spin_lock_irqsave(&irq_desc[i].lock, flags); action = irq_desc[i].action; if (!action) goto skip; @@ -195,7 +195,7 @@ int show_interrupts(struct seq_file *p, void *v) seq_putc(p, '\n'); skip: - spin_unlock_irqrestore(&irq_desc[i].lock, flags); + raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags); } else if (i == NR_IRQS) { seq_printf(p, "NMI: "); for_each_online_cpu(j) @@ -785,14 +785,14 @@ void fixup_irqs(void) for (irq = 0; irq < NR_IRQS; irq++) { unsigned long flags; - spin_lock_irqsave(&irq_desc[irq].lock, flags); + raw_spin_lock_irqsave(&irq_desc[irq].lock, flags); if (irq_desc[irq].action && !(irq_desc[irq].status & IRQ_PER_CPU)) { if (irq_desc[irq].chip->set_affinity) irq_desc[irq].chip->set_affinity(irq, irq_desc[irq].affinity); } - spin_unlock_irqrestore(&irq_desc[irq].lock, flags); + raw_spin_unlock_irqrestore(&irq_desc[irq].lock, flags); } tick_ops->disable_irq(); diff --git a/arch/sparc/kernel/kprobes.c b/arch/sparc/kernel/kprobes.c index 3bc6527c95a..6716584e48a 100644 --- a/arch/sparc/kernel/kprobes.c +++ b/arch/sparc/kernel/kprobes.c @@ -46,6 +46,9 @@ struct kretprobe_blackpoint kretprobe_blacklist[] = {{NULL, NULL}}; int __kprobes arch_prepare_kprobe(struct kprobe *p) { + if ((unsigned long) p->addr & 0x3UL) + return -EILSEQ; + p->ainsn.insn[0] = *p->addr; flushi(&p->ainsn.insn[0]); diff --git a/arch/sparc/kernel/ldc.c b/arch/sparc/kernel/ldc.c index cb3c72c45aa..e0ba898e30c 100644 --- a/arch/sparc/kernel/ldc.c +++ b/arch/sparc/kernel/ldc.c @@ -1242,13 +1242,13 @@ int ldc_bind(struct ldc_channel *lp, const char *name) snprintf(lp->tx_irq_name, LDC_IRQ_NAME_MAX, "%s TX", name); err = request_irq(lp->cfg.rx_irq, ldc_rx, - IRQF_SAMPLE_RANDOM | IRQF_DISABLED | IRQF_SHARED, + IRQF_SAMPLE_RANDOM | IRQF_DISABLED, lp->rx_irq_name, lp); if (err) return err; err = request_irq(lp->cfg.tx_irq, ldc_tx, - IRQF_SAMPLE_RANDOM | IRQF_DISABLED | IRQF_SHARED, + IRQF_SAMPLE_RANDOM | IRQF_DISABLED, lp->tx_irq_name, lp); if (err) { free_irq(lp->cfg.rx_irq, lp); diff --git a/arch/sparc/kernel/mdesc.c b/arch/sparc/kernel/mdesc.c index 938da19dc06..cdc91d919e9 100644 --- a/arch/sparc/kernel/mdesc.c +++ b/arch/sparc/kernel/mdesc.c @@ -10,6 +10,7 @@ #include <linux/slab.h> #include <linux/mm.h> #include <linux/miscdevice.h> +#include <linux/bootmem.h> #include <asm/cpudata.h> #include <asm/hypervisor.h> @@ -108,25 +109,15 @@ static struct mdesc_handle * __init mdesc_lmb_alloc(unsigned int mdesc_size) static void mdesc_lmb_free(struct mdesc_handle *hp) { - unsigned int alloc_size, handle_size = hp->handle_size; - 
unsigned long start, end; + unsigned int alloc_size; + unsigned long start; BUG_ON(atomic_read(&hp->refcnt) != 0); BUG_ON(!list_empty(&hp->list)); - alloc_size = PAGE_ALIGN(handle_size); - - start = (unsigned long) hp; - end = start + alloc_size; - - while (start < end) { - struct page *p; - - p = virt_to_page(start); - ClearPageReserved(p); - __free_page(p); - start += PAGE_SIZE; - } + alloc_size = PAGE_ALIGN(hp->handle_size); + start = __pa(hp); + free_bootmem_late(start, alloc_size); } static struct mdesc_mem_ops lmb_mdesc_ops = { diff --git a/arch/sparc/kernel/of_device_64.c b/arch/sparc/kernel/of_device_64.c index 881947e59e9..0a6f2d1798d 100644 --- a/arch/sparc/kernel/of_device_64.c +++ b/arch/sparc/kernel/of_device_64.c @@ -104,9 +104,19 @@ static int of_bus_pci_map(u32 *addr, const u32 *range, int i; /* Check address type match */ - if ((addr[0] ^ range[0]) & 0x03000000) - return -EINVAL; + if (!((addr[0] ^ range[0]) & 0x03000000)) + goto type_match; + + /* Special exception, we can map a 64-bit address into + * a 32-bit range. + */ + if ((addr[0] & 0x03000000) == 0x03000000 && + (range[0] & 0x03000000) == 0x02000000) + goto type_match; + + return -EINVAL; +type_match: if (of_out_of_range(addr + 1, range + 1, range + na + pna, na - 1, ns)) return -EINVAL; diff --git a/arch/sparc/kernel/ptrace_64.c b/arch/sparc/kernel/ptrace_64.c index 4ae91dc2feb..2f6524d1a81 100644 --- a/arch/sparc/kernel/ptrace_64.c +++ b/arch/sparc/kernel/ptrace_64.c @@ -23,6 +23,7 @@ #include <linux/signal.h> #include <linux/regset.h> #include <linux/tracehook.h> +#include <trace/syscall.h> #include <linux/compat.h> #include <linux/elf.h> @@ -37,6 +38,9 @@ #include <asm/cpudata.h> #include <asm/cacheflush.h> +#define CREATE_TRACE_POINTS +#include <trace/events/syscalls.h> + #include "entry.h" /* #define ALLOW_INIT_TRACING */ @@ -1059,6 +1063,9 @@ asmlinkage int syscall_trace_enter(struct pt_regs *regs) if (test_thread_flag(TIF_SYSCALL_TRACE)) ret = tracehook_report_syscall_entry(regs); + if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT))) + trace_sys_enter(regs, regs->u_regs[UREG_G1]); + if (unlikely(current->audit_context) && !ret) audit_syscall_entry((test_thread_flag(TIF_32BIT) ? AUDIT_ARCH_SPARC : @@ -1084,6 +1091,9 @@ asmlinkage void syscall_trace_leave(struct pt_regs *regs) audit_syscall_exit(result, regs->u_regs[UREG_I0]); } + if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT))) + trace_sys_exit(regs, regs->u_regs[UREG_G1]); + if (test_thread_flag(TIF_SYSCALL_TRACE)) tracehook_report_syscall_exit(regs, 0); } diff --git a/arch/sparc/kernel/syscalls.S b/arch/sparc/kernel/syscalls.S index d150c2aa98d..dc4a458f74d 100644 --- a/arch/sparc/kernel/syscalls.S +++ b/arch/sparc/kernel/syscalls.S @@ -62,7 +62,7 @@ sys32_rt_sigreturn: #endif .align 32 1: ldx [%g6 + TI_FLAGS], %l5 - andcc %l5, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT), %g0 + andcc %l5, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %g0 be,pt %icc, rtrap nop call syscall_trace_leave @@ -187,7 +187,7 @@ linux_syscall_trace: .globl linux_sparc_syscall32 linux_sparc_syscall32: /* Direct access to user regs, much faster. */ - cmp %g1, NR_SYSCALLS ! IEU1 Group + cmp %g1, NR_syscalls ! IEU1 Group bgeu,pn %xcc, linux_sparc_ni_syscall ! CTI srl %i0, 0, %o0 ! IEU0 sll %g1, 2, %l4 ! IEU0 Group @@ -198,7 +198,7 @@ linux_sparc_syscall32: srl %i5, 0, %o5 ! IEU1 srl %i2, 0, %o2 ! 
IEU0 Group - andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT), %g0 + andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %g0 bne,pn %icc, linux_syscall_trace32 ! CTI mov %i0, %l5 ! IEU1 call %l7 ! CTI Group brk forced @@ -210,7 +210,7 @@ linux_sparc_syscall32: .globl linux_sparc_syscall linux_sparc_syscall: /* Direct access to user regs, much faster. */ - cmp %g1, NR_SYSCALLS ! IEU1 Group + cmp %g1, NR_syscalls ! IEU1 Group bgeu,pn %xcc, linux_sparc_ni_syscall ! CTI mov %i0, %o0 ! IEU0 sll %g1, 2, %l4 ! IEU0 Group @@ -221,7 +221,7 @@ linux_sparc_syscall: mov %i3, %o3 ! IEU1 mov %i4, %o4 ! IEU0 Group - andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT), %g0 + andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %g0 bne,pn %icc, linux_syscall_trace ! CTI Group mov %i0, %l5 ! IEU0 2: call %l7 ! CTI Group brk forced @@ -245,7 +245,7 @@ ret_sys_call: cmp %o0, -ERESTART_RESTARTBLOCK bgeu,pn %xcc, 1f - andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT), %l6 + andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %l6 80: /* System call success, clear Carry condition code. */ andn %g3, %g2, %g3 @@ -260,7 +260,7 @@ ret_sys_call: /* System call failure, set Carry condition code. * Also, get abs(errno) to return to the process. */ - andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT), %l6 + andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %l6 sub %g0, %o0, %o0 or %g3, %g2, %g3 stx %o0, [%sp + PTREGS_OFF + PT_V9_I0] diff --git a/arch/sparc/kernel/time_64.c b/arch/sparc/kernel/time_64.c index 63f73ae8a89..67e16510288 100644 --- a/arch/sparc/kernel/time_64.c +++ b/arch/sparc/kernel/time_64.c @@ -774,26 +774,9 @@ void __devinit setup_sparc64_timer(void) static struct clocksource clocksource_tick = { .rating = 100, .mask = CLOCKSOURCE_MASK(64), - .shift = 16, .flags = CLOCK_SOURCE_IS_CONTINUOUS, }; -static void __init setup_clockevent_multiplier(unsigned long hz) -{ - unsigned long mult, shift = 32; - - while (1) { - mult = div_sc(hz, NSEC_PER_SEC, shift); - if (mult && (mult >> 32UL) == 0UL) - break; - - shift--; - } - - sparc64_clockevent.shift = shift; - sparc64_clockevent.mult = mult; -} - static unsigned long tb_ticks_per_usec __read_mostly; void __delay(unsigned long loops) @@ -828,9 +811,7 @@ void __init time_init(void) clocksource_hz2mult(freq, SPARC64_NSEC_PER_CYC_SHIFT); clocksource_tick.name = tick_ops->name; - clocksource_tick.mult = - clocksource_hz2mult(freq, - clocksource_tick.shift); + clocksource_calc_mult_shift(&clocksource_tick, freq, 4); clocksource_tick.read = clocksource_tick_read; printk("clocksource: mult[%x] shift[%d]\n", @@ -839,15 +820,14 @@ void __init time_init(void) clocksource_register(&clocksource_tick); sparc64_clockevent.name = tick_ops->name; - - setup_clockevent_multiplier(freq); + clockevents_calc_mult_shift(&sparc64_clockevent, freq, 4); sparc64_clockevent.max_delta_ns = clockevent_delta2ns(0x7fffffffffffffffUL, &sparc64_clockevent); sparc64_clockevent.min_delta_ns = clockevent_delta2ns(0xF, &sparc64_clockevent); - printk("clockevent: mult[%ux] shift[%d]\n", + printk("clockevent: mult[%x] shift[%d]\n", sparc64_clockevent.mult, sparc64_clockevent.shift); setup_sparc64_timer(); diff --git a/arch/sparc/kernel/unaligned_32.c b/arch/sparc/kernel/unaligned_32.c index 6b1e6cde6ff..f8514e291e1 100644 --- a/arch/sparc/kernel/unaligned_32.c +++ b/arch/sparc/kernel/unaligned_32.c @@ 
-17,8 +17,7 @@ #include <asm/uaccess.h> #include <linux/smp.h> #include <linux/smp_lock.h> - -/* #define DEBUG_MNA */ +#include <linux/perf_event.h> enum direction { load, /* ld, ldd, ldh, ldsh */ @@ -29,12 +28,6 @@ enum direction { invalid, }; -#ifdef DEBUG_MNA -static char *dirstrings[] = { - "load", "store", "both", "fpload", "fpstore", "invalid" -}; -#endif - static inline enum direction decode_direction(unsigned int insn) { unsigned long tmp = (insn >> 21) & 1; @@ -255,10 +248,7 @@ asmlinkage void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn) unsigned long addr = compute_effective_address(regs, insn); int err; -#ifdef DEBUG_MNA - printk("KMNA: pc=%08lx [dir=%s addr=%08lx size=%d] retpc[%08lx]\n", - regs->pc, dirstrings[dir], addr, size, regs->u_regs[UREG_RETPC]); -#endif + perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, 0, regs, addr); switch (dir) { case load: err = do_int_load(fetch_reg_addr(((insn>>25)&0x1f), @@ -350,6 +340,7 @@ asmlinkage void user_unaligned_trap(struct pt_regs *regs, unsigned int insn) } addr = compute_effective_address(regs, insn); + perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, 0, regs, addr); switch(dir) { case load: err = do_int_load(fetch_reg_addr(((insn>>25)&0x1f), diff --git a/arch/sparc/kernel/unaligned_64.c b/arch/sparc/kernel/unaligned_64.c index 379209982a0..378ca82b9cc 100644 --- a/arch/sparc/kernel/unaligned_64.c +++ b/arch/sparc/kernel/unaligned_64.c @@ -20,10 +20,9 @@ #include <asm/uaccess.h> #include <linux/smp.h> #include <linux/bitops.h> +#include <linux/perf_event.h> #include <asm/fpumacro.h> -/* #define DEBUG_MNA */ - enum direction { load, /* ld, ldd, ldh, ldsh */ store, /* st, std, sth, stsh */ @@ -33,12 +32,6 @@ enum direction { invalid, }; -#ifdef DEBUG_MNA -static char *dirstrings[] = { - "load", "store", "both", "fpload", "fpstore", "invalid" -}; -#endif - static inline enum direction decode_direction(unsigned int insn) { unsigned long tmp = (insn >> 21) & 1; @@ -327,12 +320,7 @@ asmlinkage void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn) addr = compute_effective_address(regs, insn, ((insn >> 25) & 0x1f)); -#ifdef DEBUG_MNA - printk("KMNA: pc=%016lx [dir=%s addr=%016lx size=%d] " - "retpc[%016lx]\n", - regs->tpc, dirstrings[dir], addr, size, - regs->u_regs[UREG_RETPC]); -#endif + perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, 0, regs, addr); switch (asi) { case ASI_NL: case ASI_AIUPL: @@ -399,6 +387,7 @@ int handle_popc(u32 insn, struct pt_regs *regs) int ret, i, rd = ((insn >> 25) & 0x1f); int from_kernel = (regs->tstate & TSTATE_PRIV) != 0; + perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, 0, regs, 0); if (insn & 0x2000) { maybe_flush_windows(0, 0, rd, from_kernel); value = sign_extend_imm13(insn); @@ -445,6 +434,8 @@ int handle_ldf_stq(u32 insn, struct pt_regs *regs) int asi = decode_asi(insn, regs); int flag = (freg < 32) ? 
FPRS_DL : FPRS_DU; + perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, 0, regs, 0); + save_and_clear_fpu(); current_thread_info()->xfsr[0] &= ~0x1c000; if (freg & 3) { @@ -566,6 +557,8 @@ void handle_ld_nf(u32 insn, struct pt_regs *regs) int from_kernel = (regs->tstate & TSTATE_PRIV) != 0; unsigned long *reg; + perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, 0, regs, 0); + maybe_flush_windows(0, 0, rd, from_kernel); reg = fetch_reg_addr(rd, regs); if (from_kernel || rd < 16) { @@ -596,6 +589,7 @@ void handle_lddfmna(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr if (tstate & TSTATE_PRIV) die_if_kernel("lddfmna from kernel", regs); + perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, 0, regs, sfar); if (test_thread_flag(TIF_32BIT)) pc = (u32)pc; if (get_user(insn, (u32 __user *) pc) != -EFAULT) { @@ -657,6 +651,7 @@ void handle_stdfmna(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr if (tstate & TSTATE_PRIV) die_if_kernel("stdfmna from kernel", regs); + perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, 0, regs, sfar); if (test_thread_flag(TIF_32BIT)) pc = (u32)pc; if (get_user(insn, (u32 __user *) pc) != -EFAULT) { diff --git a/arch/sparc/kernel/visemul.c b/arch/sparc/kernel/visemul.c index d231cbd5c52..9dfd2ebcb15 100644 --- a/arch/sparc/kernel/visemul.c +++ b/arch/sparc/kernel/visemul.c @@ -5,6 +5,7 @@ #include <linux/kernel.h> #include <linux/errno.h> #include <linux/thread_info.h> +#include <linux/perf_event.h> #include <asm/ptrace.h> #include <asm/pstate.h> @@ -801,6 +802,8 @@ int vis_emul(struct pt_regs *regs, unsigned int insn) BUG_ON(regs->tstate & TSTATE_PRIV); + perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, 0, regs, 0); + if (test_thread_flag(TIF_32BIT)) pc = (u32)pc; diff --git a/arch/sparc/lib/Makefile b/arch/sparc/lib/Makefile index e75faf0e59a..c4b5e03af11 100644 --- a/arch/sparc/lib/Makefile +++ b/arch/sparc/lib/Makefile @@ -44,3 +44,4 @@ obj-y += iomap.o obj-$(CONFIG_SPARC32) += atomic32.o obj-y += ksyms.o obj-$(CONFIG_SPARC64) += PeeCeeI.o +obj-y += usercopy.o diff --git a/arch/sparc/lib/bzero.S b/arch/sparc/lib/bzero.S index b6557297440..615f401edf6 100644 --- a/arch/sparc/lib/bzero.S +++ b/arch/sparc/lib/bzero.S @@ -6,10 +6,6 @@ .text - .globl __memset - .type __memset, #function -__memset: /* %o0=buf, %o1=pat, %o2=len */ - .globl memset .type memset, #function memset: /* %o0=buf, %o1=pat, %o2=len */ @@ -83,7 +79,6 @@ __bzero_done: retl mov %o3, %o0 .size __bzero, .-__bzero - .size __memset, .-__memset .size memset, .-memset #define EX_ST(x,y) \ diff --git a/arch/sparc/lib/checksum_32.S b/arch/sparc/lib/checksum_32.S index 77f228533d4..3632cb34e91 100644 --- a/arch/sparc/lib/checksum_32.S +++ b/arch/sparc/lib/checksum_32.S @@ -560,7 +560,7 @@ __csum_partial_copy_end: mov %i0, %o1 mov %i1, %o0 5: - call __memcpy + call memcpy mov %i2, %o2 tst %o0 bne,a 2f diff --git a/arch/sparc/lib/ksyms.c b/arch/sparc/lib/ksyms.c index 704b1266838..1b30bb3bfdb 100644 --- a/arch/sparc/lib/ksyms.c +++ b/arch/sparc/lib/ksyms.c @@ -30,7 +30,6 @@ EXPORT_SYMBOL(__memscan_generic); EXPORT_SYMBOL(memcmp); EXPORT_SYMBOL(memcpy); EXPORT_SYMBOL(memset); -EXPORT_SYMBOL(__memset); EXPORT_SYMBOL(memmove); EXPORT_SYMBOL(__bzero); @@ -81,7 +80,6 @@ EXPORT_SYMBOL(__csum_partial_copy_sparc_generic); /* Special internal versions of library functions. 
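[Illustrative aside, not part of the patch: the perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, ...) and perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, ...) calls added in the hunks above and below feed the generic software counters, so the traps become visible to ordinary perf_event_open() users. A minimal user-space sketch that reads the alignment-fault count for the calling task; it assumes a kernel with these counters wired up, as this series does for sparc.]

	#include <linux/perf_event.h>
	#include <sys/syscall.h>
	#include <sys/types.h>
	#include <unistd.h>
	#include <stdint.h>
	#include <string.h>
	#include <stdio.h>

	static int perf_event_open(struct perf_event_attr *attr, pid_t pid,
				   int cpu, int group_fd, unsigned long flags)
	{
		return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
	}

	int main(void)
	{
		struct perf_event_attr attr;
		uint64_t count;
		int fd;

		memset(&attr, 0, sizeof(attr));
		attr.size = sizeof(attr);
		attr.type = PERF_TYPE_SOFTWARE;
		attr.config = PERF_COUNT_SW_ALIGNMENT_FAULTS;

		fd = perf_event_open(&attr, 0 /* this task */, -1 /* any cpu */, -1, 0);
		if (fd < 0) {
			perror("perf_event_open");
			return 1;
		}

		/* ... run a workload that may take unaligned-access traps here ... */

		if (read(fd, &count, sizeof(count)) == sizeof(count))
			printf("alignment faults: %llu\n", (unsigned long long)count);
		close(fd);
		return 0;
	}

[End of aside.]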
*/ EXPORT_SYMBOL(__copy_1page); -EXPORT_SYMBOL(__memcpy); EXPORT_SYMBOL(__memmove); EXPORT_SYMBOL(bzero_1page); diff --git a/arch/sparc/lib/mcount.S b/arch/sparc/lib/mcount.S index 7ce9c65f359..24b8b12deed 100644 --- a/arch/sparc/lib/mcount.S +++ b/arch/sparc/lib/mcount.S @@ -64,8 +64,9 @@ mcount: 2: sethi %hi(softirq_stack), %g3 or %g3, %lo(softirq_stack), %g3 ldx [%g3 + %g1], %g7 + sub %g7, STACK_BIAS, %g7 cmp %sp, %g7 - bleu,pt %xcc, 2f + bleu,pt %xcc, 3f sethi %hi(THREAD_SIZE), %g3 add %g7, %g3, %g7 cmp %sp, %g7 @@ -75,7 +76,7 @@ mcount: * again, we are already trying to output the stack overflow * message. */ - sethi %hi(ovstack), %g7 ! cant move to panic stack fast enough +3: sethi %hi(ovstack), %g7 ! cant move to panic stack fast enough or %g7, %lo(ovstack), %g7 add %g7, OVSTACKSIZE, %g3 sub %g3, STACK_BIAS + 192, %g3 diff --git a/arch/sparc/lib/memcpy.S b/arch/sparc/lib/memcpy.S index ce10bc869af..34fe6575173 100644 --- a/arch/sparc/lib/memcpy.S +++ b/arch/sparc/lib/memcpy.S @@ -543,9 +543,6 @@ FUNC(memmove) b 3f add %o0, 2, %o0 -#ifdef __KERNEL__ -FUNC(__memcpy) -#endif FUNC(memcpy) /* %o0=dst %o1=src %o2=len */ sub %o0, %o1, %o4 diff --git a/arch/sparc/lib/memset.S b/arch/sparc/lib/memset.S index 1c37ea892de..99c017be871 100644 --- a/arch/sparc/lib/memset.S +++ b/arch/sparc/lib/memset.S @@ -60,11 +60,10 @@ .globl __bzero_begin __bzero_begin: - .globl __bzero, __memset, + .globl __bzero .globl memset .globl __memset_start, __memset_end __memset_start: -__memset: memset: and %o1, 0xff, %g3 sll %g3, 8, %g2 diff --git a/arch/sparc/lib/usercopy.c b/arch/sparc/lib/usercopy.c new file mode 100644 index 00000000000..14b363fec8a --- /dev/null +++ b/arch/sparc/lib/usercopy.c @@ -0,0 +1,8 @@ +#include <linux/module.h> +#include <linux/bug.h> + +void copy_from_user_overflow(void) +{ + WARN(1, "Buffer overflow detected!\n"); +} +EXPORT_SYMBOL(copy_from_user_overflow); diff --git a/arch/sparc/math-emu/math_32.c b/arch/sparc/math-emu/math_32.c index e13f65da17d..a3fccde894e 100644 --- a/arch/sparc/math-emu/math_32.c +++ b/arch/sparc/math-emu/math_32.c @@ -67,6 +67,7 @@ #include <linux/types.h> #include <linux/sched.h> #include <linux/mm.h> +#include <linux/perf_event.h> #include <asm/uaccess.h> #include "sfp-util_32.h" @@ -163,6 +164,8 @@ int do_mathemu(struct pt_regs *regs, struct task_struct *fpt) int retcode = 0; /* assume all succeed */ unsigned long insn; + perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, 0, regs, 0); + #ifdef DEBUG_MATHEMU printk("In do_mathemu()... 
pc is %08lx\n", regs->pc); printk("fpqdepth is %ld\n", fpt->thread.fpqdepth); diff --git a/arch/sparc/math-emu/math_64.c b/arch/sparc/math-emu/math_64.c index 6863c9bde25..56d2c44747b 100644 --- a/arch/sparc/math-emu/math_64.c +++ b/arch/sparc/math-emu/math_64.c @@ -11,6 +11,7 @@ #include <linux/types.h> #include <linux/sched.h> #include <linux/errno.h> +#include <linux/perf_event.h> #include <asm/fpumacro.h> #include <asm/ptrace.h> @@ -183,6 +184,7 @@ int do_mathemu(struct pt_regs *regs, struct fpustate *f) if (tstate & TSTATE_PRIV) die_if_kernel("unfinished/unimplemented FPop from kernel", regs); + perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, 0, regs, 0); if (test_thread_flag(TIF_32BIT)) pc = (u32)pc; if (get_user(insn, (u32 __user *) pc) != -EFAULT) { diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c index 43b0da96a4f..6081936bf03 100644 --- a/arch/sparc/mm/fault_64.c +++ b/arch/sparc/mm/fault_64.c @@ -31,13 +31,12 @@ #include <asm/sections.h> #include <asm/mmu_context.h> -#ifdef CONFIG_KPROBES -static inline int notify_page_fault(struct pt_regs *regs) +static inline __kprobes int notify_page_fault(struct pt_regs *regs) { int ret = 0; /* kprobe_running() needs smp_processor_id() */ - if (!user_mode(regs)) { + if (kprobes_built_in() && !user_mode(regs)) { preempt_disable(); if (kprobe_running() && kprobe_fault_handler(regs, 0)) ret = 1; @@ -45,12 +44,6 @@ static inline int notify_page_fault(struct pt_regs *regs) } return ret; } -#else -static inline int notify_page_fault(struct pt_regs *regs) -{ - return 0; -} -#endif static void __kprobes unhandled_fault(unsigned long address, struct task_struct *tsk, @@ -73,7 +66,7 @@ static void __kprobes unhandled_fault(unsigned long address, die_if_kernel("Oops", regs); } -static void bad_kernel_pc(struct pt_regs *regs, unsigned long vaddr) +static void __kprobes bad_kernel_pc(struct pt_regs *regs, unsigned long vaddr) { printk(KERN_CRIT "OOPS: Bogus kernel PC [%016lx] in fault handler\n", regs->tpc); @@ -170,8 +163,9 @@ static unsigned int get_fault_insn(struct pt_regs *regs, unsigned int insn) return insn; } -static void do_kernel_fault(struct pt_regs *regs, int si_code, int fault_code, - unsigned int insn, unsigned long address) +static void __kprobes do_kernel_fault(struct pt_regs *regs, int si_code, + int fault_code, unsigned int insn, + unsigned long address) { unsigned char asi = ASI_P; @@ -225,7 +219,7 @@ cannot_handle: unhandled_fault (address, current, regs); } -static void noinline bogus_32bit_fault_tpc(struct pt_regs *regs) +static void noinline __kprobes bogus_32bit_fault_tpc(struct pt_regs *regs) { static int times; @@ -237,8 +231,8 @@ static void noinline bogus_32bit_fault_tpc(struct pt_regs *regs) show_regs(regs); } -static void noinline bogus_32bit_fault_address(struct pt_regs *regs, - unsigned long addr) +static void noinline __kprobes bogus_32bit_fault_address(struct pt_regs *regs, + unsigned long addr) { static int times; diff --git a/arch/um/drivers/mconsole_kern.c b/arch/um/drivers/mconsole_kern.c index e14629c87de..51069245b79 100644 --- a/arch/um/drivers/mconsole_kern.c +++ b/arch/um/drivers/mconsole_kern.c @@ -6,6 +6,7 @@ #include <linux/console.h> #include <linux/ctype.h> +#include <linux/string.h> #include <linux/interrupt.h> #include <linux/list.h> #include <linux/mm.h> @@ -131,7 +132,7 @@ void mconsole_proc(struct mc_request *req) char *ptr = req->request.data, *buf; ptr += strlen("proc"); - while (isspace(*ptr)) ptr++; + ptr = skip_spaces(ptr); proc = get_fs_type("proc"); if (proc == NULL) { @@ -212,8 
+213,7 @@ void mconsole_proc(struct mc_request *req) char *ptr = req->request.data; ptr += strlen("proc"); - while (isspace(*ptr)) - ptr++; + ptr = skip_spaces(ptr); snprintf(path, sizeof(path), "/proc/%s", ptr); fd = sys_open(path, 0, 0); @@ -560,8 +560,7 @@ void mconsole_config(struct mc_request *req) int err; ptr += strlen("config"); - while (isspace(*ptr)) - ptr++; + ptr = skip_spaces(ptr); dev = mconsole_find_dev(ptr); if (dev == NULL) { mconsole_reply(req, "Bad configuration option", 1, 0); @@ -588,7 +587,7 @@ void mconsole_remove(struct mc_request *req) int err, start, end, n; ptr += strlen("remove"); - while (isspace(*ptr)) ptr++; + ptr = skip_spaces(ptr); dev = mconsole_find_dev(ptr); if (dev == NULL) { mconsole_reply(req, "Bad remove option", 1, 0); @@ -712,7 +711,7 @@ void mconsole_sysrq(struct mc_request *req) char *ptr = req->request.data; ptr += strlen("sysrq"); - while (isspace(*ptr)) ptr++; + ptr = skip_spaces(ptr); /* * With 'b', the system will shut down without a chance to reply, @@ -757,8 +756,7 @@ void mconsole_stack(struct mc_request *req) */ ptr += strlen("stack"); - while (isspace(*ptr)) - ptr++; + ptr = skip_spaces(ptr); /* * Should really check for multiple pids or reject bad args here @@ -833,8 +831,8 @@ static int __init mconsole_init(void) __initcall(mconsole_init); -static int write_proc_mconsole(struct file *file, const char __user *buffer, - unsigned long count, void *data) +static ssize_t mconsole_proc_write(struct file *file, + const char __user *buffer, size_t count, loff_t *pos) { char *buf; @@ -855,6 +853,11 @@ static int write_proc_mconsole(struct file *file, const char __user *buffer, return count; } +static const struct file_operations mconsole_proc_fops = { + .owner = THIS_MODULE, + .write = mconsole_proc_write, +}; + static int create_proc_mconsole(void) { struct proc_dir_entry *ent; @@ -862,15 +865,12 @@ static int create_proc_mconsole(void) if (notify_socket == NULL) return 0; - ent = create_proc_entry("mconsole", S_IFREG | 0200, NULL); + ent = proc_create("mconsole", 0200, NULL, &mconsole_proc_fops); if (ent == NULL) { printk(KERN_INFO "create_proc_mconsole : create_proc_entry " "failed\n"); return 0; } - - ent->read_proc = NULL; - ent->write_proc = write_proc_mconsole; return 0; } diff --git a/arch/um/drivers/ubd_kern.c b/arch/um/drivers/ubd_kern.c index 635d16d90a8..5ff554677f4 100644 --- a/arch/um/drivers/ubd_kern.c +++ b/arch/um/drivers/ubd_kern.c @@ -27,6 +27,7 @@ #include "linux/init.h" #include "linux/cdrom.h" #include "linux/proc_fs.h" +#include "linux/seq_file.h" #include "linux/ctype.h" #include "linux/capability.h" #include "linux/mm.h" @@ -200,23 +201,25 @@ static void make_proc_ide(void) proc_ide = proc_mkdir("ide0", proc_ide_root); } -static int proc_ide_read_media(char *page, char **start, off_t off, int count, - int *eof, void *data) +static int fake_ide_media_proc_show(struct seq_file *m, void *v) { - int len; - - strcpy(page, "disk\n"); - len = strlen("disk\n"); - len -= off; - if (len < count){ - *eof = 1; - if (len <= 0) return 0; - } - else len = count; - *start = page + off; - return len; + seq_puts(m, "disk\n"); + return 0; +} + +static int fake_ide_media_proc_open(struct inode *inode, struct file *file) +{ + return single_open(file, fake_ide_media_proc_show, NULL); } +static const struct file_operations fake_ide_media_proc_fops = { + .owner = THIS_MODULE, + .open = fake_ide_media_proc_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + static void make_ide_entries(const char *dev_name) { 
struct proc_dir_entry *dir, *ent; @@ -227,11 +230,8 @@ static void make_ide_entries(const char *dev_name) dir = proc_mkdir(dev_name, proc_ide); if(!dir) return; - ent = create_proc_entry("media", S_IFREG|S_IRUGO, dir); + ent = proc_create("media", S_IRUGO, dir, &fake_ide_media_proc_fops); if(!ent) return; - ent->data = NULL; - ent->read_proc = proc_ide_read_media; - ent->write_proc = NULL; snprintf(name, sizeof(name), "ide0/%s", dev_name); proc_symlink(dev_name, proc_ide_root, name); } diff --git a/arch/um/kernel/exitcode.c b/arch/um/kernel/exitcode.c index 6540d2c9fbb..829df49dee9 100644 --- a/arch/um/kernel/exitcode.c +++ b/arch/um/kernel/exitcode.c @@ -6,7 +6,9 @@ #include <linux/ctype.h> #include <linux/init.h> #include <linux/kernel.h> +#include <linux/module.h> #include <linux/proc_fs.h> +#include <linux/seq_file.h> #include <linux/types.h> #include <asm/uaccess.h> @@ -16,30 +18,26 @@ */ int uml_exitcode = 0; -static int read_proc_exitcode(char *page, char **start, off_t off, - int count, int *eof, void *data) +static int exitcode_proc_show(struct seq_file *m, void *v) { - int len, val; + int val; /* * Save uml_exitcode in a local so that we don't need to guarantee * that sprintf accesses it atomically. */ val = uml_exitcode; - len = sprintf(page, "%d\n", val); - len -= off; - if (len <= off+count) - *eof = 1; - *start = page + off; - if (len > count) - len = count; - if (len < 0) - len = 0; - return len; + seq_printf(m, "%d\n", val); + return 0; +} + +static int exitcode_proc_open(struct inode *inode, struct file *file) +{ + return single_open(file, exitcode_proc_show, NULL); } -static int write_proc_exitcode(struct file *file, const char __user *buffer, - unsigned long count, void *data) +static ssize_t exitcode_proc_write(struct file *file, + const char __user *buffer, size_t count, loff_t *pos) { char *end, buf[sizeof("nnnnn\0")]; int tmp; @@ -55,20 +53,25 @@ static int write_proc_exitcode(struct file *file, const char __user *buffer, return count; } +static const struct file_operations exitcode_proc_fops = { + .owner = THIS_MODULE, + .open = exitcode_proc_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, + .write = exitcode_proc_write, +}; + static int make_proc_exitcode(void) { struct proc_dir_entry *ent; - ent = create_proc_entry("exitcode", 0600, NULL); + ent = proc_create("exitcode", 0600, NULL, &exitcode_proc_fops); if (ent == NULL) { printk(KERN_WARNING "make_proc_exitcode : Failed to register " "/proc/exitcode\n"); return 0; } - - ent->read_proc = read_proc_exitcode; - ent->write_proc = write_proc_exitcode; - return 0; } diff --git a/arch/um/kernel/irq.c b/arch/um/kernel/irq.c index 039270b9b73..89474ba0741 100644 --- a/arch/um/kernel/irq.c +++ b/arch/um/kernel/irq.c @@ -34,7 +34,7 @@ int show_interrupts(struct seq_file *p, void *v) } if (i < NR_IRQS) { - spin_lock_irqsave(&irq_desc[i].lock, flags); + raw_spin_lock_irqsave(&irq_desc[i].lock, flags); action = irq_desc[i].action; if (!action) goto skip; @@ -53,7 +53,7 @@ int show_interrupts(struct seq_file *p, void *v) seq_putc(p, '\n'); skip: - spin_unlock_irqrestore(&irq_desc[i].lock, flags); + raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags); } else if (i == NR_IRQS) seq_putc(p, '\n'); diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c index 4a28a1568d8..2f910a1b745 100644 --- a/arch/um/kernel/process.c +++ b/arch/um/kernel/process.c @@ -9,11 +9,13 @@ #include <linux/hardirq.h> #include <linux/gfp.h> #include <linux/mm.h> +#include <linux/module.h> #include 
<linux/personality.h> #include <linux/proc_fs.h> #include <linux/ptrace.h> #include <linux/random.h> #include <linux/sched.h> +#include <linux/seq_file.h> #include <linux/tick.h> #include <linux/threads.h> #include <asm/current.h> @@ -336,16 +338,19 @@ int get_using_sysemu(void) return atomic_read(&using_sysemu); } -static int proc_read_sysemu(char *buf, char **start, off_t offset, int size,int *eof, void *data) +static int sysemu_proc_show(struct seq_file *m, void *v) { - if (snprintf(buf, size, "%d\n", get_using_sysemu()) < size) - /* No overflow */ - *eof = 1; + seq_printf(m, "%d\n", get_using_sysemu()); + return 0; +} - return strlen(buf); +static int sysemu_proc_open(struct inode *inode, struct file *file) +{ + return single_open(file, sysemu_proc_show, NULL); } -static int proc_write_sysemu(struct file *file,const char __user *buf, unsigned long count,void *data) +static ssize_t sysemu_proc_write(struct file *file, const char __user *buf, + size_t count, loff_t *pos) { char tmp[2]; @@ -358,13 +363,22 @@ static int proc_write_sysemu(struct file *file,const char __user *buf, unsigned return count; } +static const struct file_operations sysemu_proc_fops = { + .owner = THIS_MODULE, + .open = sysemu_proc_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, + .write = sysemu_proc_write, +}; + int __init make_proc_sysemu(void) { struct proc_dir_entry *ent; if (!sysemu_supported) return 0; - ent = create_proc_entry("sysemu", 0600, NULL); + ent = proc_create("sysemu", 0600, NULL, &sysemu_proc_fops); if (ent == NULL) { @@ -372,9 +386,6 @@ int __init make_proc_sysemu(void) return 0; } - ent->read_proc = proc_read_sysemu; - ent->write_proc = proc_write_sysemu; - return 0; } diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 32a1918e1b8..3b2a5aca4ed 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -2012,18 +2012,9 @@ config SCx200HR_TIMER processor goes idle (as is done by the scheduler). The other workaround is idle=poll boot option. -config GEODE_MFGPT_TIMER - def_bool y - prompt "Geode Multi-Function General Purpose Timer (MFGPT) events" - depends on MGEODE_LX && GENERIC_TIME && GENERIC_CLOCKEVENTS - ---help--- - This driver provides a clock event source based on the MFGPT - timer(s) in the CS5535 and CS5536 companion chip for the geode. - MFGPTs have a better resolution and max interval than the - generic PIT, and are suitable for use as high-res timers. 
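The UML /proc hunks above (mconsole, exitcode, sysemu) all apply the same conversion: the legacy create_proc_entry() plus ->read_proc/->write_proc interface is replaced by proc_create() with a file_operations table backed by seq_file's single_open(), which removes the hand-rolled page/off/count bookkeeping from the read paths. A minimal sketch of that pattern, using hypothetical "example_" names rather than anything from this series (a write handler is omitted for brevity):

#include <linux/init.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

static int example_proc_show(struct seq_file *m, void *v)
{
	/* seq_file does the buffer and offset handling for us */
	seq_printf(m, "%d\n", 42);
	return 0;
}

static int example_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, example_proc_show, NULL);
}

static const struct file_operations example_proc_fops = {
	.owner		= THIS_MODULE,
	.open		= example_proc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int __init example_proc_init(void)
{
	/* proc_create() replaces create_proc_entry() + ->read_proc/->write_proc */
	if (!proc_create("example", 0444, NULL, &example_proc_fops))
		return -ENOMEM;
	return 0;
}
module_init(example_proc_init);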
- config OLPC bool "One Laptop Per Child support" + select GPIOLIB default n ---help--- Add support for detecting the unique features of the OLPC diff --git a/arch/x86/include/asm/geode.h b/arch/x86/include/asm/geode.h index ad3c2ed7548..7cd73552a4e 100644 --- a/arch/x86/include/asm/geode.h +++ b/arch/x86/include/asm/geode.h @@ -12,160 +12,7 @@ #include <asm/processor.h> #include <linux/io.h> - -/* Generic southbridge functions */ - -#define GEODE_DEV_PMS 0 -#define GEODE_DEV_ACPI 1 -#define GEODE_DEV_GPIO 2 -#define GEODE_DEV_MFGPT 3 - -extern int geode_get_dev_base(unsigned int dev); - -/* Useful macros */ -#define geode_pms_base() geode_get_dev_base(GEODE_DEV_PMS) -#define geode_acpi_base() geode_get_dev_base(GEODE_DEV_ACPI) -#define geode_gpio_base() geode_get_dev_base(GEODE_DEV_GPIO) -#define geode_mfgpt_base() geode_get_dev_base(GEODE_DEV_MFGPT) - -/* MSRS */ - -#define MSR_GLIU_P2D_RO0 0x10000029 - -#define MSR_LX_GLD_MSR_CONFIG 0x48002001 -#define MSR_LX_MSR_PADSEL 0x48002011 /* NOT 0x48000011; the data - * sheet has the wrong value */ -#define MSR_GLCP_SYS_RSTPLL 0x4C000014 -#define MSR_GLCP_DOTPLL 0x4C000015 - -#define MSR_LBAR_SMB 0x5140000B -#define MSR_LBAR_GPIO 0x5140000C -#define MSR_LBAR_MFGPT 0x5140000D -#define MSR_LBAR_ACPI 0x5140000E -#define MSR_LBAR_PMS 0x5140000F - -#define MSR_DIVIL_SOFT_RESET 0x51400017 - -#define MSR_PIC_YSEL_LOW 0x51400020 -#define MSR_PIC_YSEL_HIGH 0x51400021 -#define MSR_PIC_ZSEL_LOW 0x51400022 -#define MSR_PIC_ZSEL_HIGH 0x51400023 -#define MSR_PIC_IRQM_LPC 0x51400025 - -#define MSR_MFGPT_IRQ 0x51400028 -#define MSR_MFGPT_NR 0x51400029 -#define MSR_MFGPT_SETUP 0x5140002B - -#define MSR_LX_SPARE_MSR 0x80000011 /* DC-specific */ - -#define MSR_GX_GLD_MSR_CONFIG 0xC0002001 -#define MSR_GX_MSR_PADSEL 0xC0002011 - -/* Resource Sizes */ - -#define LBAR_GPIO_SIZE 0xFF -#define LBAR_MFGPT_SIZE 0x40 -#define LBAR_ACPI_SIZE 0x40 -#define LBAR_PMS_SIZE 0x80 - -/* ACPI registers (PMS block) */ - -/* - * PM1_EN is only valid when VSA is enabled for 16 bit reads. 
- * When VSA is not enabled, *always* read both PM1_STS and PM1_EN - * with a 32 bit read at offset 0x0 - */ - -#define PM1_STS 0x00 -#define PM1_EN 0x02 -#define PM1_CNT 0x08 -#define PM2_CNT 0x0C -#define PM_TMR 0x10 -#define PM_GPE0_STS 0x18 -#define PM_GPE0_EN 0x1C - -/* PMC registers (PMS block) */ - -#define PM_SSD 0x00 -#define PM_SCXA 0x04 -#define PM_SCYA 0x08 -#define PM_OUT_SLPCTL 0x0C -#define PM_SCLK 0x10 -#define PM_SED 0x1 -#define PM_SCXD 0x18 -#define PM_SCYD 0x1C -#define PM_IN_SLPCTL 0x20 -#define PM_WKD 0x30 -#define PM_WKXD 0x34 -#define PM_RD 0x38 -#define PM_WKXA 0x3C -#define PM_FSD 0x40 -#define PM_TSD 0x44 -#define PM_PSD 0x48 -#define PM_NWKD 0x4C -#define PM_AWKD 0x50 -#define PM_SSC 0x54 - -/* VSA2 magic values */ - -#define VSA_VRC_INDEX 0xAC1C -#define VSA_VRC_DATA 0xAC1E -#define VSA_VR_UNLOCK 0xFC53 /* unlock virtual register */ -#define VSA_VR_SIGNATURE 0x0003 -#define VSA_VR_MEM_SIZE 0x0200 -#define AMD_VSA_SIG 0x4132 /* signature is ascii 'VSA2' */ -#define GSW_VSA_SIG 0x534d /* General Software signature */ -/* GPIO */ - -#define GPIO_OUTPUT_VAL 0x00 -#define GPIO_OUTPUT_ENABLE 0x04 -#define GPIO_OUTPUT_OPEN_DRAIN 0x08 -#define GPIO_OUTPUT_INVERT 0x0C -#define GPIO_OUTPUT_AUX1 0x10 -#define GPIO_OUTPUT_AUX2 0x14 -#define GPIO_PULL_UP 0x18 -#define GPIO_PULL_DOWN 0x1C -#define GPIO_INPUT_ENABLE 0x20 -#define GPIO_INPUT_INVERT 0x24 -#define GPIO_INPUT_FILTER 0x28 -#define GPIO_INPUT_EVENT_COUNT 0x2C -#define GPIO_READ_BACK 0x30 -#define GPIO_INPUT_AUX1 0x34 -#define GPIO_EVENTS_ENABLE 0x38 -#define GPIO_LOCK_ENABLE 0x3C -#define GPIO_POSITIVE_EDGE_EN 0x40 -#define GPIO_NEGATIVE_EDGE_EN 0x44 -#define GPIO_POSITIVE_EDGE_STS 0x48 -#define GPIO_NEGATIVE_EDGE_STS 0x4C - -#define GPIO_MAP_X 0xE0 -#define GPIO_MAP_Y 0xE4 -#define GPIO_MAP_Z 0xE8 -#define GPIO_MAP_W 0xEC - -static inline u32 geode_gpio(unsigned int nr) -{ - BUG_ON(nr > 28); - return 1 << nr; -} - -extern void geode_gpio_set(u32, unsigned int); -extern void geode_gpio_clear(u32, unsigned int); -extern int geode_gpio_isset(u32, unsigned int); -extern void geode_gpio_setup_event(unsigned int, int, int); -extern void geode_gpio_set_irq(unsigned int, unsigned int); - -static inline void geode_gpio_event_irq(unsigned int gpio, int pair) -{ - geode_gpio_setup_event(gpio, pair, 0); -} - -static inline void geode_gpio_event_pme(unsigned int gpio, int pair) -{ - geode_gpio_setup_event(gpio, pair, 1); -} - -/* Specific geode tests */ +#include <linux/cs5535.h> static inline int is_geode_gx(void) { @@ -186,68 +33,4 @@ static inline int is_geode(void) return (is_geode_gx() || is_geode_lx()); } -#ifdef CONFIG_MGEODE_LX -extern int geode_has_vsa2(void); -#else -static inline int geode_has_vsa2(void) -{ - return 0; -} -#endif - -/* MFGPTs */ - -#define MFGPT_MAX_TIMERS 8 -#define MFGPT_TIMER_ANY (-1) - -#define MFGPT_DOMAIN_WORKING 1 -#define MFGPT_DOMAIN_STANDBY 2 -#define MFGPT_DOMAIN_ANY (MFGPT_DOMAIN_WORKING | MFGPT_DOMAIN_STANDBY) - -#define MFGPT_CMP1 0 -#define MFGPT_CMP2 1 - -#define MFGPT_EVENT_IRQ 0 -#define MFGPT_EVENT_NMI 1 -#define MFGPT_EVENT_RESET 3 - -#define MFGPT_REG_CMP1 0 -#define MFGPT_REG_CMP2 2 -#define MFGPT_REG_COUNTER 4 -#define MFGPT_REG_SETUP 6 - -#define MFGPT_SETUP_CNTEN (1 << 15) -#define MFGPT_SETUP_CMP2 (1 << 14) -#define MFGPT_SETUP_CMP1 (1 << 13) -#define MFGPT_SETUP_SETUP (1 << 12) -#define MFGPT_SETUP_STOPEN (1 << 11) -#define MFGPT_SETUP_EXTEN (1 << 10) -#define MFGPT_SETUP_REVEN (1 << 5) -#define MFGPT_SETUP_CLKSEL (1 << 4) - -static inline void geode_mfgpt_write(int timer, 
u16 reg, u16 value) -{ - u32 base = geode_get_dev_base(GEODE_DEV_MFGPT); - outw(value, base + reg + (timer * 8)); -} - -static inline u16 geode_mfgpt_read(int timer, u16 reg) -{ - u32 base = geode_get_dev_base(GEODE_DEV_MFGPT); - return inw(base + reg + (timer * 8)); -} - -extern int geode_mfgpt_toggle_event(int timer, int cmp, int event, int enable); -extern int geode_mfgpt_set_irq(int timer, int cmp, int *irq, int enable); -extern int geode_mfgpt_alloc_timer(int timer, int domain); - -#define geode_mfgpt_setup_irq(t, c, i) geode_mfgpt_set_irq((t), (c), (i), 1) -#define geode_mfgpt_release_irq(t, c, i) geode_mfgpt_set_irq((t), (c), (i), 0) - -#ifdef CONFIG_GEODE_MFGPT_TIMER -extern int __init mfgpt_timer_setup(void); -#else -static inline int mfgpt_timer_setup(void) { return 0; } -#endif - #endif /* _ASM_X86_GEODE_H */ diff --git a/arch/x86/include/asm/olpc.h b/arch/x86/include/asm/olpc.h index 834a30295fa..3a57385d9fa 100644 --- a/arch/x86/include/asm/olpc.h +++ b/arch/x86/include/asm/olpc.h @@ -120,7 +120,7 @@ extern int olpc_ec_mask_unset(uint8_t bits); /* GPIO assignments */ -#define OLPC_GPIO_MIC_AC geode_gpio(1) +#define OLPC_GPIO_MIC_AC 1 #define OLPC_GPIO_DCON_IRQ geode_gpio(7) #define OLPC_GPIO_THRM_ALRM geode_gpio(10) #define OLPC_GPIO_SMB_CLK geode_gpio(14) diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h index efb38994859..dd59a85a918 100644 --- a/arch/x86/include/asm/paravirt.h +++ b/arch/x86/include/asm/paravirt.h @@ -731,34 +731,34 @@ static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx, #if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS) -static inline int __raw_spin_is_locked(struct raw_spinlock *lock) +static inline int arch_spin_is_locked(struct arch_spinlock *lock) { return PVOP_CALL1(int, pv_lock_ops.spin_is_locked, lock); } -static inline int __raw_spin_is_contended(struct raw_spinlock *lock) +static inline int arch_spin_is_contended(struct arch_spinlock *lock) { return PVOP_CALL1(int, pv_lock_ops.spin_is_contended, lock); } -#define __raw_spin_is_contended __raw_spin_is_contended +#define arch_spin_is_contended arch_spin_is_contended -static __always_inline void __raw_spin_lock(struct raw_spinlock *lock) +static __always_inline void arch_spin_lock(struct arch_spinlock *lock) { PVOP_VCALL1(pv_lock_ops.spin_lock, lock); } -static __always_inline void __raw_spin_lock_flags(struct raw_spinlock *lock, +static __always_inline void arch_spin_lock_flags(struct arch_spinlock *lock, unsigned long flags) { PVOP_VCALL2(pv_lock_ops.spin_lock_flags, lock, flags); } -static __always_inline int __raw_spin_trylock(struct raw_spinlock *lock) +static __always_inline int arch_spin_trylock(struct arch_spinlock *lock) { return PVOP_CALL1(int, pv_lock_ops.spin_trylock, lock); } -static __always_inline void __raw_spin_unlock(struct raw_spinlock *lock) +static __always_inline void arch_spin_unlock(struct arch_spinlock *lock) { PVOP_VCALL1(pv_lock_ops.spin_unlock, lock); } diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h index 9357473c8da..b1e70d51e40 100644 --- a/arch/x86/include/asm/paravirt_types.h +++ b/arch/x86/include/asm/paravirt_types.h @@ -318,14 +318,14 @@ struct pv_mmu_ops { phys_addr_t phys, pgprot_t flags); }; -struct raw_spinlock; +struct arch_spinlock; struct pv_lock_ops { - int (*spin_is_locked)(struct raw_spinlock *lock); - int (*spin_is_contended)(struct raw_spinlock *lock); - void (*spin_lock)(struct raw_spinlock *lock); - void (*spin_lock_flags)(struct raw_spinlock *lock, 
unsigned long flags); - int (*spin_trylock)(struct raw_spinlock *lock); - void (*spin_unlock)(struct raw_spinlock *lock); + int (*spin_is_locked)(struct arch_spinlock *lock); + int (*spin_is_contended)(struct arch_spinlock *lock); + void (*spin_lock)(struct arch_spinlock *lock); + void (*spin_lock_flags)(struct arch_spinlock *lock, unsigned long flags); + int (*spin_trylock)(struct arch_spinlock *lock); + void (*spin_unlock)(struct arch_spinlock *lock); }; /* This contains all the paravirt structures: we get a convenient diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h index 4e77853321d..3089f70c0c5 100644 --- a/arch/x86/include/asm/spinlock.h +++ b/arch/x86/include/asm/spinlock.h @@ -58,7 +58,7 @@ #if (NR_CPUS < 256) #define TICKET_SHIFT 8 -static __always_inline void __ticket_spin_lock(raw_spinlock_t *lock) +static __always_inline void __ticket_spin_lock(arch_spinlock_t *lock) { short inc = 0x0100; @@ -77,7 +77,7 @@ static __always_inline void __ticket_spin_lock(raw_spinlock_t *lock) : "memory", "cc"); } -static __always_inline int __ticket_spin_trylock(raw_spinlock_t *lock) +static __always_inline int __ticket_spin_trylock(arch_spinlock_t *lock) { int tmp, new; @@ -96,7 +96,7 @@ static __always_inline int __ticket_spin_trylock(raw_spinlock_t *lock) return tmp; } -static __always_inline void __ticket_spin_unlock(raw_spinlock_t *lock) +static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock) { asm volatile(UNLOCK_LOCK_PREFIX "incb %0" : "+m" (lock->slock) @@ -106,7 +106,7 @@ static __always_inline void __ticket_spin_unlock(raw_spinlock_t *lock) #else #define TICKET_SHIFT 16 -static __always_inline void __ticket_spin_lock(raw_spinlock_t *lock) +static __always_inline void __ticket_spin_lock(arch_spinlock_t *lock) { int inc = 0x00010000; int tmp; @@ -127,7 +127,7 @@ static __always_inline void __ticket_spin_lock(raw_spinlock_t *lock) : "memory", "cc"); } -static __always_inline int __ticket_spin_trylock(raw_spinlock_t *lock) +static __always_inline int __ticket_spin_trylock(arch_spinlock_t *lock) { int tmp; int new; @@ -149,7 +149,7 @@ static __always_inline int __ticket_spin_trylock(raw_spinlock_t *lock) return tmp; } -static __always_inline void __ticket_spin_unlock(raw_spinlock_t *lock) +static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock) { asm volatile(UNLOCK_LOCK_PREFIX "incw %0" : "+m" (lock->slock) @@ -158,14 +158,14 @@ static __always_inline void __ticket_spin_unlock(raw_spinlock_t *lock) } #endif -static inline int __ticket_spin_is_locked(raw_spinlock_t *lock) +static inline int __ticket_spin_is_locked(arch_spinlock_t *lock) { int tmp = ACCESS_ONCE(lock->slock); return !!(((tmp >> TICKET_SHIFT) ^ tmp) & ((1 << TICKET_SHIFT) - 1)); } -static inline int __ticket_spin_is_contended(raw_spinlock_t *lock) +static inline int __ticket_spin_is_contended(arch_spinlock_t *lock) { int tmp = ACCESS_ONCE(lock->slock); @@ -174,43 +174,43 @@ static inline int __ticket_spin_is_contended(raw_spinlock_t *lock) #ifndef CONFIG_PARAVIRT_SPINLOCKS -static inline int __raw_spin_is_locked(raw_spinlock_t *lock) +static inline int arch_spin_is_locked(arch_spinlock_t *lock) { return __ticket_spin_is_locked(lock); } -static inline int __raw_spin_is_contended(raw_spinlock_t *lock) +static inline int arch_spin_is_contended(arch_spinlock_t *lock) { return __ticket_spin_is_contended(lock); } -#define __raw_spin_is_contended __raw_spin_is_contended +#define arch_spin_is_contended arch_spin_is_contended -static __always_inline void 
__raw_spin_lock(raw_spinlock_t *lock) +static __always_inline void arch_spin_lock(arch_spinlock_t *lock) { __ticket_spin_lock(lock); } -static __always_inline int __raw_spin_trylock(raw_spinlock_t *lock) +static __always_inline int arch_spin_trylock(arch_spinlock_t *lock) { return __ticket_spin_trylock(lock); } -static __always_inline void __raw_spin_unlock(raw_spinlock_t *lock) +static __always_inline void arch_spin_unlock(arch_spinlock_t *lock) { __ticket_spin_unlock(lock); } -static __always_inline void __raw_spin_lock_flags(raw_spinlock_t *lock, +static __always_inline void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags) { - __raw_spin_lock(lock); + arch_spin_lock(lock); } #endif /* CONFIG_PARAVIRT_SPINLOCKS */ -static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock) +static inline void arch_spin_unlock_wait(arch_spinlock_t *lock) { - while (__raw_spin_is_locked(lock)) + while (arch_spin_is_locked(lock)) cpu_relax(); } @@ -232,7 +232,7 @@ static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock) * read_can_lock - would read_trylock() succeed? * @lock: the rwlock in question. */ -static inline int __raw_read_can_lock(raw_rwlock_t *lock) +static inline int arch_read_can_lock(arch_rwlock_t *lock) { return (int)(lock)->lock > 0; } @@ -241,12 +241,12 @@ static inline int __raw_read_can_lock(raw_rwlock_t *lock) * write_can_lock - would write_trylock() succeed? * @lock: the rwlock in question. */ -static inline int __raw_write_can_lock(raw_rwlock_t *lock) +static inline int arch_write_can_lock(arch_rwlock_t *lock) { return (lock)->lock == RW_LOCK_BIAS; } -static inline void __raw_read_lock(raw_rwlock_t *rw) +static inline void arch_read_lock(arch_rwlock_t *rw) { asm volatile(LOCK_PREFIX " subl $1,(%0)\n\t" "jns 1f\n" @@ -255,7 +255,7 @@ static inline void __raw_read_lock(raw_rwlock_t *rw) ::LOCK_PTR_REG (rw) : "memory"); } -static inline void __raw_write_lock(raw_rwlock_t *rw) +static inline void arch_write_lock(arch_rwlock_t *rw) { asm volatile(LOCK_PREFIX " subl %1,(%0)\n\t" "jz 1f\n" @@ -264,7 +264,7 @@ static inline void __raw_write_lock(raw_rwlock_t *rw) ::LOCK_PTR_REG (rw), "i" (RW_LOCK_BIAS) : "memory"); } -static inline int __raw_read_trylock(raw_rwlock_t *lock) +static inline int arch_read_trylock(arch_rwlock_t *lock) { atomic_t *count = (atomic_t *)lock; @@ -274,7 +274,7 @@ static inline int __raw_read_trylock(raw_rwlock_t *lock) return 0; } -static inline int __raw_write_trylock(raw_rwlock_t *lock) +static inline int arch_write_trylock(arch_rwlock_t *lock) { atomic_t *count = (atomic_t *)lock; @@ -284,23 +284,23 @@ static inline int __raw_write_trylock(raw_rwlock_t *lock) return 0; } -static inline void __raw_read_unlock(raw_rwlock_t *rw) +static inline void arch_read_unlock(arch_rwlock_t *rw) { asm volatile(LOCK_PREFIX "incl %0" :"+m" (rw->lock) : : "memory"); } -static inline void __raw_write_unlock(raw_rwlock_t *rw) +static inline void arch_write_unlock(arch_rwlock_t *rw) { asm volatile(LOCK_PREFIX "addl %1, %0" : "+m" (rw->lock) : "i" (RW_LOCK_BIAS) : "memory"); } -#define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock) -#define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock) +#define arch_read_lock_flags(lock, flags) arch_read_lock(lock) +#define arch_write_lock_flags(lock, flags) arch_write_lock(lock) -#define _raw_spin_relax(lock) cpu_relax() -#define _raw_read_relax(lock) cpu_relax() -#define _raw_write_relax(lock) cpu_relax() +#define arch_spin_relax(lock) cpu_relax() +#define arch_read_relax(lock) cpu_relax() +#define 
arch_write_relax(lock) cpu_relax() /* The {read|write|spin}_lock() on x86 are full memory barriers. */ static inline void smp_mb__after_lock(void) { } diff --git a/arch/x86/include/asm/spinlock_types.h b/arch/x86/include/asm/spinlock_types.h index 845f81c8709..dcb48b2edc1 100644 --- a/arch/x86/include/asm/spinlock_types.h +++ b/arch/x86/include/asm/spinlock_types.h @@ -5,16 +5,16 @@ # error "please don't include this file directly" #endif -typedef struct raw_spinlock { +typedef struct arch_spinlock { unsigned int slock; -} raw_spinlock_t; +} arch_spinlock_t; -#define __RAW_SPIN_LOCK_UNLOCKED { 0 } +#define __ARCH_SPIN_LOCK_UNLOCKED { 0 } typedef struct { unsigned int lock; -} raw_rwlock_t; +} arch_rwlock_t; -#define __RAW_RW_LOCK_UNLOCKED { RW_LOCK_BIAS } +#define __ARCH_RW_LOCK_UNLOCKED { RW_LOCK_BIAS } #endif /* _ASM_X86_SPINLOCK_TYPES_H */ diff --git a/arch/x86/include/asm/topology.h b/arch/x86/include/asm/topology.h index 40e37b10c6c..c5087d79658 100644 --- a/arch/x86/include/asm/topology.h +++ b/arch/x86/include/asm/topology.h @@ -35,11 +35,16 @@ # endif #endif -/* Node not present */ -#define NUMA_NO_NODE (-1) +/* + * to preserve the visibility of NUMA_NO_NODE definition, + * moved to there from here. May be used independent of + * CONFIG_NUMA. + */ +#include <linux/numa.h> #ifdef CONFIG_NUMA #include <linux/cpumask.h> + #include <asm/mpspec.h> #ifdef CONFIG_X86_32 diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile index 4f2e66e29ec..d87f09bc5a5 100644 --- a/arch/x86/kernel/Makefile +++ b/arch/x86/kernel/Makefile @@ -89,7 +89,6 @@ obj-$(CONFIG_EARLY_PRINTK) += early_printk.o obj-$(CONFIG_HPET_TIMER) += hpet.o obj-$(CONFIG_K8_NB) += k8.o -obj-$(CONFIG_MGEODE_LX) += geode_32.o mfgpt_32.o obj-$(CONFIG_DEBUG_RODATA_TEST) += test_rodata.o obj-$(CONFIG_DEBUG_NX_TEST) += test_nx.o diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c index d5d498fbee4..11a5851f1f5 100644 --- a/arch/x86/kernel/apic/io_apic.c +++ b/arch/x86/kernel/apic/io_apic.c @@ -2431,7 +2431,7 @@ asmlinkage void smp_irq_move_cleanup_interrupt(void) continue; cfg = irq_cfg(irq); - spin_lock(&desc->lock); + raw_spin_lock(&desc->lock); if (vector == cfg->vector && cpumask_test_cpu(me, cfg->domain)) goto unlock; @@ -2450,7 +2450,7 @@ asmlinkage void smp_irq_move_cleanup_interrupt(void) } __get_cpu_var(vector_irq)[vector] = -1; unlock: - spin_unlock(&desc->lock); + raw_spin_unlock(&desc->lock); } irq_exit(); diff --git a/arch/x86/kernel/cpu/mtrr/if.c b/arch/x86/kernel/cpu/mtrr/if.c index 3c1b12d461d..e006e56f699 100644 --- a/arch/x86/kernel/cpu/mtrr/if.c +++ b/arch/x86/kernel/cpu/mtrr/if.c @@ -4,6 +4,7 @@ #include <linux/proc_fs.h> #include <linux/module.h> #include <linux/ctype.h> +#include <linux/string.h> #include <linux/init.h> #define LINE_SIZE 80 @@ -133,8 +134,7 @@ mtrr_write(struct file *file, const char __user *buf, size_t len, loff_t * ppos) return -EINVAL; base = simple_strtoull(line + 5, &ptr, 0); - while (isspace(*ptr)) - ptr++; + ptr = skip_spaces(ptr); if (strncmp(ptr, "size=", 5)) return -EINVAL; @@ -142,14 +142,11 @@ mtrr_write(struct file *file, const char __user *buf, size_t len, loff_t * ppos) size = simple_strtoull(ptr + 5, &ptr, 0); if ((base & 0xfff) || (size & 0xfff)) return -EINVAL; - while (isspace(*ptr)) - ptr++; + ptr = skip_spaces(ptr); if (strncmp(ptr, "type=", 5)) return -EINVAL; - ptr += 5; - while (isspace(*ptr)) - ptr++; + ptr = skip_spaces(ptr + 5); for (i = 0; i < MTRR_NUM_TYPES; ++i) { if (strcmp(ptr, mtrr_strings[i])) diff --git 
a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c index b8ce165dde5..0a0aa1cec8f 100644 --- a/arch/x86/kernel/dumpstack.c +++ b/arch/x86/kernel/dumpstack.c @@ -188,7 +188,7 @@ void dump_stack(void) } EXPORT_SYMBOL(dump_stack); -static raw_spinlock_t die_lock = __RAW_SPIN_LOCK_UNLOCKED; +static arch_spinlock_t die_lock = __ARCH_SPIN_LOCK_UNLOCKED; static int die_owner = -1; static unsigned int die_nest_count; @@ -207,11 +207,11 @@ unsigned __kprobes long oops_begin(void) /* racy, but better than risking deadlock. */ raw_local_irq_save(flags); cpu = smp_processor_id(); - if (!__raw_spin_trylock(&die_lock)) { + if (!arch_spin_trylock(&die_lock)) { if (cpu == die_owner) /* nested oops. should stop eventually */; else - __raw_spin_lock(&die_lock); + arch_spin_lock(&die_lock); } die_nest_count++; die_owner = cpu; @@ -231,7 +231,7 @@ void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr) die_nest_count--; if (!die_nest_count) /* Nest count reaches zero, release the lock. */ - __raw_spin_unlock(&die_lock); + arch_spin_unlock(&die_lock); raw_local_irq_restore(flags); oops_exit(); diff --git a/arch/x86/kernel/geode_32.c b/arch/x86/kernel/geode_32.c deleted file mode 100644 index 9b08e852fd1..00000000000 --- a/arch/x86/kernel/geode_32.c +++ /dev/null @@ -1,196 +0,0 @@ -/* - * AMD Geode southbridge support code - * Copyright (C) 2006, Advanced Micro Devices, Inc. - * Copyright (C) 2007, Andres Salomon <dilinger@debian.org> - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of version 2 of the GNU General Public License - * as published by the Free Software Foundation. - */ - -#include <linux/kernel.h> -#include <linux/module.h> -#include <linux/ioport.h> -#include <linux/io.h> -#include <asm/msr.h> -#include <asm/geode.h> - -static struct { - char *name; - u32 msr; - int size; - u32 base; -} lbars[] = { - { "geode-pms", MSR_LBAR_PMS, LBAR_PMS_SIZE, 0 }, - { "geode-acpi", MSR_LBAR_ACPI, LBAR_ACPI_SIZE, 0 }, - { "geode-gpio", MSR_LBAR_GPIO, LBAR_GPIO_SIZE, 0 }, - { "geode-mfgpt", MSR_LBAR_MFGPT, LBAR_MFGPT_SIZE, 0 } -}; - -static void __init init_lbars(void) -{ - u32 lo, hi; - int i; - - for (i = 0; i < ARRAY_SIZE(lbars); i++) { - rdmsr(lbars[i].msr, lo, hi); - if (hi & 0x01) - lbars[i].base = lo & 0x0000ffff; - - if (lbars[i].base == 0) - printk(KERN_ERR "geode: Couldn't initialize '%s'\n", - lbars[i].name); - } -} - -int geode_get_dev_base(unsigned int dev) -{ - BUG_ON(dev >= ARRAY_SIZE(lbars)); - return lbars[dev].base; -} -EXPORT_SYMBOL_GPL(geode_get_dev_base); - -/* === GPIO API === */ - -void geode_gpio_set(u32 gpio, unsigned int reg) -{ - u32 base = geode_get_dev_base(GEODE_DEV_GPIO); - - if (!base) - return; - - /* low bank register */ - if (gpio & 0xFFFF) - outl(gpio & 0xFFFF, base + reg); - /* high bank register */ - gpio >>= 16; - if (gpio) - outl(gpio, base + 0x80 + reg); -} -EXPORT_SYMBOL_GPL(geode_gpio_set); - -void geode_gpio_clear(u32 gpio, unsigned int reg) -{ - u32 base = geode_get_dev_base(GEODE_DEV_GPIO); - - if (!base) - return; - - /* low bank register */ - if (gpio & 0xFFFF) - outl((gpio & 0xFFFF) << 16, base + reg); - /* high bank register */ - gpio &= (0xFFFF << 16); - if (gpio) - outl(gpio, base + 0x80 + reg); -} -EXPORT_SYMBOL_GPL(geode_gpio_clear); - -int geode_gpio_isset(u32 gpio, unsigned int reg) -{ - u32 base = geode_get_dev_base(GEODE_DEV_GPIO); - u32 val; - - if (!base) - return 0; - - /* low bank register */ - if (gpio & 0xFFFF) { - val = inl(base + reg) & (gpio & 0xFFFF); - if ((gpio & 
0xFFFF) == val) - return 1; - } - /* high bank register */ - gpio >>= 16; - if (gpio) { - val = inl(base + 0x80 + reg) & gpio; - if (gpio == val) - return 1; - } - return 0; -} -EXPORT_SYMBOL_GPL(geode_gpio_isset); - -void geode_gpio_set_irq(unsigned int group, unsigned int irq) -{ - u32 lo, hi; - - if (group > 7 || irq > 15) - return; - - rdmsr(MSR_PIC_ZSEL_HIGH, lo, hi); - - lo &= ~(0xF << (group * 4)); - lo |= (irq & 0xF) << (group * 4); - - wrmsr(MSR_PIC_ZSEL_HIGH, lo, hi); -} -EXPORT_SYMBOL_GPL(geode_gpio_set_irq); - -void geode_gpio_setup_event(unsigned int gpio, int pair, int pme) -{ - u32 base = geode_get_dev_base(GEODE_DEV_GPIO); - u32 offset, shift, val; - - if (gpio >= 24) - offset = GPIO_MAP_W; - else if (gpio >= 16) - offset = GPIO_MAP_Z; - else if (gpio >= 8) - offset = GPIO_MAP_Y; - else - offset = GPIO_MAP_X; - - shift = (gpio % 8) * 4; - - val = inl(base + offset); - - /* Clear whatever was there before */ - val &= ~(0xF << shift); - - /* And set the new value */ - - val |= ((pair & 7) << shift); - - /* Set the PME bit if this is a PME event */ - - if (pme) - val |= (1 << (shift + 3)); - - outl(val, base + offset); -} -EXPORT_SYMBOL_GPL(geode_gpio_setup_event); - -int geode_has_vsa2(void) -{ - static int has_vsa2 = -1; - - if (has_vsa2 == -1) { - u16 val; - - /* - * The VSA has virtual registers that we can query for a - * signature. - */ - outw(VSA_VR_UNLOCK, VSA_VRC_INDEX); - outw(VSA_VR_SIGNATURE, VSA_VRC_INDEX); - - val = inw(VSA_VRC_DATA); - has_vsa2 = (val == AMD_VSA_SIG || val == GSW_VSA_SIG); - } - - return has_vsa2; -} -EXPORT_SYMBOL_GPL(geode_has_vsa2); - -static int __init geode_southbridge_init(void) -{ - if (!is_geode()) - return -ENODEV; - - init_lbars(); - (void) mfgpt_timer_setup(); - return 0; -} - -postcore_initcall(geode_southbridge_init); diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c index 664bcb7384a..91fd0c70a18 100644 --- a/arch/x86/kernel/irq.c +++ b/arch/x86/kernel/irq.c @@ -149,7 +149,7 @@ int show_interrupts(struct seq_file *p, void *v) if (!desc) return 0; - spin_lock_irqsave(&desc->lock, flags); + raw_spin_lock_irqsave(&desc->lock, flags); for_each_online_cpu(j) any_count |= kstat_irqs_cpu(i, j); action = desc->action; @@ -170,7 +170,7 @@ int show_interrupts(struct seq_file *p, void *v) seq_putc(p, '\n'); out: - spin_unlock_irqrestore(&desc->lock, flags); + raw_spin_unlock_irqrestore(&desc->lock, flags); return 0; } @@ -294,12 +294,12 @@ void fixup_irqs(void) continue; /* interrupt's are disabled at this point */ - spin_lock(&desc->lock); + raw_spin_lock(&desc->lock); affinity = desc->affinity; if (!irq_has_action(irq) || cpumask_equal(affinity, cpu_online_mask)) { - spin_unlock(&desc->lock); + raw_spin_unlock(&desc->lock); continue; } @@ -326,7 +326,7 @@ void fixup_irqs(void) if (!(desc->status & IRQ_MOVE_PCNTXT) && desc->chip->unmask) desc->chip->unmask(irq); - spin_unlock(&desc->lock); + raw_spin_unlock(&desc->lock); if (break_affinity && set_affinity) printk("Broke affinity for irq %i\n", irq); @@ -356,10 +356,10 @@ void fixup_irqs(void) irq = __get_cpu_var(vector_irq)[vector]; desc = irq_to_desc(irq); - spin_lock(&desc->lock); + raw_spin_lock(&desc->lock); if (desc->chip->retrigger) desc->chip->retrigger(irq); - spin_unlock(&desc->lock); + raw_spin_unlock(&desc->lock); } } } diff --git a/arch/x86/kernel/mfgpt_32.c b/arch/x86/kernel/mfgpt_32.c deleted file mode 100644 index 2a62d843f01..00000000000 --- a/arch/x86/kernel/mfgpt_32.c +++ /dev/null @@ -1,410 +0,0 @@ -/* - * Driver/API for AMD Geode Multi-Function General Purpose 
Timers (MFGPT) - * - * Copyright (C) 2006, Advanced Micro Devices, Inc. - * Copyright (C) 2007, Andres Salomon <dilinger@debian.org> - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of version 2 of the GNU General Public License - * as published by the Free Software Foundation. - * - * The MFGPTs are documented in AMD Geode CS5536 Companion Device Data Book. - */ - -/* - * We are using the 32.768kHz input clock - it's the only one that has the - * ranges we find desirable. The following table lists the suitable - * divisors and the associated Hz, minimum interval and the maximum interval: - * - * Divisor Hz Min Delta (s) Max Delta (s) - * 1 32768 .00048828125 2.000 - * 2 16384 .0009765625 4.000 - * 4 8192 .001953125 8.000 - * 8 4096 .00390625 16.000 - * 16 2048 .0078125 32.000 - * 32 1024 .015625 64.000 - * 64 512 .03125 128.000 - * 128 256 .0625 256.000 - * 256 128 .125 512.000 - */ - -#include <linux/kernel.h> -#include <linux/interrupt.h> -#include <linux/module.h> -#include <asm/geode.h> - -#define MFGPT_DEFAULT_IRQ 7 - -static struct mfgpt_timer_t { - unsigned int avail:1; -} mfgpt_timers[MFGPT_MAX_TIMERS]; - -/* Selected from the table above */ - -#define MFGPT_DIVISOR 16 -#define MFGPT_SCALE 4 /* divisor = 2^(scale) */ -#define MFGPT_HZ (32768 / MFGPT_DIVISOR) -#define MFGPT_PERIODIC (MFGPT_HZ / HZ) - -/* Allow for disabling of MFGPTs */ -static int disable; -static int __init mfgpt_disable(char *s) -{ - disable = 1; - return 1; -} -__setup("nomfgpt", mfgpt_disable); - -/* Reset the MFGPT timers. This is required by some broken BIOSes which already - * do the same and leave the system in an unstable state. TinyBIOS 0.98 is - * affected at least (0.99 is OK with MFGPT workaround left to off). - */ -static int __init mfgpt_fix(char *s) -{ - u32 val, dummy; - - /* The following udocumented bit resets the MFGPT timers */ - val = 0xFF; dummy = 0; - wrmsr(MSR_MFGPT_SETUP, val, dummy); - return 1; -} -__setup("mfgptfix", mfgpt_fix); - -/* - * Check whether any MFGPTs are available for the kernel to use. In most - * cases, firmware that uses AMD's VSA code will claim all timers during - * bootup; we certainly don't want to take them if they're already in use. - * In other cases (such as with VSAless OpenFirmware), the system firmware - * leaves timers available for us to use. - */ - - -static int timers = -1; - -static void geode_mfgpt_detect(void) -{ - int i; - u16 val; - - timers = 0; - - if (disable) { - printk(KERN_INFO "geode-mfgpt: MFGPT support is disabled\n"); - goto done; - } - - if (!geode_get_dev_base(GEODE_DEV_MFGPT)) { - printk(KERN_INFO "geode-mfgpt: MFGPT LBAR is not set up\n"); - goto done; - } - - for (i = 0; i < MFGPT_MAX_TIMERS; i++) { - val = geode_mfgpt_read(i, MFGPT_REG_SETUP); - if (!(val & MFGPT_SETUP_SETUP)) { - mfgpt_timers[i].avail = 1; - timers++; - } - } - -done: - printk(KERN_INFO "geode-mfgpt: %d MFGPT timers available.\n", timers); -} - -int geode_mfgpt_toggle_event(int timer, int cmp, int event, int enable) -{ - u32 msr, mask, value, dummy; - int shift = (cmp == MFGPT_CMP1) ? 0 : 8; - - if (timer < 0 || timer >= MFGPT_MAX_TIMERS) - return -EIO; - - /* - * The register maps for these are described in sections 6.17.1.x of - * the AMD Geode CS5536 Companion Device Data Book. - */ - switch (event) { - case MFGPT_EVENT_RESET: - /* - * XXX: According to the docs, we cannot reset timers above - * 6; that is, resets for 7 and 8 will be ignored. Is this - * a problem? 
-dilinger - */ - msr = MSR_MFGPT_NR; - mask = 1 << (timer + 24); - break; - - case MFGPT_EVENT_NMI: - msr = MSR_MFGPT_NR; - mask = 1 << (timer + shift); - break; - - case MFGPT_EVENT_IRQ: - msr = MSR_MFGPT_IRQ; - mask = 1 << (timer + shift); - break; - - default: - return -EIO; - } - - rdmsr(msr, value, dummy); - - if (enable) - value |= mask; - else - value &= ~mask; - - wrmsr(msr, value, dummy); - return 0; -} -EXPORT_SYMBOL_GPL(geode_mfgpt_toggle_event); - -int geode_mfgpt_set_irq(int timer, int cmp, int *irq, int enable) -{ - u32 zsel, lpc, dummy; - int shift; - - if (timer < 0 || timer >= MFGPT_MAX_TIMERS) - return -EIO; - - /* - * Unfortunately, MFGPTs come in pairs sharing their IRQ lines. If VSA - * is using the same CMP of the timer's Siamese twin, the IRQ is set to - * 2, and we mustn't use nor change it. - * XXX: Likewise, 2 Linux drivers might clash if the 2nd overwrites the - * IRQ of the 1st. This can only happen if forcing an IRQ, calling this - * with *irq==0 is safe. Currently there _are_ no 2 drivers. - */ - rdmsr(MSR_PIC_ZSEL_LOW, zsel, dummy); - shift = ((cmp == MFGPT_CMP1 ? 0 : 4) + timer % 4) * 4; - if (((zsel >> shift) & 0xF) == 2) - return -EIO; - - /* Choose IRQ: if none supplied, keep IRQ already set or use default */ - if (!*irq) - *irq = (zsel >> shift) & 0xF; - if (!*irq) - *irq = MFGPT_DEFAULT_IRQ; - - /* Can't use IRQ if it's 0 (=disabled), 2, or routed to LPC */ - if (*irq < 1 || *irq == 2 || *irq > 15) - return -EIO; - rdmsr(MSR_PIC_IRQM_LPC, lpc, dummy); - if (lpc & (1 << *irq)) - return -EIO; - - /* All chosen and checked - go for it */ - if (geode_mfgpt_toggle_event(timer, cmp, MFGPT_EVENT_IRQ, enable)) - return -EIO; - if (enable) { - zsel = (zsel & ~(0xF << shift)) | (*irq << shift); - wrmsr(MSR_PIC_ZSEL_LOW, zsel, dummy); - } - - return 0; -} - -static int mfgpt_get(int timer) -{ - mfgpt_timers[timer].avail = 0; - printk(KERN_INFO "geode-mfgpt: Registered timer %d\n", timer); - return timer; -} - -int geode_mfgpt_alloc_timer(int timer, int domain) -{ - int i; - - if (timers == -1) { - /* timers haven't been detected yet */ - geode_mfgpt_detect(); - } - - if (!timers) - return -1; - - if (timer >= MFGPT_MAX_TIMERS) - return -1; - - if (timer < 0) { - /* Try to find an available timer */ - for (i = 0; i < MFGPT_MAX_TIMERS; i++) { - if (mfgpt_timers[i].avail) - return mfgpt_get(i); - - if (i == 5 && domain == MFGPT_DOMAIN_WORKING) - break; - } - } else { - /* If they requested a specific timer, try to honor that */ - if (mfgpt_timers[timer].avail) - return mfgpt_get(timer); - } - - /* No timers available - too bad */ - return -1; -} -EXPORT_SYMBOL_GPL(geode_mfgpt_alloc_timer); - - -#ifdef CONFIG_GEODE_MFGPT_TIMER - -/* - * The MFPGT timers on the CS5536 provide us with suitable timers to use - * as clock event sources - not as good as a HPET or APIC, but certainly - * better than the PIT. This isn't a general purpose MFGPT driver, but - * a simplified one designed specifically to act as a clock event source. - * For full details about the MFGPT, please consult the CS5536 data sheet. 
- */ - -#include <linux/clocksource.h> -#include <linux/clockchips.h> - -static unsigned int mfgpt_tick_mode = CLOCK_EVT_MODE_SHUTDOWN; -static u16 mfgpt_event_clock; - -static int irq; -static int __init mfgpt_setup(char *str) -{ - get_option(&str, &irq); - return 1; -} -__setup("mfgpt_irq=", mfgpt_setup); - -static void mfgpt_disable_timer(u16 clock) -{ - /* avoid races by clearing CMP1 and CMP2 unconditionally */ - geode_mfgpt_write(clock, MFGPT_REG_SETUP, (u16) ~MFGPT_SETUP_CNTEN | - MFGPT_SETUP_CMP1 | MFGPT_SETUP_CMP2); -} - -static int mfgpt_next_event(unsigned long, struct clock_event_device *); -static void mfgpt_set_mode(enum clock_event_mode, struct clock_event_device *); - -static struct clock_event_device mfgpt_clockevent = { - .name = "mfgpt-timer", - .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT, - .set_mode = mfgpt_set_mode, - .set_next_event = mfgpt_next_event, - .rating = 250, - .cpumask = cpu_all_mask, - .shift = 32 -}; - -static void mfgpt_start_timer(u16 delta) -{ - geode_mfgpt_write(mfgpt_event_clock, MFGPT_REG_CMP2, (u16) delta); - geode_mfgpt_write(mfgpt_event_clock, MFGPT_REG_COUNTER, 0); - - geode_mfgpt_write(mfgpt_event_clock, MFGPT_REG_SETUP, - MFGPT_SETUP_CNTEN | MFGPT_SETUP_CMP2); -} - -static void mfgpt_set_mode(enum clock_event_mode mode, - struct clock_event_device *evt) -{ - mfgpt_disable_timer(mfgpt_event_clock); - - if (mode == CLOCK_EVT_MODE_PERIODIC) - mfgpt_start_timer(MFGPT_PERIODIC); - - mfgpt_tick_mode = mode; -} - -static int mfgpt_next_event(unsigned long delta, struct clock_event_device *evt) -{ - mfgpt_start_timer(delta); - return 0; -} - -static irqreturn_t mfgpt_tick(int irq, void *dev_id) -{ - u16 val = geode_mfgpt_read(mfgpt_event_clock, MFGPT_REG_SETUP); - - /* See if the interrupt was for us */ - if (!(val & (MFGPT_SETUP_SETUP | MFGPT_SETUP_CMP2 | MFGPT_SETUP_CMP1))) - return IRQ_NONE; - - /* Turn off the clock (and clear the event) */ - mfgpt_disable_timer(mfgpt_event_clock); - - if (mfgpt_tick_mode == CLOCK_EVT_MODE_SHUTDOWN) - return IRQ_HANDLED; - - /* Clear the counter */ - geode_mfgpt_write(mfgpt_event_clock, MFGPT_REG_COUNTER, 0); - - /* Restart the clock in periodic mode */ - - if (mfgpt_tick_mode == CLOCK_EVT_MODE_PERIODIC) { - geode_mfgpt_write(mfgpt_event_clock, MFGPT_REG_SETUP, - MFGPT_SETUP_CNTEN | MFGPT_SETUP_CMP2); - } - - mfgpt_clockevent.event_handler(&mfgpt_clockevent); - return IRQ_HANDLED; -} - -static struct irqaction mfgptirq = { - .handler = mfgpt_tick, - .flags = IRQF_DISABLED | IRQF_NOBALANCING | IRQF_TIMER, - .name = "mfgpt-timer" -}; - -int __init mfgpt_timer_setup(void) -{ - int timer, ret; - u16 val; - - timer = geode_mfgpt_alloc_timer(MFGPT_TIMER_ANY, MFGPT_DOMAIN_WORKING); - if (timer < 0) { - printk(KERN_ERR - "mfgpt-timer: Could not allocate a MFPGT timer\n"); - return -ENODEV; - } - - mfgpt_event_clock = timer; - - /* Set up the IRQ on the MFGPT side */ - if (geode_mfgpt_setup_irq(mfgpt_event_clock, MFGPT_CMP2, &irq)) { - printk(KERN_ERR "mfgpt-timer: Could not set up IRQ %d\n", irq); - return -EIO; - } - - /* And register it with the kernel */ - ret = setup_irq(irq, &mfgptirq); - - if (ret) { - printk(KERN_ERR - "mfgpt-timer: Unable to set up the interrupt.\n"); - goto err; - } - - /* Set the clock scale and enable the event mode for CMP2 */ - val = MFGPT_SCALE | (3 << 8); - - geode_mfgpt_write(mfgpt_event_clock, MFGPT_REG_SETUP, val); - - /* Set up the clock event */ - mfgpt_clockevent.mult = div_sc(MFGPT_HZ, NSEC_PER_SEC, - mfgpt_clockevent.shift); - mfgpt_clockevent.min_delta_ns = 
clockevent_delta2ns(0xF, - &mfgpt_clockevent); - mfgpt_clockevent.max_delta_ns = clockevent_delta2ns(0xFFFE, - &mfgpt_clockevent); - - printk(KERN_INFO - "mfgpt-timer: Registering MFGPT timer %d as a clock event, using IRQ %d\n", - timer, irq); - clockevents_register_device(&mfgpt_clockevent); - - return 0; - -err: - geode_mfgpt_release_irq(mfgpt_event_clock, MFGPT_CMP2, &irq); - printk(KERN_ERR - "mfgpt-timer: Unable to set up the MFGPT clock source\n"); - return -EIO; -} - -#endif diff --git a/arch/x86/kernel/olpc.c b/arch/x86/kernel/olpc.c index 4006c522adc..9d1d263f786 100644 --- a/arch/x86/kernel/olpc.c +++ b/arch/x86/kernel/olpc.c @@ -212,7 +212,7 @@ static int __init olpc_init(void) unsigned char *romsig; /* The ioremap check is dangerous; limit what we run it on */ - if (!is_geode() || geode_has_vsa2()) + if (!is_geode() || cs5535_has_vsa2()) return 0; spin_lock_init(&ec_lock); @@ -244,7 +244,7 @@ static int __init olpc_init(void) (unsigned char *) &olpc_platform_info.ecver, 1); /* check to see if the VSA exists */ - if (geode_has_vsa2()) + if (cs5535_has_vsa2()) olpc_platform_info.flags |= OLPC_F_VSA; printk(KERN_INFO "OLPC board revision %s%X (EC=%x)\n", diff --git a/arch/x86/kernel/paravirt-spinlocks.c b/arch/x86/kernel/paravirt-spinlocks.c index 3a7c5a44082..676b8c77a97 100644 --- a/arch/x86/kernel/paravirt-spinlocks.c +++ b/arch/x86/kernel/paravirt-spinlocks.c @@ -8,9 +8,9 @@ #include <asm/paravirt.h> static inline void -default_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags) +default_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags) { - __raw_spin_lock(lock); + arch_spin_lock(lock); } struct pv_lock_ops pv_lock_ops = { diff --git a/arch/x86/kernel/reboot_fixups_32.c b/arch/x86/kernel/reboot_fixups_32.c index 201eab63b05..fda313ebbb0 100644 --- a/arch/x86/kernel/reboot_fixups_32.c +++ b/arch/x86/kernel/reboot_fixups_32.c @@ -12,7 +12,7 @@ #include <linux/interrupt.h> #include <asm/reboot_fixups.h> #include <asm/msr.h> -#include <asm/geode.h> +#include <linux/cs5535.h> static void cs5530a_warm_reset(struct pci_dev *dev) { diff --git a/arch/x86/kernel/tsc_sync.c b/arch/x86/kernel/tsc_sync.c index eed156851f5..0aa5fed8b9e 100644 --- a/arch/x86/kernel/tsc_sync.c +++ b/arch/x86/kernel/tsc_sync.c @@ -33,7 +33,7 @@ static __cpuinitdata atomic_t stop_count; * we want to have the fastest, inlined, non-debug version * of a critical section, to be able to prove TSC time-warps: */ -static __cpuinitdata raw_spinlock_t sync_lock = __RAW_SPIN_LOCK_UNLOCKED; +static __cpuinitdata arch_spinlock_t sync_lock = __ARCH_SPIN_LOCK_UNLOCKED; static __cpuinitdata cycles_t last_tsc; static __cpuinitdata cycles_t max_warp; @@ -62,13 +62,13 @@ static __cpuinit void check_tsc_warp(void) * previous TSC that was measured (possibly on * another CPU) and update the previous TSC timestamp. 
*/ - __raw_spin_lock(&sync_lock); + arch_spin_lock(&sync_lock); prev = last_tsc; rdtsc_barrier(); now = get_cycles(); rdtsc_barrier(); last_tsc = now; - __raw_spin_unlock(&sync_lock); + arch_spin_unlock(&sync_lock); /* * Be nice every now and then (and also check whether @@ -87,10 +87,10 @@ static __cpuinit void check_tsc_warp(void) * we saw a time-warp of the TSC going backwards: */ if (unlikely(prev > now)) { - __raw_spin_lock(&sync_lock); + arch_spin_lock(&sync_lock); max_warp = max(max_warp, prev - now); nr_warps++; - __raw_spin_unlock(&sync_lock); + arch_spin_unlock(&sync_lock); } } WARN(!(now-start), diff --git a/arch/x86/xen/spinlock.c b/arch/x86/xen/spinlock.c index 36a5141108d..24ded31b5ae 100644 --- a/arch/x86/xen/spinlock.c +++ b/arch/x86/xen/spinlock.c @@ -120,14 +120,14 @@ struct xen_spinlock { unsigned short spinners; /* count of waiting cpus */ }; -static int xen_spin_is_locked(struct raw_spinlock *lock) +static int xen_spin_is_locked(struct arch_spinlock *lock) { struct xen_spinlock *xl = (struct xen_spinlock *)lock; return xl->lock != 0; } -static int xen_spin_is_contended(struct raw_spinlock *lock) +static int xen_spin_is_contended(struct arch_spinlock *lock) { struct xen_spinlock *xl = (struct xen_spinlock *)lock; @@ -136,7 +136,7 @@ static int xen_spin_is_contended(struct raw_spinlock *lock) return xl->spinners != 0; } -static int xen_spin_trylock(struct raw_spinlock *lock) +static int xen_spin_trylock(struct arch_spinlock *lock) { struct xen_spinlock *xl = (struct xen_spinlock *)lock; u8 old = 1; @@ -181,7 +181,7 @@ static inline void unspinning_lock(struct xen_spinlock *xl, struct xen_spinlock __get_cpu_var(lock_spinners) = prev; } -static noinline int xen_spin_lock_slow(struct raw_spinlock *lock, bool irq_enable) +static noinline int xen_spin_lock_slow(struct arch_spinlock *lock, bool irq_enable) { struct xen_spinlock *xl = (struct xen_spinlock *)lock; struct xen_spinlock *prev; @@ -254,7 +254,7 @@ out: return ret; } -static inline void __xen_spin_lock(struct raw_spinlock *lock, bool irq_enable) +static inline void __xen_spin_lock(struct arch_spinlock *lock, bool irq_enable) { struct xen_spinlock *xl = (struct xen_spinlock *)lock; unsigned timeout; @@ -291,12 +291,12 @@ static inline void __xen_spin_lock(struct raw_spinlock *lock, bool irq_enable) spin_time_accum_total(start_spin); } -static void xen_spin_lock(struct raw_spinlock *lock) +static void xen_spin_lock(struct arch_spinlock *lock) { __xen_spin_lock(lock, false); } -static void xen_spin_lock_flags(struct raw_spinlock *lock, unsigned long flags) +static void xen_spin_lock_flags(struct arch_spinlock *lock, unsigned long flags) { __xen_spin_lock(lock, !raw_irqs_disabled_flags(flags)); } @@ -317,7 +317,7 @@ static noinline void xen_spin_unlock_slow(struct xen_spinlock *xl) } } -static void xen_spin_unlock(struct raw_spinlock *lock) +static void xen_spin_unlock(struct arch_spinlock *lock) { struct xen_spinlock *xl = (struct xen_spinlock *)lock; diff --git a/arch/xtensa/kernel/irq.c b/arch/xtensa/kernel/irq.c index a1badb32fcd..8cd38484e13 100644 --- a/arch/xtensa/kernel/irq.c +++ b/arch/xtensa/kernel/irq.c @@ -90,7 +90,7 @@ int show_interrupts(struct seq_file *p, void *v) } if (i < NR_IRQS) { - spin_lock_irqsave(&irq_desc[i].lock, flags); + raw_spin_lock_irqsave(&irq_desc[i].lock, flags); action = irq_desc[i].action; if (!action) goto skip; @@ -109,7 +109,7 @@ int show_interrupts(struct seq_file *p, void *v) seq_putc(p, '\n'); skip: - spin_unlock_irqrestore(&irq_desc[i].lock, flags); + 
raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags); } else if (i == NR_IRQS) { seq_printf(p, "NMI: "); for_each_online_cpu(j) diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c index cfb0b2f5f63..e2f80463ed0 100644 --- a/block/cfq-iosched.c +++ b/block/cfq-iosched.c @@ -283,7 +283,7 @@ struct cfq_data { */ struct cfq_queue oom_cfqq; - unsigned long last_end_sync_rq; + unsigned long last_delayed_sync; /* List of cfq groups being managed on this device*/ struct hlist_head cfqg_list; @@ -319,7 +319,6 @@ enum cfqq_state_flags { CFQ_CFQQ_FLAG_coop, /* cfqq is shared */ CFQ_CFQQ_FLAG_deep, /* sync cfqq experienced large depth */ CFQ_CFQQ_FLAG_wait_busy, /* Waiting for next request */ - CFQ_CFQQ_FLAG_wait_busy_done, /* Got new request. Expire the queue */ }; #define CFQ_CFQQ_FNS(name) \ @@ -348,7 +347,6 @@ CFQ_CFQQ_FNS(sync); CFQ_CFQQ_FNS(coop); CFQ_CFQQ_FNS(deep); CFQ_CFQQ_FNS(wait_busy); -CFQ_CFQQ_FNS(wait_busy_done); #undef CFQ_CFQQ_FNS #ifdef CONFIG_DEBUG_CFQ_IOSCHED @@ -1574,7 +1572,6 @@ __cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq, cfq_clear_cfqq_wait_request(cfqq); cfq_clear_cfqq_wait_busy(cfqq); - cfq_clear_cfqq_wait_busy_done(cfqq); /* * store what was left of this slice, if the queue idled/timed out @@ -1750,6 +1747,12 @@ static struct cfq_queue *cfq_close_cooperator(struct cfq_data *cfqd, return NULL; /* + * Don't search priority tree if it's the only queue in the group. + */ + if (cur_cfqq->cfqg->nr_cfqq == 1) + return NULL; + + /* * We should notice if some of the queues are cooperating, eg * working closely on the same area of the disk. In that case, * we can group them together and don't waste time idling. @@ -2110,7 +2113,9 @@ static void cfq_choose_cfqg(struct cfq_data *cfqd) cfqd->workload_expires = jiffies + cfqg->saved_workload_slice; cfqd->serving_type = cfqg->saved_workload; cfqd->serving_prio = cfqg->saved_serving_prio; - } + } else + cfqd->workload_expires = jiffies - 1; + choose_service_tree(cfqd, cfqg); } @@ -2128,14 +2133,35 @@ static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd) if (!cfqd->rq_queued) return NULL; + /* - * The active queue has run out of time, expire it and select new. + * We were waiting for group to get backlogged. Expire the queue */ - if ((cfq_slice_used(cfqq) || cfq_cfqq_wait_busy_done(cfqq)) - && !cfq_cfqq_must_dispatch(cfqq)) + if (cfq_cfqq_wait_busy(cfqq) && !RB_EMPTY_ROOT(&cfqq->sort_list)) goto expire; /* + * The active queue has run out of time, expire it and select new. + */ + if (cfq_slice_used(cfqq) && !cfq_cfqq_must_dispatch(cfqq)) { + /* + * If slice had not expired at the completion of last request + * we might not have turned on wait_busy flag. Don't expire + * the queue yet. Allow the group to get backlogged. + * + * The very fact that we have used the slice, that means we + * have been idling all along on this queue and it should be + * ok to wait for this request to complete. + */ + if (cfqq->cfqg->nr_cfqq == 1 && RB_EMPTY_ROOT(&cfqq->sort_list) + && cfqq->dispatched && cfq_should_idle(cfqd, cfqq)) { + cfqq = NULL; + goto keep_queue; + } else + goto expire; + } + + /* * The active queue has requests and isn't expired, allow it to * dispatch. 
*/ @@ -2264,7 +2290,7 @@ static bool cfq_may_dispatch(struct cfq_data *cfqd, struct cfq_queue *cfqq) * based on the last sync IO we serviced */ if (!cfq_cfqq_sync(cfqq) && cfqd->cfq_latency) { - unsigned long last_sync = jiffies - cfqd->last_end_sync_rq; + unsigned long last_sync = jiffies - cfqd->last_delayed_sync; unsigned int depth; depth = last_sync / cfqd->cfq_slice[1]; @@ -3165,10 +3191,6 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq, cfqq->last_request_pos = blk_rq_pos(rq) + blk_rq_sectors(rq); if (cfqq == cfqd->active_queue) { - if (cfq_cfqq_wait_busy(cfqq)) { - cfq_clear_cfqq_wait_busy(cfqq); - cfq_mark_cfqq_wait_busy_done(cfqq); - } /* * Remember that we saw a request from this process, but * don't start queuing just yet. Otherwise we risk seeing lots @@ -3183,6 +3205,7 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq, if (blk_rq_bytes(rq) > PAGE_CACHE_SIZE || cfqd->busy_queues > 1) { del_timer(&cfqd->idle_slice_timer); + cfq_clear_cfqq_wait_request(cfqq); __blk_run_queue(cfqd->queue); } else cfq_mark_cfqq_must_dispatch(cfqq); @@ -3251,6 +3274,35 @@ static void cfq_update_hw_tag(struct cfq_data *cfqd) cfqd->hw_tag = 0; } +static bool cfq_should_wait_busy(struct cfq_data *cfqd, struct cfq_queue *cfqq) +{ + struct cfq_io_context *cic = cfqd->active_cic; + + /* If there are other queues in the group, don't wait */ + if (cfqq->cfqg->nr_cfqq > 1) + return false; + + if (cfq_slice_used(cfqq)) + return true; + + /* if slice left is less than think time, wait busy */ + if (cic && sample_valid(cic->ttime_samples) + && (cfqq->slice_end - jiffies < cic->ttime_mean)) + return true; + + /* + * If think times is less than a jiffy than ttime_mean=0 and above + * will not be true. It might happen that slice has not expired yet + * but will expire soon (4-5 ns) during select_queue(). To cover the + * case where think time is less than a jiffy, mark the queue wait + * busy if only 1 jiffy is left in the slice. + */ + if (cfqq->slice_end - jiffies == 1) + return true; + + return false; +} + static void cfq_completed_request(struct request_queue *q, struct request *rq) { struct cfq_queue *cfqq = RQ_CFQQ(rq); @@ -3273,7 +3325,8 @@ static void cfq_completed_request(struct request_queue *q, struct request *rq) if (sync) { RQ_CIC(rq)->last_end_request = now; - cfqd->last_end_sync_rq = now; + if (!time_after(rq->start_time + cfqd->cfq_fifo_expire[1], now)) + cfqd->last_delayed_sync = now; } /* @@ -3289,11 +3342,10 @@ static void cfq_completed_request(struct request_queue *q, struct request *rq) } /* - * If this queue consumed its slice and this is last queue - * in the group, wait for next request before we expire - * the queue + * Should we wait for next request to come in before we expire + * the queue. */ - if (cfq_slice_used(cfqq) && cfqq->cfqg->nr_cfqq == 1) { + if (cfq_should_wait_busy(cfqd, cfqq)) { cfqq->slice_end = jiffies + cfqd->cfq_slice_idle; cfq_mark_cfqq_wait_busy(cfqq); } @@ -3711,7 +3763,11 @@ static void *cfq_init_queue(struct request_queue *q) cfqd->cfq_latency = 1; cfqd->cfq_group_isolation = 0; cfqd->hw_tag = -1; - cfqd->last_end_sync_rq = jiffies; + /* + * we optimistically start assuming sync ops weren't delayed in last + * second, in order to have larger depth for async operations. 
+ */ + cfqd->last_delayed_sync = jiffies - HZ; INIT_RCU_HEAD(&cfqd->rcu); return cfqd; } diff --git a/drivers/Kconfig b/drivers/Kconfig index 26e434ad373..8a07363417e 100644 --- a/drivers/Kconfig +++ b/drivers/Kconfig @@ -96,6 +96,8 @@ source "drivers/edac/Kconfig" source "drivers/rtc/Kconfig" +source "drivers/clocksource/Kconfig" + source "drivers/dma/Kconfig" source "drivers/dca/Kconfig" diff --git a/drivers/base/node.c b/drivers/base/node.c index 1fe5536d404..70122791683 100644 --- a/drivers/base/node.c +++ b/drivers/base/node.c @@ -173,6 +173,47 @@ static ssize_t node_read_distance(struct sys_device * dev, } static SYSDEV_ATTR(distance, S_IRUGO, node_read_distance, NULL); +#ifdef CONFIG_HUGETLBFS +/* + * hugetlbfs per node attributes registration interface: + * When/if hugetlb[fs] subsystem initializes [sometime after this module], + * it will register its per node attributes for all online nodes with + * memory. It will also call register_hugetlbfs_with_node(), below, to + * register its attribute registration functions with this node driver. + * Once these hooks have been initialized, the node driver will call into + * the hugetlb module to [un]register attributes for hot-plugged nodes. + */ +static node_registration_func_t __hugetlb_register_node; +static node_registration_func_t __hugetlb_unregister_node; + +static inline bool hugetlb_register_node(struct node *node) +{ + if (__hugetlb_register_node && + node_state(node->sysdev.id, N_HIGH_MEMORY)) { + __hugetlb_register_node(node); + return true; + } + return false; +} + +static inline void hugetlb_unregister_node(struct node *node) +{ + if (__hugetlb_unregister_node) + __hugetlb_unregister_node(node); +} + +void register_hugetlbfs_with_node(node_registration_func_t doregister, + node_registration_func_t unregister) +{ + __hugetlb_register_node = doregister; + __hugetlb_unregister_node = unregister; +} +#else +static inline void hugetlb_register_node(struct node *node) {} + +static inline void hugetlb_unregister_node(struct node *node) {} +#endif + /* * register_node - Setup a sysfs device for a node. 
@@ -196,6 +237,8 @@ int register_node(struct node *node, int num, struct node *parent) sysdev_create_file(&node->sysdev, &attr_distance); scan_unevictable_register_node(node); + + hugetlb_register_node(node); } return error; } @@ -216,6 +259,7 @@ void unregister_node(struct node *node) sysdev_remove_file(&node->sysdev, &attr_distance); scan_unevictable_unregister_node(node); + hugetlb_unregister_node(node); /* no-op, if memoryless node */ sysdev_unregister(&node->sysdev); } @@ -227,26 +271,43 @@ struct node node_devices[MAX_NUMNODES]; */ int register_cpu_under_node(unsigned int cpu, unsigned int nid) { - if (node_online(nid)) { - struct sys_device *obj = get_cpu_sysdev(cpu); - if (!obj) - return 0; - return sysfs_create_link(&node_devices[nid].sysdev.kobj, - &obj->kobj, - kobject_name(&obj->kobj)); - } + int ret; + struct sys_device *obj; - return 0; + if (!node_online(nid)) + return 0; + + obj = get_cpu_sysdev(cpu); + if (!obj) + return 0; + + ret = sysfs_create_link(&node_devices[nid].sysdev.kobj, + &obj->kobj, + kobject_name(&obj->kobj)); + if (ret) + return ret; + + return sysfs_create_link(&obj->kobj, + &node_devices[nid].sysdev.kobj, + kobject_name(&node_devices[nid].sysdev.kobj)); } int unregister_cpu_under_node(unsigned int cpu, unsigned int nid) { - if (node_online(nid)) { - struct sys_device *obj = get_cpu_sysdev(cpu); - if (obj) - sysfs_remove_link(&node_devices[nid].sysdev.kobj, - kobject_name(&obj->kobj)); - } + struct sys_device *obj; + + if (!node_online(nid)) + return 0; + + obj = get_cpu_sysdev(cpu); + if (!obj) + return 0; + + sysfs_remove_link(&node_devices[nid].sysdev.kobj, + kobject_name(&obj->kobj)); + sysfs_remove_link(&obj->kobj, + kobject_name(&node_devices[nid].sysdev.kobj)); + return 0; } @@ -268,6 +329,7 @@ static int get_nid_for_pfn(unsigned long pfn) /* register memory section under specified node if it spans that node */ int register_mem_sect_under_node(struct memory_block *mem_blk, int nid) { + int ret; unsigned long pfn, sect_start_pfn, sect_end_pfn; if (!mem_blk) @@ -284,9 +346,15 @@ int register_mem_sect_under_node(struct memory_block *mem_blk, int nid) continue; if (page_nid != nid) continue; - return sysfs_create_link_nowarn(&node_devices[nid].sysdev.kobj, + ret = sysfs_create_link_nowarn(&node_devices[nid].sysdev.kobj, &mem_blk->sysdev.kobj, kobject_name(&mem_blk->sysdev.kobj)); + if (ret) + return ret; + + return sysfs_create_link_nowarn(&mem_blk->sysdev.kobj, + &node_devices[nid].sysdev.kobj, + kobject_name(&node_devices[nid].sysdev.kobj)); } /* mem section does not span the specified node */ return 0; @@ -295,12 +363,16 @@ int register_mem_sect_under_node(struct memory_block *mem_blk, int nid) /* unregister memory section under all nodes that it spans */ int unregister_mem_sect_under_nodes(struct memory_block *mem_blk) { - nodemask_t unlinked_nodes; + NODEMASK_ALLOC(nodemask_t, unlinked_nodes, GFP_KERNEL); unsigned long pfn, sect_start_pfn, sect_end_pfn; - if (!mem_blk) + if (!mem_blk) { + NODEMASK_FREE(unlinked_nodes); return -EFAULT; - nodes_clear(unlinked_nodes); + } + if (!unlinked_nodes) + return -ENOMEM; + nodes_clear(*unlinked_nodes); sect_start_pfn = section_nr_to_pfn(mem_blk->phys_index); sect_end_pfn = sect_start_pfn + PAGES_PER_SECTION - 1; for (pfn = sect_start_pfn; pfn <= sect_end_pfn; pfn++) { @@ -311,11 +383,14 @@ int unregister_mem_sect_under_nodes(struct memory_block *mem_blk) continue; if (!node_online(nid)) continue; - if (node_test_and_set(nid, unlinked_nodes)) + if (node_test_and_set(nid, *unlinked_nodes)) continue; 
sysfs_remove_link(&node_devices[nid].sysdev.kobj, kobject_name(&mem_blk->sysdev.kobj)); + sysfs_remove_link(&mem_blk->sysdev.kobj, + kobject_name(&node_devices[nid].sysdev.kobj)); } + NODEMASK_FREE(unlinked_nodes); return 0; } @@ -345,9 +420,77 @@ static int link_mem_sections(int nid) } return err; } -#else + +#ifdef CONFIG_HUGETLBFS +/* + * Handle per node hstate attribute [un]registration on transitions + * to/from memoryless state. + */ +static void node_hugetlb_work(struct work_struct *work) +{ + struct node *node = container_of(work, struct node, node_work); + + /* + * We only get here when a node transitions to/from memoryless state. + * We can detect which transition occurred by examining whether the + * node has memory now. hugetlb_register_node() already checks this + * so we try to register the attributes. If that fails, then the + * node has transitioned to memoryless, try to unregister the + * attributes. + */ + if (!hugetlb_register_node(node)) + hugetlb_unregister_node(node); +} + +static void init_node_hugetlb_work(int nid) +{ + INIT_WORK(&node_devices[nid].node_work, node_hugetlb_work); +} + +static int node_memory_callback(struct notifier_block *self, + unsigned long action, void *arg) +{ + struct memory_notify *mnb = arg; + int nid = mnb->status_change_nid; + + switch (action) { + case MEM_ONLINE: + case MEM_OFFLINE: + /* + * offload per node hstate [un]registration to a work thread + * when transitioning to/from memoryless state. + */ + if (nid != NUMA_NO_NODE) + schedule_work(&node_devices[nid].node_work); + break; + + case MEM_GOING_ONLINE: + case MEM_GOING_OFFLINE: + case MEM_CANCEL_ONLINE: + case MEM_CANCEL_OFFLINE: + default: + break; + } + + return NOTIFY_OK; +} +#endif /* CONFIG_HUGETLBFS */ +#else /* !CONFIG_MEMORY_HOTPLUG_SPARSE */ + static int link_mem_sections(int nid) { return 0; } -#endif /* CONFIG_MEMORY_HOTPLUG_SPARSE */ +#endif /* CONFIG_MEMORY_HOTPLUG_SPARSE */ + +#if !defined(CONFIG_MEMORY_HOTPLUG_SPARSE) || \ + !defined(CONFIG_HUGETLBFS) +static inline int node_memory_callback(struct notifier_block *self, + unsigned long action, void *arg) +{ + return NOTIFY_OK; +} + +static void init_node_hugetlb_work(int nid) { } + +#endif int register_one_node(int nid) { @@ -371,6 +514,9 @@ int register_one_node(int nid) /* link memory sections under this node */ error = link_mem_sections(nid); + + /* initialize work queue for memory hot plug */ + init_node_hugetlb_work(nid); } return error; @@ -460,13 +606,17 @@ static int node_states_init(void) return err; } +#define NODE_CALLBACK_PRI 2 /* lower than SLAB */ static int __init register_node_type(void) { int ret; ret = sysdev_class_register(&node_class); - if (!ret) + if (!ret) { ret = node_states_init(); + hotplug_memory_notifier(node_memory_callback, + NODE_CALLBACK_PRI); + } /* * Note: we're not going to unregister the node class if we fail diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c index 436a090b532..4e0726aa53b 100644 --- a/drivers/block/drbd/drbd_nl.c +++ b/drivers/block/drbd/drbd_nl.c @@ -1271,8 +1271,7 @@ static int drbd_nl_net_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp, goto fail; } - if (crypto_tfm_alg_type(crypto_hash_tfm(tfm)) - != CRYPTO_ALG_TYPE_HASH) { + if (crypto_tfm_alg_type(crypto_hash_tfm(tfm)) != CRYPTO_ALG_TYPE_SHASH) { retcode = ERR_AUTH_ALG_ND; goto fail; } diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c index 5c01f747571..3266b4f65da 100644 --- a/drivers/block/floppy.c +++ b/drivers/block/floppy.c @@ -3497,6 +3497,9 @@ static int 
fd_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd, ((cmd & 0x80) && !capable(CAP_SYS_ADMIN))) return -EPERM; + if (WARN_ON(size < 0 || size > sizeof(inparam))) + return -EINVAL; + /* copyin */ CLEARSTRUCT(&inparam); if (_IOC_DIR(cmd) & _IOC_WRITE) @@ -4162,7 +4165,7 @@ static int floppy_resume(struct device *dev) return 0; } -static struct dev_pm_ops floppy_pm_ops = { +static const struct dev_pm_ops floppy_pm_ops = { .resume = floppy_resume, .restore = floppy_resume, }; diff --git a/drivers/block/xd.c b/drivers/block/xd.c index 0877d3628fd..d1fd032e751 100644 --- a/drivers/block/xd.c +++ b/drivers/block/xd.c @@ -169,13 +169,6 @@ static int __init xd_init(void) init_timer (&xd_watchdog_int); xd_watchdog_int.function = xd_watchdog; - if (!xd_dma_buffer) - xd_dma_buffer = (char *)xd_dma_mem_alloc(xd_maxsectors * 0x200); - if (!xd_dma_buffer) { - printk(KERN_ERR "xd: Out of memory.\n"); - return -ENOMEM; - } - err = -EBUSY; if (register_blkdev(XT_DISK_MAJOR, "xd")) goto out1; @@ -202,6 +195,19 @@ static int __init xd_init(void) xd_drives,xd_drives == 1 ? "" : "s",xd_irq,xd_dma); } + /* + * With the drive detected, xd_maxsectors should now be known. + * If xd_maxsectors is 0, nothing was detected and we fall through + * to return -ENODEV + */ + if (!xd_dma_buffer && xd_maxsectors) { + xd_dma_buffer = (char *)xd_dma_mem_alloc(xd_maxsectors * 0x200); + if (!xd_dma_buffer) { + printk(KERN_ERR "xd: Out of memory.\n"); + goto out3; + } + } + err = -ENODEV; if (!xd_drives) goto out3; @@ -249,15 +255,17 @@ out4: for (i = 0; i < xd_drives; i++) put_disk(xd_gendisk[i]); out3: - release_region(xd_iobase,4); + if (xd_maxsectors) + release_region(xd_iobase,4); + + if (xd_dma_buffer) + xd_dma_mem_free((unsigned long)xd_dma_buffer, + xd_maxsectors * 0x200); out2: blk_cleanup_queue(xd_queue); out1a: unregister_blkdev(XT_DISK_MAJOR, "xd"); out1: - if (xd_dma_buffer) - xd_dma_mem_free((unsigned long)xd_dma_buffer, - xd_maxsectors * 0x200); return err; Enomem: err = -ENOMEM; diff --git a/drivers/char/hvc_iucv.c b/drivers/char/hvc_iucv.c index b8a5d654d3d..fe62bd0e17b 100644 --- a/drivers/char/hvc_iucv.c +++ b/drivers/char/hvc_iucv.c @@ -931,7 +931,7 @@ static struct hv_ops hvc_iucv_ops = { }; /* Suspend / resume device operations */ -static struct dev_pm_ops hvc_iucv_pm_ops = { +static const struct dev_pm_ops hvc_iucv_pm_ops = { .freeze = hvc_iucv_pm_freeze, .thaw = hvc_iucv_pm_restore_thaw, .restore = hvc_iucv_pm_restore_thaw, diff --git a/drivers/char/mem.c b/drivers/char/mem.c index fba76fb55ab..be832b6f827 100644 --- a/drivers/char/mem.c +++ b/drivers/char/mem.c @@ -34,6 +34,16 @@ # include <linux/efi.h> #endif +static inline unsigned long size_inside_page(unsigned long start, + unsigned long size) +{ + unsigned long sz; + + sz = PAGE_SIZE - (start & (PAGE_SIZE - 1)); + + return min(sz, size); +} + /* * Architectures vary in how they handle caching for addresses * outside of main memory. @@ -126,9 +136,7 @@ static ssize_t read_mem(struct file * file, char __user * buf, #ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED /* we don't have page 0 mapped on sparc and m68k.. 
*/ if (p < PAGE_SIZE) { - sz = PAGE_SIZE - p; - if (sz > count) - sz = count; + sz = size_inside_page(p, count); if (sz > 0) { if (clear_user(buf, sz)) return -EFAULT; @@ -141,15 +149,9 @@ static ssize_t read_mem(struct file * file, char __user * buf, #endif while (count > 0) { - /* - * Handle first page in case it's not aligned - */ - if (-p & (PAGE_SIZE - 1)) - sz = -p & (PAGE_SIZE - 1); - else - sz = PAGE_SIZE; + unsigned long remaining; - sz = min_t(unsigned long, sz, count); + sz = size_inside_page(p, count); if (!range_is_allowed(p >> PAGE_SHIFT, count)) return -EPERM; @@ -163,12 +165,10 @@ static ssize_t read_mem(struct file * file, char __user * buf, if (!ptr) return -EFAULT; - if (copy_to_user(buf, ptr, sz)) { - unxlate_dev_mem_ptr(p, ptr); - return -EFAULT; - } - + remaining = copy_to_user(buf, ptr, sz); unxlate_dev_mem_ptr(p, ptr); + if (remaining) + return -EFAULT; buf += sz; p += sz; @@ -196,9 +196,7 @@ static ssize_t write_mem(struct file * file, const char __user * buf, #ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED /* we don't have page 0 mapped on sparc and m68k.. */ if (p < PAGE_SIZE) { - unsigned long sz = PAGE_SIZE - p; - if (sz > count) - sz = count; + sz = size_inside_page(p, count); /* Hmm. Do something? */ buf += sz; p += sz; @@ -208,15 +206,7 @@ static ssize_t write_mem(struct file * file, const char __user * buf, #endif while (count > 0) { - /* - * Handle first page in case it's not aligned - */ - if (-p & (PAGE_SIZE - 1)) - sz = -p & (PAGE_SIZE - 1); - else - sz = PAGE_SIZE; - - sz = min_t(unsigned long, sz, count); + sz = size_inside_page(p, count); if (!range_is_allowed(p >> PAGE_SHIFT, sz)) return -EPERM; @@ -234,16 +224,14 @@ static ssize_t write_mem(struct file * file, const char __user * buf, } copied = copy_from_user(ptr, buf, sz); + unxlate_dev_mem_ptr(p, ptr); if (copied) { written += sz - copied; - unxlate_dev_mem_ptr(p, ptr); if (written) break; return -EFAULT; } - unxlate_dev_mem_ptr(p, ptr); - buf += sz; p += sz; count -= sz; @@ -417,27 +405,18 @@ static ssize_t read_kmem(struct file *file, char __user *buf, #ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED /* we don't have page 0 mapped on sparc and m68k.. 
*/ if (p < PAGE_SIZE && low_count > 0) { - size_t tmp = PAGE_SIZE - p; - if (tmp > low_count) tmp = low_count; - if (clear_user(buf, tmp)) + sz = size_inside_page(p, low_count); + if (clear_user(buf, sz)) return -EFAULT; - buf += tmp; - p += tmp; - read += tmp; - low_count -= tmp; - count -= tmp; + buf += sz; + p += sz; + read += sz; + low_count -= sz; + count -= sz; } #endif while (low_count > 0) { - /* - * Handle first page in case it's not aligned - */ - if (-p & (PAGE_SIZE - 1)) - sz = -p & (PAGE_SIZE - 1); - else - sz = PAGE_SIZE; - - sz = min_t(unsigned long, sz, low_count); + sz = size_inside_page(p, low_count); /* * On ia64 if a page has been mapped somewhere as @@ -461,21 +440,18 @@ static ssize_t read_kmem(struct file *file, char __user *buf, if (!kbuf) return -ENOMEM; while (count > 0) { - int len = count; - - if (len > PAGE_SIZE) - len = PAGE_SIZE; - len = vread(kbuf, (char *)p, len); - if (!len) + sz = size_inside_page(p, count); + sz = vread(kbuf, (char *)p, sz); + if (!sz) break; - if (copy_to_user(buf, kbuf, len)) { + if (copy_to_user(buf, kbuf, sz)) { free_page((unsigned long)kbuf); return -EFAULT; } - count -= len; - buf += len; - read += len; - p += len; + count -= sz; + buf += sz; + read += sz; + p += sz; } free_page((unsigned long)kbuf); } @@ -485,7 +461,7 @@ static ssize_t read_kmem(struct file *file, char __user *buf, static inline ssize_t -do_write_kmem(void *p, unsigned long realp, const char __user * buf, +do_write_kmem(unsigned long p, const char __user *buf, size_t count, loff_t *ppos) { ssize_t written, sz; @@ -494,14 +470,11 @@ do_write_kmem(void *p, unsigned long realp, const char __user * buf, written = 0; #ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED /* we don't have page 0 mapped on sparc and m68k.. */ - if (realp < PAGE_SIZE) { - unsigned long sz = PAGE_SIZE - realp; - if (sz > count) - sz = count; + if (p < PAGE_SIZE) { + sz = size_inside_page(p, count); /* Hmm. Do something? 
*/ buf += sz; p += sz; - realp += sz; count -= sz; written += sz; } @@ -509,22 +482,15 @@ do_write_kmem(void *p, unsigned long realp, const char __user * buf, while (count > 0) { char *ptr; - /* - * Handle first page in case it's not aligned - */ - if (-realp & (PAGE_SIZE - 1)) - sz = -realp & (PAGE_SIZE - 1); - else - sz = PAGE_SIZE; - sz = min_t(unsigned long, sz, count); + sz = size_inside_page(p, count); /* * On ia64 if a page has been mapped somewhere as * uncached, then it must also be accessed uncached * by the kernel or data corruption may occur */ - ptr = xlate_dev_kmem_ptr(p); + ptr = xlate_dev_kmem_ptr((char *)p); copied = copy_from_user(ptr, buf, sz); if (copied) { @@ -535,7 +501,6 @@ do_write_kmem(void *p, unsigned long realp, const char __user * buf, } buf += sz; p += sz; - realp += sz; count -= sz; written += sz; } @@ -554,19 +519,14 @@ static ssize_t write_kmem(struct file * file, const char __user * buf, unsigned long p = *ppos; ssize_t wrote = 0; ssize_t virtr = 0; - ssize_t written; char * kbuf; /* k-addr because vwrite() takes vmlist_lock rwlock */ if (p < (unsigned long) high_memory) { - - wrote = count; - if (count > (unsigned long) high_memory - p) - wrote = (unsigned long) high_memory - p; - - written = do_write_kmem((void*)p, p, buf, wrote, ppos); - if (written != wrote) - return written; - wrote = written; + unsigned long to_write = min_t(unsigned long, count, + (unsigned long)high_memory - p); + wrote = do_write_kmem(p, buf, to_write, ppos); + if (wrote != to_write) + return wrote; p += wrote; buf += wrote; count -= wrote; @@ -577,24 +537,21 @@ static ssize_t write_kmem(struct file * file, const char __user * buf, if (!kbuf) return wrote ? wrote : -ENOMEM; while (count > 0) { - int len = count; - - if (len > PAGE_SIZE) - len = PAGE_SIZE; - if (len) { - written = copy_from_user(kbuf, buf, len); - if (written) { - if (wrote + virtr) - break; - free_page((unsigned long)kbuf); - return -EFAULT; - } + unsigned long sz = size_inside_page(p, count); + unsigned long n; + + n = copy_from_user(kbuf, buf, sz); + if (n) { + if (wrote + virtr) + break; + free_page((unsigned long)kbuf); + return -EFAULT; } - len = vwrite(kbuf, (char *)p, len); - count -= len; - buf += len; - virtr += len; - p += len; + sz = vwrite(kbuf, (char *)p, sz); + count -= sz; + buf += sz; + virtr += sz; + p += sz; } free_page((unsigned long)kbuf); } diff --git a/drivers/char/misc.c b/drivers/char/misc.c index 96f1cd086dd..94a136e96c0 100644 --- a/drivers/char/misc.c +++ b/drivers/char/misc.c @@ -60,9 +60,7 @@ static DEFINE_MUTEX(misc_mtx); * Assigned numbers, used for dynamic minors */ #define DYNAMIC_MINORS 64 /* like dynamic majors */ -static unsigned char misc_minors[DYNAMIC_MINORS / 8]; - -extern int pmu_device_init(void); +static DECLARE_BITMAP(misc_minors, DYNAMIC_MINORS); #ifdef CONFIG_PROC_FS static void *misc_seq_start(struct seq_file *seq, loff_t *pos) @@ -198,24 +196,23 @@ int misc_register(struct miscdevice * misc) } if (misc->minor == MISC_DYNAMIC_MINOR) { - int i = DYNAMIC_MINORS; - while (--i >= 0) - if ( (misc_minors[i>>3] & (1 << (i&7))) == 0) - break; - if (i<0) { + int i = find_first_zero_bit(misc_minors, DYNAMIC_MINORS); + if (i >= DYNAMIC_MINORS) { mutex_unlock(&misc_mtx); return -EBUSY; } - misc->minor = i; + misc->minor = DYNAMIC_MINORS - i - 1; + set_bit(i, misc_minors); } - if (misc->minor < DYNAMIC_MINORS) - misc_minors[misc->minor >> 3] |= 1 << (misc->minor & 7); dev = MKDEV(MISC_MAJOR, misc->minor); misc->this_device = device_create(misc_class, misc->parent, dev, misc, "%s", 
misc->name); if (IS_ERR(misc->this_device)) { + int i = DYNAMIC_MINORS - misc->minor - 1; + if (i < DYNAMIC_MINORS && i >= 0) + clear_bit(i, misc_minors); err = PTR_ERR(misc->this_device); goto out; } @@ -242,7 +239,7 @@ int misc_register(struct miscdevice * misc) int misc_deregister(struct miscdevice *misc) { - int i = misc->minor; + int i = DYNAMIC_MINORS - misc->minor - 1; if (list_empty(&misc->list)) return -EINVAL; @@ -250,9 +247,8 @@ int misc_deregister(struct miscdevice *misc) mutex_lock(&misc_mtx); list_del(&misc->list); device_destroy(misc_class, MKDEV(MISC_MAJOR, misc->minor)); - if (i < DYNAMIC_MINORS && i>0) { - misc_minors[i>>3] &= ~(1 << (misc->minor & 7)); - } + if (i < DYNAMIC_MINORS && i >= 0) + clear_bit(i, misc_minors); mutex_unlock(&misc_mtx); return 0; } diff --git a/drivers/char/random.c b/drivers/char/random.c index dcd08635cf1..8258982b49e 100644 --- a/drivers/char/random.c +++ b/drivers/char/random.c @@ -1245,12 +1245,8 @@ static int proc_do_uuid(ctl_table *table, int write, if (uuid[8] == 0) generate_random_uuid(uuid); - sprintf(buf, "%02x%02x%02x%02x-%02x%02x-%02x%02x-%02x%02x-" - "%02x%02x%02x%02x%02x%02x", - uuid[0], uuid[1], uuid[2], uuid[3], - uuid[4], uuid[5], uuid[6], uuid[7], - uuid[8], uuid[9], uuid[10], uuid[11], - uuid[12], uuid[13], uuid[14], uuid[15]); + sprintf(buf, "%pU", uuid); + fake_table.data = buf; fake_table.maxlen = sizeof(buf); @@ -1310,7 +1306,7 @@ ctl_table random_table[] = { /******************************************************************** * - * Random funtions for networking + * Random functions for networking * ********************************************************************/ diff --git a/drivers/char/vt.c b/drivers/char/vt.c index 1e3d728dbf7..e43fbc66aef 100644 --- a/drivers/char/vt.c +++ b/drivers/char/vt.c @@ -184,12 +184,10 @@ static DECLARE_WORK(console_work, console_callback); * fg_console is the current virtual console, * last_console is the last used one, * want_console is the console we want to switch to, - * kmsg_redirect is the console for kernel messages, */ int fg_console; int last_console; int want_console = -1; -int kmsg_redirect; /* * For each existing display, we have a pointer to console currently visible @@ -2434,6 +2432,37 @@ struct tty_driver *console_driver; #ifdef CONFIG_VT_CONSOLE +/** + * vt_kmsg_redirect() - Sets/gets the kernel message console + * @new: The new virtual terminal number or -1 if the console should stay + * unchanged + * + * By default, the kernel messages are always printed on the current virtual + * console. However, the user may modify that default with the + * TIOCL_SETKMSGREDIRECT ioctl call. + * + * This function sets the kernel message console to be @new. It returns the old + * virtual console number. The virtual terminal number 0 (both as parameter and + * return value) means no redirection (i.e. always printed on the currently + * active console). + * + * The parameter -1 means that only the current console is returned, but the + * value is not modified. You may use the macro vt_get_kmsg_redirect() in that + * case to make the code more understandable. + * + * When the kernel is compiled without CONFIG_VT_CONSOLE, this function ignores + * the parameter and always returns 0. 
+ */ +int vt_kmsg_redirect(int new) +{ + static int kmsg_con; + + if (new != -1) + return xchg(&kmsg_con, new); + else + return kmsg_con; +} + /* * Console on virtual terminal * @@ -2448,6 +2477,7 @@ static void vt_console_print(struct console *co, const char *b, unsigned count) const ushort *start; ushort cnt = 0; ushort myx; + int kmsg_console; /* console busy or not yet initialized */ if (!printable) @@ -2455,8 +2485,9 @@ static void vt_console_print(struct console *co, const char *b, unsigned count) if (!spin_trylock(&printing_lock)) return; - if (kmsg_redirect && vc_cons_allocated(kmsg_redirect - 1)) - vc = vc_cons[kmsg_redirect - 1].d; + kmsg_console = vt_get_kmsg_redirect(); + if (kmsg_console && vc_cons_allocated(kmsg_console - 1)) + vc = vc_cons[kmsg_console - 1].d; /* read `x' only after setting currcons properly (otherwise the `x' macro will read the x of the foreground console). */ @@ -2613,7 +2644,7 @@ int tioclinux(struct tty_struct *tty, unsigned long arg) ret = set_vesa_blanking(p); break; case TIOCL_GETKMSGREDIRECT: - data = kmsg_redirect; + data = vt_get_kmsg_redirect(); ret = __put_user(data, p); break; case TIOCL_SETKMSGREDIRECT: @@ -2623,7 +2654,7 @@ int tioclinux(struct tty_struct *tty, unsigned long arg) if (get_user(data, p+1)) ret = -EFAULT; else - kmsg_redirect = data; + vt_kmsg_redirect(data); } break; case TIOCL_GETFGCONSOLE: diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig new file mode 100644 index 00000000000..08f726c5fee --- /dev/null +++ b/drivers/clocksource/Kconfig @@ -0,0 +1,9 @@ +config CS5535_CLOCK_EVENT_SRC + tristate "CS5535/CS5536 high-res timer (MFGPT) events" + depends on GENERIC_TIME && GENERIC_CLOCKEVENTS && CS5535_MFGPT + help + This driver provides a clock event source based on the MFGPT + timer(s) in the CS5535 and CS5536 companion chips. + MFGPTs have a better resolution and max interval than the + generic PIT, and are suitable for use as high-res timers. + diff --git a/drivers/clocksource/Makefile b/drivers/clocksource/Makefile index eef216f7f61..be61ece6330 100644 --- a/drivers/clocksource/Makefile +++ b/drivers/clocksource/Makefile @@ -2,6 +2,7 @@ obj-$(CONFIG_ATMEL_TCB_CLKSRC) += tcb_clksrc.o obj-$(CONFIG_X86_CYCLONE_TIMER) += cyclone.o obj-$(CONFIG_X86_PM_TIMER) += acpi_pm.o obj-$(CONFIG_SCx200HR_TIMER) += scx200_hrt.o +obj-$(CONFIG_CS5535_CLOCK_EVENT_SRC) += cs5535-clockevt.o obj-$(CONFIG_SH_TIMER_CMT) += sh_cmt.o obj-$(CONFIG_SH_TIMER_MTU2) += sh_mtu2.o obj-$(CONFIG_SH_TIMER_TMU) += sh_tmu.o diff --git a/drivers/clocksource/cs5535-clockevt.c b/drivers/clocksource/cs5535-clockevt.c new file mode 100644 index 00000000000..27d20fac19d --- /dev/null +++ b/drivers/clocksource/cs5535-clockevt.c @@ -0,0 +1,197 @@ +/* + * Clock event driver for the CS5535/CS5536 + * + * Copyright (C) 2006, Advanced Micro Devices, Inc. + * Copyright (C) 2007 Andres Salomon <dilinger@debian.org> + * Copyright (C) 2009 Andres Salomon <dilinger@collabora.co.uk> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of version 2 of the GNU General Public License + * as published by the Free Software Foundation. + * + * The MFGPTs are documented in AMD Geode CS5536 Companion Device Data Book. 
+ */ + +#include <linux/kernel.h> +#include <linux/irq.h> +#include <linux/interrupt.h> +#include <linux/module.h> +#include <linux/cs5535.h> +#include <linux/clockchips.h> + +#define DRV_NAME "cs5535-clockevt" + +static int timer_irq = CONFIG_CS5535_MFGPT_DEFAULT_IRQ; +module_param_named(irq, timer_irq, int, 0644); +MODULE_PARM_DESC(irq, "Which IRQ to use for the clock source MFGPT ticks."); + +/* + * We are using the 32.768kHz input clock - it's the only one that has the + * ranges we find desirable. The following table lists the suitable + * divisors and the associated Hz, minimum interval and the maximum interval: + * + * Divisor Hz Min Delta (s) Max Delta (s) + * 1 32768 .00048828125 2.000 + * 2 16384 .0009765625 4.000 + * 4 8192 .001953125 8.000 + * 8 4096 .00390625 16.000 + * 16 2048 .0078125 32.000 + * 32 1024 .015625 64.000 + * 64 512 .03125 128.000 + * 128 256 .0625 256.000 + * 256 128 .125 512.000 + */ + +static unsigned int cs5535_tick_mode = CLOCK_EVT_MODE_SHUTDOWN; +static struct cs5535_mfgpt_timer *cs5535_event_clock; + +/* Selected from the table above */ + +#define MFGPT_DIVISOR 16 +#define MFGPT_SCALE 4 /* divisor = 2^(scale) */ +#define MFGPT_HZ (32768 / MFGPT_DIVISOR) +#define MFGPT_PERIODIC (MFGPT_HZ / HZ) + +/* + * The MFGPT timers on the CS5536 provide us with suitable timers to use + * as clock event sources - not as good as a HPET or APIC, but certainly + * better than the PIT. This isn't a general purpose MFGPT driver, but + * a simplified one designed specifically to act as a clock event source. + * For full details about the MFGPT, please consult the CS5536 data sheet. + */ + +static void disable_timer(struct cs5535_mfgpt_timer *timer) +{ + /* avoid races by clearing CMP1 and CMP2 unconditionally */ + cs5535_mfgpt_write(timer, MFGPT_REG_SETUP, + (uint16_t) ~MFGPT_SETUP_CNTEN | MFGPT_SETUP_CMP1 | + MFGPT_SETUP_CMP2); +} + +static void start_timer(struct cs5535_mfgpt_timer *timer, uint16_t delta) +{ + cs5535_mfgpt_write(timer, MFGPT_REG_CMP2, delta); + cs5535_mfgpt_write(timer, MFGPT_REG_COUNTER, 0); + + cs5535_mfgpt_write(timer, MFGPT_REG_SETUP, + MFGPT_SETUP_CNTEN | MFGPT_SETUP_CMP2); +} + +static void mfgpt_set_mode(enum clock_event_mode mode, + struct clock_event_device *evt) +{ + disable_timer(cs5535_event_clock); + + if (mode == CLOCK_EVT_MODE_PERIODIC) + start_timer(cs5535_event_clock, MFGPT_PERIODIC); + + cs5535_tick_mode = mode; +} + +static int mfgpt_next_event(unsigned long delta, struct clock_event_device *evt) +{ + start_timer(cs5535_event_clock, delta); + return 0; +} + +static struct clock_event_device cs5535_clockevent = { + .name = DRV_NAME, + .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT, + .set_mode = mfgpt_set_mode, + .set_next_event = mfgpt_next_event, + .rating = 250, + .cpumask = cpu_all_mask, + .shift = 32 +}; + +static irqreturn_t mfgpt_tick(int irq, void *dev_id) +{ + uint16_t val = cs5535_mfgpt_read(cs5535_event_clock, MFGPT_REG_SETUP); + + /* See if the interrupt was for us */ + if (!(val & (MFGPT_SETUP_SETUP | MFGPT_SETUP_CMP2 | MFGPT_SETUP_CMP1))) + return IRQ_NONE; + + /* Turn off the clock (and clear the event) */ + disable_timer(cs5535_event_clock); + + if (cs5535_tick_mode == CLOCK_EVT_MODE_SHUTDOWN) + return IRQ_HANDLED; + + /* Clear the counter */ + cs5535_mfgpt_write(cs5535_event_clock, MFGPT_REG_COUNTER, 0); + + /* Restart the clock in periodic mode */ + + if (cs5535_tick_mode == CLOCK_EVT_MODE_PERIODIC) + cs5535_mfgpt_write(cs5535_event_clock, MFGPT_REG_SETUP, + MFGPT_SETUP_CNTEN | MFGPT_SETUP_CMP2); + + 
cs5535_clockevent.event_handler(&cs5535_clockevent); + return IRQ_HANDLED; +} + +static struct irqaction mfgptirq = { + .handler = mfgpt_tick, + .flags = IRQF_DISABLED | IRQF_NOBALANCING | IRQF_TIMER, + .name = DRV_NAME, +}; + +static int __init cs5535_mfgpt_init(void) +{ + struct cs5535_mfgpt_timer *timer; + int ret; + uint16_t val; + + timer = cs5535_mfgpt_alloc_timer(MFGPT_TIMER_ANY, MFGPT_DOMAIN_WORKING); + if (!timer) { + printk(KERN_ERR DRV_NAME ": Could not allocate MFPGT timer\n"); + return -ENODEV; + } + cs5535_event_clock = timer; + + /* Set up the IRQ on the MFGPT side */ + if (cs5535_mfgpt_setup_irq(timer, MFGPT_CMP2, &timer_irq)) { + printk(KERN_ERR DRV_NAME ": Could not set up IRQ %d\n", + timer_irq); + return -EIO; + } + + /* And register it with the kernel */ + ret = setup_irq(timer_irq, &mfgptirq); + if (ret) { + printk(KERN_ERR DRV_NAME ": Unable to set up the interrupt.\n"); + goto err; + } + + /* Set the clock scale and enable the event mode for CMP2 */ + val = MFGPT_SCALE | (3 << 8); + + cs5535_mfgpt_write(cs5535_event_clock, MFGPT_REG_SETUP, val); + + /* Set up the clock event */ + cs5535_clockevent.mult = div_sc(MFGPT_HZ, NSEC_PER_SEC, + cs5535_clockevent.shift); + cs5535_clockevent.min_delta_ns = clockevent_delta2ns(0xF, + &cs5535_clockevent); + cs5535_clockevent.max_delta_ns = clockevent_delta2ns(0xFFFE, + &cs5535_clockevent); + + printk(KERN_INFO DRV_NAME + ": Registering MFGPT timer as a clock event, using IRQ %d\n", + timer_irq); + clockevents_register_device(&cs5535_clockevent); + + return 0; + +err: + cs5535_mfgpt_release_irq(cs5535_event_clock, MFGPT_CMP2, &timer_irq); + printk(KERN_ERR DRV_NAME ": Unable to set up the MFGPT clock source\n"); + return -EIO; +} + +module_init(cs5535_mfgpt_init); + +MODULE_AUTHOR("Andres Salomon <dilinger@collabora.co.uk>"); +MODULE_DESCRIPTION("CS5535/CS5536 MFGPT clock event driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/cpuidle/governors/ladder.c b/drivers/cpuidle/governors/ladder.c index a4bec3f919a..1c1ceb4f218 100644 --- a/drivers/cpuidle/governors/ladder.c +++ b/drivers/cpuidle/governors/ladder.c @@ -69,9 +69,6 @@ static int ladder_select_state(struct cpuidle_device *dev) int last_residency, last_idx = ldev->last_state_idx; int latency_req = pm_qos_requirement(PM_QOS_CPU_DMA_LATENCY); - if (unlikely(!ldev)) - return 0; - /* Special case when user has set very strict latency requirement */ if (unlikely(latency_req == 0)) { ladder_do_selection(ldev, last_idx, 0); diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c index c52ac9efd0b..f15112569c1 100644 --- a/drivers/dma/at_hdmac.c +++ b/drivers/dma/at_hdmac.c @@ -1188,7 +1188,7 @@ static int at_dma_resume_noirq(struct device *dev) return 0; } -static struct dev_pm_ops at_dma_dev_pm_ops = { +static const struct dev_pm_ops at_dma_dev_pm_ops = { .suspend_noirq = at_dma_suspend_noirq, .resume_noirq = at_dma_resume_noirq, }; diff --git a/drivers/dma/dw_dmac.c b/drivers/dma/dw_dmac.c index 2eea823516a..285bed0fe17 100644 --- a/drivers/dma/dw_dmac.c +++ b/drivers/dma/dw_dmac.c @@ -1427,7 +1427,7 @@ static int dw_resume_noirq(struct device *dev) return 0; } -static struct dev_pm_ops dw_dev_pm_ops = { +static const struct dev_pm_ops dw_dev_pm_ops = { .suspend_noirq = dw_suspend_noirq, .resume_noirq = dw_resume_noirq, }; diff --git a/drivers/dma/txx9dmac.c b/drivers/dma/txx9dmac.c index fb6bb64e886..3ebc61067e5 100644 --- a/drivers/dma/txx9dmac.c +++ b/drivers/dma/txx9dmac.c @@ -1313,7 +1313,7 @@ static int txx9dmac_resume_noirq(struct device *dev) } -static struct 
dev_pm_ops txx9dmac_dev_pm_ops = { +static const struct dev_pm_ops txx9dmac_dev_pm_ops = { .suspend_noirq = txx9dmac_suspend_noirq, .resume_noirq = txx9dmac_resume_noirq, }; diff --git a/drivers/firmware/Kconfig b/drivers/firmware/Kconfig index ebb9e51deb0..1b03ba1d083 100644 --- a/drivers/firmware/Kconfig +++ b/drivers/firmware/Kconfig @@ -7,7 +7,7 @@ menu "Firmware Drivers" config EDD tristate "BIOS Enhanced Disk Drive calls determine boot disk" - depends on !IA64 + depends on X86 help Say Y or M here if you want to enable BIOS Enhanced Disk Drive Services real mode BIOS calls to determine which disk @@ -28,7 +28,7 @@ config EDD_OFF config FIRMWARE_MEMMAP bool "Add firmware-provided memory map to sysfs" if EMBEDDED - default (X86_64 || X86_32) + default X86 help Add the firmware-provided (unmodified) memory map to /sys/firmware/memmap. That memory map is used for example by kexec to set up parameter area diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c index 3a2ccb09e2f..31b983d9462 100644 --- a/drivers/firmware/dmi_scan.c +++ b/drivers/firmware/dmi_scan.c @@ -169,10 +169,7 @@ static void __init dmi_save_uuid(const struct dmi_header *dm, int slot, int inde if (!s) return; - sprintf(s, - "%02X%02X%02X%02X-%02X%02X-%02X%02X-%02X%02X-%02X%02X%02X%02X%02X%02X", - d[0], d[1], d[2], d[3], d[4], d[5], d[6], d[7], - d[8], d[9], d[10], d[11], d[12], d[13], d[14], d[15]); + sprintf(s, "%pUB", d); dmi_ident[slot] = s; } diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig index 2ad0128c63c..57ca339924e 100644 --- a/drivers/gpio/Kconfig +++ b/drivers/gpio/Kconfig @@ -174,6 +174,16 @@ config GPIO_ADP5520 comment "PCI GPIO expanders:" +config GPIO_CS5535 + tristate "AMD CS5535/CS5536 GPIO support" + depends on PCI && !CS5535_GPIO + help + The AMD CS5535 and CS5536 southbridges support 28 GPIO pins that + can be used for quite a number of things. The CS5535/6 is found on + AMD Geode and Lemote Yeeloong devices. + + If unsure, say N. + config GPIO_BT8XX tristate "BT8XX GPIO abuser" depends on PCI && VIDEO_BT848=n diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile index 00a532c9a1e..270b6d7839f 100644 --- a/drivers/gpio/Makefile +++ b/drivers/gpio/Makefile @@ -16,6 +16,7 @@ obj-$(CONFIG_GPIO_PL061) += pl061.o obj-$(CONFIG_GPIO_TWL4030) += twl4030-gpio.o obj-$(CONFIG_GPIO_UCB1400) += ucb1400_gpio.o obj-$(CONFIG_GPIO_XILINX) += xilinx_gpio.o +obj-$(CONFIG_GPIO_CS5535) += cs5535-gpio.o obj-$(CONFIG_GPIO_BT8XX) += bt8xxgpio.o obj-$(CONFIG_GPIO_VR41XX) += vr41xx_giu.o obj-$(CONFIG_GPIO_WM831X) += wm831x-gpio.o diff --git a/drivers/gpio/cs5535-gpio.c b/drivers/gpio/cs5535-gpio.c new file mode 100644 index 00000000000..0fdbe94f24a --- /dev/null +++ b/drivers/gpio/cs5535-gpio.c @@ -0,0 +1,355 @@ +/* + * AMD CS5535/CS5536 GPIO driver + * Copyright (C) 2006 Advanced Micro Devices, Inc. + * Copyright (C) 2007-2009 Andres Salomon <dilinger@collabora.co.uk> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of version 2 of the GNU General Public License + * as published by the Free Software Foundation. 
+ */ + +#include <linux/kernel.h> +#include <linux/spinlock.h> +#include <linux/module.h> +#include <linux/pci.h> +#include <linux/gpio.h> +#include <linux/io.h> +#include <linux/cs5535.h> + +#define DRV_NAME "cs5535-gpio" +#define GPIO_BAR 1 + +/* + * Some GPIO pins + * 31-29,23 : reserved (always mask out) + * 28 : Power Button + * 26 : PME# + * 22-16 : LPC + * 14,15 : SMBus + * 9,8 : UART1 + * 7 : PCI INTB + * 3,4 : UART2/DDC + * 2 : IDE_IRQ0 + * 1 : AC_BEEP + * 0 : PCI INTA + * + * If a mask was not specified, allow all except + * reserved and Power Button + */ +#define GPIO_DEFAULT_MASK 0x0F7FFFFF + +static ulong mask = GPIO_DEFAULT_MASK; +module_param_named(mask, mask, ulong, 0444); +MODULE_PARM_DESC(mask, "GPIO channel mask."); + +static struct cs5535_gpio_chip { + struct gpio_chip chip; + resource_size_t base; + + struct pci_dev *pdev; + spinlock_t lock; +} cs5535_gpio_chip; + +/* + * The CS5535/CS5536 GPIOs support a number of extra features not defined + * by the gpio_chip API, so these are exported. For a full list of the + * registers, see include/linux/cs5535.h. + */ + +static void __cs5535_gpio_set(struct cs5535_gpio_chip *chip, unsigned offset, + unsigned int reg) +{ + if (offset < 16) + /* low bank register */ + outl(1 << offset, chip->base + reg); + else + /* high bank register */ + outl(1 << (offset - 16), chip->base + 0x80 + reg); +} + +void cs5535_gpio_set(unsigned offset, unsigned int reg) +{ + struct cs5535_gpio_chip *chip = &cs5535_gpio_chip; + unsigned long flags; + + spin_lock_irqsave(&chip->lock, flags); + __cs5535_gpio_set(chip, offset, reg); + spin_unlock_irqrestore(&chip->lock, flags); +} +EXPORT_SYMBOL_GPL(cs5535_gpio_set); + +static void __cs5535_gpio_clear(struct cs5535_gpio_chip *chip, unsigned offset, + unsigned int reg) +{ + if (offset < 16) + /* low bank register */ + outl(1 << (offset + 16), chip->base + reg); + else + /* high bank register */ + outl(1 << offset, chip->base + 0x80 + reg); +} + +void cs5535_gpio_clear(unsigned offset, unsigned int reg) +{ + struct cs5535_gpio_chip *chip = &cs5535_gpio_chip; + unsigned long flags; + + spin_lock_irqsave(&chip->lock, flags); + __cs5535_gpio_clear(chip, offset, reg); + spin_unlock_irqrestore(&chip->lock, flags); +} +EXPORT_SYMBOL_GPL(cs5535_gpio_clear); + +int cs5535_gpio_isset(unsigned offset, unsigned int reg) +{ + struct cs5535_gpio_chip *chip = &cs5535_gpio_chip; + unsigned long flags; + long val; + + spin_lock_irqsave(&chip->lock, flags); + if (offset < 16) + /* low bank register */ + val = inl(chip->base + reg); + else { + /* high bank register */ + val = inl(chip->base + 0x80 + reg); + offset -= 16; + } + spin_unlock_irqrestore(&chip->lock, flags); + + return (val & (1 << offset)) ? 1 : 0; +} +EXPORT_SYMBOL_GPL(cs5535_gpio_isset); + +/* + * Generic gpio_chip API support. 
+ */ + +static int chip_gpio_request(struct gpio_chip *c, unsigned offset) +{ + struct cs5535_gpio_chip *chip = (struct cs5535_gpio_chip *) c; + unsigned long flags; + + spin_lock_irqsave(&chip->lock, flags); + + /* check if this pin is available */ + if ((mask & (1 << offset)) == 0) { + dev_info(&chip->pdev->dev, + "pin %u is not available (check mask)\n", offset); + spin_unlock_irqrestore(&chip->lock, flags); + return -EINVAL; + } + + /* disable output aux 1 & 2 on this pin */ + __cs5535_gpio_clear(chip, offset, GPIO_OUTPUT_AUX1); + __cs5535_gpio_clear(chip, offset, GPIO_OUTPUT_AUX2); + + /* disable input aux 1 on this pin */ + __cs5535_gpio_clear(chip, offset, GPIO_INPUT_AUX1); + + spin_unlock_irqrestore(&chip->lock, flags); + + return 0; +} + +static int chip_gpio_get(struct gpio_chip *chip, unsigned offset) +{ + return cs5535_gpio_isset(offset, GPIO_OUTPUT_VAL); +} + +static void chip_gpio_set(struct gpio_chip *chip, unsigned offset, int val) +{ + if (val) + cs5535_gpio_set(offset, GPIO_OUTPUT_VAL); + else + cs5535_gpio_clear(offset, GPIO_OUTPUT_VAL); +} + +static int chip_direction_input(struct gpio_chip *c, unsigned offset) +{ + struct cs5535_gpio_chip *chip = (struct cs5535_gpio_chip *) c; + unsigned long flags; + + spin_lock_irqsave(&chip->lock, flags); + __cs5535_gpio_set(chip, offset, GPIO_INPUT_ENABLE); + spin_unlock_irqrestore(&chip->lock, flags); + + return 0; +} + +static int chip_direction_output(struct gpio_chip *c, unsigned offset, int val) +{ + struct cs5535_gpio_chip *chip = (struct cs5535_gpio_chip *) c; + unsigned long flags; + + spin_lock_irqsave(&chip->lock, flags); + + __cs5535_gpio_set(chip, offset, GPIO_OUTPUT_ENABLE); + if (val) + __cs5535_gpio_set(chip, offset, GPIO_OUTPUT_VAL); + else + __cs5535_gpio_clear(chip, offset, GPIO_OUTPUT_VAL); + + spin_unlock_irqrestore(&chip->lock, flags); + + return 0; +} + +static char *cs5535_gpio_names[] = { + "GPIO0", "GPIO1", "GPIO2", "GPIO3", + "GPIO4", "GPIO5", "GPIO6", "GPIO7", + "GPIO8", "GPIO9", "GPIO10", "GPIO11", + "GPIO12", "GPIO13", "GPIO14", "GPIO15", + "GPIO16", "GPIO17", "GPIO18", "GPIO19", + "GPIO20", "GPIO21", "GPIO22", NULL, + "GPIO24", "GPIO25", "GPIO26", "GPIO27", + "GPIO28", NULL, NULL, NULL, +}; + +static struct cs5535_gpio_chip cs5535_gpio_chip = { + .chip = { + .owner = THIS_MODULE, + .label = DRV_NAME, + + .base = 0, + .ngpio = 32, + .names = cs5535_gpio_names, + .request = chip_gpio_request, + + .get = chip_gpio_get, + .set = chip_gpio_set, + + .direction_input = chip_direction_input, + .direction_output = chip_direction_output, + }, +}; + +static int __init cs5535_gpio_probe(struct pci_dev *pdev, + const struct pci_device_id *pci_id) +{ + int err; + ulong mask_orig = mask; + + /* There are two ways to get the GPIO base address; one is by + * fetching it from MSR_LBAR_GPIO, the other is by reading the + * PCI BAR info. The latter method is easier (especially across + * different architectures), so we'll stick with that for now. If + * it turns out to be unreliable in the face of crappy BIOSes, we + * can always go back to using MSRs.. 
*/ + + err = pci_enable_device_io(pdev); + if (err) { + dev_err(&pdev->dev, "can't enable device IO\n"); + goto done; + } + + err = pci_request_region(pdev, GPIO_BAR, DRV_NAME); + if (err) { + dev_err(&pdev->dev, "can't alloc PCI BAR #%d\n", GPIO_BAR); + goto done; + } + + /* set up the driver-specific struct */ + cs5535_gpio_chip.base = pci_resource_start(pdev, GPIO_BAR); + cs5535_gpio_chip.pdev = pdev; + spin_lock_init(&cs5535_gpio_chip.lock); + + dev_info(&pdev->dev, "allocated PCI BAR #%d: base 0x%llx\n", GPIO_BAR, + (unsigned long long) cs5535_gpio_chip.base); + + /* mask out reserved pins */ + mask &= 0x1F7FFFFF; + + /* do not allow pin 28, Power Button, as there's special handling + * in the PMC needed. (note 12, p. 48) */ + mask &= ~(1 << 28); + + if (mask_orig != mask) + dev_info(&pdev->dev, "mask changed from 0x%08lX to 0x%08lX\n", + mask_orig, mask); + + /* finally, register with the generic GPIO API */ + err = gpiochip_add(&cs5535_gpio_chip.chip); + if (err) + goto release_region; + + dev_info(&pdev->dev, DRV_NAME ": GPIO support successfully loaded.\n"); + return 0; + +release_region: + pci_release_region(pdev, GPIO_BAR); +done: + return err; +} + +static void __exit cs5535_gpio_remove(struct pci_dev *pdev) +{ + int err; + + err = gpiochip_remove(&cs5535_gpio_chip.chip); + if (err) { + /* uhh? */ + dev_err(&pdev->dev, "unable to remove gpio_chip?\n"); + } + pci_release_region(pdev, GPIO_BAR); +} + +static struct pci_device_id cs5535_gpio_pci_tbl[] = { + { PCI_DEVICE(PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_CS5535_ISA) }, + { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CS5536_ISA) }, + { 0, }, +}; +MODULE_DEVICE_TABLE(pci, cs5535_gpio_pci_tbl); + +/* + * We can't use the standard PCI driver registration stuff here, since + * that allows only one driver to bind to each PCI device (and we want + * multiple drivers to be able to bind to the device). Instead, manually + * scan for the PCI device, request a single region, and keep track of the + * devices that we're using. + */ + +static int __init cs5535_gpio_scan_pci(void) +{ + struct pci_dev *pdev; + int err = -ENODEV; + int i; + + for (i = 0; i < ARRAY_SIZE(cs5535_gpio_pci_tbl); i++) { + pdev = pci_get_device(cs5535_gpio_pci_tbl[i].vendor, + cs5535_gpio_pci_tbl[i].device, NULL); + if (pdev) { + err = cs5535_gpio_probe(pdev, &cs5535_gpio_pci_tbl[i]); + if (err) + pci_dev_put(pdev); + + /* we only support a single CS5535/6 southbridge */ + break; + } + } + + return err; +} + +static void __exit cs5535_gpio_free_pci(void) +{ + cs5535_gpio_remove(cs5535_gpio_chip.pdev); + pci_dev_put(cs5535_gpio_chip.pdev); +} + +static int __init cs5535_gpio_init(void) +{ + return cs5535_gpio_scan_pci(); +} + +static void __exit cs5535_gpio_exit(void) +{ + cs5535_gpio_free_pci(); +} + +module_init(cs5535_gpio_init); +module_exit(cs5535_gpio_exit); + +MODULE_AUTHOR("Andres Salomon <dilinger@collabora.co.uk>"); +MODULE_DESCRIPTION("AMD CS5535/CS5536 GPIO driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig index 9e640c62ebd..95ccbe377f9 100644 --- a/drivers/hwmon/Kconfig +++ b/drivers/hwmon/Kconfig @@ -1046,25 +1046,27 @@ config SENSORS_ATK0110 will be called asus_atk0110. config SENSORS_LIS3LV02D - tristate "STMicroeletronics LIS3LV02Dx three-axis digital accelerometer" + tristate "STMicroelectronics LIS3* three-axis digital accelerometer" depends on INPUT select INPUT_POLLDEV select NEW_LEDS select LEDS_CLASS default n help - This driver provides support for the LIS3LV02Dx accelerometer. 
In - particular, it can be found in a number of HP laptops, which have the - "Mobile Data Protection System 3D" or "3D DriveGuard" feature. On such - systems the driver should load automatically (via ACPI). The - accelerometer might also be found in other systems, connected via SPI - or I2C. The accelerometer data is readable via - /sys/devices/platform/lis3lv02d. + This driver provides support for the LIS3* accelerometers, such as the + LIS3LV02DL or the LIS331DL. In particular, it can be found in a number + of HP laptops, which have the "Mobile Data Protection System 3D" or + "3D DriveGuard" feature. On such systems the driver should load + automatically (via ACPI alias). The accelerometer might also be found + in other systems, connected via SPI or I2C. The accelerometer data is + readable via /sys/devices/platform/lis3lv02d. This driver also provides an absolute input class device, allowing - the laptop to act as a pinball machine-esque joystick. On HP laptops, + a laptop to act as a pinball machine-esque joystick. It provides also + a misc device which can be used to detect free-fall. On HP laptops, if the led infrastructure is activated, support for a led indicating - disk protection will be provided as hp:red:hddprotection. + disk protection will be provided as hp::hddprotect. For more + information on the feature, refer to Documentation/hwmon/lis3lv02d. This driver can also be built as modules. If so, the core module will be called lis3lv02d and a specific module for HP laptops will be diff --git a/drivers/hwmon/adm1021.c b/drivers/hwmon/adm1021.c index 33acf29531a..1ad0a885c5a 100644 --- a/drivers/hwmon/adm1021.c +++ b/drivers/hwmon/adm1021.c @@ -34,9 +34,8 @@ static const unsigned short normal_i2c[] = { 0x18, 0x19, 0x1a, 0x29, 0x2a, 0x2b, 0x4c, 0x4d, 0x4e, I2C_CLIENT_END }; -/* Insmod parameters */ -I2C_CLIENT_INSMOD_8(adm1021, adm1023, max1617, max1617a, thmc10, lm84, gl523sm, - mc1066); +enum chips { + adm1021, adm1023, max1617, max1617a, thmc10, lm84, gl523sm, mc1066 }; /* adm1021 constants specified below */ @@ -97,7 +96,7 @@ struct adm1021_data { static int adm1021_probe(struct i2c_client *client, const struct i2c_device_id *id); -static int adm1021_detect(struct i2c_client *client, int kind, +static int adm1021_detect(struct i2c_client *client, struct i2c_board_info *info); static void adm1021_init_client(struct i2c_client *client); static int adm1021_remove(struct i2c_client *client); @@ -130,7 +129,7 @@ static struct i2c_driver adm1021_driver = { .remove = adm1021_remove, .id_table = adm1021_id, .detect = adm1021_detect, - .address_data = &addr_data, + .address_list = normal_i2c, }; static ssize_t show_temp(struct device *dev, @@ -284,7 +283,7 @@ static const struct attribute_group adm1021_group = { }; /* Return 0 if detection is successful, -ENODEV otherwise */ -static int adm1021_detect(struct i2c_client *client, int kind, +static int adm1021_detect(struct i2c_client *client, struct i2c_board_info *info) { struct i2c_adapter *adapter = client->adapter; diff --git a/drivers/hwmon/adm1025.c b/drivers/hwmon/adm1025.c index db6ac2b04f6..251b63165e2 100644 --- a/drivers/hwmon/adm1025.c +++ b/drivers/hwmon/adm1025.c @@ -64,11 +64,7 @@ static const unsigned short normal_i2c[] = { 0x2c, 0x2d, 0x2e, I2C_CLIENT_END }; -/* - * Insmod parameters - */ - -I2C_CLIENT_INSMOD_2(adm1025, ne1619); +enum chips { adm1025, ne1619 }; /* * The ADM1025 registers @@ -111,7 +107,7 @@ static const int in_scale[6] = { 2500, 2250, 3300, 5000, 12000, 3300 }; static int adm1025_probe(struct i2c_client 
*client, const struct i2c_device_id *id); -static int adm1025_detect(struct i2c_client *client, int kind, +static int adm1025_detect(struct i2c_client *client, struct i2c_board_info *info); static void adm1025_init_client(struct i2c_client *client); static int adm1025_remove(struct i2c_client *client); @@ -137,7 +133,7 @@ static struct i2c_driver adm1025_driver = { .remove = adm1025_remove, .id_table = adm1025_id, .detect = adm1025_detect, - .address_data = &addr_data, + .address_list = normal_i2c, }; /* @@ -409,7 +405,7 @@ static const struct attribute_group adm1025_group_in4 = { }; /* Return 0 if detection is successful, -ENODEV otherwise */ -static int adm1025_detect(struct i2c_client *client, int kind, +static int adm1025_detect(struct i2c_client *client, struct i2c_board_info *info) { struct i2c_adapter *adapter = client->adapter; diff --git a/drivers/hwmon/adm1026.c b/drivers/hwmon/adm1026.c index fb5363985e2..65335b268fa 100644 --- a/drivers/hwmon/adm1026.c +++ b/drivers/hwmon/adm1026.c @@ -37,9 +37,6 @@ /* Addresses to scan */ static const unsigned short normal_i2c[] = { 0x2c, 0x2d, 0x2e, I2C_CLIENT_END }; -/* Insmod parameters */ -I2C_CLIENT_INSMOD_1(adm1026); - static int gpio_input[17] = { -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 }; static int gpio_output[17] = { -1, -1, -1, -1, -1, -1, -1, -1, -1, @@ -293,7 +290,7 @@ struct adm1026_data { static int adm1026_probe(struct i2c_client *client, const struct i2c_device_id *id); -static int adm1026_detect(struct i2c_client *client, int kind, +static int adm1026_detect(struct i2c_client *client, struct i2c_board_info *info); static int adm1026_remove(struct i2c_client *client); static int adm1026_read_value(struct i2c_client *client, u8 reg); @@ -305,7 +302,7 @@ static void adm1026_init_client(struct i2c_client *client); static const struct i2c_device_id adm1026_id[] = { - { "adm1026", adm1026 }, + { "adm1026", 0 }, { } }; MODULE_DEVICE_TABLE(i2c, adm1026_id); @@ -319,7 +316,7 @@ static struct i2c_driver adm1026_driver = { .remove = adm1026_remove, .id_table = adm1026_id, .detect = adm1026_detect, - .address_data = &addr_data, + .address_list = normal_i2c, }; static int adm1026_read_value(struct i2c_client *client, u8 reg) @@ -1650,7 +1647,7 @@ static const struct attribute_group adm1026_group_in8_9 = { }; /* Return 0 if detection is successful, -ENODEV otherwise */ -static int adm1026_detect(struct i2c_client *client, int kind, +static int adm1026_detect(struct i2c_client *client, struct i2c_board_info *info) { struct i2c_adapter *adapter = client->adapter; diff --git a/drivers/hwmon/adm1029.c b/drivers/hwmon/adm1029.c index ef91e2a4a56..0b8a3b145bd 100644 --- a/drivers/hwmon/adm1029.c +++ b/drivers/hwmon/adm1029.c @@ -44,12 +44,6 @@ static const unsigned short normal_i2c[] = { 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, }; /* - * Insmod parameters - */ - -I2C_CLIENT_INSMOD_1(adm1029); - -/* * The ADM1029 registers * Manufacturer ID is 0x41 for Analog Devices */ @@ -117,7 +111,7 @@ static const u8 ADM1029_REG_FAN_DIV[] = { static int adm1029_probe(struct i2c_client *client, const struct i2c_device_id *id); -static int adm1029_detect(struct i2c_client *client, int kind, +static int adm1029_detect(struct i2c_client *client, struct i2c_board_info *info); static int adm1029_remove(struct i2c_client *client); static struct adm1029_data *adm1029_update_device(struct device *dev); @@ -128,7 +122,7 @@ static int adm1029_init_client(struct i2c_client *client); */ static const struct i2c_device_id adm1029_id[] = { - { 
"adm1029", adm1029 }, + { "adm1029", 0 }, { } }; MODULE_DEVICE_TABLE(i2c, adm1029_id); @@ -142,7 +136,7 @@ static struct i2c_driver adm1029_driver = { .remove = adm1029_remove, .id_table = adm1029_id, .detect = adm1029_detect, - .address_data = &addr_data, + .address_list = normal_i2c, }; /* @@ -297,7 +291,7 @@ static const struct attribute_group adm1029_group = { */ /* Return 0 if detection is successful, -ENODEV otherwise */ -static int adm1029_detect(struct i2c_client *client, int kind, +static int adm1029_detect(struct i2c_client *client, struct i2c_board_info *info) { struct i2c_adapter *adapter = client->adapter; diff --git a/drivers/hwmon/adm1031.c b/drivers/hwmon/adm1031.c index 0e722175aae..1644b92e7cc 100644 --- a/drivers/hwmon/adm1031.c +++ b/drivers/hwmon/adm1031.c @@ -64,8 +64,7 @@ /* Addresses to scan */ static const unsigned short normal_i2c[] = { 0x2c, 0x2d, 0x2e, I2C_CLIENT_END }; -/* Insmod parameters */ -I2C_CLIENT_INSMOD_2(adm1030, adm1031); +enum chips { adm1030, adm1031 }; typedef u8 auto_chan_table_t[8][2]; @@ -102,7 +101,7 @@ struct adm1031_data { static int adm1031_probe(struct i2c_client *client, const struct i2c_device_id *id); -static int adm1031_detect(struct i2c_client *client, int kind, +static int adm1031_detect(struct i2c_client *client, struct i2c_board_info *info); static void adm1031_init_client(struct i2c_client *client); static int adm1031_remove(struct i2c_client *client); @@ -125,7 +124,7 @@ static struct i2c_driver adm1031_driver = { .remove = adm1031_remove, .id_table = adm1031_id, .detect = adm1031_detect, - .address_data = &addr_data, + .address_list = normal_i2c, }; static inline u8 adm1031_read_value(struct i2c_client *client, u8 reg) @@ -813,7 +812,7 @@ static const struct attribute_group adm1031_group_opt = { }; /* Return 0 if detection is successful, -ENODEV otherwise */ -static int adm1031_detect(struct i2c_client *client, int kind, +static int adm1031_detect(struct i2c_client *client, struct i2c_board_info *info) { struct i2c_adapter *adapter = client->adapter; diff --git a/drivers/hwmon/adm9240.c b/drivers/hwmon/adm9240.c index 20e0481cc20..0727ad25079 100644 --- a/drivers/hwmon/adm9240.c +++ b/drivers/hwmon/adm9240.c @@ -55,8 +55,7 @@ static const unsigned short normal_i2c[] = { 0x2c, 0x2d, 0x2e, 0x2f, I2C_CLIENT_END }; -/* Insmod parameters */ -I2C_CLIENT_INSMOD_3(adm9240, ds1780, lm81); +enum chips { adm9240, ds1780, lm81 }; /* ADM9240 registers */ #define ADM9240_REG_MAN_ID 0x3e @@ -132,7 +131,7 @@ static inline unsigned int AOUT_FROM_REG(u8 reg) static int adm9240_probe(struct i2c_client *client, const struct i2c_device_id *id); -static int adm9240_detect(struct i2c_client *client, int kind, +static int adm9240_detect(struct i2c_client *client, struct i2c_board_info *info); static void adm9240_init_client(struct i2c_client *client); static int adm9240_remove(struct i2c_client *client); @@ -156,7 +155,7 @@ static struct i2c_driver adm9240_driver = { .remove = adm9240_remove, .id_table = adm9240_id, .detect = adm9240_detect, - .address_data = &addr_data, + .address_list = normal_i2c, }; /* per client data */ @@ -545,7 +544,7 @@ static const struct attribute_group adm9240_group = { /*** sensor chip detect and driver install ***/ /* Return 0 if detection is successful, -ENODEV otherwise */ -static int adm9240_detect(struct i2c_client *new_client, int kind, +static int adm9240_detect(struct i2c_client *new_client, struct i2c_board_info *info) { struct i2c_adapter *adapter = new_client->adapter; diff --git a/drivers/hwmon/ads7828.c 
b/drivers/hwmon/ads7828.c index 451977bca7d..aac85f3aed5 100644 --- a/drivers/hwmon/ads7828.c +++ b/drivers/hwmon/ads7828.c @@ -47,10 +47,7 @@ static const unsigned short normal_i2c[] = { 0x48, 0x49, 0x4a, 0x4b, I2C_CLIENT_END }; -/* Insmod parameters */ -I2C_CLIENT_INSMOD_1(ads7828); - -/* Other module parameters */ +/* Module parameters */ static int se_input = 1; /* Default is SE, 0 == diff */ static int int_vref = 1; /* Default is internal ref ON */ static int vref_mv = ADS7828_INT_VREF_MV; /* set if vref != 2.5V */ @@ -72,7 +69,7 @@ struct ads7828_data { }; /* Function declaration - necessary due to function dependencies */ -static int ads7828_detect(struct i2c_client *client, int kind, +static int ads7828_detect(struct i2c_client *client, struct i2c_board_info *info); static int ads7828_probe(struct i2c_client *client, const struct i2c_device_id *id); @@ -168,7 +165,7 @@ static int ads7828_remove(struct i2c_client *client) } static const struct i2c_device_id ads7828_id[] = { - { "ads7828", ads7828 }, + { "ads7828", 0 }, { } }; MODULE_DEVICE_TABLE(i2c, ads7828_id); @@ -183,11 +180,11 @@ static struct i2c_driver ads7828_driver = { .remove = ads7828_remove, .id_table = ads7828_id, .detect = ads7828_detect, - .address_data = &addr_data, + .address_list = normal_i2c, }; /* Return 0 if detection is successful, -ENODEV otherwise */ -static int ads7828_detect(struct i2c_client *client, int kind, +static int ads7828_detect(struct i2c_client *client, struct i2c_board_info *info) { struct i2c_adapter *adapter = client->adapter; diff --git a/drivers/hwmon/adt7462.c b/drivers/hwmon/adt7462.c index f9c9562b6a9..a1a7ef14b51 100644 --- a/drivers/hwmon/adt7462.c +++ b/drivers/hwmon/adt7462.c @@ -32,9 +32,6 @@ /* Addresses to scan */ static const unsigned short normal_i2c[] = { 0x58, 0x5C, I2C_CLIENT_END }; -/* Insmod parameters */ -I2C_CLIENT_INSMOD_1(adt7462); - /* ADT7462 registers */ #define ADT7462_REG_DEVICE 0x3D #define ADT7462_REG_VENDOR 0x3E @@ -237,12 +234,12 @@ struct adt7462_data { static int adt7462_probe(struct i2c_client *client, const struct i2c_device_id *id); -static int adt7462_detect(struct i2c_client *client, int kind, +static int adt7462_detect(struct i2c_client *client, struct i2c_board_info *info); static int adt7462_remove(struct i2c_client *client); static const struct i2c_device_id adt7462_id[] = { - { "adt7462", adt7462 }, + { "adt7462", 0 }, { } }; MODULE_DEVICE_TABLE(i2c, adt7462_id); @@ -256,7 +253,7 @@ static struct i2c_driver adt7462_driver = { .remove = adt7462_remove, .id_table = adt7462_id, .detect = adt7462_detect, - .address_data = &addr_data, + .address_list = normal_i2c, }; /* @@ -1902,7 +1899,7 @@ static struct attribute *adt7462_attr[] = }; /* Return 0 if detection is successful, -ENODEV otherwise */ -static int adt7462_detect(struct i2c_client *client, int kind, +static int adt7462_detect(struct i2c_client *client, struct i2c_board_info *info) { struct i2c_adapter *adapter = client->adapter; diff --git a/drivers/hwmon/adt7470.c b/drivers/hwmon/adt7470.c index 32b1750a689..3445ce1cba8 100644 --- a/drivers/hwmon/adt7470.c +++ b/drivers/hwmon/adt7470.c @@ -33,9 +33,6 @@ /* Addresses to scan */ static const unsigned short normal_i2c[] = { 0x2C, 0x2E, 0x2F, I2C_CLIENT_END }; -/* Insmod parameters */ -I2C_CLIENT_INSMOD_1(adt7470); - /* ADT7470 registers */ #define ADT7470_REG_BASE_ADDR 0x20 #define ADT7470_REG_TEMP_BASE_ADDR 0x20 @@ -177,12 +174,12 @@ struct adt7470_data { static int adt7470_probe(struct i2c_client *client, const struct i2c_device_id *id); -static 
int adt7470_detect(struct i2c_client *client, int kind, +static int adt7470_detect(struct i2c_client *client, struct i2c_board_info *info); static int adt7470_remove(struct i2c_client *client); static const struct i2c_device_id adt7470_id[] = { - { "adt7470", adt7470 }, + { "adt7470", 0 }, { } }; MODULE_DEVICE_TABLE(i2c, adt7470_id); @@ -196,7 +193,7 @@ static struct i2c_driver adt7470_driver = { .remove = adt7470_remove, .id_table = adt7470_id, .detect = adt7470_detect, - .address_data = &addr_data, + .address_list = normal_i2c, }; /* @@ -1225,7 +1222,7 @@ static struct attribute *adt7470_attr[] = }; /* Return 0 if detection is successful, -ENODEV otherwise */ -static int adt7470_detect(struct i2c_client *client, int kind, +static int adt7470_detect(struct i2c_client *client, struct i2c_board_info *info) { struct i2c_adapter *adapter = client->adapter; diff --git a/drivers/hwmon/adt7473.c b/drivers/hwmon/adt7473.c index aea244db974..434576f61c8 100644 --- a/drivers/hwmon/adt7473.c +++ b/drivers/hwmon/adt7473.c @@ -32,9 +32,6 @@ /* Addresses to scan */ static const unsigned short normal_i2c[] = { 0x2C, 0x2D, 0x2E, I2C_CLIENT_END }; -/* Insmod parameters */ -I2C_CLIENT_INSMOD_1(adt7473); - /* ADT7473 registers */ #define ADT7473_REG_BASE_ADDR 0x20 @@ -166,12 +163,12 @@ struct adt7473_data { static int adt7473_probe(struct i2c_client *client, const struct i2c_device_id *id); -static int adt7473_detect(struct i2c_client *client, int kind, +static int adt7473_detect(struct i2c_client *client, struct i2c_board_info *info); static int adt7473_remove(struct i2c_client *client); static const struct i2c_device_id adt7473_id[] = { - { "adt7473", adt7473 }, + { "adt7473", 0 }, { } }; @@ -184,7 +181,7 @@ static struct i2c_driver adt7473_driver = { .remove = adt7473_remove, .id_table = adt7473_id, .detect = adt7473_detect, - .address_data = &addr_data, + .address_list = normal_i2c, }; /* @@ -1085,7 +1082,7 @@ static struct attribute *adt7473_attr[] = }; /* Return 0 if detection is successful, -ENODEV otherwise */ -static int adt7473_detect(struct i2c_client *client, int kind, +static int adt7473_detect(struct i2c_client *client, struct i2c_board_info *info) { struct i2c_adapter *adapter = client->adapter; diff --git a/drivers/hwmon/adt7475.c b/drivers/hwmon/adt7475.c index 99abfddedbc..a0c38514568 100644 --- a/drivers/hwmon/adt7475.c +++ b/drivers/hwmon/adt7475.c @@ -148,7 +148,7 @@ static unsigned short normal_i2c[] = { 0x2c, 0x2d, 0x2e, I2C_CLIENT_END }; -I2C_CLIENT_INSMOD_4(adt7473, adt7475, adt7476, adt7490); +enum chips { adt7473, adt7475, adt7476, adt7490 }; static const struct i2c_device_id adt7475_id[] = { { "adt7473", adt7473 }, @@ -1172,7 +1172,7 @@ static struct attribute_group in4_attr_group = { .attrs = in4_attrs }; static struct attribute_group in5_attr_group = { .attrs = in5_attrs }; static struct attribute_group vid_attr_group = { .attrs = vid_attrs }; -static int adt7475_detect(struct i2c_client *client, int kind, +static int adt7475_detect(struct i2c_client *client, struct i2c_board_info *info) { struct i2c_adapter *adapter = client->adapter; @@ -1412,7 +1412,7 @@ static struct i2c_driver adt7475_driver = { .remove = adt7475_remove, .id_table = adt7475_id, .detect = adt7475_detect, - .address_data = &addr_data, + .address_list = normal_i2c, }; static void adt7475_read_hystersis(struct i2c_client *client) diff --git a/drivers/hwmon/applesmc.c b/drivers/hwmon/applesmc.c index 7ea6a8f6605..c1605b528e8 100644 --- a/drivers/hwmon/applesmc.c +++ b/drivers/hwmon/applesmc.c @@ -518,7 +518,7 
@@ static int applesmc_pm_restore(struct device *dev) return applesmc_pm_resume(dev); } -static struct dev_pm_ops applesmc_pm_ops = { +static const struct dev_pm_ops applesmc_pm_ops = { .resume = applesmc_pm_resume, .restore = applesmc_pm_restore, }; diff --git a/drivers/hwmon/asb100.c b/drivers/hwmon/asb100.c index 480f80ea1fa..7dada559b3a 100644 --- a/drivers/hwmon/asb100.c +++ b/drivers/hwmon/asb100.c @@ -51,9 +51,6 @@ /* I2C addresses to scan */ static const unsigned short normal_i2c[] = { 0x2d, I2C_CLIENT_END }; -/* Insmod parameters */ -I2C_CLIENT_INSMOD_1(asb100); - static unsigned short force_subclients[4]; module_param_array(force_subclients, short, NULL, 0); MODULE_PARM_DESC(force_subclients, "List of subclient addresses: " @@ -209,14 +206,14 @@ static void asb100_write_value(struct i2c_client *client, u16 reg, u16 val); static int asb100_probe(struct i2c_client *client, const struct i2c_device_id *id); -static int asb100_detect(struct i2c_client *client, int kind, +static int asb100_detect(struct i2c_client *client, struct i2c_board_info *info); static int asb100_remove(struct i2c_client *client); static struct asb100_data *asb100_update_device(struct device *dev); static void asb100_init_client(struct i2c_client *client); static const struct i2c_device_id asb100_id[] = { - { "asb100", asb100 }, + { "asb100", 0 }, { } }; MODULE_DEVICE_TABLE(i2c, asb100_id); @@ -230,7 +227,7 @@ static struct i2c_driver asb100_driver = { .remove = asb100_remove, .id_table = asb100_id, .detect = asb100_detect, - .address_data = &addr_data, + .address_list = normal_i2c, }; /* 7 Voltages */ @@ -697,7 +694,7 @@ ERROR_SC_2: } /* Return 0 if detection is successful, -ENODEV otherwise */ -static int asb100_detect(struct i2c_client *client, int kind, +static int asb100_detect(struct i2c_client *client, struct i2c_board_info *info) { struct i2c_adapter *adapter = client->adapter; diff --git a/drivers/hwmon/atxp1.c b/drivers/hwmon/atxp1.c index d6b490d3e36..94cadc19f0c 100644 --- a/drivers/hwmon/atxp1.c +++ b/drivers/hwmon/atxp1.c @@ -44,17 +44,14 @@ MODULE_AUTHOR("Sebastian Witt <se.witt@gmx.net>"); static const unsigned short normal_i2c[] = { 0x37, 0x4e, I2C_CLIENT_END }; -I2C_CLIENT_INSMOD_1(atxp1); - static int atxp1_probe(struct i2c_client *client, const struct i2c_device_id *id); static int atxp1_remove(struct i2c_client *client); static struct atxp1_data * atxp1_update_device(struct device *dev); -static int atxp1_detect(struct i2c_client *client, int kind, - struct i2c_board_info *info); +static int atxp1_detect(struct i2c_client *client, struct i2c_board_info *info); static const struct i2c_device_id atxp1_id[] = { - { "atxp1", atxp1 }, + { "atxp1", 0 }, { } }; MODULE_DEVICE_TABLE(i2c, atxp1_id); @@ -68,7 +65,7 @@ static struct i2c_driver atxp1_driver = { .remove = atxp1_remove, .id_table = atxp1_id, .detect = atxp1_detect, - .address_data = &addr_data, + .address_list = normal_i2c, }; struct atxp1_data { @@ -275,7 +272,7 @@ static const struct attribute_group atxp1_group = { /* Return 0 if detection is successful, -ENODEV otherwise */ -static int atxp1_detect(struct i2c_client *new_client, int kind, +static int atxp1_detect(struct i2c_client *new_client, struct i2c_board_info *info) { struct i2c_adapter *adapter = new_client->adapter; diff --git a/drivers/hwmon/dme1737.c b/drivers/hwmon/dme1737.c index 4377bb0cc52..823dd28a902 100644 --- a/drivers/hwmon/dme1737.c +++ b/drivers/hwmon/dme1737.c @@ -57,11 +57,7 @@ MODULE_PARM_DESC(probe_all_addr, "Include probing of non-standard LPC " /* Addresses to 
scan */ static const unsigned short normal_i2c[] = {0x2c, 0x2d, 0x2e, I2C_CLIENT_END}; -/* Insmod parameters */ -I2C_CLIENT_INSMOD_2(dme1737, sch5027); - -/* ISA chip types */ -enum isa_chips { sch311x = sch5027 + 1 }; +enum chips { dme1737, sch5027, sch311x }; /* --------------------------------------------------------------------- * Registers @@ -2208,7 +2204,7 @@ exit: } /* Return 0 if detection is successful, -ENODEV otherwise */ -static int dme1737_i2c_detect(struct i2c_client *client, int kind, +static int dme1737_i2c_detect(struct i2c_client *client, struct i2c_board_info *info) { struct i2c_adapter *adapter = client->adapter; @@ -2318,7 +2314,7 @@ static struct i2c_driver dme1737_i2c_driver = { .remove = dme1737_i2c_remove, .id_table = dme1737_id, .detect = dme1737_i2c_detect, - .address_data = &addr_data, + .address_list = normal_i2c, }; /* --------------------------------------------------------------------- diff --git a/drivers/hwmon/ds1621.c b/drivers/hwmon/ds1621.c index 2a4c6a05b14..e11363467a8 100644 --- a/drivers/hwmon/ds1621.c +++ b/drivers/hwmon/ds1621.c @@ -38,7 +38,6 @@ static const unsigned short normal_i2c[] = { 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, I2C_CLIENT_END }; /* Insmod parameters */ -I2C_CLIENT_INSMOD_1(ds1621); static int polarity = -1; module_param(polarity, int, 0); MODULE_PARM_DESC(polarity, "Output's polarity: 0 = active high, 1 = active low"); @@ -224,7 +223,7 @@ static const struct attribute_group ds1621_group = { /* Return 0 if detection is successful, -ENODEV otherwise */ -static int ds1621_detect(struct i2c_client *client, int kind, +static int ds1621_detect(struct i2c_client *client, struct i2c_board_info *info) { struct i2c_adapter *adapter = client->adapter; @@ -305,8 +304,8 @@ static int ds1621_remove(struct i2c_client *client) } static const struct i2c_device_id ds1621_id[] = { - { "ds1621", ds1621 }, - { "ds1625", ds1621 }, + { "ds1621", 0 }, + { "ds1625", 0 }, { } }; MODULE_DEVICE_TABLE(i2c, ds1621_id); @@ -321,7 +320,7 @@ static struct i2c_driver ds1621_driver = { .remove = ds1621_remove, .id_table = ds1621_id, .detect = ds1621_detect, - .address_data = &addr_data, + .address_list = normal_i2c, }; static int __init ds1621_init(void) diff --git a/drivers/hwmon/f75375s.c b/drivers/hwmon/f75375s.c index 40dfbcd3f3f..277398f9c93 100644 --- a/drivers/hwmon/f75375s.c +++ b/drivers/hwmon/f75375s.c @@ -39,8 +39,7 @@ /* Addresses to scan */ static const unsigned short normal_i2c[] = { 0x2d, 0x2e, I2C_CLIENT_END }; -/* Insmod parameters */ -I2C_CLIENT_INSMOD_2(f75373, f75375); +enum chips { f75373, f75375 }; /* Fintek F75375 registers */ #define F75375_REG_CONFIG0 0x0 @@ -113,7 +112,7 @@ struct f75375_data { s8 temp_max_hyst[2]; }; -static int f75375_detect(struct i2c_client *client, int kind, +static int f75375_detect(struct i2c_client *client, struct i2c_board_info *info); static int f75375_probe(struct i2c_client *client, const struct i2c_device_id *id); @@ -135,7 +134,7 @@ static struct i2c_driver f75375_driver = { .remove = f75375_remove, .id_table = f75375_id, .detect = f75375_detect, - .address_data = &addr_data, + .address_list = normal_i2c, }; static inline int f75375_read8(struct i2c_client *client, u8 reg) @@ -677,7 +676,7 @@ static int f75375_remove(struct i2c_client *client) } /* Return 0 if detection is successful, -ENODEV otherwise */ -static int f75375_detect(struct i2c_client *client, int kind, +static int f75375_detect(struct i2c_client *client, struct i2c_board_info *info) { struct i2c_adapter *adapter = client->adapter; 
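The ads7828 through f75375s conversions above, and most of the hwmon diffs that follow, apply the same mechanical change: the legacy I2C_CLIENT_INSMOD_*() macros and the driver's .address_data pointer give way to a plain .address_list of probe addresses, and the ->detect() callback drops its "kind" argument. Below is a minimal, hedged sketch of what a converted single-chip driver looks like; the "foo" chip, its FOO_REG_ID register and the 0x42 identity value are invented for illustration and do not come from any driver in this series.

/*
 * Sketch only: shape of a single-chip hwmon driver after the conversion.
 * "foo", FOO_REG_ID and the expected ID value are illustrative placeholders.
 */
#include <linux/module.h>
#include <linux/i2c.h>

static const unsigned short normal_i2c[] = { 0x2c, 0x2d, I2C_CLIENT_END };

#define FOO_REG_ID	0x3e	/* hypothetical identification register */

/* New-style detect(): no "kind" argument.  On success the callback fills
 * info->type with a name that matches an entry in the driver's id table. */
static int foo_detect(struct i2c_client *client, struct i2c_board_info *info)
{
	struct i2c_adapter *adapter = client->adapter;

	if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA))
		return -ENODEV;
	if (i2c_smbus_read_byte_data(client, FOO_REG_ID) != 0x42)
		return -ENODEV;

	strlcpy(info->type, "foo", I2C_NAME_SIZE);
	return 0;
}

static int foo_probe(struct i2c_client *client, const struct i2c_device_id *id)
{
	return 0;	/* register sysfs attributes, hwmon device, etc. */
}

static int foo_remove(struct i2c_client *client)
{
	return 0;
}

/* Single-chip drivers no longer need an enum, so driver_data is simply 0 */
static const struct i2c_device_id foo_id[] = {
	{ "foo", 0 },
	{ }
};
MODULE_DEVICE_TABLE(i2c, foo_id);

static struct i2c_driver foo_driver = {
	.class		= I2C_CLASS_HWMON,
	.driver		= { .name = "foo" },
	.probe		= foo_probe,
	.remove		= foo_remove,
	.id_table	= foo_id,
	.detect		= foo_detect,
	.address_list	= normal_i2c,	/* replaces .address_data = &addr_data */
};

static int __init foo_init(void)
{
	return i2c_add_driver(&foo_driver);
}

static void __exit foo_exit(void)
{
	i2c_del_driver(&foo_driver);
}

module_init(foo_init);
module_exit(foo_exit);
MODULE_LICENSE("GPL");

For adapters whose class matches .class, the i2c core walks address_list, calls foo_detect() for each responding address, and instantiates the client under the name placed in info->type; the converted drivers above rely on this same flow.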
diff --git a/drivers/hwmon/fschmd.c b/drivers/hwmon/fschmd.c index 281829cd153..bd0fc67e804 100644 --- a/drivers/hwmon/fschmd.c +++ b/drivers/hwmon/fschmd.c @@ -56,7 +56,8 @@ static int nowayout = WATCHDOG_NOWAYOUT; module_param(nowayout, int, 0); MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default=" __MODULE_STRING(WATCHDOG_NOWAYOUT) ")"); -I2C_CLIENT_INSMOD_7(fscpos, fscher, fscscy, fschrc, fschmd, fschds, fscsyl); + +enum chips { fscpos, fscher, fscscy, fschrc, fschmd, fschds, fscsyl }; /* * The FSCHMD registers and other defines @@ -221,7 +222,7 @@ static const int FSCHMD_NO_TEMP_SENSORS[7] = { 3, 3, 4, 3, 5, 5, 11 }; static int fschmd_probe(struct i2c_client *client, const struct i2c_device_id *id); -static int fschmd_detect(struct i2c_client *client, int kind, +static int fschmd_detect(struct i2c_client *client, struct i2c_board_info *info); static int fschmd_remove(struct i2c_client *client); static struct fschmd_data *fschmd_update_device(struct device *dev); @@ -251,7 +252,7 @@ static struct i2c_driver fschmd_driver = { .remove = fschmd_remove, .id_table = fschmd_id, .detect = fschmd_detect, - .address_data = &addr_data, + .address_list = normal_i2c, }; /* @@ -1000,7 +1001,7 @@ static void fschmd_dmi_decode(const struct dmi_header *header, void *dummy) } } -static int fschmd_detect(struct i2c_client *client, int _kind, +static int fschmd_detect(struct i2c_client *client, struct i2c_board_info *info) { enum chips kind; diff --git a/drivers/hwmon/gl518sm.c b/drivers/hwmon/gl518sm.c index 1d69458aa0b..e7ae5743e18 100644 --- a/drivers/hwmon/gl518sm.c +++ b/drivers/hwmon/gl518sm.c @@ -46,8 +46,7 @@ /* Addresses to scan */ static const unsigned short normal_i2c[] = { 0x2c, 0x2d, I2C_CLIENT_END }; -/* Insmod parameters */ -I2C_CLIENT_INSMOD_2(gl518sm_r00, gl518sm_r80); +enum chips { gl518sm_r00, gl518sm_r80 }; /* Many GL518 constants specified below */ @@ -139,8 +138,7 @@ struct gl518_data { static int gl518_probe(struct i2c_client *client, const struct i2c_device_id *id); -static int gl518_detect(struct i2c_client *client, int kind, - struct i2c_board_info *info); +static int gl518_detect(struct i2c_client *client, struct i2c_board_info *info); static void gl518_init_client(struct i2c_client *client); static int gl518_remove(struct i2c_client *client); static int gl518_read_value(struct i2c_client *client, u8 reg); @@ -163,7 +161,7 @@ static struct i2c_driver gl518_driver = { .remove = gl518_remove, .id_table = gl518_id, .detect = gl518_detect, - .address_data = &addr_data, + .address_list = normal_i2c, }; /* @@ -484,8 +482,7 @@ static const struct attribute_group gl518_group_r80 = { */ /* Return 0 if detection is successful, -ENODEV otherwise */ -static int gl518_detect(struct i2c_client *client, int kind, - struct i2c_board_info *info) +static int gl518_detect(struct i2c_client *client, struct i2c_board_info *info) { struct i2c_adapter *adapter = client->adapter; int rev; diff --git a/drivers/hwmon/gl520sm.c b/drivers/hwmon/gl520sm.c index 92b5720ceaf..ec588026f0a 100644 --- a/drivers/hwmon/gl520sm.c +++ b/drivers/hwmon/gl520sm.c @@ -41,9 +41,6 @@ MODULE_PARM_DESC(extra_sensor_type, "Type of extra sensor (0=autodetect, 1=tempe /* Addresses to scan */ static const unsigned short normal_i2c[] = { 0x2c, 0x2d, I2C_CLIENT_END }; -/* Insmod parameters */ -I2C_CLIENT_INSMOD_1(gl520sm); - /* Many GL520 constants specified below One of the inputs can be configured as either temp or voltage. 
That's why _TEMP2 and _IN4 access the same register @@ -81,8 +78,7 @@ static const u8 GL520_REG_TEMP_MAX_HYST[] = { 0x06, 0x18 }; static int gl520_probe(struct i2c_client *client, const struct i2c_device_id *id); -static int gl520_detect(struct i2c_client *client, int kind, - struct i2c_board_info *info); +static int gl520_detect(struct i2c_client *client, struct i2c_board_info *info); static void gl520_init_client(struct i2c_client *client); static int gl520_remove(struct i2c_client *client); static int gl520_read_value(struct i2c_client *client, u8 reg); @@ -91,7 +87,7 @@ static struct gl520_data *gl520_update_device(struct device *dev); /* Driver data */ static const struct i2c_device_id gl520_id[] = { - { "gl520sm", gl520sm }, + { "gl520sm", 0 }, { } }; MODULE_DEVICE_TABLE(i2c, gl520_id); @@ -105,7 +101,7 @@ static struct i2c_driver gl520_driver = { .remove = gl520_remove, .id_table = gl520_id, .detect = gl520_detect, - .address_data = &addr_data, + .address_list = normal_i2c, }; /* Client data */ @@ -681,8 +677,7 @@ static const struct attribute_group gl520_group_opt = { */ /* Return 0 if detection is successful, -ENODEV otherwise */ -static int gl520_detect(struct i2c_client *client, int kind, - struct i2c_board_info *info) +static int gl520_detect(struct i2c_client *client, struct i2c_board_info *info) { struct i2c_adapter *adapter = client->adapter; diff --git a/drivers/hwmon/lis3lv02d.c b/drivers/hwmon/lis3lv02d.c index cf5afb9a10a..b2f2277cad3 100644 --- a/drivers/hwmon/lis3lv02d.c +++ b/drivers/hwmon/lis3lv02d.c @@ -43,13 +43,30 @@ #define MDPS_POLL_INTERVAL 50 /* * The sensor can also generate interrupts (DRDY) but it's pretty pointless - * because their are generated even if the data do not change. So it's better + * because they are generated even if the data do not change. So it's better * to keep the interrupt for the free-fall event. The values are updated at * 40Hz (at the lowest frequency), but as it can be pretty time consuming on * some low processor, we poll the sensor only at 20Hz... enough for the * joystick. */ +#define LIS3_PWRON_DELAY_WAI_12B (5000) +#define LIS3_PWRON_DELAY_WAI_8B (3000) + +/* + * LIS3LV02D spec says 1024 LSBs corresponds 1 G -> 1LSB is 1000/1024 mG + * LIS302D spec says: 18 mG / digit + * LIS3_ACCURACY is used to increase accuracy of the intermediate + * calculation results. 
+ */ +#define LIS3_ACCURACY 1024 +/* Sensitivity values for -2G +2G scale */ +#define LIS3_SENSITIVITY_12B ((LIS3_ACCURACY * 1000) / 1024) +#define LIS3_SENSITIVITY_8B (18 * LIS3_ACCURACY) + +#define LIS3_DEFAULT_FUZZ 3 +#define LIS3_DEFAULT_FLAT 3 + struct lis3lv02d lis3_dev = { .misc_wait = __WAIT_QUEUE_HEAD_INITIALIZER(lis3_dev.misc_wait), }; @@ -65,7 +82,7 @@ static s16 lis3lv02d_read_8(struct lis3lv02d *lis3, int reg) return lo; } -static s16 lis3lv02d_read_16(struct lis3lv02d *lis3, int reg) +static s16 lis3lv02d_read_12(struct lis3lv02d *lis3, int reg) { u8 lo, hi; @@ -102,16 +119,106 @@ static inline int lis3lv02d_get_axis(s8 axis, int hw_values[3]) static void lis3lv02d_get_xyz(struct lis3lv02d *lis3, int *x, int *y, int *z) { int position[3]; + int i; + mutex_lock(&lis3->mutex); position[0] = lis3->read_data(lis3, OUTX); position[1] = lis3->read_data(lis3, OUTY); position[2] = lis3->read_data(lis3, OUTZ); + mutex_unlock(&lis3->mutex); + + for (i = 0; i < 3; i++) + position[i] = (position[i] * lis3->scale) / LIS3_ACCURACY; *x = lis3lv02d_get_axis(lis3->ac.x, position); *y = lis3lv02d_get_axis(lis3->ac.y, position); *z = lis3lv02d_get_axis(lis3->ac.z, position); } +/* conversion btw sampling rate and the register values */ +static int lis3_12_rates[4] = {40, 160, 640, 2560}; +static int lis3_8_rates[2] = {100, 400}; + +/* ODR is Output Data Rate */ +static int lis3lv02d_get_odr(void) +{ + u8 ctrl; + int shift; + + lis3_dev.read(&lis3_dev, CTRL_REG1, &ctrl); + ctrl &= lis3_dev.odr_mask; + shift = ffs(lis3_dev.odr_mask) - 1; + return lis3_dev.odrs[(ctrl >> shift)]; +} + +static int lis3lv02d_set_odr(int rate) +{ + u8 ctrl; + int i, len, shift; + + lis3_dev.read(&lis3_dev, CTRL_REG1, &ctrl); + ctrl &= ~lis3_dev.odr_mask; + len = 1 << hweight_long(lis3_dev.odr_mask); /* # of possible values */ + shift = ffs(lis3_dev.odr_mask) - 1; + + for (i = 0; i < len; i++) + if (lis3_dev.odrs[i] == rate) { + lis3_dev.write(&lis3_dev, CTRL_REG1, + ctrl | (i << shift)); + return 0; + } + return -EINVAL; +} + +static int lis3lv02d_selftest(struct lis3lv02d *lis3, s16 results[3]) +{ + u8 reg; + s16 x, y, z; + u8 selftest; + int ret; + + mutex_lock(&lis3->mutex); + if (lis3_dev.whoami == WAI_12B) + selftest = CTRL1_ST; + else + selftest = CTRL1_STP; + + lis3->read(lis3, CTRL_REG1, ®); + lis3->write(lis3, CTRL_REG1, (reg | selftest)); + msleep(lis3->pwron_delay / lis3lv02d_get_odr()); + + /* Read directly to avoid axis remap */ + x = lis3->read_data(lis3, OUTX); + y = lis3->read_data(lis3, OUTY); + z = lis3->read_data(lis3, OUTZ); + + /* back to normal settings */ + lis3->write(lis3, CTRL_REG1, reg); + msleep(lis3->pwron_delay / lis3lv02d_get_odr()); + + results[0] = x - lis3->read_data(lis3, OUTX); + results[1] = y - lis3->read_data(lis3, OUTY); + results[2] = z - lis3->read_data(lis3, OUTZ); + + ret = 0; + if (lis3->pdata) { + int i; + for (i = 0; i < 3; i++) { + /* Check against selftest acceptance limits */ + if ((results[i] < lis3->pdata->st_min_limits[i]) || + (results[i] > lis3->pdata->st_max_limits[i])) { + ret = -EIO; + goto fail; + } + } + } + + /* test passed */ +fail: + mutex_unlock(&lis3->mutex); + return ret; +} + void lis3lv02d_poweroff(struct lis3lv02d *lis3) { /* disable X,Y,Z axis and power down */ @@ -125,14 +232,19 @@ void lis3lv02d_poweron(struct lis3lv02d *lis3) lis3->init(lis3); + /* LIS3 power on delay is quite long */ + msleep(lis3->pwron_delay / lis3lv02d_get_odr()); + /* * Common configuration - * BDU: LSB and MSB values are not updated until both have been read. 
- * So the value read will always be correct. + * BDU: (12 bits sensors only) LSB and MSB values are not updated until + * both have been read. So the value read will always be correct. */ - lis3->read(lis3, CTRL_REG2, ®); - reg |= CTRL2_BDU; - lis3->write(lis3, CTRL_REG2, reg); + if (lis3->whoami == WAI_12B) { + lis3->read(lis3, CTRL_REG2, ®); + reg |= CTRL2_BDU; + lis3->write(lis3, CTRL_REG2, reg); + } } EXPORT_SYMBOL_GPL(lis3lv02d_poweron); @@ -273,22 +385,17 @@ static void lis3lv02d_joystick_poll(struct input_polled_dev *pidev) int x, y, z; lis3lv02d_get_xyz(&lis3_dev, &x, &y, &z); - input_report_abs(pidev->input, ABS_X, x - lis3_dev.xcalib); - input_report_abs(pidev->input, ABS_Y, y - lis3_dev.ycalib); - input_report_abs(pidev->input, ABS_Z, z - lis3_dev.zcalib); -} - - -static inline void lis3lv02d_calibrate_joystick(void) -{ - lis3lv02d_get_xyz(&lis3_dev, - &lis3_dev.xcalib, &lis3_dev.ycalib, &lis3_dev.zcalib); + input_report_abs(pidev->input, ABS_X, x); + input_report_abs(pidev->input, ABS_Y, y); + input_report_abs(pidev->input, ABS_Z, z); + input_sync(pidev->input); } int lis3lv02d_joystick_enable(void) { struct input_dev *input_dev; int err; + int max_val, fuzz, flat; if (lis3_dev.idev) return -EINVAL; @@ -301,8 +408,6 @@ int lis3lv02d_joystick_enable(void) lis3_dev.idev->poll_interval = MDPS_POLL_INTERVAL; input_dev = lis3_dev.idev->input; - lis3lv02d_calibrate_joystick(); - input_dev->name = "ST LIS3LV02DL Accelerometer"; input_dev->phys = DRIVER_NAME "/input0"; input_dev->id.bustype = BUS_HOST; @@ -310,9 +415,12 @@ int lis3lv02d_joystick_enable(void) input_dev->dev.parent = &lis3_dev.pdev->dev; set_bit(EV_ABS, input_dev->evbit); - input_set_abs_params(input_dev, ABS_X, -lis3_dev.mdps_max_val, lis3_dev.mdps_max_val, 3, 3); - input_set_abs_params(input_dev, ABS_Y, -lis3_dev.mdps_max_val, lis3_dev.mdps_max_val, 3, 3); - input_set_abs_params(input_dev, ABS_Z, -lis3_dev.mdps_max_val, lis3_dev.mdps_max_val, 3, 3); + max_val = (lis3_dev.mdps_max_val * lis3_dev.scale) / LIS3_ACCURACY; + fuzz = (LIS3_DEFAULT_FUZZ * lis3_dev.scale) / LIS3_ACCURACY; + flat = (LIS3_DEFAULT_FLAT * lis3_dev.scale) / LIS3_ACCURACY; + input_set_abs_params(input_dev, ABS_X, -max_val, max_val, fuzz, flat); + input_set_abs_params(input_dev, ABS_Y, -max_val, max_val, fuzz, flat); + input_set_abs_params(input_dev, ABS_Z, -max_val, max_val, fuzz, flat); err = input_register_polled_device(lis3_dev.idev); if (err) { @@ -332,11 +440,23 @@ void lis3lv02d_joystick_disable(void) if (lis3_dev.irq) misc_deregister(&lis3lv02d_misc_device); input_unregister_polled_device(lis3_dev.idev); + input_free_polled_device(lis3_dev.idev); lis3_dev.idev = NULL; } EXPORT_SYMBOL_GPL(lis3lv02d_joystick_disable); /* Sysfs stuff */ +static ssize_t lis3lv02d_selftest_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + int result; + s16 values[3]; + + result = lis3lv02d_selftest(&lis3_dev, values); + return sprintf(buf, "%s %d %d %d\n", result == 0 ? 
"OK" : "FAIL", + values[0], values[1], values[2]); +} + static ssize_t lis3lv02d_position_show(struct device *dev, struct device_attribute *attr, char *buf) { @@ -346,41 +466,35 @@ static ssize_t lis3lv02d_position_show(struct device *dev, return sprintf(buf, "(%d,%d,%d)\n", x, y, z); } -static ssize_t lis3lv02d_calibrate_show(struct device *dev, - struct device_attribute *attr, char *buf) +static ssize_t lis3lv02d_rate_show(struct device *dev, + struct device_attribute *attr, char *buf) { - return sprintf(buf, "(%d,%d,%d)\n", lis3_dev.xcalib, lis3_dev.ycalib, lis3_dev.zcalib); + return sprintf(buf, "%d\n", lis3lv02d_get_odr()); } -static ssize_t lis3lv02d_calibrate_store(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t count) +static ssize_t lis3lv02d_rate_set(struct device *dev, + struct device_attribute *attr, const char *buf, + size_t count) { - lis3lv02d_calibrate_joystick(); - return count; -} + unsigned long rate; -/* conversion btw sampling rate and the register values */ -static int lis3lv02dl_df_val[4] = {40, 160, 640, 2560}; -static ssize_t lis3lv02d_rate_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - u8 ctrl; - int val; + if (strict_strtoul(buf, 0, &rate)) + return -EINVAL; - lis3_dev.read(&lis3_dev, CTRL_REG1, &ctrl); - val = (ctrl & (CTRL1_DF0 | CTRL1_DF1)) >> 4; - return sprintf(buf, "%d\n", lis3lv02dl_df_val[val]); + if (lis3lv02d_set_odr(rate)) + return -EINVAL; + + return count; } +static DEVICE_ATTR(selftest, S_IRUSR, lis3lv02d_selftest_show, NULL); static DEVICE_ATTR(position, S_IRUGO, lis3lv02d_position_show, NULL); -static DEVICE_ATTR(calibrate, S_IRUGO|S_IWUSR, lis3lv02d_calibrate_show, - lis3lv02d_calibrate_store); -static DEVICE_ATTR(rate, S_IRUGO, lis3lv02d_rate_show, NULL); +static DEVICE_ATTR(rate, S_IRUGO | S_IWUSR, lis3lv02d_rate_show, + lis3lv02d_rate_set); static struct attribute *lis3lv02d_attributes[] = { + &dev_attr_selftest.attr, &dev_attr_position.attr, - &dev_attr_calibrate.attr, &dev_attr_rate.attr, NULL }; @@ -409,22 +523,30 @@ EXPORT_SYMBOL_GPL(lis3lv02d_remove_fs); /* * Initialise the accelerometer and the various subsystems. - * Should be rather independant of the bus system. + * Should be rather independent of the bus system. 
*/ int lis3lv02d_init_device(struct lis3lv02d *dev) { dev->whoami = lis3lv02d_read_8(dev, WHO_AM_I); switch (dev->whoami) { - case LIS_DOUBLE_ID: - printk(KERN_INFO DRIVER_NAME ": 2-byte sensor found\n"); - dev->read_data = lis3lv02d_read_16; + case WAI_12B: + printk(KERN_INFO DRIVER_NAME ": 12 bits sensor found\n"); + dev->read_data = lis3lv02d_read_12; dev->mdps_max_val = 2048; + dev->pwron_delay = LIS3_PWRON_DELAY_WAI_12B; + dev->odrs = lis3_12_rates; + dev->odr_mask = CTRL1_DF0 | CTRL1_DF1; + dev->scale = LIS3_SENSITIVITY_12B; break; - case LIS_SINGLE_ID: - printk(KERN_INFO DRIVER_NAME ": 1-byte sensor found\n"); + case WAI_8B: + printk(KERN_INFO DRIVER_NAME ": 8 bits sensor found\n"); dev->read_data = lis3lv02d_read_8; dev->mdps_max_val = 128; + dev->pwron_delay = LIS3_PWRON_DELAY_WAI_8B; + dev->odrs = lis3_8_rates; + dev->odr_mask = CTRL1_DR; + dev->scale = LIS3_SENSITIVITY_8B; break; default: printk(KERN_ERR DRIVER_NAME @@ -432,6 +554,8 @@ int lis3lv02d_init_device(struct lis3lv02d *dev) return -EINVAL; } + mutex_init(&dev->mutex); + lis3lv02d_add_fs(dev); lis3lv02d_poweron(dev); @@ -443,7 +567,7 @@ int lis3lv02d_init_device(struct lis3lv02d *dev) if (dev->pdata) { struct lis3lv02d_platform_data *p = dev->pdata; - if (p->click_flags && (dev->whoami == LIS_SINGLE_ID)) { + if (p->click_flags && (dev->whoami == WAI_8B)) { dev->write(dev, CLICK_CFG, p->click_flags); dev->write(dev, CLICK_TIMELIMIT, p->click_time_limit); dev->write(dev, CLICK_LATENCY, p->click_latency); @@ -454,7 +578,7 @@ int lis3lv02d_init_device(struct lis3lv02d *dev) (p->click_thresh_y << 4)); } - if (p->wakeup_flags && (dev->whoami == LIS_SINGLE_ID)) { + if (p->wakeup_flags && (dev->whoami == WAI_8B)) { dev->write(dev, FF_WU_CFG_1, p->wakeup_flags); dev->write(dev, FF_WU_THS_1, p->wakeup_thresh & 0x7f); /* default to 2.5ms for now */ @@ -484,4 +608,3 @@ EXPORT_SYMBOL_GPL(lis3lv02d_init_device); MODULE_DESCRIPTION("ST LIS3LV02Dx three-axis digital accelerometer driver"); MODULE_AUTHOR("Yan Burman, Eric Piel, Pavel Machek"); MODULE_LICENSE("GPL"); - diff --git a/drivers/hwmon/lis3lv02d.h b/drivers/hwmon/lis3lv02d.h index 3e1ff46f72d..e6a01f44709 100644 --- a/drivers/hwmon/lis3lv02d.h +++ b/drivers/hwmon/lis3lv02d.h @@ -2,7 +2,7 @@ * lis3lv02d.h - ST LIS3LV02DL accelerometer driver * * Copyright (C) 2007-2008 Yan Burman - * Copyright (C) 2008 Eric Piel + * Copyright (C) 2008-2009 Eric Piel * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -22,20 +22,18 @@ #include <linux/input-polldev.h> /* - * The actual chip is STMicroelectronics LIS3LV02DL or LIS3LV02DQ that seems to - * be connected via SPI. There exists also several similar chips (such as LIS302DL or - * LIS3L02DQ) and they have slightly different registers, but we can provide a - * common interface for all of them. - * They can also be connected via I²C. + * This driver tries to support the "digital" accelerometer chips from + * STMicroelectronics such as LIS3LV02DL, LIS302DL, LIS3L02DQ, LIS331DL, + * LIS35DE, or LIS202DL. They are very similar in terms of programming, with + * almost the same registers. In addition to differing on physical properties, + * they differ on the number of axes (2/3), precision (8/12 bits), and special + * features (freefall detection, click...). Unfortunately, not all the + * differences can be probed via a register. + * They can be connected either via I²C or SPI. 
*/ #include <linux/lis3lv02d.h> -/* 2-byte registers */ -#define LIS_DOUBLE_ID 0x3A /* LIS3LV02D[LQ] */ -/* 1-byte registers */ -#define LIS_SINGLE_ID 0x3B /* LIS[32]02DL and others */ - enum lis3_reg { WHO_AM_I = 0x0F, OFFSET_X = 0x16, @@ -94,7 +92,13 @@ enum lis3lv02d_reg { DD_THSE_H = 0x3F, }; -enum lis3lv02d_ctrl1 { +enum lis3_who_am_i { + WAI_12B = 0x3A, /* 12 bits: LIS3LV02D[LQ]... */ + WAI_8B = 0x3B, /* 8 bits: LIS[23]02D[LQ]... */ + WAI_6B = 0x52, /* 6 bits: LIS331DLF - not supported */ +}; + +enum lis3lv02d_ctrl1_12b { CTRL1_Xen = 0x01, CTRL1_Yen = 0x02, CTRL1_Zen = 0x04, @@ -104,6 +108,16 @@ enum lis3lv02d_ctrl1 { CTRL1_PD0 = 0x40, CTRL1_PD1 = 0x80, }; + +/* Delta to ctrl1_12b version */ +enum lis3lv02d_ctrl1_8b { + CTRL1_STM = 0x08, + CTRL1_STP = 0x10, + CTRL1_FS = 0x20, + CTRL1_PD = 0x40, + CTRL1_DR = 0x80, +}; + enum lis3lv02d_ctrl2 { CTRL2_DAS = 0x01, CTRL2_SIM = 0x02, @@ -194,16 +208,20 @@ struct lis3lv02d { int (*write) (struct lis3lv02d *lis3, int reg, u8 val); int (*read) (struct lis3lv02d *lis3, int reg, u8 *ret); - u8 whoami; /* 3Ah: 2-byte registries, 3Bh: 1-byte registries */ + int *odrs; /* Supported output data rates */ + u8 odr_mask; /* ODR bit mask */ + u8 whoami; /* indicates measurement precision */ s16 (*read_data) (struct lis3lv02d *lis3, int reg); int mdps_max_val; + int pwron_delay; + int scale; /* + * relationship between 1 LBS and mG + * (1/1000th of earth gravity) + */ struct input_polled_dev *idev; /* input device */ struct platform_device *pdev; /* platform device */ atomic_t count; /* interrupt count after last read */ - int xcalib; /* calibrated null value for x */ - int ycalib; /* calibrated null value for y */ - int zcalib; /* calibrated null value for z */ struct axis_conversion ac; /* hw -> logical axis */ u32 irq; /* IRQ number */ @@ -212,6 +230,7 @@ struct lis3lv02d { unsigned long misc_opened; /* bit0: whether the device is open */ struct lis3lv02d_platform_data *pdata; /* for passing board config */ + struct mutex mutex; /* Serialize poll and selftest */ }; int lis3lv02d_init_device(struct lis3lv02d *lis3); diff --git a/drivers/hwmon/lm63.c b/drivers/hwmon/lm63.c index 5da66ab04f7..bf81aff7051 100644 --- a/drivers/hwmon/lm63.c +++ b/drivers/hwmon/lm63.c @@ -56,12 +56,6 @@ static const unsigned short normal_i2c[] = { 0x4c, I2C_CLIENT_END }; /* - * Insmod parameters - */ - -I2C_CLIENT_INSMOD_1(lm63); - -/* * The LM63 registers */ @@ -134,8 +128,7 @@ static int lm63_remove(struct i2c_client *client); static struct lm63_data *lm63_update_device(struct device *dev); -static int lm63_detect(struct i2c_client *client, int kind, - struct i2c_board_info *info); +static int lm63_detect(struct i2c_client *client, struct i2c_board_info *info); static void lm63_init_client(struct i2c_client *client); /* @@ -143,7 +136,7 @@ static void lm63_init_client(struct i2c_client *client); */ static const struct i2c_device_id lm63_id[] = { - { "lm63", lm63 }, + { "lm63", 0 }, { } }; MODULE_DEVICE_TABLE(i2c, lm63_id); @@ -157,7 +150,7 @@ static struct i2c_driver lm63_driver = { .remove = lm63_remove, .id_table = lm63_id, .detect = lm63_detect, - .address_data = &addr_data, + .address_list = normal_i2c, }; /* @@ -423,7 +416,7 @@ static const struct attribute_group lm63_group_fan1 = { */ /* Return 0 if detection is successful, -ENODEV otherwise */ -static int lm63_detect(struct i2c_client *new_client, int kind, +static int lm63_detect(struct i2c_client *new_client, struct i2c_board_info *info) { struct i2c_adapter *adapter = new_client->adapter; diff --git 
a/drivers/hwmon/lm73.c b/drivers/hwmon/lm73.c index 0bf8b2a8e9f..c5f39ba103c 100644 --- a/drivers/hwmon/lm73.c +++ b/drivers/hwmon/lm73.c @@ -27,9 +27,6 @@ static const unsigned short normal_i2c[] = { 0x48, 0x49, 0x4a, 0x4c, 0x4d, 0x4e, I2C_CLIENT_END }; -/* Insmod parameters */ -I2C_CLIENT_INSMOD_1(lm73); - /* LM73 registers */ #define LM73_REG_INPUT 0x00 #define LM73_REG_CONF 0x01 @@ -145,13 +142,13 @@ static int lm73_remove(struct i2c_client *client) } static const struct i2c_device_id lm73_ids[] = { - { "lm73", lm73 }, + { "lm73", 0 }, { /* LIST END */ } }; MODULE_DEVICE_TABLE(i2c, lm73_ids); /* Return 0 if detection is successful, -ENODEV otherwise */ -static int lm73_detect(struct i2c_client *new_client, int kind, +static int lm73_detect(struct i2c_client *new_client, struct i2c_board_info *info) { struct i2c_adapter *adapter = new_client->adapter; @@ -182,7 +179,7 @@ static struct i2c_driver lm73_driver = { .remove = lm73_remove, .id_table = lm73_ids, .detect = lm73_detect, - .address_data = &addr_data, + .address_list = normal_i2c, }; /* module glue */ diff --git a/drivers/hwmon/lm75.c b/drivers/hwmon/lm75.c index e392548cccb..8ae2cfe2d82 100644 --- a/drivers/hwmon/lm75.c +++ b/drivers/hwmon/lm75.c @@ -32,15 +32,12 @@ /* * This driver handles the LM75 and compatible digital temperature sensors. - * Only types which are _not_ listed in I2C_CLIENT_INSMOD_*() need to be - * listed here. We start at 9 since I2C_CLIENT_INSMOD_*() currently allow - * definition of up to 8 chip types (plus zero). */ enum lm75_type { /* keep sorted in alphabetical order */ - ds1775 = 9, + ds1775, ds75, - /* lm75 -- in I2C_CLIENT_INSMOD_1() */ + lm75, lm75a, max6625, max6626, @@ -58,9 +55,6 @@ enum lm75_type { /* keep sorted in alphabetical order */ static const unsigned short normal_i2c[] = { 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, I2C_CLIENT_END }; -/* Insmod parameters */ -I2C_CLIENT_INSMOD_1(lm75); - /* The LM75 registers */ #define LM75_REG_CONF 0x01 @@ -234,7 +228,7 @@ static const struct i2c_device_id lm75_ids[] = { MODULE_DEVICE_TABLE(i2c, lm75_ids); /* Return 0 if detection is successful, -ENODEV otherwise */ -static int lm75_detect(struct i2c_client *new_client, int kind, +static int lm75_detect(struct i2c_client *new_client, struct i2c_board_info *info) { struct i2c_adapter *adapter = new_client->adapter; @@ -295,7 +289,7 @@ static struct i2c_driver lm75_driver = { .remove = lm75_remove, .id_table = lm75_ids, .detect = lm75_detect, - .address_data = &addr_data, + .address_list = normal_i2c, }; /*-----------------------------------------------------------------------*/ diff --git a/drivers/hwmon/lm77.c b/drivers/hwmon/lm77.c index ac067fd1948..b28a297be50 100644 --- a/drivers/hwmon/lm77.c +++ b/drivers/hwmon/lm77.c @@ -39,9 +39,6 @@ static const unsigned short normal_i2c[] = { 0x48, 0x49, 0x4a, 0x4b, I2C_CLIENT_END }; -/* Insmod parameters */ -I2C_CLIENT_INSMOD_1(lm77); - /* The LM77 registers */ #define LM77_REG_TEMP 0x00 #define LM77_REG_CONF 0x01 @@ -66,8 +63,7 @@ struct lm77_data { static int lm77_probe(struct i2c_client *client, const struct i2c_device_id *id); -static int lm77_detect(struct i2c_client *client, int kind, - struct i2c_board_info *info); +static int lm77_detect(struct i2c_client *client, struct i2c_board_info *info); static void lm77_init_client(struct i2c_client *client); static int lm77_remove(struct i2c_client *client); static u16 lm77_read_value(struct i2c_client *client, u8 reg); @@ -77,7 +73,7 @@ static struct lm77_data *lm77_update_device(struct device *dev); 
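The lm77 hunk continues below; together with the lm78, lm83, lm85, lm87 and lm90 hunks that follow it, it shows the second flavour of the conversion. Single-chip drivers store 0 as driver_data in their id table, while multi-chip drivers keep a local enum chips and let detect() choose the device name, so probe() can recover the variant from id->driver_data. The fragment below is a hedged sketch of that multi-chip flavour, reusing the includes from the earlier "foo" skeleton; the "foo"/"bar" names, FOO_REG_CHIPID and the ID values are invented.

/* Sketch of the multi-chip pattern; names and register values are
 * illustrative only and taken from no driver in this series. */
#define FOO_REG_CHIPID	0x3f		/* hypothetical chip ID register */

enum chips { foo, bar };

static const struct i2c_device_id foo_id[] = {
	{ "foo", foo },
	{ "bar", bar },
	{ }
};

static int foo_detect(struct i2c_client *client, struct i2c_board_info *info)
{
	const char *name;
	int chipid = i2c_smbus_read_byte_data(client, FOO_REG_CHIPID);

	if (chipid == 0x20)
		name = "foo";
	else if (chipid == 0x30)
		name = "bar";
	else
		return -ENODEV;

	strlcpy(info->type, name, I2C_NAME_SIZE);
	return 0;
}

static int foo_probe(struct i2c_client *client, const struct i2c_device_id *id)
{
	/* the variant chosen by detect() (or by board info) comes back here */
	enum chips kind = id->driver_data;

	dev_info(&client->dev, "probing variant %s\n",
		 kind == foo ? "foo" : "bar");
	return 0;	/* allocate per-client data, branch on kind as needed */
}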
static const struct i2c_device_id lm77_id[] = { - { "lm77", lm77 }, + { "lm77", 0 }, { } }; MODULE_DEVICE_TABLE(i2c, lm77_id); @@ -92,7 +88,7 @@ static struct i2c_driver lm77_driver = { .remove = lm77_remove, .id_table = lm77_id, .detect = lm77_detect, - .address_data = &addr_data, + .address_list = normal_i2c, }; /* straight from the datasheet */ @@ -245,7 +241,7 @@ static const struct attribute_group lm77_group = { }; /* Return 0 if detection is successful, -ENODEV otherwise */ -static int lm77_detect(struct i2c_client *new_client, int kind, +static int lm77_detect(struct i2c_client *new_client, struct i2c_board_info *info) { struct i2c_adapter *adapter = new_client->adapter; diff --git a/drivers/hwmon/lm78.c b/drivers/hwmon/lm78.c index 5978291cebb..cadcbd90ff3 100644 --- a/drivers/hwmon/lm78.c +++ b/drivers/hwmon/lm78.c @@ -41,8 +41,7 @@ static const unsigned short normal_i2c[] = { 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, I2C_CLIENT_END }; static unsigned short isa_address = 0x290; -/* Insmod parameters */ -I2C_CLIENT_INSMOD_2(lm78, lm79); +enum chips { lm78, lm79 }; /* Many LM78 constants specified below */ @@ -142,7 +141,7 @@ struct lm78_data { }; -static int lm78_i2c_detect(struct i2c_client *client, int kind, +static int lm78_i2c_detect(struct i2c_client *client, struct i2c_board_info *info); static int lm78_i2c_probe(struct i2c_client *client, const struct i2c_device_id *id); @@ -173,7 +172,7 @@ static struct i2c_driver lm78_driver = { .remove = lm78_i2c_remove, .id_table = lm78_i2c_id, .detect = lm78_i2c_detect, - .address_data = &addr_data, + .address_list = normal_i2c, }; static struct platform_driver lm78_isa_driver = { @@ -558,7 +557,7 @@ static int lm78_alias_detect(struct i2c_client *client, u8 chipid) return 1; } -static int lm78_i2c_detect(struct i2c_client *client, int kind, +static int lm78_i2c_detect(struct i2c_client *client, struct i2c_board_info *info) { int i; diff --git a/drivers/hwmon/lm80.c b/drivers/hwmon/lm80.c index bcffc189940..18a0e6c5fe8 100644 --- a/drivers/hwmon/lm80.c +++ b/drivers/hwmon/lm80.c @@ -35,9 +35,6 @@ static const unsigned short normal_i2c[] = { 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, I2C_CLIENT_END }; -/* Insmod parameters */ -I2C_CLIENT_INSMOD_1(lm80); - /* Many LM80 constants specified below */ /* The LM80 registers */ @@ -133,8 +130,7 @@ struct lm80_data { static int lm80_probe(struct i2c_client *client, const struct i2c_device_id *id); -static int lm80_detect(struct i2c_client *client, int kind, - struct i2c_board_info *info); +static int lm80_detect(struct i2c_client *client, struct i2c_board_info *info); static void lm80_init_client(struct i2c_client *client); static int lm80_remove(struct i2c_client *client); static struct lm80_data *lm80_update_device(struct device *dev); @@ -146,7 +142,7 @@ static int lm80_write_value(struct i2c_client *client, u8 reg, u8 value); */ static const struct i2c_device_id lm80_id[] = { - { "lm80", lm80 }, + { "lm80", 0 }, { } }; MODULE_DEVICE_TABLE(i2c, lm80_id); @@ -160,7 +156,7 @@ static struct i2c_driver lm80_driver = { .remove = lm80_remove, .id_table = lm80_id, .detect = lm80_detect, - .address_data = &addr_data, + .address_list = normal_i2c, }; /* @@ -447,8 +443,7 @@ static const struct attribute_group lm80_group = { }; /* Return 0 if detection is successful, -ENODEV otherwise */ -static int lm80_detect(struct i2c_client *client, int kind, - struct i2c_board_info *info) +static int lm80_detect(struct i2c_client *client, struct i2c_board_info *info) { struct i2c_adapter *adapter = 
client->adapter; int i, cur; diff --git a/drivers/hwmon/lm83.c b/drivers/hwmon/lm83.c index 08b03e6ed0b..8290476aee4 100644 --- a/drivers/hwmon/lm83.c +++ b/drivers/hwmon/lm83.c @@ -51,11 +51,7 @@ static const unsigned short normal_i2c[] = { 0x18, 0x19, 0x1a, 0x29, 0x2a, 0x2b, 0x4c, 0x4d, 0x4e, I2C_CLIENT_END }; -/* - * Insmod parameters - */ - -I2C_CLIENT_INSMOD_2(lm83, lm82); +enum chips { lm83, lm82 }; /* * The LM83 registers @@ -118,7 +114,7 @@ static const u8 LM83_REG_W_HIGH[] = { * Functions declaration */ -static int lm83_detect(struct i2c_client *new_client, int kind, +static int lm83_detect(struct i2c_client *new_client, struct i2c_board_info *info); static int lm83_probe(struct i2c_client *client, const struct i2c_device_id *id); @@ -145,7 +141,7 @@ static struct i2c_driver lm83_driver = { .remove = lm83_remove, .id_table = lm83_id, .detect = lm83_detect, - .address_data = &addr_data, + .address_list = normal_i2c, }; /* @@ -291,7 +287,7 @@ static const struct attribute_group lm83_group_opt = { */ /* Return 0 if detection is successful, -ENODEV otherwise */ -static int lm83_detect(struct i2c_client *new_client, int kind, +static int lm83_detect(struct i2c_client *new_client, struct i2c_board_info *info) { struct i2c_adapter *adapter = new_client->adapter; diff --git a/drivers/hwmon/lm85.c b/drivers/hwmon/lm85.c index d56da2e7470..b3841a61559 100644 --- a/drivers/hwmon/lm85.c +++ b/drivers/hwmon/lm85.c @@ -38,9 +38,11 @@ /* Addresses to scan */ static const unsigned short normal_i2c[] = { 0x2c, 0x2d, 0x2e, I2C_CLIENT_END }; -/* Insmod parameters */ -I2C_CLIENT_INSMOD_7(lm85b, lm85c, adm1027, adt7463, adt7468, emc6d100, - emc6d102); +enum chips { + any_chip, lm85b, lm85c, + adm1027, adt7463, adt7468, + emc6d100, emc6d102 +}; /* The LM85 registers */ @@ -323,8 +325,7 @@ struct lm85_data { struct lm85_zone zone[3]; }; -static int lm85_detect(struct i2c_client *client, int kind, - struct i2c_board_info *info); +static int lm85_detect(struct i2c_client *client, struct i2c_board_info *info); static int lm85_probe(struct i2c_client *client, const struct i2c_device_id *id); static int lm85_remove(struct i2c_client *client); @@ -357,7 +358,7 @@ static struct i2c_driver lm85_driver = { .remove = lm85_remove, .id_table = lm85_id, .detect = lm85_detect, - .address_data = &addr_data, + .address_list = normal_i2c, }; @@ -1156,8 +1157,7 @@ static int lm85_is_fake(struct i2c_client *client) } /* Return 0 if detection is successful, -ENODEV otherwise */ -static int lm85_detect(struct i2c_client *client, int kind, - struct i2c_board_info *info) +static int lm85_detect(struct i2c_client *client, struct i2c_board_info *info) { struct i2c_adapter *adapter = client->adapter; int address = client->addr; diff --git a/drivers/hwmon/lm87.c b/drivers/hwmon/lm87.c index 4929b1815ee..f1e6e7512ff 100644 --- a/drivers/hwmon/lm87.c +++ b/drivers/hwmon/lm87.c @@ -74,11 +74,7 @@ static const unsigned short normal_i2c[] = { 0x2c, 0x2d, 0x2e, I2C_CLIENT_END }; -/* - * Insmod parameters - */ - -I2C_CLIENT_INSMOD_2(lm87, adm1024); +enum chips { lm87, adm1024 }; /* * The LM87 registers @@ -158,7 +154,7 @@ static u8 LM87_REG_TEMP_LOW[3] = { 0x3A, 0x38, 0x2C }; static int lm87_probe(struct i2c_client *client, const struct i2c_device_id *id); -static int lm87_detect(struct i2c_client *new_client, int kind, +static int lm87_detect(struct i2c_client *new_client, struct i2c_board_info *info); static void lm87_init_client(struct i2c_client *client); static int lm87_remove(struct i2c_client *client); @@ -184,7 +180,7 @@ static 
struct i2c_driver lm87_driver = { .remove = lm87_remove, .id_table = lm87_id, .detect = lm87_detect, - .address_data = &addr_data, + .address_list = normal_i2c, }; /* @@ -662,7 +658,7 @@ static const struct attribute_group lm87_group_opt = { }; /* Return 0 if detection is successful, -ENODEV otherwise */ -static int lm87_detect(struct i2c_client *new_client, int kind, +static int lm87_detect(struct i2c_client *new_client, struct i2c_board_info *info) { struct i2c_adapter *adapter = new_client->adapter; diff --git a/drivers/hwmon/lm90.c b/drivers/hwmon/lm90.c index b7c905f50ed..7c9bdc16742 100644 --- a/drivers/hwmon/lm90.c +++ b/drivers/hwmon/lm90.c @@ -93,12 +93,7 @@ static const unsigned short normal_i2c[] = { 0x18, 0x19, 0x1a, 0x29, 0x2a, 0x2b, 0x4c, 0x4d, 0x4e, I2C_CLIENT_END }; -/* - * Insmod parameters - */ - -I2C_CLIENT_INSMOD_8(lm90, adm1032, lm99, lm86, max6657, adt7461, max6680, - max6646); +enum chips { lm90, adm1032, lm99, lm86, max6657, adt7461, max6680, max6646 }; /* * The LM90 registers @@ -152,8 +147,7 @@ I2C_CLIENT_INSMOD_8(lm90, adm1032, lm99, lm86, max6657, adt7461, max6680, * Functions declaration */ -static int lm90_detect(struct i2c_client *client, int kind, - struct i2c_board_info *info); +static int lm90_detect(struct i2c_client *client, struct i2c_board_info *info); static int lm90_probe(struct i2c_client *client, const struct i2c_device_id *id); static void lm90_init_client(struct i2c_client *client); @@ -192,7 +186,7 @@ static struct i2c_driver lm90_driver = { .remove = lm90_remove, .id_table = lm90_id, .detect = lm90_detect, - .address_data = &addr_data, + .address_list = normal_i2c, }; /* @@ -656,7 +650,7 @@ static int lm90_read_reg(struct i2c_client* client, u8 reg, u8 *value) } /* Return 0 if detection is successful, -ENODEV otherwise */ -static int lm90_detect(struct i2c_client *new_client, int kind, +static int lm90_detect(struct i2c_client *new_client, struct i2c_board_info *info) { struct i2c_adapter *adapter = new_client->adapter; diff --git a/drivers/hwmon/lm92.c b/drivers/hwmon/lm92.c index 47ac698709d..7c31e6205f8 100644 --- a/drivers/hwmon/lm92.c +++ b/drivers/hwmon/lm92.c @@ -54,9 +54,6 @@ static const unsigned short normal_i2c[] = { 0x48, 0x49, 0x4a, 0x4b, I2C_CLIENT_END }; -/* Insmod parameters */ -I2C_CLIENT_INSMOD_1(lm92); - /* The LM92 registers */ #define LM92_REG_CONFIG 0x01 /* 8-bit, RW */ #define LM92_REG_TEMP 0x00 /* 16-bit, RO */ @@ -319,7 +316,7 @@ static const struct attribute_group lm92_group = { }; /* Return 0 if detection is successful, -ENODEV otherwise */ -static int lm92_detect(struct i2c_client *new_client, int kind, +static int lm92_detect(struct i2c_client *new_client, struct i2c_board_info *info) { struct i2c_adapter *adapter = new_client->adapter; @@ -401,7 +398,7 @@ static int lm92_remove(struct i2c_client *client) */ static const struct i2c_device_id lm92_id[] = { - { "lm92", lm92 }, + { "lm92", 0 }, /* max6635 could be added here */ { } }; @@ -416,7 +413,7 @@ static struct i2c_driver lm92_driver = { .remove = lm92_remove, .id_table = lm92_id, .detect = lm92_detect, - .address_data = &addr_data, + .address_list = normal_i2c, }; static int __init sensors_lm92_init(void) diff --git a/drivers/hwmon/lm93.c b/drivers/hwmon/lm93.c index 124dd7cea54..6669255aadc 100644 --- a/drivers/hwmon/lm93.c +++ b/drivers/hwmon/lm93.c @@ -145,7 +145,6 @@ static const unsigned short normal_i2c[] = { 0x2c, 0x2d, 0x2e, I2C_CLIENT_END }; /* Insmod parameters */ -I2C_CLIENT_INSMOD_1(lm93); static int disable_block; module_param(disable_block, bool, 
0); @@ -2501,8 +2500,7 @@ static void lm93_init_client(struct i2c_client *client) } /* Return 0 if detection is successful, -ENODEV otherwise */ -static int lm93_detect(struct i2c_client *client, int kind, - struct i2c_board_info *info) +static int lm93_detect(struct i2c_client *client, struct i2c_board_info *info) { struct i2c_adapter *adapter = client->adapter; int mfr, ver; @@ -2603,7 +2601,7 @@ static int lm93_remove(struct i2c_client *client) } static const struct i2c_device_id lm93_id[] = { - { "lm93", lm93 }, + { "lm93", 0 }, { } }; MODULE_DEVICE_TABLE(i2c, lm93_id); @@ -2617,7 +2615,7 @@ static struct i2c_driver lm93_driver = { .remove = lm93_remove, .id_table = lm93_id, .detect = lm93_detect, - .address_data = &addr_data, + .address_list = normal_i2c, }; static int __init lm93_init(void) diff --git a/drivers/hwmon/lm95241.c b/drivers/hwmon/lm95241.c index 906b896cf1d..8fc8eb8cba4 100644 --- a/drivers/hwmon/lm95241.c +++ b/drivers/hwmon/lm95241.c @@ -39,9 +39,6 @@ static const unsigned short normal_i2c[] = { 0x19, 0x2a, 0x2b, I2C_CLIENT_END}; -/* Insmod parameters */ -I2C_CLIENT_INSMOD_1(lm95241); - /* LM95241 registers */ #define LM95241_REG_R_MAN_ID 0xFE #define LM95241_REG_R_CHIP_ID 0xFF @@ -310,7 +307,7 @@ static const struct attribute_group lm95241_group = { }; /* Return 0 if detection is successful, -ENODEV otherwise */ -static int lm95241_detect(struct i2c_client *new_client, int kind, +static int lm95241_detect(struct i2c_client *new_client, struct i2c_board_info *info) { struct i2c_adapter *adapter = new_client->adapter; @@ -446,7 +443,7 @@ static struct lm95241_data *lm95241_update_device(struct device *dev) /* Driver data (common to all clients) */ static const struct i2c_device_id lm95241_id[] = { - { "lm95241", lm95241 }, + { "lm95241", 0 }, { } }; MODULE_DEVICE_TABLE(i2c, lm95241_id); @@ -460,7 +457,7 @@ static struct i2c_driver lm95241_driver = { .remove = lm95241_remove, .id_table = lm95241_id, .detect = lm95241_detect, - .address_data = &addr_data, + .address_list = normal_i2c, }; static int __init sensors_lm95241_init(void) diff --git a/drivers/hwmon/max1619.c b/drivers/hwmon/max1619.c index 7fcf5ff89e7..022ded09810 100644 --- a/drivers/hwmon/max1619.c +++ b/drivers/hwmon/max1619.c @@ -41,12 +41,6 @@ static const unsigned short normal_i2c[] = { 0x18, 0x19, 0x1a, 0x29, 0x2a, 0x2b, 0x4c, 0x4d, 0x4e, I2C_CLIENT_END }; /* - * Insmod parameters - */ - -I2C_CLIENT_INSMOD_1(max1619); - -/* * The MAX1619 registers */ @@ -88,7 +82,7 @@ static int temp_to_reg(int val) static int max1619_probe(struct i2c_client *client, const struct i2c_device_id *id); -static int max1619_detect(struct i2c_client *client, int kind, +static int max1619_detect(struct i2c_client *client, struct i2c_board_info *info); static void max1619_init_client(struct i2c_client *client); static int max1619_remove(struct i2c_client *client); @@ -99,7 +93,7 @@ static struct max1619_data *max1619_update_device(struct device *dev); */ static const struct i2c_device_id max1619_id[] = { - { "max1619", max1619 }, + { "max1619", 0 }, { } }; MODULE_DEVICE_TABLE(i2c, max1619_id); @@ -113,7 +107,7 @@ static struct i2c_driver max1619_driver = { .remove = max1619_remove, .id_table = max1619_id, .detect = max1619_detect, - .address_data = &addr_data, + .address_list = normal_i2c, }; /* @@ -226,7 +220,7 @@ static const struct attribute_group max1619_group = { */ /* Return 0 if detection is successful, -ENODEV otherwise */ -static int max1619_detect(struct i2c_client *client, int kind, +static int max1619_detect(struct 
i2c_client *client, struct i2c_board_info *info) { struct i2c_adapter *adapter = client->adapter; diff --git a/drivers/hwmon/max6650.c b/drivers/hwmon/max6650.c index 1da561e0cb3..a0160ee5cae 100644 --- a/drivers/hwmon/max6650.c +++ b/drivers/hwmon/max6650.c @@ -62,8 +62,6 @@ module_param(fan_voltage, int, S_IRUGO); module_param(prescaler, int, S_IRUGO); module_param(clock, int, S_IRUGO); -I2C_CLIENT_INSMOD_1(max6650); - /* * MAX 6650/6651 registers */ @@ -116,7 +114,7 @@ I2C_CLIENT_INSMOD_1(max6650); static int max6650_probe(struct i2c_client *client, const struct i2c_device_id *id); -static int max6650_detect(struct i2c_client *client, int kind, +static int max6650_detect(struct i2c_client *client, struct i2c_board_info *info); static int max6650_init_client(struct i2c_client *client); static int max6650_remove(struct i2c_client *client); @@ -127,7 +125,7 @@ static struct max6650_data *max6650_update_device(struct device *dev); */ static const struct i2c_device_id max6650_id[] = { - { "max6650", max6650 }, + { "max6650", 0 }, { } }; MODULE_DEVICE_TABLE(i2c, max6650_id); @@ -141,7 +139,7 @@ static struct i2c_driver max6650_driver = { .remove = max6650_remove, .id_table = max6650_id, .detect = max6650_detect, - .address_data = &addr_data, + .address_list = normal_i2c, }; /* @@ -528,7 +526,7 @@ static struct attribute_group max6650_attr_grp = { */ /* Return 0 if detection is successful, -ENODEV otherwise */ -static int max6650_detect(struct i2c_client *client, int kind, +static int max6650_detect(struct i2c_client *client, struct i2c_board_info *info) { struct i2c_adapter *adapter = client->adapter; diff --git a/drivers/hwmon/pcf8591.c b/drivers/hwmon/pcf8591.c index 1d7ffebd679..d4478794985 100644 --- a/drivers/hwmon/pcf8591.c +++ b/drivers/hwmon/pcf8591.c @@ -29,7 +29,6 @@ static const unsigned short normal_i2c[] = { 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, I2C_CLIENT_END }; /* Insmod parameters */ -I2C_CLIENT_INSMOD_1(pcf8591); static int input_mode; module_param(input_mode, int, 0); @@ -169,7 +168,7 @@ static const struct attribute_group pcf8591_attr_group_opt = { */ /* Return 0 if detection is successful, -ENODEV otherwise */ -static int pcf8591_detect(struct i2c_client *client, int kind, +static int pcf8591_detect(struct i2c_client *client, struct i2c_board_info *info) { struct i2c_adapter *adapter = client->adapter; @@ -299,7 +298,7 @@ static struct i2c_driver pcf8591_driver = { .class = I2C_CLASS_HWMON, /* Nearest choice */ .detect = pcf8591_detect, - .address_data = &addr_data, + .address_list = normal_i2c, }; static int __init pcf8591_init(void) diff --git a/drivers/hwmon/smsc47m192.c b/drivers/hwmon/smsc47m192.c index 4d88c045781..40b26673d87 100644 --- a/drivers/hwmon/smsc47m192.c +++ b/drivers/hwmon/smsc47m192.c @@ -36,9 +36,6 @@ /* Addresses to scan */ static const unsigned short normal_i2c[] = { 0x2c, 0x2d, I2C_CLIENT_END }; -/* Insmod parameters */ -I2C_CLIENT_INSMOD_1(smsc47m192); - /* SMSC47M192 registers */ #define SMSC47M192_REG_IN(nr) ((nr)<6 ? 
(0x20 + (nr)) : \ (0x50 + (nr) - 6)) @@ -115,13 +112,13 @@ struct smsc47m192_data { static int smsc47m192_probe(struct i2c_client *client, const struct i2c_device_id *id); -static int smsc47m192_detect(struct i2c_client *client, int kind, +static int smsc47m192_detect(struct i2c_client *client, struct i2c_board_info *info); static int smsc47m192_remove(struct i2c_client *client); static struct smsc47m192_data *smsc47m192_update_device(struct device *dev); static const struct i2c_device_id smsc47m192_id[] = { - { "smsc47m192", smsc47m192 }, + { "smsc47m192", 0 }, { } }; MODULE_DEVICE_TABLE(i2c, smsc47m192_id); @@ -135,7 +132,7 @@ static struct i2c_driver smsc47m192_driver = { .remove = smsc47m192_remove, .id_table = smsc47m192_id, .detect = smsc47m192_detect, - .address_data = &addr_data, + .address_list = normal_i2c, }; /* Voltages */ @@ -481,7 +478,7 @@ static void smsc47m192_init_client(struct i2c_client *client) } /* Return 0 if detection is successful, -ENODEV otherwise */ -static int smsc47m192_detect(struct i2c_client *client, int kind, +static int smsc47m192_detect(struct i2c_client *client, struct i2c_board_info *info) { struct i2c_adapter *adapter = client->adapter; diff --git a/drivers/hwmon/thmc50.c b/drivers/hwmon/thmc50.c index 4b793849c73..7dfb4dec4c5 100644 --- a/drivers/hwmon/thmc50.c +++ b/drivers/hwmon/thmc50.c @@ -35,7 +35,7 @@ MODULE_LICENSE("GPL"); static const unsigned short normal_i2c[] = { 0x2c, 0x2d, 0x2e, I2C_CLIENT_END }; /* Insmod parameters */ -I2C_CLIENT_INSMOD_2(thmc50, adm1022); +enum chips { thmc50, adm1022 }; static unsigned short adm1022_temp3[16]; static unsigned int adm1022_temp3_num; @@ -84,7 +84,7 @@ struct thmc50_data { u8 alarms; }; -static int thmc50_detect(struct i2c_client *client, int kind, +static int thmc50_detect(struct i2c_client *client, struct i2c_board_info *info); static int thmc50_probe(struct i2c_client *client, const struct i2c_device_id *id); @@ -108,7 +108,7 @@ static struct i2c_driver thmc50_driver = { .remove = thmc50_remove, .id_table = thmc50_id, .detect = thmc50_detect, - .address_data = &addr_data, + .address_list = normal_i2c, }; static ssize_t show_analog_out(struct device *dev, @@ -286,7 +286,7 @@ static const struct attribute_group temp3_group = { }; /* Return 0 if detection is successful, -ENODEV otherwise */ -static int thmc50_detect(struct i2c_client *client, int kind, +static int thmc50_detect(struct i2c_client *client, struct i2c_board_info *info) { unsigned company; diff --git a/drivers/hwmon/tmp401.c b/drivers/hwmon/tmp401.c index ee9673467c4..a13b30e8d8d 100644 --- a/drivers/hwmon/tmp401.c +++ b/drivers/hwmon/tmp401.c @@ -42,8 +42,7 @@ /* Addresses to scan */ static const unsigned short normal_i2c[] = { 0x4c, I2C_CLIENT_END }; -/* Insmod parameters */ -I2C_CLIENT_INSMOD_2(tmp401, tmp411); +enum chips { tmp401, tmp411 }; /* * The TMP401 registers, note some registers have different addresses for @@ -98,7 +97,7 @@ static const u8 TMP411_TEMP_HIGHEST_LSB[2] = { 0x33, 0x37 }; static int tmp401_probe(struct i2c_client *client, const struct i2c_device_id *id); -static int tmp401_detect(struct i2c_client *client, int kind, +static int tmp401_detect(struct i2c_client *client, struct i2c_board_info *info); static int tmp401_remove(struct i2c_client *client); static struct tmp401_data *tmp401_update_device(struct device *dev); @@ -123,7 +122,7 @@ static struct i2c_driver tmp401_driver = { .remove = tmp401_remove, .id_table = tmp401_id, .detect = tmp401_detect, - .address_data = &addr_data, + .address_list = normal_i2c, }; /* 
@@ -488,7 +487,7 @@ static void tmp401_init_client(struct i2c_client *client) i2c_smbus_write_byte_data(client, TMP401_CONFIG_WRITE, config); } -static int tmp401_detect(struct i2c_client *client, int _kind, +static int tmp401_detect(struct i2c_client *client, struct i2c_board_info *info) { enum chips kind; diff --git a/drivers/hwmon/tmp421.c b/drivers/hwmon/tmp421.c index bb5464a289c..4f7c051e2d7 100644 --- a/drivers/hwmon/tmp421.c +++ b/drivers/hwmon/tmp421.c @@ -39,8 +39,7 @@ static unsigned short normal_i2c[] = { 0x2a, 0x4c, 0x4d, 0x4e, 0x4f, I2C_CLIENT_END }; -/* Insmod parameters */ -I2C_CLIENT_INSMOD_3(tmp421, tmp422, tmp423); +enum chips { tmp421, tmp422, tmp423 }; /* The TMP421 registers */ #define TMP421_CONFIG_REG_1 0x09 @@ -223,7 +222,7 @@ static int tmp421_init_client(struct i2c_client *client) return 0; } -static int tmp421_detect(struct i2c_client *client, int _kind, +static int tmp421_detect(struct i2c_client *client, struct i2c_board_info *info) { enum chips kind; @@ -322,7 +321,7 @@ static struct i2c_driver tmp421_driver = { .remove = tmp421_remove, .id_table = tmp421_id, .detect = tmp421_detect, - .address_data = &addr_data, + .address_list = normal_i2c, }; static int __init tmp421_init(void) diff --git a/drivers/hwmon/w83627ehf.c b/drivers/hwmon/w83627ehf.c index bb5e7874878..0dcaba9b718 100644 --- a/drivers/hwmon/w83627ehf.c +++ b/drivers/hwmon/w83627ehf.c @@ -5,6 +5,7 @@ Copyright (C) 2006 Yuan Mu (Winbond), Rudolf Marek <r.marek@assembler.cz> David Hubbard <david.c.hubbard@gmail.com> + Daniel J Blueman <daniel.blueman@gmail.com> Shamelessly ripped from the w83627hf driver Copyright (C) 2003 Mark Studebaker @@ -177,12 +178,15 @@ static const u16 W83627EHF_REG_TEMP_CONFIG[] = { 0x152, 0x252 }; #define W83627EHF_REG_ALARM3 0x45B /* SmartFan registers */ +#define W83627EHF_REG_FAN_STEPUP_TIME 0x0f +#define W83627EHF_REG_FAN_STEPDOWN_TIME 0x0e + /* DC or PWM output fan configuration */ static const u8 W83627EHF_REG_PWM_ENABLE[] = { 0x04, /* SYS FAN0 output mode and PWM mode */ 0x04, /* CPU FAN0 output mode and PWM mode */ 0x12, /* AUX FAN mode */ - 0x62, /* CPU fan1 mode */ + 0x62, /* CPU FAN1 mode */ }; static const u8 W83627EHF_PWM_MODE_SHIFT[] = { 0, 1, 0, 6 }; @@ -193,10 +197,12 @@ static const u8 W83627EHF_REG_PWM[] = { 0x01, 0x03, 0x11, 0x61 }; static const u8 W83627EHF_REG_TARGET[] = { 0x05, 0x06, 0x13, 0x63 }; static const u8 W83627EHF_REG_TOLERANCE[] = { 0x07, 0x07, 0x14, 0x62 }; - /* Advanced Fan control, some values are common for all fans */ -static const u8 W83627EHF_REG_FAN_MIN_OUTPUT[] = { 0x08, 0x09, 0x15, 0x64 }; -static const u8 W83627EHF_REG_FAN_STOP_TIME[] = { 0x0C, 0x0D, 0x17, 0x66 }; +static const u8 W83627EHF_REG_FAN_START_OUTPUT[] = { 0x0a, 0x0b, 0x16, 0x65 }; +static const u8 W83627EHF_REG_FAN_STOP_OUTPUT[] = { 0x08, 0x09, 0x15, 0x64 }; +static const u8 W83627EHF_REG_FAN_STOP_TIME[] = { 0x0c, 0x0d, 0x17, 0x66 }; +static const u8 W83627EHF_REG_FAN_MAX_OUTPUT[] = { 0xff, 0x67, 0xff, 0x69 }; +static const u8 W83627EHF_REG_FAN_STEP_OUTPUT[] = { 0xff, 0x68, 0xff, 0x6a }; /* * Conversions @@ -295,14 +301,19 @@ struct w83627ehf_data { u8 pwm_mode[4]; /* 0->DC variable voltage, 1->PWM variable duty cycle */ u8 pwm_enable[4]; /* 1->manual - 2->thermal cruise (also called SmartFan I) */ + 2->thermal cruise mode (also called SmartFan I) + 3->fan speed cruise mode + 4->variable thermal cruise (also called SmartFan III) */ u8 pwm_num; /* number of pwm */ u8 pwm[4]; u8 target_temp[4]; u8 tolerance[4]; - u8 fan_min_output[4]; /* minimum fan speed */ - u8 
fan_stop_time[4]; + u8 fan_start_output[4]; /* minimum fan speed when spinning up */ + u8 fan_stop_output[4]; /* minimum fan speed when spinning down */ + u8 fan_stop_time[4]; /* time at minimum before disabling fan */ + u8 fan_max_output[4]; /* maximum fan speed */ + u8 fan_step_output[4]; /* rate of change output value */ u8 vid; u8 vrm; @@ -529,8 +540,10 @@ static struct w83627ehf_data *w83627ehf_update_device(struct device *dev) & 3) + 1; data->pwm[i] = w83627ehf_read_value(data, W83627EHF_REG_PWM[i]); - data->fan_min_output[i] = w83627ehf_read_value(data, - W83627EHF_REG_FAN_MIN_OUTPUT[i]); + data->fan_start_output[i] = w83627ehf_read_value(data, + W83627EHF_REG_FAN_START_OUTPUT[i]); + data->fan_stop_output[i] = w83627ehf_read_value(data, + W83627EHF_REG_FAN_STOP_OUTPUT[i]); data->fan_stop_time[i] = w83627ehf_read_value(data, W83627EHF_REG_FAN_STOP_TIME[i]); data->target_temp[i] = @@ -976,7 +989,7 @@ store_pwm_enable(struct device *dev, struct device_attribute *attr, u32 val = simple_strtoul(buf, NULL, 10); u16 reg; - if (!val || (val > 2)) /* only modes 1 and 2 are supported */ + if (!val || (val > 4)) return -EINVAL; mutex_lock(&data->update_lock); reg = w83627ehf_read_value(data, W83627EHF_REG_PWM_ENABLE[nr]); @@ -1118,7 +1131,10 @@ store_##reg(struct device *dev, struct device_attribute *attr, \ return count; \ } -fan_functions(fan_min_output, FAN_MIN_OUTPUT) +fan_functions(fan_start_output, FAN_START_OUTPUT) +fan_functions(fan_stop_output, FAN_STOP_OUTPUT) +fan_functions(fan_max_output, FAN_MAX_OUTPUT) +fan_functions(fan_step_output, FAN_STEP_OUTPUT) #define fan_time_functions(reg, REG) \ static ssize_t show_##reg(struct device *dev, struct device_attribute *attr, \ @@ -1161,8 +1177,14 @@ static DEVICE_ATTR(name, S_IRUGO, show_name, NULL); static struct sensor_device_attribute sda_sf3_arrays_fan4[] = { SENSOR_ATTR(pwm4_stop_time, S_IWUSR | S_IRUGO, show_fan_stop_time, store_fan_stop_time, 3), - SENSOR_ATTR(pwm4_min_output, S_IWUSR | S_IRUGO, show_fan_min_output, - store_fan_min_output, 3), + SENSOR_ATTR(pwm4_start_output, S_IWUSR | S_IRUGO, show_fan_start_output, + store_fan_start_output, 3), + SENSOR_ATTR(pwm4_stop_output, S_IWUSR | S_IRUGO, show_fan_stop_output, + store_fan_stop_output, 3), + SENSOR_ATTR(pwm4_max_output, S_IWUSR | S_IRUGO, show_fan_max_output, + store_fan_max_output, 3), + SENSOR_ATTR(pwm4_step_output, S_IWUSR | S_IRUGO, show_fan_step_output, + store_fan_step_output, 3), }; static struct sensor_device_attribute sda_sf3_arrays[] = { @@ -1172,12 +1194,24 @@ static struct sensor_device_attribute sda_sf3_arrays[] = { store_fan_stop_time, 1), SENSOR_ATTR(pwm3_stop_time, S_IWUSR | S_IRUGO, show_fan_stop_time, store_fan_stop_time, 2), - SENSOR_ATTR(pwm1_min_output, S_IWUSR | S_IRUGO, show_fan_min_output, - store_fan_min_output, 0), - SENSOR_ATTR(pwm2_min_output, S_IWUSR | S_IRUGO, show_fan_min_output, - store_fan_min_output, 1), - SENSOR_ATTR(pwm3_min_output, S_IWUSR | S_IRUGO, show_fan_min_output, - store_fan_min_output, 2), + SENSOR_ATTR(pwm1_start_output, S_IWUSR | S_IRUGO, show_fan_start_output, + store_fan_start_output, 0), + SENSOR_ATTR(pwm2_start_output, S_IWUSR | S_IRUGO, show_fan_start_output, + store_fan_start_output, 1), + SENSOR_ATTR(pwm3_start_output, S_IWUSR | S_IRUGO, show_fan_start_output, + store_fan_start_output, 2), + SENSOR_ATTR(pwm1_stop_output, S_IWUSR | S_IRUGO, show_fan_stop_output, + store_fan_stop_output, 0), + SENSOR_ATTR(pwm2_stop_output, S_IWUSR | S_IRUGO, show_fan_stop_output, + store_fan_stop_output, 1), + SENSOR_ATTR(pwm3_stop_output, 
S_IWUSR | S_IRUGO, show_fan_stop_output, + store_fan_stop_output, 2), + + /* pwm1 and pwm3 don't support max and step settings */ + SENSOR_ATTR(pwm2_max_output, S_IWUSR | S_IRUGO, show_fan_max_output, + store_fan_max_output, 1), + SENSOR_ATTR(pwm2_step_output, S_IWUSR | S_IRUGO, show_fan_step_output, + store_fan_step_output, 1), }; static ssize_t diff --git a/drivers/hwmon/w83781d.c b/drivers/hwmon/w83781d.c index 7ab7967da0a..05f9225b6f9 100644 --- a/drivers/hwmon/w83781d.c +++ b/drivers/hwmon/w83781d.c @@ -56,9 +56,10 @@ /* Addresses to scan */ static const unsigned short normal_i2c[] = { 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, I2C_CLIENT_END }; -/* Insmod parameters */ -I2C_CLIENT_INSMOD_4(w83781d, w83782d, w83783s, as99127f); +enum chips { w83781d, w83782d, w83783s, as99127f }; + +/* Insmod parameters */ static unsigned short force_subclients[4]; module_param_array(force_subclients, short, NULL, 0); MODULE_PARM_DESC(force_subclients, "List of subclient addresses: " @@ -1051,8 +1052,7 @@ w83781d_create_files(struct device *dev, int kind, int is_isa) /* Return 0 if detection is successful, -ENODEV otherwise */ static int -w83781d_detect(struct i2c_client *client, int kind, - struct i2c_board_info *info) +w83781d_detect(struct i2c_client *client, struct i2c_board_info *info) { int val1, val2; struct w83781d_data *isa = w83781d_data_if_isa(); @@ -1537,7 +1537,7 @@ static struct i2c_driver w83781d_driver = { .remove = w83781d_remove, .id_table = w83781d_ids, .detect = w83781d_detect, - .address_data = &addr_data, + .address_list = normal_i2c, }; /* diff --git a/drivers/hwmon/w83791d.c b/drivers/hwmon/w83791d.c index 0410bf12c52..400a88bde27 100644 --- a/drivers/hwmon/w83791d.c +++ b/drivers/hwmon/w83791d.c @@ -52,7 +52,6 @@ static const unsigned short normal_i2c[] = { 0x2c, 0x2d, 0x2e, 0x2f, I2C_CLIENT_END }; /* Insmod parameters */ -I2C_CLIENT_INSMOD_1(w83791d); static unsigned short force_subclients[4]; module_param_array(force_subclients, short, NULL, 0); @@ -326,7 +325,7 @@ struct w83791d_data { static int w83791d_probe(struct i2c_client *client, const struct i2c_device_id *id); -static int w83791d_detect(struct i2c_client *client, int kind, +static int w83791d_detect(struct i2c_client *client, struct i2c_board_info *info); static int w83791d_remove(struct i2c_client *client); @@ -341,7 +340,7 @@ static void w83791d_print_debug(struct w83791d_data *data, struct device *dev); static void w83791d_init_client(struct i2c_client *client); static const struct i2c_device_id w83791d_id[] = { - { "w83791d", w83791d }, + { "w83791d", 0 }, { } }; MODULE_DEVICE_TABLE(i2c, w83791d_id); @@ -355,7 +354,7 @@ static struct i2c_driver w83791d_driver = { .remove = w83791d_remove, .id_table = w83791d_id, .detect = w83791d_detect, - .address_data = &addr_data, + .address_list = normal_i2c, }; /* following are the sysfs callback functions */ @@ -1259,7 +1258,7 @@ error_sc_0: /* Return 0 if detection is successful, -ENODEV otherwise */ -static int w83791d_detect(struct i2c_client *client, int kind, +static int w83791d_detect(struct i2c_client *client, struct i2c_board_info *info) { struct i2c_adapter *adapter = client->adapter; diff --git a/drivers/hwmon/w83792d.c b/drivers/hwmon/w83792d.c index 38978851333..679718e6b01 100644 --- a/drivers/hwmon/w83792d.c +++ b/drivers/hwmon/w83792d.c @@ -50,7 +50,6 @@ static const unsigned short normal_i2c[] = { 0x2c, 0x2d, 0x2e, 0x2f, I2C_CLIENT_END }; /* Insmod parameters */ -I2C_CLIENT_INSMOD_1(w83792d); static unsigned short force_subclients[4]; 
module_param_array(force_subclients, short, NULL, 0); @@ -302,7 +301,7 @@ struct w83792d_data { static int w83792d_probe(struct i2c_client *client, const struct i2c_device_id *id); -static int w83792d_detect(struct i2c_client *client, int kind, +static int w83792d_detect(struct i2c_client *client, struct i2c_board_info *info); static int w83792d_remove(struct i2c_client *client); static struct w83792d_data *w83792d_update_device(struct device *dev); @@ -314,7 +313,7 @@ static void w83792d_print_debug(struct w83792d_data *data, struct device *dev); static void w83792d_init_client(struct i2c_client *client); static const struct i2c_device_id w83792d_id[] = { - { "w83792d", w83792d }, + { "w83792d", 0 }, { } }; MODULE_DEVICE_TABLE(i2c, w83792d_id); @@ -328,7 +327,7 @@ static struct i2c_driver w83792d_driver = { .remove = w83792d_remove, .id_table = w83792d_id, .detect = w83792d_detect, - .address_data = &addr_data, + .address_list = normal_i2c, }; static inline long in_count_from_reg(int nr, struct w83792d_data *data) @@ -1263,7 +1262,7 @@ static const struct attribute_group w83792d_group = { /* Return 0 if detection is successful, -ENODEV otherwise */ static int -w83792d_detect(struct i2c_client *client, int kind, struct i2c_board_info *info) +w83792d_detect(struct i2c_client *client, struct i2c_board_info *info) { struct i2c_adapter *adapter = client->adapter; int val1, val2; diff --git a/drivers/hwmon/w83793.c b/drivers/hwmon/w83793.c index 80a2191bf12..9a2022b6749 100644 --- a/drivers/hwmon/w83793.c +++ b/drivers/hwmon/w83793.c @@ -41,7 +41,6 @@ static const unsigned short normal_i2c[] = { 0x2c, 0x2d, 0x2e, 0x2f, I2C_CLIENT_END }; /* Insmod parameters */ -I2C_CLIENT_INSMOD_1(w83793); static unsigned short force_subclients[4]; module_param_array(force_subclients, short, NULL, 0); @@ -230,7 +229,7 @@ static u8 w83793_read_value(struct i2c_client *client, u16 reg); static int w83793_write_value(struct i2c_client *client, u16 reg, u8 value); static int w83793_probe(struct i2c_client *client, const struct i2c_device_id *id); -static int w83793_detect(struct i2c_client *client, int kind, +static int w83793_detect(struct i2c_client *client, struct i2c_board_info *info); static int w83793_remove(struct i2c_client *client); static void w83793_init_client(struct i2c_client *client); @@ -238,7 +237,7 @@ static void w83793_update_nonvolatile(struct device *dev); static struct w83793_data *w83793_update_device(struct device *dev); static const struct i2c_device_id w83793_id[] = { - { "w83793", w83793 }, + { "w83793", 0 }, { } }; MODULE_DEVICE_TABLE(i2c, w83793_id); @@ -252,7 +251,7 @@ static struct i2c_driver w83793_driver = { .remove = w83793_remove, .id_table = w83793_id, .detect = w83793_detect, - .address_data = &addr_data, + .address_list = normal_i2c, }; static ssize_t @@ -1161,7 +1160,7 @@ ERROR_SC_0: } /* Return 0 if detection is successful, -ENODEV otherwise */ -static int w83793_detect(struct i2c_client *client, int kind, +static int w83793_detect(struct i2c_client *client, struct i2c_board_info *info) { u8 tmp, bank, chip_id; diff --git a/drivers/hwmon/w83l785ts.c b/drivers/hwmon/w83l785ts.c index 9b6c4c10fba..20781def65e 100644 --- a/drivers/hwmon/w83l785ts.c +++ b/drivers/hwmon/w83l785ts.c @@ -52,12 +52,6 @@ static const unsigned short normal_i2c[] = { 0x2e, I2C_CLIENT_END }; /* - * Insmod parameters - */ - -I2C_CLIENT_INSMOD_1(w83l785ts); - -/* * The W83L785TS-S registers * Manufacturer ID is 0x5CA3 for Winbond. 
*/ @@ -83,7 +77,7 @@ I2C_CLIENT_INSMOD_1(w83l785ts); static int w83l785ts_probe(struct i2c_client *client, const struct i2c_device_id *id); -static int w83l785ts_detect(struct i2c_client *client, int kind, +static int w83l785ts_detect(struct i2c_client *client, struct i2c_board_info *info); static int w83l785ts_remove(struct i2c_client *client); static u8 w83l785ts_read_value(struct i2c_client *client, u8 reg, u8 defval); @@ -94,7 +88,7 @@ static struct w83l785ts_data *w83l785ts_update_device(struct device *dev); */ static const struct i2c_device_id w83l785ts_id[] = { - { "w83l785ts", w83l785ts }, + { "w83l785ts", 0 }, { } }; MODULE_DEVICE_TABLE(i2c, w83l785ts_id); @@ -108,7 +102,7 @@ static struct i2c_driver w83l785ts_driver = { .remove = w83l785ts_remove, .id_table = w83l785ts_id, .detect = w83l785ts_detect, - .address_data = &addr_data, + .address_list = normal_i2c, }; /* @@ -146,7 +140,7 @@ static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO, show_temp, NULL, 1); */ /* Return 0 if detection is successful, -ENODEV otherwise */ -static int w83l785ts_detect(struct i2c_client *client, int kind, +static int w83l785ts_detect(struct i2c_client *client, struct i2c_board_info *info) { struct i2c_adapter *adapter = client->adapter; diff --git a/drivers/hwmon/w83l786ng.c b/drivers/hwmon/w83l786ng.c index 27da7d2b15f..0254e181893 100644 --- a/drivers/hwmon/w83l786ng.c +++ b/drivers/hwmon/w83l786ng.c @@ -38,7 +38,6 @@ static const unsigned short normal_i2c[] = { 0x2e, 0x2f, I2C_CLIENT_END }; /* Insmod parameters */ -I2C_CLIENT_INSMOD_1(w83l786ng); static int reset; module_param(reset, bool, 0); @@ -147,14 +146,14 @@ struct w83l786ng_data { static int w83l786ng_probe(struct i2c_client *client, const struct i2c_device_id *id); -static int w83l786ng_detect(struct i2c_client *client, int kind, +static int w83l786ng_detect(struct i2c_client *client, struct i2c_board_info *info); static int w83l786ng_remove(struct i2c_client *client); static void w83l786ng_init_client(struct i2c_client *client); static struct w83l786ng_data *w83l786ng_update_device(struct device *dev); static const struct i2c_device_id w83l786ng_id[] = { - { "w83l786ng", w83l786ng }, + { "w83l786ng", 0 }, { } }; MODULE_DEVICE_TABLE(i2c, w83l786ng_id); @@ -168,7 +167,7 @@ static struct i2c_driver w83l786ng_driver = { .remove = w83l786ng_remove, .id_table = w83l786ng_id, .detect = w83l786ng_detect, - .address_data = &addr_data, + .address_list = normal_i2c, }; static u8 @@ -586,8 +585,7 @@ static const struct attribute_group w83l786ng_group = { }; static int -w83l786ng_detect(struct i2c_client *client, int kind, - struct i2c_board_info *info) +w83l786ng_detect(struct i2c_client *client, struct i2c_board_info *info) { struct i2c_adapter *adapter = client->adapter; u16 man_id; diff --git a/drivers/i2c/busses/i2c-pxa.c b/drivers/i2c/busses/i2c-pxa.c index 049555777f6..7647a20523a 100644 --- a/drivers/i2c/busses/i2c-pxa.c +++ b/drivers/i2c/busses/i2c-pxa.c @@ -1155,7 +1155,7 @@ static int i2c_pxa_resume_noirq(struct device *dev) return 0; } -static struct dev_pm_ops i2c_pxa_dev_pm_ops = { +static const struct dev_pm_ops i2c_pxa_dev_pm_ops = { .suspend_noirq = i2c_pxa_suspend_noirq, .resume_noirq = i2c_pxa_resume_noirq, }; diff --git a/drivers/i2c/busses/i2c-s3c2410.c b/drivers/i2c/busses/i2c-s3c2410.c index 96aafb91b69..1d8c98613fa 100644 --- a/drivers/i2c/busses/i2c-s3c2410.c +++ b/drivers/i2c/busses/i2c-s3c2410.c @@ -967,7 +967,7 @@ static int s3c24xx_i2c_resume(struct device *dev) return 0; } -static struct dev_pm_ops s3c24xx_i2c_dev_pm_ops = { 
+static const struct dev_pm_ops s3c24xx_i2c_dev_pm_ops = { .suspend_noirq = s3c24xx_i2c_suspend_noirq, .resume = s3c24xx_i2c_resume, }; diff --git a/drivers/i2c/busses/i2c-sh_mobile.c b/drivers/i2c/busses/i2c-sh_mobile.c index 86a9d4e8147..ccc46418ef7 100644 --- a/drivers/i2c/busses/i2c-sh_mobile.c +++ b/drivers/i2c/busses/i2c-sh_mobile.c @@ -647,7 +647,7 @@ static int sh_mobile_i2c_runtime_nop(struct device *dev) return 0; } -static struct dev_pm_ops sh_mobile_i2c_dev_pm_ops = { +static const struct dev_pm_ops sh_mobile_i2c_dev_pm_ops = { .runtime_suspend = sh_mobile_i2c_runtime_nop, .runtime_resume = sh_mobile_i2c_runtime_nop, }; diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c index 4f34823e86b..0ac2f90ab84 100644 --- a/drivers/i2c/i2c-core.c +++ b/drivers/i2c/i2c-core.c @@ -155,6 +155,35 @@ static void i2c_device_shutdown(struct device *dev) driver->shutdown(client); } +#ifdef CONFIG_SUSPEND +static int i2c_device_pm_suspend(struct device *dev) +{ + const struct dev_pm_ops *pm; + + if (!dev->driver) + return 0; + pm = dev->driver->pm; + if (!pm || !pm->suspend) + return 0; + return pm->suspend(dev); +} + +static int i2c_device_pm_resume(struct device *dev) +{ + const struct dev_pm_ops *pm; + + if (!dev->driver) + return 0; + pm = dev->driver->pm; + if (!pm || !pm->resume) + return 0; + return pm->resume(dev); +} +#else +#define i2c_device_pm_suspend NULL +#define i2c_device_pm_resume NULL +#endif + static int i2c_device_suspend(struct device *dev, pm_message_t mesg) { struct i2c_client *client = i2c_verify_client(dev); @@ -219,6 +248,11 @@ static const struct attribute_group *i2c_dev_attr_groups[] = { NULL }; +const static struct dev_pm_ops i2c_device_pm_ops = { + .suspend = i2c_device_pm_suspend, + .resume = i2c_device_pm_resume, +}; + struct bus_type i2c_bus_type = { .name = "i2c", .match = i2c_device_match, @@ -227,6 +261,7 @@ struct bus_type i2c_bus_type = { .shutdown = i2c_device_shutdown, .suspend = i2c_device_suspend, .resume = i2c_device_resume, + .pm = &i2c_device_pm_ops, }; EXPORT_SYMBOL_GPL(i2c_bus_type); @@ -1184,7 +1219,7 @@ static int i2c_detect_address(struct i2c_client *temp_client, /* Finally call the custom detection function */ memset(&info, 0, sizeof(struct i2c_board_info)); info.addr = addr; - err = driver->detect(temp_client, -1, &info); + err = driver->detect(temp_client, &info); if (err) { /* -ENODEV is returned if the detection fails. We catch it here as this isn't an error. 
*/ @@ -1214,13 +1249,13 @@ static int i2c_detect_address(struct i2c_client *temp_client, static int i2c_detect(struct i2c_adapter *adapter, struct i2c_driver *driver) { - const struct i2c_client_address_data *address_data; + const unsigned short *address_list; struct i2c_client *temp_client; int i, err = 0; int adap_id = i2c_adapter_id(adapter); - address_data = driver->address_data; - if (!driver->detect || !address_data) + address_list = driver->address_list; + if (!driver->detect || !address_list) return 0; /* Set up a temporary client to help detect callback */ @@ -1235,7 +1270,7 @@ static int i2c_detect(struct i2c_adapter *adapter, struct i2c_driver *driver) /* Stop here if we can't use SMBUS_QUICK */ if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_QUICK)) { - if (address_data->normal_i2c[0] == I2C_CLIENT_END) + if (address_list[0] == I2C_CLIENT_END) goto exit_free; dev_warn(&adapter->dev, "SMBus Quick command not supported, " @@ -1244,11 +1279,10 @@ static int i2c_detect(struct i2c_adapter *adapter, struct i2c_driver *driver) goto exit_free; } - for (i = 0; address_data->normal_i2c[i] != I2C_CLIENT_END; i += 1) { + for (i = 0; address_list[i] != I2C_CLIENT_END; i += 1) { dev_dbg(&adapter->dev, "found normal entry for adapter %d, " - "addr 0x%02x\n", adap_id, - address_data->normal_i2c[i]); - temp_client->addr = address_data->normal_i2c[i]; + "addr 0x%02x\n", adap_id, address_list[i]); + temp_client->addr = address_list[i]; err = i2c_detect_address(temp_client, driver); if (err) goto exit_free; diff --git a/drivers/input/keyboard/adp5588-keys.c b/drivers/input/keyboard/adp5588-keys.c index d48c808d592..1edb596d927 100644 --- a/drivers/input/keyboard/adp5588-keys.c +++ b/drivers/input/keyboard/adp5588-keys.c @@ -319,7 +319,7 @@ static int adp5588_resume(struct device *dev) return 0; } -static struct dev_pm_ops adp5588_dev_pm_ops = { +static const struct dev_pm_ops adp5588_dev_pm_ops = { .suspend = adp5588_suspend, .resume = adp5588_resume, }; diff --git a/drivers/input/keyboard/sh_keysc.c b/drivers/input/keyboard/sh_keysc.c index 076111fc72d..8e9380bfed4 100644 --- a/drivers/input/keyboard/sh_keysc.c +++ b/drivers/input/keyboard/sh_keysc.c @@ -295,7 +295,7 @@ static int sh_keysc_resume(struct device *dev) return 0; } -static struct dev_pm_ops sh_keysc_dev_pm_ops = { +static const struct dev_pm_ops sh_keysc_dev_pm_ops = { .suspend = sh_keysc_suspend, .resume = sh_keysc_resume, }; diff --git a/drivers/input/misc/bfin_rotary.c b/drivers/input/misc/bfin_rotary.c index 690f3fafa03..61d10177fa8 100644 --- a/drivers/input/misc/bfin_rotary.c +++ b/drivers/input/misc/bfin_rotary.c @@ -247,7 +247,7 @@ static int bfin_rotary_resume(struct device *dev) return 0; } -static struct dev_pm_ops bfin_rotary_pm_ops = { +static const struct dev_pm_ops bfin_rotary_pm_ops = { .suspend = bfin_rotary_suspend, .resume = bfin_rotary_resume, }; diff --git a/drivers/input/misc/pcspkr.c b/drivers/input/misc/pcspkr.c index 21cb755a54f..ea4e1fd1265 100644 --- a/drivers/input/misc/pcspkr.c +++ b/drivers/input/misc/pcspkr.c @@ -127,7 +127,7 @@ static void pcspkr_shutdown(struct platform_device *dev) pcspkr_event(NULL, EV_SND, SND_BELL, 0); } -static struct dev_pm_ops pcspkr_pm_ops = { +static const struct dev_pm_ops pcspkr_pm_ops = { .suspend = pcspkr_suspend, }; diff --git a/drivers/input/touchscreen/pcap_ts.c b/drivers/input/touchscreen/pcap_ts.c index 67fcd33595d..b79097e3028 100644 --- a/drivers/input/touchscreen/pcap_ts.c +++ b/drivers/input/touchscreen/pcap_ts.c @@ -233,7 +233,7 @@ static int 
pcap_ts_resume(struct device *dev) return 0; } -static struct dev_pm_ops pcap_ts_pm_ops = { +static const struct dev_pm_ops pcap_ts_pm_ops = { .suspend = pcap_ts_suspend, .resume = pcap_ts_resume, }; diff --git a/drivers/leds/led-class.c b/drivers/leds/led-class.c index f2cc13d7681..782f95822ea 100644 --- a/drivers/leds/led-class.c +++ b/drivers/leds/led-class.c @@ -50,7 +50,7 @@ static ssize_t led_brightness_store(struct device *dev, unsigned long state = simple_strtoul(buf, &after, 10); size_t count = after - buf; - if (*after && isspace(*after)) + if (isspace(*after)) count++; if (count == size) { diff --git a/drivers/leds/ledtrig-timer.c b/drivers/leds/ledtrig-timer.c index 3b83406de75..38b3378be44 100644 --- a/drivers/leds/ledtrig-timer.c +++ b/drivers/leds/ledtrig-timer.c @@ -83,7 +83,7 @@ static ssize_t led_delay_on_store(struct device *dev, unsigned long state = simple_strtoul(buf, &after, 10); size_t count = after - buf; - if (*after && isspace(*after)) + if (isspace(*after)) count++; if (count == size) { @@ -127,7 +127,7 @@ static ssize_t led_delay_off_store(struct device *dev, unsigned long state = simple_strtoul(buf, &after, 10); size_t count = after - buf; - if (*after && isspace(*after)) + if (isspace(*after)) count++; if (count == size) { diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c index e412980763b..a93637223c8 100644 --- a/drivers/md/dm-crypt.c +++ b/drivers/md/dm-crypt.c @@ -1,7 +1,7 @@ /* * Copyright (C) 2003 Christophe Saout <christophe@saout.de> * Copyright (C) 2004 Clemens Fruhwirth <clemens@endorphin.org> - * Copyright (C) 2006-2008 Red Hat, Inc. All rights reserved. + * Copyright (C) 2006-2009 Red Hat, Inc. All rights reserved. * * This file is released under the GPL. */ @@ -71,10 +71,21 @@ struct crypt_iv_operations { int (*ctr)(struct crypt_config *cc, struct dm_target *ti, const char *opts); void (*dtr)(struct crypt_config *cc); - const char *(*status)(struct crypt_config *cc); + int (*init)(struct crypt_config *cc); + int (*wipe)(struct crypt_config *cc); int (*generator)(struct crypt_config *cc, u8 *iv, sector_t sector); }; +struct iv_essiv_private { + struct crypto_cipher *tfm; + struct crypto_hash *hash_tfm; + u8 *salt; +}; + +struct iv_benbi_private { + int shift; +}; + /* * Crypt: maps a linear range of a block device * and encrypts / decrypts at the same time. @@ -102,8 +113,8 @@ struct crypt_config { struct crypt_iv_operations *iv_gen_ops; char *iv_mode; union { - struct crypto_cipher *essiv_tfm; - int benbi_shift; + struct iv_essiv_private essiv; + struct iv_benbi_private benbi; } iv_gen_private; sector_t iv_offset; unsigned int iv_size; @@ -147,6 +158,9 @@ static void kcryptd_queue_crypt(struct dm_crypt_io *io); * plain: the initial vector is the 32-bit little-endian version of the sector * number, padded with zeros if necessary. * + * plain64: the initial vector is the 64-bit little-endian version of the sector + * number, padded with zeros if necessary. + * * essiv: "encrypted sector|salt initial vector", the sector number is * encrypted with the bulk cipher using a salt as key. The salt * should be derived from the bulk cipher's key via hashing. 
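As a side note on the IV schemes described in the comment above, here is a minimal standalone sketch (illustrative only; the helper names, the 16-byte IV size and the encrypt_block callback are assumptions, not the driver's code) of how plain64 and essiv derive an IV from the sector number: plain64 stores the 64-bit little-endian sector number zero-padded to the IV size, and essiv then encrypts that value with a block cipher keyed by a hash of the volume key.

    #include <stdint.h>
    #include <string.h>

    #define IV_SIZE 16                    /* assumed; the real size comes from the cipher */

    /* plain64: IV = zero-padded 64-bit little-endian sector number */
    static void plain64_iv(uint8_t iv[IV_SIZE], uint64_t sector)
    {
            memset(iv, 0, IV_SIZE);
            for (int i = 0; i < 8; i++)   /* store little-endian regardless of host order */
                    iv[i] = (uint8_t)(sector >> (8 * i));
    }

    /* essiv: IV = E_k(sector) where k = H(volume key); encrypt_block stands in
     * for a block cipher already keyed with that hashed salt */
    static void essiv_iv(uint8_t iv[IV_SIZE], uint64_t sector,
                         void (*encrypt_block)(uint8_t *dst, const uint8_t *src))
    {
            plain64_iv(iv, sector);
            encrypt_block(iv, iv);        /* encrypt the sector-number block in place */
    }
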
@@ -169,88 +183,123 @@ static int crypt_iv_plain_gen(struct crypt_config *cc, u8 *iv, sector_t sector) return 0; } -static int crypt_iv_essiv_ctr(struct crypt_config *cc, struct dm_target *ti, - const char *opts) +static int crypt_iv_plain64_gen(struct crypt_config *cc, u8 *iv, + sector_t sector) { - struct crypto_cipher *essiv_tfm; - struct crypto_hash *hash_tfm; + memset(iv, 0, cc->iv_size); + *(u64 *)iv = cpu_to_le64(sector); + + return 0; +} + +/* Initialise ESSIV - compute salt but no local memory allocations */ +static int crypt_iv_essiv_init(struct crypt_config *cc) +{ + struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv; struct hash_desc desc; struct scatterlist sg; - unsigned int saltsize; - u8 *salt; int err; - if (opts == NULL) { + sg_init_one(&sg, cc->key, cc->key_size); + desc.tfm = essiv->hash_tfm; + desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP; + + err = crypto_hash_digest(&desc, &sg, cc->key_size, essiv->salt); + if (err) + return err; + + return crypto_cipher_setkey(essiv->tfm, essiv->salt, + crypto_hash_digestsize(essiv->hash_tfm)); +} + +/* Wipe salt and reset key derived from volume key */ +static int crypt_iv_essiv_wipe(struct crypt_config *cc) +{ + struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv; + unsigned salt_size = crypto_hash_digestsize(essiv->hash_tfm); + + memset(essiv->salt, 0, salt_size); + + return crypto_cipher_setkey(essiv->tfm, essiv->salt, salt_size); +} + +static void crypt_iv_essiv_dtr(struct crypt_config *cc) +{ + struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv; + + crypto_free_cipher(essiv->tfm); + essiv->tfm = NULL; + + crypto_free_hash(essiv->hash_tfm); + essiv->hash_tfm = NULL; + + kzfree(essiv->salt); + essiv->salt = NULL; +} + +static int crypt_iv_essiv_ctr(struct crypt_config *cc, struct dm_target *ti, + const char *opts) +{ + struct crypto_cipher *essiv_tfm = NULL; + struct crypto_hash *hash_tfm = NULL; + u8 *salt = NULL; + int err; + + if (!opts) { ti->error = "Digest algorithm missing for ESSIV mode"; return -EINVAL; } - /* Hash the cipher key with the given hash algorithm */ + /* Allocate hash algorithm */ hash_tfm = crypto_alloc_hash(opts, 0, CRYPTO_ALG_ASYNC); if (IS_ERR(hash_tfm)) { ti->error = "Error initializing ESSIV hash"; - return PTR_ERR(hash_tfm); + err = PTR_ERR(hash_tfm); + goto bad; } - saltsize = crypto_hash_digestsize(hash_tfm); - salt = kmalloc(saltsize, GFP_KERNEL); - if (salt == NULL) { + salt = kzalloc(crypto_hash_digestsize(hash_tfm), GFP_KERNEL); + if (!salt) { ti->error = "Error kmallocing salt storage in ESSIV"; - crypto_free_hash(hash_tfm); - return -ENOMEM; + err = -ENOMEM; + goto bad; } - sg_init_one(&sg, cc->key, cc->key_size); - desc.tfm = hash_tfm; - desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP; - err = crypto_hash_digest(&desc, &sg, cc->key_size, salt); - crypto_free_hash(hash_tfm); - - if (err) { - ti->error = "Error calculating hash in ESSIV"; - kfree(salt); - return err; - } - - /* Setup the essiv_tfm with the given salt */ + /* Allocate essiv_tfm */ essiv_tfm = crypto_alloc_cipher(cc->cipher, 0, CRYPTO_ALG_ASYNC); if (IS_ERR(essiv_tfm)) { ti->error = "Error allocating crypto tfm for ESSIV"; - kfree(salt); - return PTR_ERR(essiv_tfm); + err = PTR_ERR(essiv_tfm); + goto bad; } if (crypto_cipher_blocksize(essiv_tfm) != crypto_ablkcipher_ivsize(cc->tfm)) { ti->error = "Block size of ESSIV cipher does " "not match IV size of block cipher"; - crypto_free_cipher(essiv_tfm); - kfree(salt); - return -EINVAL; + err = -EINVAL; + goto bad; } - err = crypto_cipher_setkey(essiv_tfm, salt, saltsize); - if 
(err) { - ti->error = "Failed to set key for ESSIV cipher"; - crypto_free_cipher(essiv_tfm); - kfree(salt); - return err; - } - kfree(salt); - cc->iv_gen_private.essiv_tfm = essiv_tfm; + cc->iv_gen_private.essiv.salt = salt; + cc->iv_gen_private.essiv.tfm = essiv_tfm; + cc->iv_gen_private.essiv.hash_tfm = hash_tfm; + return 0; -} -static void crypt_iv_essiv_dtr(struct crypt_config *cc) -{ - crypto_free_cipher(cc->iv_gen_private.essiv_tfm); - cc->iv_gen_private.essiv_tfm = NULL; +bad: + if (essiv_tfm && !IS_ERR(essiv_tfm)) + crypto_free_cipher(essiv_tfm); + if (hash_tfm && !IS_ERR(hash_tfm)) + crypto_free_hash(hash_tfm); + kfree(salt); + return err; } static int crypt_iv_essiv_gen(struct crypt_config *cc, u8 *iv, sector_t sector) { memset(iv, 0, cc->iv_size); *(u64 *)iv = cpu_to_le64(sector); - crypto_cipher_encrypt_one(cc->iv_gen_private.essiv_tfm, iv, iv); + crypto_cipher_encrypt_one(cc->iv_gen_private.essiv.tfm, iv, iv); return 0; } @@ -273,7 +322,7 @@ static int crypt_iv_benbi_ctr(struct crypt_config *cc, struct dm_target *ti, return -EINVAL; } - cc->iv_gen_private.benbi_shift = 9 - log; + cc->iv_gen_private.benbi.shift = 9 - log; return 0; } @@ -288,7 +337,7 @@ static int crypt_iv_benbi_gen(struct crypt_config *cc, u8 *iv, sector_t sector) memset(iv, 0, cc->iv_size - sizeof(u64)); /* rest is cleared below */ - val = cpu_to_be64(((u64)sector << cc->iv_gen_private.benbi_shift) + 1); + val = cpu_to_be64(((u64)sector << cc->iv_gen_private.benbi.shift) + 1); put_unaligned(val, (__be64 *)(iv + cc->iv_size - sizeof(u64))); return 0; @@ -305,9 +354,15 @@ static struct crypt_iv_operations crypt_iv_plain_ops = { .generator = crypt_iv_plain_gen }; +static struct crypt_iv_operations crypt_iv_plain64_ops = { + .generator = crypt_iv_plain64_gen +}; + static struct crypt_iv_operations crypt_iv_essiv_ops = { .ctr = crypt_iv_essiv_ctr, .dtr = crypt_iv_essiv_dtr, + .init = crypt_iv_essiv_init, + .wipe = crypt_iv_essiv_wipe, .generator = crypt_iv_essiv_gen }; @@ -934,14 +989,14 @@ static int crypt_set_key(struct crypt_config *cc, char *key) set_bit(DM_CRYPT_KEY_VALID, &cc->flags); - return 0; + return crypto_ablkcipher_setkey(cc->tfm, cc->key, cc->key_size); } static int crypt_wipe_key(struct crypt_config *cc) { clear_bit(DM_CRYPT_KEY_VALID, &cc->flags); memset(&cc->key, 0, cc->key_size * sizeof(u8)); - return 0; + return crypto_ablkcipher_setkey(cc->tfm, cc->key, cc->key_size); } /* @@ -983,11 +1038,6 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv) return -ENOMEM; } - if (crypt_set_key(cc, argv[1])) { - ti->error = "Error decoding key"; - goto bad_cipher; - } - /* Compatibility mode for old dm-crypt cipher strings */ if (!chainmode || (strcmp(chainmode, "plain") == 0 && !ivmode)) { chainmode = "cbc"; @@ -1015,6 +1065,11 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv) strcpy(cc->chainmode, chainmode); cc->tfm = tfm; + if (crypt_set_key(cc, argv[1]) < 0) { + ti->error = "Error decoding and setting key"; + goto bad_ivmode; + } + /* * Choose ivmode. Valid modes: "plain", "essiv:<esshash>", "benbi". 
* See comments at iv code @@ -1024,6 +1079,8 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv) cc->iv_gen_ops = NULL; else if (strcmp(ivmode, "plain") == 0) cc->iv_gen_ops = &crypt_iv_plain_ops; + else if (strcmp(ivmode, "plain64") == 0) + cc->iv_gen_ops = &crypt_iv_plain64_ops; else if (strcmp(ivmode, "essiv") == 0) cc->iv_gen_ops = &crypt_iv_essiv_ops; else if (strcmp(ivmode, "benbi") == 0) @@ -1039,6 +1096,12 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv) cc->iv_gen_ops->ctr(cc, ti, ivopts) < 0) goto bad_ivmode; + if (cc->iv_gen_ops && cc->iv_gen_ops->init && + cc->iv_gen_ops->init(cc) < 0) { + ti->error = "Error initialising IV"; + goto bad_slab_pool; + } + cc->iv_size = crypto_ablkcipher_ivsize(tfm); if (cc->iv_size) /* at least a 64 bit sector number should fit in our buffer */ @@ -1085,11 +1148,6 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv) goto bad_bs; } - if (crypto_ablkcipher_setkey(tfm, cc->key, key_size) < 0) { - ti->error = "Error setting key"; - goto bad_device; - } - if (sscanf(argv[2], "%llu", &tmpll) != 1) { ti->error = "Invalid iv_offset sector"; goto bad_device; @@ -1278,6 +1336,7 @@ static void crypt_resume(struct dm_target *ti) static int crypt_message(struct dm_target *ti, unsigned argc, char **argv) { struct crypt_config *cc = ti->private; + int ret = -EINVAL; if (argc < 2) goto error; @@ -1287,10 +1346,22 @@ static int crypt_message(struct dm_target *ti, unsigned argc, char **argv) DMWARN("not suspended during key manipulation."); return -EINVAL; } - if (argc == 3 && !strnicmp(argv[1], MESG_STR("set"))) - return crypt_set_key(cc, argv[2]); - if (argc == 2 && !strnicmp(argv[1], MESG_STR("wipe"))) + if (argc == 3 && !strnicmp(argv[1], MESG_STR("set"))) { + ret = crypt_set_key(cc, argv[2]); + if (ret) + return ret; + if (cc->iv_gen_ops && cc->iv_gen_ops->init) + ret = cc->iv_gen_ops->init(cc); + return ret; + } + if (argc == 2 && !strnicmp(argv[1], MESG_STR("wipe"))) { + if (cc->iv_gen_ops && cc->iv_gen_ops->wipe) { + ret = cc->iv_gen_ops->wipe(cc); + if (ret) + return ret; + } return crypt_wipe_key(cc); + } } error: diff --git a/drivers/md/dm-exception-store.c b/drivers/md/dm-exception-store.c index 7dbe652efb5..2b7907b6dd0 100644 --- a/drivers/md/dm-exception-store.c +++ b/drivers/md/dm-exception-store.c @@ -172,7 +172,8 @@ int dm_exception_store_set_chunk_size(struct dm_exception_store *store, } /* Validate the chunk size against the device block size */ - if (chunk_size % (bdev_logical_block_size(store->cow->bdev) >> 9)) { + if (chunk_size % + (bdev_logical_block_size(dm_snap_cow(store->snap)->bdev) >> 9)) { *error = "Chunk size is not a multiple of device blocksize"; return -EINVAL; } @@ -190,6 +191,7 @@ int dm_exception_store_set_chunk_size(struct dm_exception_store *store, } int dm_exception_store_create(struct dm_target *ti, int argc, char **argv, + struct dm_snapshot *snap, unsigned *args_used, struct dm_exception_store **store) { @@ -198,7 +200,7 @@ int dm_exception_store_create(struct dm_target *ti, int argc, char **argv, struct dm_exception_store *tmp_store; char persistent; - if (argc < 3) { + if (argc < 2) { ti->error = "Insufficient exception store arguments"; return -EINVAL; } @@ -209,14 +211,15 @@ int dm_exception_store_create(struct dm_target *ti, int argc, char **argv, return -ENOMEM; } - persistent = toupper(*argv[1]); + persistent = toupper(*argv[0]); if (persistent == 'P') type = get_type("P"); else if (persistent == 'N') type = get_type("N"); else { 
ti->error = "Persistent flag is not P or N"; - return -EINVAL; + r = -EINVAL; + goto bad_type; } if (!type) { @@ -226,32 +229,23 @@ int dm_exception_store_create(struct dm_target *ti, int argc, char **argv, } tmp_store->type = type; - tmp_store->ti = ti; - - r = dm_get_device(ti, argv[0], 0, 0, - FMODE_READ | FMODE_WRITE, &tmp_store->cow); - if (r) { - ti->error = "Cannot get COW device"; - goto bad_cow; - } + tmp_store->snap = snap; - r = set_chunk_size(tmp_store, argv[2], &ti->error); + r = set_chunk_size(tmp_store, argv[1], &ti->error); if (r) - goto bad_ctr; + goto bad; r = type->ctr(tmp_store, 0, NULL); if (r) { ti->error = "Exception store type constructor failed"; - goto bad_ctr; + goto bad; } - *args_used = 3; + *args_used = 2; *store = tmp_store; return 0; -bad_ctr: - dm_put_device(ti, tmp_store->cow); -bad_cow: +bad: put_type(type); bad_type: kfree(tmp_store); @@ -262,7 +256,6 @@ EXPORT_SYMBOL(dm_exception_store_create); void dm_exception_store_destroy(struct dm_exception_store *store) { store->type->dtr(store); - dm_put_device(store->ti, store->cow); put_type(store->type); kfree(store); } diff --git a/drivers/md/dm-exception-store.h b/drivers/md/dm-exception-store.h index 8a223a48802..e8dfa06af3b 100644 --- a/drivers/md/dm-exception-store.h +++ b/drivers/md/dm-exception-store.h @@ -26,7 +26,7 @@ typedef sector_t chunk_t; * of chunks that follow contiguously. Remaining bits hold the number of the * chunk within the device. */ -struct dm_snap_exception { +struct dm_exception { struct list_head hash_list; chunk_t old_chunk; @@ -64,17 +64,34 @@ struct dm_exception_store_type { * Find somewhere to store the next exception. */ int (*prepare_exception) (struct dm_exception_store *store, - struct dm_snap_exception *e); + struct dm_exception *e); /* * Update the metadata with this exception. */ void (*commit_exception) (struct dm_exception_store *store, - struct dm_snap_exception *e, + struct dm_exception *e, void (*callback) (void *, int success), void *callback_context); /* + * Returns 0 if the exception store is empty. + * + * If there are exceptions still to be merged, sets + * *last_old_chunk and *last_new_chunk to the most recent + * still-to-be-merged chunk and returns the number of + * consecutive previous ones. + */ + int (*prepare_merge) (struct dm_exception_store *store, + chunk_t *last_old_chunk, chunk_t *last_new_chunk); + + /* + * Clear the last n exceptions. + * nr_merged must be <= the value returned by prepare_merge. + */ + int (*commit_merge) (struct dm_exception_store *store, int nr_merged); + + /* * The snapshot is invalid, note this in the metadata. */ void (*drop_snapshot) (struct dm_exception_store *store); @@ -86,19 +103,19 @@ struct dm_exception_store_type { /* * Return how full the snapshot is. */ - void (*fraction_full) (struct dm_exception_store *store, - sector_t *numerator, - sector_t *denominator); + void (*usage) (struct dm_exception_store *store, + sector_t *total_sectors, sector_t *sectors_allocated, + sector_t *metadata_sectors); /* For internal device-mapper use only. */ struct list_head list; }; +struct dm_snapshot; + struct dm_exception_store { struct dm_exception_store_type *type; - struct dm_target *ti; - - struct dm_dev *cow; + struct dm_snapshot *snap; /* Size of data blocks saved - must be a power of 2 */ unsigned chunk_size; @@ -109,6 +126,11 @@ struct dm_exception_store { }; /* + * Obtain the cow device used by a given snapshot. 
+ */ +struct dm_dev *dm_snap_cow(struct dm_snapshot *snap); + +/* * Funtions to manipulate consecutive chunks */ # if defined(CONFIG_LBDAF) || (BITS_PER_LONG == 64) @@ -120,18 +142,25 @@ static inline chunk_t dm_chunk_number(chunk_t chunk) return chunk & (chunk_t)((1ULL << DM_CHUNK_NUMBER_BITS) - 1ULL); } -static inline unsigned dm_consecutive_chunk_count(struct dm_snap_exception *e) +static inline unsigned dm_consecutive_chunk_count(struct dm_exception *e) { return e->new_chunk >> DM_CHUNK_NUMBER_BITS; } -static inline void dm_consecutive_chunk_count_inc(struct dm_snap_exception *e) +static inline void dm_consecutive_chunk_count_inc(struct dm_exception *e) { e->new_chunk += (1ULL << DM_CHUNK_NUMBER_BITS); BUG_ON(!dm_consecutive_chunk_count(e)); } +static inline void dm_consecutive_chunk_count_dec(struct dm_exception *e) +{ + BUG_ON(!dm_consecutive_chunk_count(e)); + + e->new_chunk -= (1ULL << DM_CHUNK_NUMBER_BITS); +} + # else # define DM_CHUNK_CONSECUTIVE_BITS 0 @@ -140,12 +169,16 @@ static inline chunk_t dm_chunk_number(chunk_t chunk) return chunk; } -static inline unsigned dm_consecutive_chunk_count(struct dm_snap_exception *e) +static inline unsigned dm_consecutive_chunk_count(struct dm_exception *e) { return 0; } -static inline void dm_consecutive_chunk_count_inc(struct dm_snap_exception *e) +static inline void dm_consecutive_chunk_count_inc(struct dm_exception *e) +{ +} + +static inline void dm_consecutive_chunk_count_dec(struct dm_exception *e) { } @@ -162,7 +195,7 @@ static inline sector_t get_dev_size(struct block_device *bdev) static inline chunk_t sector_to_chunk(struct dm_exception_store *store, sector_t sector) { - return (sector & ~store->chunk_mask) >> store->chunk_shift; + return sector >> store->chunk_shift; } int dm_exception_store_type_register(struct dm_exception_store_type *type); @@ -173,6 +206,7 @@ int dm_exception_store_set_chunk_size(struct dm_exception_store *store, char **error); int dm_exception_store_create(struct dm_target *ti, int argc, char **argv, + struct dm_snapshot *snap, unsigned *args_used, struct dm_exception_store **store); void dm_exception_store_destroy(struct dm_exception_store *store); diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c index 3a2e6a2f8bd..10f457ca6af 100644 --- a/drivers/md/dm-io.c +++ b/drivers/md/dm-io.c @@ -5,6 +5,8 @@ * This file is released under the GPL. */ +#include "dm.h" + #include <linux/device-mapper.h> #include <linux/bio.h> @@ -14,12 +16,19 @@ #include <linux/slab.h> #include <linux/dm-io.h> +#define DM_MSG_PREFIX "io" + +#define DM_IO_MAX_REGIONS BITS_PER_LONG + struct dm_io_client { mempool_t *pool; struct bio_set *bios; }; -/* FIXME: can we shrink this ? */ +/* + * Aligning 'struct io' reduces the number of bits required to store + * its address. Refer to store_io_and_region_in_bio() below. 
+ */ struct io { unsigned long error_bits; unsigned long eopnotsupp_bits; @@ -28,7 +37,9 @@ struct io { struct dm_io_client *client; io_notify_fn callback; void *context; -}; +} __attribute__((aligned(DM_IO_MAX_REGIONS))); + +static struct kmem_cache *_dm_io_cache; /* * io contexts are only dynamically allocated for asynchronous @@ -53,7 +64,7 @@ struct dm_io_client *dm_io_client_create(unsigned num_pages) if (!client) return ERR_PTR(-ENOMEM); - client->pool = mempool_create_kmalloc_pool(ios, sizeof(struct io)); + client->pool = mempool_create_slab_pool(ios, _dm_io_cache); if (!client->pool) goto bad; @@ -88,18 +99,29 @@ EXPORT_SYMBOL(dm_io_client_destroy); /*----------------------------------------------------------------- * We need to keep track of which region a bio is doing io for. - * In order to save a memory allocation we store this the last - * bvec which we know is unused (blech). - * XXX This is ugly and can OOPS with some configs... find another way. + * To avoid a memory allocation to store just 5 or 6 bits, we + * ensure the 'struct io' pointer is aligned so enough low bits are + * always zero and then combine it with the region number directly in + * bi_private. *---------------------------------------------------------------*/ -static inline void bio_set_region(struct bio *bio, unsigned region) +static void store_io_and_region_in_bio(struct bio *bio, struct io *io, + unsigned region) { - bio->bi_io_vec[bio->bi_max_vecs].bv_len = region; + if (unlikely(!IS_ALIGNED((unsigned long)io, DM_IO_MAX_REGIONS))) { + DMCRIT("Unaligned struct io pointer %p", io); + BUG(); + } + + bio->bi_private = (void *)((unsigned long)io | region); } -static inline unsigned bio_get_region(struct bio *bio) +static void retrieve_io_and_region_from_bio(struct bio *bio, struct io **io, + unsigned *region) { - return bio->bi_io_vec[bio->bi_max_vecs].bv_len; + unsigned long val = (unsigned long)bio->bi_private; + + *io = (void *)(val & -(unsigned long)DM_IO_MAX_REGIONS); + *region = val & (DM_IO_MAX_REGIONS - 1); } /*----------------------------------------------------------------- @@ -140,10 +162,8 @@ static void endio(struct bio *bio, int error) /* * The bio destructor in bio_put() may use the io object. */ - io = bio->bi_private; - region = bio_get_region(bio); + retrieve_io_and_region_from_bio(bio, &io, &region); - bio->bi_max_vecs++; bio_put(bio); dec_count(io, region, error); @@ -243,7 +263,10 @@ static void vm_dp_init(struct dpages *dp, void *data) static void dm_bio_destructor(struct bio *bio) { - struct io *io = bio->bi_private; + unsigned region; + struct io *io; + + retrieve_io_and_region_from_bio(bio, &io, &region); bio_free(bio, io->client->bios); } @@ -286,26 +309,23 @@ static void do_region(int rw, unsigned region, struct dm_io_region *where, unsigned num_bvecs; sector_t remaining = where->count; - while (remaining) { + /* + * where->count may be zero if rw holds a write barrier and we + * need to send a zero-sized barrier. + */ + do { /* - * Allocate a suitably sized-bio: we add an extra - * bvec for bio_get/set_region() and decrement bi_max_vecs - * to hide it from bio_add_page(). + * Allocate a suitably sized-bio. 
*/ num_bvecs = dm_sector_div_up(remaining, (PAGE_SIZE >> SECTOR_SHIFT)); - num_bvecs = 1 + min_t(int, bio_get_nr_vecs(where->bdev), - num_bvecs); - if (unlikely(num_bvecs > BIO_MAX_PAGES)) - num_bvecs = BIO_MAX_PAGES; + num_bvecs = min_t(int, bio_get_nr_vecs(where->bdev), num_bvecs); bio = bio_alloc_bioset(GFP_NOIO, num_bvecs, io->client->bios); bio->bi_sector = where->sector + (where->count - remaining); bio->bi_bdev = where->bdev; bio->bi_end_io = endio; - bio->bi_private = io; bio->bi_destructor = dm_bio_destructor; - bio->bi_max_vecs--; - bio_set_region(bio, region); + store_io_and_region_in_bio(bio, io, region); /* * Try and add as many pages as possible. @@ -323,7 +343,7 @@ static void do_region(int rw, unsigned region, struct dm_io_region *where, atomic_inc(&io->count); submit_bio(rw, bio); - } + } while (remaining); } static void dispatch_io(int rw, unsigned int num_regions, @@ -333,6 +353,8 @@ static void dispatch_io(int rw, unsigned int num_regions, int i; struct dpages old_pages = *dp; + BUG_ON(num_regions > DM_IO_MAX_REGIONS); + if (sync) rw |= (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_UNPLUG); @@ -342,7 +364,7 @@ static void dispatch_io(int rw, unsigned int num_regions, */ for (i = 0; i < num_regions; i++) { *dp = old_pages; - if (where[i].count) + if (where[i].count || (rw & (1 << BIO_RW_BARRIER))) do_region(rw, i, where + i, dp, io); } @@ -357,7 +379,14 @@ static int sync_io(struct dm_io_client *client, unsigned int num_regions, struct dm_io_region *where, int rw, struct dpages *dp, unsigned long *error_bits) { - struct io io; + /* + * gcc <= 4.3 can't do the alignment for stack variables, so we must + * align it on our own. + * volatile prevents the optimizer from removing or reusing + * "io_" field from the stack frame (allowed in ANSI C). + */ + volatile char io_[sizeof(struct io) + __alignof__(struct io) - 1]; + struct io *io = (struct io *)PTR_ALIGN(&io_, __alignof__(struct io)); if (num_regions > 1 && (rw & RW_MASK) != WRITE) { WARN_ON(1); @@ -365,33 +394,33 @@ static int sync_io(struct dm_io_client *client, unsigned int num_regions, } retry: - io.error_bits = 0; - io.eopnotsupp_bits = 0; - atomic_set(&io.count, 1); /* see dispatch_io() */ - io.sleeper = current; - io.client = client; + io->error_bits = 0; + io->eopnotsupp_bits = 0; + atomic_set(&io->count, 1); /* see dispatch_io() */ + io->sleeper = current; + io->client = client; - dispatch_io(rw, num_regions, where, dp, &io, 1); + dispatch_io(rw, num_regions, where, dp, io, 1); while (1) { set_current_state(TASK_UNINTERRUPTIBLE); - if (!atomic_read(&io.count)) + if (!atomic_read(&io->count)) break; io_schedule(); } set_current_state(TASK_RUNNING); - if (io.eopnotsupp_bits && (rw & (1 << BIO_RW_BARRIER))) { + if (io->eopnotsupp_bits && (rw & (1 << BIO_RW_BARRIER))) { rw &= ~(1 << BIO_RW_BARRIER); goto retry; } if (error_bits) - *error_bits = io.error_bits; + *error_bits = io->error_bits; - return io.error_bits ? -EIO : 0; + return io->error_bits ? 
-EIO : 0; } static int async_io(struct dm_io_client *client, unsigned int num_regions, @@ -472,3 +501,18 @@ int dm_io(struct dm_io_request *io_req, unsigned num_regions, &dp, io_req->notify.fn, io_req->notify.context); } EXPORT_SYMBOL(dm_io); + +int __init dm_io_init(void) +{ + _dm_io_cache = KMEM_CACHE(io, 0); + if (!_dm_io_cache) + return -ENOMEM; + + return 0; +} + +void dm_io_exit(void) +{ + kmem_cache_destroy(_dm_io_cache); + _dm_io_cache = NULL; +} diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c index a6794293158..1d669322b27 100644 --- a/drivers/md/dm-ioctl.c +++ b/drivers/md/dm-ioctl.c @@ -56,6 +56,11 @@ static void dm_hash_remove_all(int keep_open_devices); */ static DECLARE_RWSEM(_hash_lock); +/* + * Protects use of mdptr to obtain hash cell name and uuid from mapped device. + */ +static DEFINE_MUTEX(dm_hash_cells_mutex); + static void init_buckets(struct list_head *buckets) { unsigned int i; @@ -206,7 +211,9 @@ static int dm_hash_insert(const char *name, const char *uuid, struct mapped_devi list_add(&cell->uuid_list, _uuid_buckets + hash_str(uuid)); } dm_get(md); + mutex_lock(&dm_hash_cells_mutex); dm_set_mdptr(md, cell); + mutex_unlock(&dm_hash_cells_mutex); up_write(&_hash_lock); return 0; @@ -224,9 +231,11 @@ static void __hash_remove(struct hash_cell *hc) /* remove from the dev hash */ list_del(&hc->uuid_list); list_del(&hc->name_list); + mutex_lock(&dm_hash_cells_mutex); dm_set_mdptr(hc->md, NULL); + mutex_unlock(&dm_hash_cells_mutex); - table = dm_get_table(hc->md); + table = dm_get_live_table(hc->md); if (table) { dm_table_event(table); dm_table_put(table); @@ -321,13 +330,15 @@ static int dm_hash_rename(uint32_t cookie, const char *old, const char *new) */ list_del(&hc->name_list); old_name = hc->name; + mutex_lock(&dm_hash_cells_mutex); hc->name = new_name; + mutex_unlock(&dm_hash_cells_mutex); list_add(&hc->name_list, _name_buckets + hash_str(new_name)); /* * Wake up any dm event waiters. */ - table = dm_get_table(hc->md); + table = dm_get_live_table(hc->md); if (table) { dm_table_event(table); dm_table_put(table); @@ -512,8 +523,6 @@ static int list_versions(struct dm_ioctl *param, size_t param_size) return 0; } - - static int check_name(const char *name) { if (strchr(name, '/')) { @@ -525,6 +534,40 @@ static int check_name(const char *name) } /* + * On successful return, the caller must not attempt to acquire + * _hash_lock without first calling dm_table_put, because dm_table_destroy + * waits for this dm_table_put and could be called under this lock. + */ +static struct dm_table *dm_get_inactive_table(struct mapped_device *md) +{ + struct hash_cell *hc; + struct dm_table *table = NULL; + + down_read(&_hash_lock); + hc = dm_get_mdptr(md); + if (!hc || hc->md != md) { + DMWARN("device has been removed from the dev hash table."); + goto out; + } + + table = hc->new_map; + if (table) + dm_table_get(table); + +out: + up_read(&_hash_lock); + + return table; +} + +static struct dm_table *dm_get_live_or_inactive_table(struct mapped_device *md, + struct dm_ioctl *param) +{ + return (param->flags & DM_QUERY_INACTIVE_TABLE_FLAG) ? + dm_get_inactive_table(md) : dm_get_live_table(md); +} + +/* * Fills in a dm_ioctl structure, ready for sending back to * userland. 
*/ @@ -536,7 +579,7 @@ static int __dev_status(struct mapped_device *md, struct dm_ioctl *param) param->flags &= ~(DM_SUSPEND_FLAG | DM_READONLY_FLAG | DM_ACTIVE_PRESENT_FLAG); - if (dm_suspended(md)) + if (dm_suspended_md(md)) param->flags |= DM_SUSPEND_FLAG; param->dev = huge_encode_dev(disk_devt(disk)); @@ -548,18 +591,30 @@ static int __dev_status(struct mapped_device *md, struct dm_ioctl *param) */ param->open_count = dm_open_count(md); - if (get_disk_ro(disk)) - param->flags |= DM_READONLY_FLAG; - param->event_nr = dm_get_event_nr(md); + param->target_count = 0; - table = dm_get_table(md); + table = dm_get_live_table(md); if (table) { - param->flags |= DM_ACTIVE_PRESENT_FLAG; - param->target_count = dm_table_get_num_targets(table); + if (!(param->flags & DM_QUERY_INACTIVE_TABLE_FLAG)) { + if (get_disk_ro(disk)) + param->flags |= DM_READONLY_FLAG; + param->target_count = dm_table_get_num_targets(table); + } dm_table_put(table); - } else - param->target_count = 0; + + param->flags |= DM_ACTIVE_PRESENT_FLAG; + } + + if (param->flags & DM_QUERY_INACTIVE_TABLE_FLAG) { + table = dm_get_inactive_table(md); + if (table) { + if (!(dm_table_get_mode(table) & FMODE_WRITE)) + param->flags |= DM_READONLY_FLAG; + param->target_count = dm_table_get_num_targets(table); + dm_table_put(table); + } + } return 0; } @@ -634,9 +689,9 @@ static struct mapped_device *find_device(struct dm_ioctl *param) * Sneakily write in both the name and the uuid * while we have the cell. */ - strncpy(param->name, hc->name, sizeof(param->name)); + strlcpy(param->name, hc->name, sizeof(param->name)); if (hc->uuid) - strncpy(param->uuid, hc->uuid, sizeof(param->uuid)-1); + strlcpy(param->uuid, hc->uuid, sizeof(param->uuid)); else param->uuid[0] = '\0'; @@ -784,7 +839,7 @@ static int do_suspend(struct dm_ioctl *param) if (param->flags & DM_NOFLUSH_FLAG) suspend_flags |= DM_SUSPEND_NOFLUSH_FLAG; - if (!dm_suspended(md)) + if (!dm_suspended_md(md)) r = dm_suspend(md, suspend_flags); if (!r) @@ -800,7 +855,7 @@ static int do_resume(struct dm_ioctl *param) unsigned suspend_flags = DM_SUSPEND_LOCKFS_FLAG; struct hash_cell *hc; struct mapped_device *md; - struct dm_table *new_map; + struct dm_table *new_map, *old_map = NULL; down_write(&_hash_lock); @@ -826,14 +881,14 @@ static int do_resume(struct dm_ioctl *param) suspend_flags &= ~DM_SUSPEND_LOCKFS_FLAG; if (param->flags & DM_NOFLUSH_FLAG) suspend_flags |= DM_SUSPEND_NOFLUSH_FLAG; - if (!dm_suspended(md)) + if (!dm_suspended_md(md)) dm_suspend(md, suspend_flags); - r = dm_swap_table(md, new_map); - if (r) { + old_map = dm_swap_table(md, new_map); + if (IS_ERR(old_map)) { dm_table_destroy(new_map); dm_put(md); - return r; + return PTR_ERR(old_map); } if (dm_table_get_mode(new_map) & FMODE_WRITE) @@ -842,9 +897,11 @@ static int do_resume(struct dm_ioctl *param) set_disk_ro(dm_disk(md), 1); } - if (dm_suspended(md)) + if (dm_suspended_md(md)) r = dm_resume(md); + if (old_map) + dm_table_destroy(old_map); if (!r) { dm_kobject_uevent(md, KOBJ_CHANGE, param->event_nr); @@ -982,7 +1039,7 @@ static int dev_wait(struct dm_ioctl *param, size_t param_size) if (r) goto out; - table = dm_get_table(md); + table = dm_get_live_or_inactive_table(md, param); if (table) { retrieve_status(table, param, param_size); dm_table_put(table); @@ -1215,7 +1272,7 @@ static int table_deps(struct dm_ioctl *param, size_t param_size) if (r) goto out; - table = dm_get_table(md); + table = dm_get_live_or_inactive_table(md, param); if (table) { retrieve_deps(table, param, param_size); dm_table_put(table); @@ 
-1244,13 +1301,13 @@ static int table_status(struct dm_ioctl *param, size_t param_size) if (r) goto out; - table = dm_get_table(md); + table = dm_get_live_or_inactive_table(md, param); if (table) { retrieve_status(table, param, param_size); dm_table_put(table); } - out: +out: dm_put(md); return r; } @@ -1288,10 +1345,15 @@ static int target_message(struct dm_ioctl *param, size_t param_size) goto out; } - table = dm_get_table(md); + table = dm_get_live_table(md); if (!table) goto out_argv; + if (dm_deleting_md(md)) { + r = -ENXIO; + goto out_table; + } + ti = dm_table_find_target(table, tmsg->sector); if (!dm_target_is_valid(ti)) { DMWARN("Target message sector outside device."); @@ -1303,6 +1365,7 @@ static int target_message(struct dm_ioctl *param, size_t param_size) r = -EINVAL; } + out_table: dm_table_put(table); out_argv: kfree(argv); @@ -1582,8 +1645,7 @@ int dm_copy_name_and_uuid(struct mapped_device *md, char *name, char *uuid) if (!md) return -ENXIO; - dm_get(md); - down_read(&_hash_lock); + mutex_lock(&dm_hash_cells_mutex); hc = dm_get_mdptr(md); if (!hc || hc->md != md) { r = -ENXIO; @@ -1596,8 +1658,7 @@ int dm_copy_name_and_uuid(struct mapped_device *md, char *name, char *uuid) strcpy(uuid, hc->uuid ? : ""); out: - up_read(&_hash_lock); - dm_put(md); + mutex_unlock(&dm_hash_cells_mutex); return r; } diff --git a/drivers/md/dm-kcopyd.c b/drivers/md/dm-kcopyd.c index 3e3fc06cb86..addf8347504 100644 --- a/drivers/md/dm-kcopyd.c +++ b/drivers/md/dm-kcopyd.c @@ -450,7 +450,10 @@ static void dispatch_job(struct kcopyd_job *job) { struct dm_kcopyd_client *kc = job->kc; atomic_inc(&kc->nr_jobs); - push(&kc->pages_jobs, job); + if (unlikely(!job->source.count)) + push(&kc->complete_jobs, job); + else + push(&kc->pages_jobs, job); wake(kc); } diff --git a/drivers/md/dm-log.c b/drivers/md/dm-log.c index 9443896ede0..7035582786f 100644 --- a/drivers/md/dm-log.c +++ b/drivers/md/dm-log.c @@ -145,8 +145,9 @@ int dm_dirty_log_type_unregister(struct dm_dirty_log_type *type) EXPORT_SYMBOL(dm_dirty_log_type_unregister); struct dm_dirty_log *dm_dirty_log_create(const char *type_name, - struct dm_target *ti, - unsigned int argc, char **argv) + struct dm_target *ti, + int (*flush_callback_fn)(struct dm_target *ti), + unsigned int argc, char **argv) { struct dm_dirty_log_type *type; struct dm_dirty_log *log; @@ -161,6 +162,7 @@ struct dm_dirty_log *dm_dirty_log_create(const char *type_name, return NULL; } + log->flush_callback_fn = flush_callback_fn; log->type = type; if (type->ctr(log, ti, argc, argv)) { kfree(log); @@ -208,7 +210,9 @@ struct log_header { struct log_c { struct dm_target *ti; - int touched; + int touched_dirtied; + int touched_cleaned; + int flush_failed; uint32_t region_size; unsigned int region_count; region_t sync_count; @@ -233,6 +237,7 @@ struct log_c { * Disk log fields */ int log_dev_failed; + int log_dev_flush_failed; struct dm_dev *log_dev; struct log_header header; @@ -253,14 +258,14 @@ static inline void log_set_bit(struct log_c *l, uint32_t *bs, unsigned bit) { ext2_set_bit(bit, (unsigned long *) bs); - l->touched = 1; + l->touched_cleaned = 1; } static inline void log_clear_bit(struct log_c *l, uint32_t *bs, unsigned bit) { ext2_clear_bit(bit, (unsigned long *) bs); - l->touched = 1; + l->touched_dirtied = 1; } /*---------------------------------------------------------------- @@ -287,6 +292,19 @@ static int rw_header(struct log_c *lc, int rw) return dm_io(&lc->io_req, 1, &lc->header_location, NULL); } +static int flush_header(struct log_c *lc) +{ + struct dm_io_region 
null_location = { + .bdev = lc->header_location.bdev, + .sector = 0, + .count = 0, + }; + + lc->io_req.bi_rw = WRITE_BARRIER; + + return dm_io(&lc->io_req, 1, &null_location, NULL); +} + static int read_header(struct log_c *log) { int r; @@ -378,7 +396,9 @@ static int create_log_context(struct dm_dirty_log *log, struct dm_target *ti, } lc->ti = ti; - lc->touched = 0; + lc->touched_dirtied = 0; + lc->touched_cleaned = 0; + lc->flush_failed = 0; lc->region_size = region_size; lc->region_count = region_count; lc->sync = sync; @@ -406,6 +426,7 @@ static int create_log_context(struct dm_dirty_log *log, struct dm_target *ti, } else { lc->log_dev = dev; lc->log_dev_failed = 0; + lc->log_dev_flush_failed = 0; lc->header_location.bdev = lc->log_dev->bdev; lc->header_location.sector = 0; @@ -614,6 +635,11 @@ static int disk_resume(struct dm_dirty_log *log) /* write the new header */ r = rw_header(lc, WRITE); + if (!r) { + r = flush_header(lc); + if (r) + lc->log_dev_flush_failed = 1; + } if (r) { DMWARN("%s: Failed to write header on dirty region log device", lc->log_dev->name); @@ -656,18 +682,40 @@ static int core_flush(struct dm_dirty_log *log) static int disk_flush(struct dm_dirty_log *log) { - int r; - struct log_c *lc = (struct log_c *) log->context; + int r, i; + struct log_c *lc = log->context; /* only write if the log has changed */ - if (!lc->touched) + if (!lc->touched_cleaned && !lc->touched_dirtied) return 0; + if (lc->touched_cleaned && log->flush_callback_fn && + log->flush_callback_fn(lc->ti)) { + /* + * At this point it is impossible to determine which + * regions are clean and which are dirty (without + * re-reading the log off disk). So mark all of them + * dirty. + */ + lc->flush_failed = 1; + for (i = 0; i < lc->region_count; i++) + log_clear_bit(lc, lc->clean_bits, i); + } + r = rw_header(lc, WRITE); if (r) fail_log_device(lc); - else - lc->touched = 0; + else { + if (lc->touched_dirtied) { + r = flush_header(lc); + if (r) { + lc->log_dev_flush_failed = 1; + fail_log_device(lc); + } else + lc->touched_dirtied = 0; + } + lc->touched_cleaned = 0; + } return r; } @@ -681,7 +729,8 @@ static void core_mark_region(struct dm_dirty_log *log, region_t region) static void core_clear_region(struct dm_dirty_log *log, region_t region) { struct log_c *lc = (struct log_c *) log->context; - log_set_bit(lc, lc->clean_bits, region); + if (likely(!lc->flush_failed)) + log_set_bit(lc, lc->clean_bits, region); } static int core_get_resync_work(struct dm_dirty_log *log, region_t *region) @@ -762,7 +811,9 @@ static int disk_status(struct dm_dirty_log *log, status_type_t status, switch(status) { case STATUSTYPE_INFO: DMEMIT("3 %s %s %c", log->type->name, lc->log_dev->name, - lc->log_dev_failed ? 'D' : 'A'); + lc->log_dev_flush_failed ? 'F' : + lc->log_dev_failed ? 'D' : + 'A'); break; case STATUSTYPE_TABLE: diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c index dce971dbdfa..e81345a1d08 100644 --- a/drivers/md/dm-mpath.c +++ b/drivers/md/dm-mpath.c @@ -93,6 +93,10 @@ struct multipath { * can resubmit bios on error. */ mempool_t *mpio_pool; + + struct mutex work_mutex; + + unsigned suspended; /* Don't create new I/O internally when set. 
*/ }; /* @@ -198,6 +202,7 @@ static struct multipath *alloc_multipath(struct dm_target *ti) m->queue_io = 1; INIT_WORK(&m->process_queued_ios, process_queued_ios); INIT_WORK(&m->trigger_event, trigger_event); + mutex_init(&m->work_mutex); m->mpio_pool = mempool_create_slab_pool(MIN_IOS, _mpio_cache); if (!m->mpio_pool) { kfree(m); @@ -885,13 +890,18 @@ static int multipath_ctr(struct dm_target *ti, unsigned int argc, return r; } -static void multipath_dtr(struct dm_target *ti) +static void flush_multipath_work(void) { - struct multipath *m = (struct multipath *) ti->private; - flush_workqueue(kmpath_handlerd); flush_workqueue(kmultipathd); flush_scheduled_work(); +} + +static void multipath_dtr(struct dm_target *ti) +{ + struct multipath *m = ti->private; + + flush_multipath_work(); free_multipath(m); } @@ -1261,6 +1271,16 @@ static void multipath_presuspend(struct dm_target *ti) queue_if_no_path(m, 0, 1); } +static void multipath_postsuspend(struct dm_target *ti) +{ + struct multipath *m = ti->private; + + mutex_lock(&m->work_mutex); + m->suspended = 1; + flush_multipath_work(); + mutex_unlock(&m->work_mutex); +} + /* * Restore the queue_if_no_path setting. */ @@ -1269,6 +1289,10 @@ static void multipath_resume(struct dm_target *ti) struct multipath *m = (struct multipath *) ti->private; unsigned long flags; + mutex_lock(&m->work_mutex); + m->suspended = 0; + mutex_unlock(&m->work_mutex); + spin_lock_irqsave(&m->lock, flags); m->queue_if_no_path = m->saved_queue_if_no_path; spin_unlock_irqrestore(&m->lock, flags); @@ -1397,51 +1421,71 @@ static int multipath_status(struct dm_target *ti, status_type_t type, static int multipath_message(struct dm_target *ti, unsigned argc, char **argv) { - int r; + int r = -EINVAL; struct dm_dev *dev; struct multipath *m = (struct multipath *) ti->private; action_fn action; + mutex_lock(&m->work_mutex); + + if (m->suspended) { + r = -EBUSY; + goto out; + } + + if (dm_suspended(ti)) { + r = -EBUSY; + goto out; + } + if (argc == 1) { - if (!strnicmp(argv[0], MESG_STR("queue_if_no_path"))) - return queue_if_no_path(m, 1, 0); - else if (!strnicmp(argv[0], MESG_STR("fail_if_no_path"))) - return queue_if_no_path(m, 0, 0); + if (!strnicmp(argv[0], MESG_STR("queue_if_no_path"))) { + r = queue_if_no_path(m, 1, 0); + goto out; + } else if (!strnicmp(argv[0], MESG_STR("fail_if_no_path"))) { + r = queue_if_no_path(m, 0, 0); + goto out; + } } - if (argc != 2) - goto error; + if (argc != 2) { + DMWARN("Unrecognised multipath message received."); + goto out; + } - if (!strnicmp(argv[0], MESG_STR("disable_group"))) - return bypass_pg_num(m, argv[1], 1); - else if (!strnicmp(argv[0], MESG_STR("enable_group"))) - return bypass_pg_num(m, argv[1], 0); - else if (!strnicmp(argv[0], MESG_STR("switch_group"))) - return switch_pg_num(m, argv[1]); - else if (!strnicmp(argv[0], MESG_STR("reinstate_path"))) + if (!strnicmp(argv[0], MESG_STR("disable_group"))) { + r = bypass_pg_num(m, argv[1], 1); + goto out; + } else if (!strnicmp(argv[0], MESG_STR("enable_group"))) { + r = bypass_pg_num(m, argv[1], 0); + goto out; + } else if (!strnicmp(argv[0], MESG_STR("switch_group"))) { + r = switch_pg_num(m, argv[1]); + goto out; + } else if (!strnicmp(argv[0], MESG_STR("reinstate_path"))) action = reinstate_path; else if (!strnicmp(argv[0], MESG_STR("fail_path"))) action = fail_path; - else - goto error; + else { + DMWARN("Unrecognised multipath message received."); + goto out; + } r = dm_get_device(ti, argv[1], ti->begin, ti->len, dm_table_get_mode(ti->table), &dev); if (r) { DMWARN("message: 
error getting device %s", argv[1]); - return -EINVAL; + goto out; } r = action_dev(m, dev, action); dm_put_device(ti, dev); +out: + mutex_unlock(&m->work_mutex); return r; - -error: - DMWARN("Unrecognised multipath message received."); - return -EINVAL; } static int multipath_ioctl(struct dm_target *ti, unsigned int cmd, @@ -1567,13 +1611,14 @@ out: *---------------------------------------------------------------*/ static struct target_type multipath_target = { .name = "multipath", - .version = {1, 1, 0}, + .version = {1, 1, 1}, .module = THIS_MODULE, .ctr = multipath_ctr, .dtr = multipath_dtr, .map_rq = multipath_map, .rq_end_io = multipath_end_io, .presuspend = multipath_presuspend, + .postsuspend = multipath_postsuspend, .resume = multipath_resume, .status = multipath_status, .message = multipath_message, diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c index cc9dc79b078..ad779bd13ae 100644 --- a/drivers/md/dm-raid1.c +++ b/drivers/md/dm-raid1.c @@ -35,6 +35,7 @@ static DECLARE_WAIT_QUEUE_HEAD(_kmirrord_recovery_stopped); *---------------------------------------------------------------*/ enum dm_raid1_error { DM_RAID1_WRITE_ERROR, + DM_RAID1_FLUSH_ERROR, DM_RAID1_SYNC_ERROR, DM_RAID1_READ_ERROR }; @@ -57,6 +58,7 @@ struct mirror_set { struct bio_list reads; struct bio_list writes; struct bio_list failures; + struct bio_list holds; /* bios are waiting until suspend */ struct dm_region_hash *rh; struct dm_kcopyd_client *kcopyd_client; @@ -67,6 +69,7 @@ struct mirror_set { region_t nr_regions; int in_sync; int log_failure; + int leg_failure; atomic_t suspend; atomic_t default_mirror; /* Default mirror */ @@ -179,6 +182,17 @@ static void set_default_mirror(struct mirror *m) atomic_set(&ms->default_mirror, m - m0); } +static struct mirror *get_valid_mirror(struct mirror_set *ms) +{ + struct mirror *m; + + for (m = ms->mirror; m < ms->mirror + ms->nr_mirrors; m++) + if (!atomic_read(&m->error_count)) + return m; + + return NULL; +} + /* fail_mirror * @m: mirror device to fail * @error_type: one of the enum's, DM_RAID1_*_ERROR @@ -198,6 +212,8 @@ static void fail_mirror(struct mirror *m, enum dm_raid1_error error_type) struct mirror_set *ms = m->ms; struct mirror *new; + ms->leg_failure = 1; + /* * error_count is used for nothing more than a * simple way to tell if a device has encountered @@ -224,19 +240,50 @@ static void fail_mirror(struct mirror *m, enum dm_raid1_error error_type) goto out; } - for (new = ms->mirror; new < ms->mirror + ms->nr_mirrors; new++) - if (!atomic_read(&new->error_count)) { - set_default_mirror(new); - break; - } - - if (unlikely(new == ms->mirror + ms->nr_mirrors)) + new = get_valid_mirror(ms); + if (new) + set_default_mirror(new); + else DMWARN("All sides of mirror have failed."); out: schedule_work(&ms->trigger_event); } +static int mirror_flush(struct dm_target *ti) +{ + struct mirror_set *ms = ti->private; + unsigned long error_bits; + + unsigned int i; + struct dm_io_region io[ms->nr_mirrors]; + struct mirror *m; + struct dm_io_request io_req = { + .bi_rw = WRITE_BARRIER, + .mem.type = DM_IO_KMEM, + .mem.ptr.bvec = NULL, + .client = ms->io_client, + }; + + for (i = 0, m = ms->mirror; i < ms->nr_mirrors; i++, m++) { + io[i].bdev = m->dev->bdev; + io[i].sector = 0; + io[i].count = 0; + } + + error_bits = -1; + dm_io(&io_req, ms->nr_mirrors, io, &error_bits); + if (unlikely(error_bits != 0)) { + for (i = 0; i < ms->nr_mirrors; i++) + if (test_bit(i, &error_bits)) + fail_mirror(ms->mirror + i, + DM_RAID1_FLUSH_ERROR); + return -EIO; + } + + return 0; +} 
+ /*----------------------------------------------------------------- * Recovery. * @@ -396,6 +443,8 @@ static int mirror_available(struct mirror_set *ms, struct bio *bio) */ static sector_t map_sector(struct mirror *m, struct bio *bio) { + if (unlikely(!bio->bi_size)) + return 0; return m->offset + (bio->bi_sector - m->ms->ti->begin); } @@ -413,6 +462,27 @@ static void map_region(struct dm_io_region *io, struct mirror *m, io->count = bio->bi_size >> 9; } +static void hold_bio(struct mirror_set *ms, struct bio *bio) +{ + /* + * If device is suspended, complete the bio. + */ + if (atomic_read(&ms->suspend)) { + if (dm_noflush_suspending(ms->ti)) + bio_endio(bio, DM_ENDIO_REQUEUE); + else + bio_endio(bio, -EIO); + return; + } + + /* + * Hold bio until the suspend is complete. + */ + spin_lock_irq(&ms->lock); + bio_list_add(&ms->holds, bio); + spin_unlock_irq(&ms->lock); +} + /*----------------------------------------------------------------- * Reads *---------------------------------------------------------------*/ @@ -511,7 +581,6 @@ static void write_callback(unsigned long error, void *context) unsigned i, ret = 0; struct bio *bio = (struct bio *) context; struct mirror_set *ms; - int uptodate = 0; int should_wake = 0; unsigned long flags; @@ -524,36 +593,27 @@ static void write_callback(unsigned long error, void *context) * This way we handle both writes to SYNC and NOSYNC * regions with the same code. */ - if (likely(!error)) - goto out; + if (likely(!error)) { + bio_endio(bio, ret); + return; + } for (i = 0; i < ms->nr_mirrors; i++) if (test_bit(i, &error)) fail_mirror(ms->mirror + i, DM_RAID1_WRITE_ERROR); - else - uptodate = 1; - if (unlikely(!uptodate)) { - DMERR("All replicated volumes dead, failing I/O"); - /* None of the writes succeeded, fail the I/O. */ - ret = -EIO; - } else if (errors_handled(ms)) { - /* - * Need to raise event. Since raising - * events can block, we need to do it in - * the main thread. - */ - spin_lock_irqsave(&ms->lock, flags); - if (!ms->failures.head) - should_wake = 1; - bio_list_add(&ms->failures, bio); - spin_unlock_irqrestore(&ms->lock, flags); - if (should_wake) - wakeup_mirrord(ms); - return; - } -out: - bio_endio(bio, ret); + /* + * Need to raise event. Since raising + * events can block, we need to do it in + * the main thread. 
+ */ + spin_lock_irqsave(&ms->lock, flags); + if (!ms->failures.head) + should_wake = 1; + bio_list_add(&ms->failures, bio); + spin_unlock_irqrestore(&ms->lock, flags); + if (should_wake) + wakeup_mirrord(ms); } static void do_write(struct mirror_set *ms, struct bio *bio) @@ -562,7 +622,7 @@ static void do_write(struct mirror_set *ms, struct bio *bio) struct dm_io_region io[ms->nr_mirrors], *dest = io; struct mirror *m; struct dm_io_request io_req = { - .bi_rw = WRITE, + .bi_rw = WRITE | (bio->bi_rw & WRITE_BARRIER), .mem.type = DM_IO_BVEC, .mem.ptr.bvec = bio->bi_io_vec + bio->bi_idx, .notify.fn = write_callback, @@ -603,6 +663,11 @@ static void do_writes(struct mirror_set *ms, struct bio_list *writes) bio_list_init(&requeue); while ((bio = bio_list_pop(writes))) { + if (unlikely(bio_empty_barrier(bio))) { + bio_list_add(&sync, bio); + continue; + } + region = dm_rh_bio_to_region(ms->rh, bio); if (log->type->is_remote_recovering && @@ -672,8 +737,12 @@ static void do_writes(struct mirror_set *ms, struct bio_list *writes) dm_rh_delay(ms->rh, bio); while ((bio = bio_list_pop(&nosync))) { - map_bio(get_default_mirror(ms), bio); - generic_make_request(bio); + if (unlikely(ms->leg_failure) && errors_handled(ms)) + hold_bio(ms, bio); + else { + map_bio(get_default_mirror(ms), bio); + generic_make_request(bio); + } } } @@ -681,20 +750,12 @@ static void do_failures(struct mirror_set *ms, struct bio_list *failures) { struct bio *bio; - if (!failures->head) - return; - - if (!ms->log_failure) { - while ((bio = bio_list_pop(failures))) { - ms->in_sync = 0; - dm_rh_mark_nosync(ms->rh, bio, bio->bi_size, 0); - } + if (likely(!failures->head)) return; - } /* * If the log has failed, unattempted writes are being - * put on the failures list. We can't issue those writes + * put on the holds list. We can't issue those writes * until a log has been marked, so we must store them. * * If a 'noflush' suspend is in progress, we can requeue @@ -709,23 +770,27 @@ static void do_failures(struct mirror_set *ms, struct bio_list *failures) * for us to treat them the same and requeue them * as well. */ - if (dm_noflush_suspending(ms->ti)) { - while ((bio = bio_list_pop(failures))) - bio_endio(bio, DM_ENDIO_REQUEUE); - return; - } + while ((bio = bio_list_pop(failures))) { + if (!ms->log_failure) { + ms->in_sync = 0; + dm_rh_mark_nosync(ms->rh, bio); + } - if (atomic_read(&ms->suspend)) { - while ((bio = bio_list_pop(failures))) + /* + * If all the legs are dead, fail the I/O. + * If we have been told to handle errors, hold the bio + * and wait for userspace to deal with the problem. + * Otherwise pretend that the I/O succeeded. (This would + * be wrong if the failed leg returned after reboot and + * got replicated back to the good legs.) 
+ */ + if (!get_valid_mirror(ms)) bio_endio(bio, -EIO); - return; + else if (errors_handled(ms)) + hold_bio(ms, bio); + else + bio_endio(bio, 0); } - - spin_lock_irq(&ms->lock); - bio_list_merge(&ms->failures, failures); - spin_unlock_irq(&ms->lock); - - delayed_wake(ms); } static void trigger_event(struct work_struct *work) @@ -784,12 +849,17 @@ static struct mirror_set *alloc_context(unsigned int nr_mirrors, } spin_lock_init(&ms->lock); + bio_list_init(&ms->reads); + bio_list_init(&ms->writes); + bio_list_init(&ms->failures); + bio_list_init(&ms->holds); ms->ti = ti; ms->nr_mirrors = nr_mirrors; ms->nr_regions = dm_sector_div_up(ti->len, region_size); ms->in_sync = 0; ms->log_failure = 0; + ms->leg_failure = 0; atomic_set(&ms->suspend, 0); atomic_set(&ms->default_mirror, DEFAULT_MIRROR); @@ -889,7 +959,8 @@ static struct dm_dirty_log *create_dirty_log(struct dm_target *ti, return NULL; } - dl = dm_dirty_log_create(argv[0], ti, param_count, argv + 2); + dl = dm_dirty_log_create(argv[0], ti, mirror_flush, param_count, + argv + 2); if (!dl) { ti->error = "Error creating mirror dirty log"; return NULL; @@ -995,6 +1066,7 @@ static int mirror_ctr(struct dm_target *ti, unsigned int argc, char **argv) ti->private = ms; ti->split_io = dm_rh_get_region_size(ms->rh); + ti->num_flush_requests = 1; ms->kmirrord_wq = create_singlethread_workqueue("kmirrord"); if (!ms->kmirrord_wq) { @@ -1122,7 +1194,8 @@ static int mirror_end_io(struct dm_target *ti, struct bio *bio, * We need to dec pending if this was a write. */ if (rw == WRITE) { - dm_rh_dec(ms->rh, map_context->ll); + if (likely(!bio_empty_barrier(bio))) + dm_rh_dec(ms->rh, map_context->ll); return error; } @@ -1180,6 +1253,9 @@ static void mirror_presuspend(struct dm_target *ti) struct mirror_set *ms = (struct mirror_set *) ti->private; struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh); + struct bio_list holds; + struct bio *bio; + atomic_set(&ms->suspend, 1); /* @@ -1202,6 +1278,22 @@ static void mirror_presuspend(struct dm_target *ti) * we know that all of our I/O has been pushed. */ flush_workqueue(ms->kmirrord_wq); + + /* + * Now set ms->suspend is set and the workqueue flushed, no more + * entries can be added to ms->hold list, so process it. + * + * Bios can still arrive concurrently with or after this + * presuspend function, but they cannot join the hold list + * because ms->suspend is set. + */ + spin_lock_irq(&ms->lock); + holds = ms->holds; + bio_list_init(&ms->holds); + spin_unlock_irq(&ms->lock); + + while ((bio = bio_list_pop(&holds))) + hold_bio(ms, bio); } static void mirror_postsuspend(struct dm_target *ti) @@ -1244,7 +1336,8 @@ static char device_status_char(struct mirror *m) if (!atomic_read(&(m->error_count))) return 'A'; - return (test_bit(DM_RAID1_WRITE_ERROR, &(m->error_type))) ? 'D' : + return (test_bit(DM_RAID1_FLUSH_ERROR, &(m->error_type))) ? 'F' : + (test_bit(DM_RAID1_WRITE_ERROR, &(m->error_type))) ? 'D' : (test_bit(DM_RAID1_SYNC_ERROR, &(m->error_type))) ? 'S' : (test_bit(DM_RAID1_READ_ERROR, &(m->error_type))) ? 'R' : 'U'; } diff --git a/drivers/md/dm-region-hash.c b/drivers/md/dm-region-hash.c index 36dbe29f2fd..5f19ceb6fe9 100644 --- a/drivers/md/dm-region-hash.c +++ b/drivers/md/dm-region-hash.c @@ -79,6 +79,11 @@ struct dm_region_hash { struct list_head recovered_regions; struct list_head failed_recovered_regions; + /* + * If there was a barrier failure no regions can be marked clean. 
+ */ + int barrier_failure; + void *context; sector_t target_begin; @@ -211,6 +216,7 @@ struct dm_region_hash *dm_region_hash_create( INIT_LIST_HEAD(&rh->quiesced_regions); INIT_LIST_HEAD(&rh->recovered_regions); INIT_LIST_HEAD(&rh->failed_recovered_regions); + rh->barrier_failure = 0; rh->region_pool = mempool_create_kmalloc_pool(MIN_REGIONS, sizeof(struct dm_region)); @@ -377,8 +383,6 @@ static void complete_resync_work(struct dm_region *reg, int success) /* dm_rh_mark_nosync * @ms * @bio - * @done - * @error * * The bio was written on some mirror(s) but failed on other mirror(s). * We can successfully endio the bio but should avoid the region being @@ -386,8 +390,7 @@ static void complete_resync_work(struct dm_region *reg, int success) * * This function is _not_ safe in interrupt context! */ -void dm_rh_mark_nosync(struct dm_region_hash *rh, - struct bio *bio, unsigned done, int error) +void dm_rh_mark_nosync(struct dm_region_hash *rh, struct bio *bio) { unsigned long flags; struct dm_dirty_log *log = rh->log; @@ -395,6 +398,11 @@ void dm_rh_mark_nosync(struct dm_region_hash *rh, region_t region = dm_rh_bio_to_region(rh, bio); int recovering = 0; + if (bio_empty_barrier(bio)) { + rh->barrier_failure = 1; + return; + } + /* We must inform the log that the sync count has changed. */ log->type->set_region_sync(log, region, 0); @@ -419,7 +427,6 @@ void dm_rh_mark_nosync(struct dm_region_hash *rh, BUG_ON(!list_empty(®->list)); spin_unlock_irqrestore(&rh->region_lock, flags); - bio_endio(bio, error); if (recovering) complete_resync_work(reg, 0); } @@ -515,8 +522,11 @@ void dm_rh_inc_pending(struct dm_region_hash *rh, struct bio_list *bios) { struct bio *bio; - for (bio = bios->head; bio; bio = bio->bi_next) + for (bio = bios->head; bio; bio = bio->bi_next) { + if (bio_empty_barrier(bio)) + continue; rh_inc(rh, dm_rh_bio_to_region(rh, bio)); + } } EXPORT_SYMBOL_GPL(dm_rh_inc_pending); @@ -544,7 +554,14 @@ void dm_rh_dec(struct dm_region_hash *rh, region_t region) */ /* do nothing for DM_RH_NOSYNC */ - if (reg->state == DM_RH_RECOVERING) { + if (unlikely(rh->barrier_failure)) { + /* + * If a write barrier failed some time ago, we + * don't know whether or not this write made it + * to the disk, so we must resync the device. + */ + reg->state = DM_RH_NOSYNC; + } else if (reg->state == DM_RH_RECOVERING) { list_add_tail(®->list, &rh->quiesced_regions); } else if (reg->state == DM_RH_DIRTY) { reg->state = DM_RH_CLEAN; diff --git a/drivers/md/dm-snap-persistent.c b/drivers/md/dm-snap-persistent.c index 0c746420c00..7d08879689a 100644 --- a/drivers/md/dm-snap-persistent.c +++ b/drivers/md/dm-snap-persistent.c @@ -55,6 +55,8 @@ */ #define SNAPSHOT_DISK_VERSION 1 +#define NUM_SNAPSHOT_HDR_CHUNKS 1 + struct disk_header { uint32_t magic; @@ -120,7 +122,22 @@ struct pstore { /* * The next free chunk for an exception. + * + * When creating exceptions, all the chunks here and above are + * free. It holds the next chunk to be allocated. On rare + * occasions (e.g. after a system crash) holes can be left in + * the exception store because chunks can be committed out of + * order. + * + * When merging exceptions, it does not necessarily mean all the + * chunks here and above are free. It holds the value it would + * have held if all chunks had been committed in order of + * allocation. Consequently the value may occasionally be + * slightly too low, but since it's only used for 'status' and + * it can never reach its minimum value too early this doesn't + * matter. 
*/ + chunk_t next_free; /* @@ -214,7 +231,7 @@ static int chunk_io(struct pstore *ps, void *area, chunk_t chunk, int rw, int metadata) { struct dm_io_region where = { - .bdev = ps->store->cow->bdev, + .bdev = dm_snap_cow(ps->store->snap)->bdev, .sector = ps->store->chunk_size * chunk, .count = ps->store->chunk_size, }; @@ -294,7 +311,8 @@ static int read_header(struct pstore *ps, int *new_snapshot) */ if (!ps->store->chunk_size) { ps->store->chunk_size = max(DM_CHUNK_SIZE_DEFAULT_SECTORS, - bdev_logical_block_size(ps->store->cow->bdev) >> 9); + bdev_logical_block_size(dm_snap_cow(ps->store->snap)-> + bdev) >> 9); ps->store->chunk_mask = ps->store->chunk_size - 1; ps->store->chunk_shift = ffs(ps->store->chunk_size) - 1; chunk_size_supplied = 0; @@ -408,6 +426,15 @@ static void write_exception(struct pstore *ps, e->new_chunk = cpu_to_le64(de->new_chunk); } +static void clear_exception(struct pstore *ps, uint32_t index) +{ + struct disk_exception *e = get_exception(ps, index); + + /* clear it */ + e->old_chunk = 0; + e->new_chunk = 0; +} + /* * Registers the exceptions that are present in the current area. * 'full' is filled in to indicate if the area has been @@ -489,11 +516,23 @@ static struct pstore *get_info(struct dm_exception_store *store) return (struct pstore *) store->context; } -static void persistent_fraction_full(struct dm_exception_store *store, - sector_t *numerator, sector_t *denominator) +static void persistent_usage(struct dm_exception_store *store, + sector_t *total_sectors, + sector_t *sectors_allocated, + sector_t *metadata_sectors) { - *numerator = get_info(store)->next_free * store->chunk_size; - *denominator = get_dev_size(store->cow->bdev); + struct pstore *ps = get_info(store); + + *sectors_allocated = ps->next_free * store->chunk_size; + *total_sectors = get_dev_size(dm_snap_cow(store->snap)->bdev); + + /* + * First chunk is the fixed header. + * Then there are (ps->current_area + 1) metadata chunks, each one + * separated from the next by ps->exceptions_per_area data chunks. + */ + *metadata_sectors = (ps->current_area + 1 + NUM_SNAPSHOT_HDR_CHUNKS) * + store->chunk_size; } static void persistent_dtr(struct dm_exception_store *store) @@ -552,44 +591,40 @@ static int persistent_read_metadata(struct dm_exception_store *store, ps->current_area = 0; zero_memory_area(ps); r = zero_disk_area(ps, 0); - if (r) { + if (r) DMWARN("zero_disk_area(0) failed"); - return r; - } - } else { - /* - * Sanity checks. - */ - if (ps->version != SNAPSHOT_DISK_VERSION) { - DMWARN("unable to handle snapshot disk version %d", - ps->version); - return -EINVAL; - } + return r; + } + /* + * Sanity checks. + */ + if (ps->version != SNAPSHOT_DISK_VERSION) { + DMWARN("unable to handle snapshot disk version %d", + ps->version); + return -EINVAL; + } - /* - * Metadata are valid, but snapshot is invalidated - */ - if (!ps->valid) - return 1; + /* + * Metadata are valid, but snapshot is invalidated + */ + if (!ps->valid) + return 1; - /* - * Read the metadata. - */ - r = read_exceptions(ps, callback, callback_context); - if (r) - return r; - } + /* + * Read the metadata. + */ + r = read_exceptions(ps, callback, callback_context); - return 0; + return r; } static int persistent_prepare_exception(struct dm_exception_store *store, - struct dm_snap_exception *e) + struct dm_exception *e) { struct pstore *ps = get_info(store); uint32_t stride; chunk_t next_free; - sector_t size = get_dev_size(store->cow->bdev); + sector_t size = get_dev_size(dm_snap_cow(store->snap)->bdev); /* Is there enough room ? 
*/ if (size < ((ps->next_free + 1) * store->chunk_size)) @@ -611,7 +646,7 @@ static int persistent_prepare_exception(struct dm_exception_store *store, } static void persistent_commit_exception(struct dm_exception_store *store, - struct dm_snap_exception *e, + struct dm_exception *e, void (*callback) (void *, int success), void *callback_context) { @@ -672,6 +707,85 @@ static void persistent_commit_exception(struct dm_exception_store *store, ps->callback_count = 0; } +static int persistent_prepare_merge(struct dm_exception_store *store, + chunk_t *last_old_chunk, + chunk_t *last_new_chunk) +{ + struct pstore *ps = get_info(store); + struct disk_exception de; + int nr_consecutive; + int r; + + /* + * When current area is empty, move back to preceding area. + */ + if (!ps->current_committed) { + /* + * Have we finished? + */ + if (!ps->current_area) + return 0; + + ps->current_area--; + r = area_io(ps, READ); + if (r < 0) + return r; + ps->current_committed = ps->exceptions_per_area; + } + + read_exception(ps, ps->current_committed - 1, &de); + *last_old_chunk = de.old_chunk; + *last_new_chunk = de.new_chunk; + + /* + * Find number of consecutive chunks within the current area, + * working backwards. + */ + for (nr_consecutive = 1; nr_consecutive < ps->current_committed; + nr_consecutive++) { + read_exception(ps, ps->current_committed - 1 - nr_consecutive, + &de); + if (de.old_chunk != *last_old_chunk - nr_consecutive || + de.new_chunk != *last_new_chunk - nr_consecutive) + break; + } + + return nr_consecutive; +} + +static int persistent_commit_merge(struct dm_exception_store *store, + int nr_merged) +{ + int r, i; + struct pstore *ps = get_info(store); + + BUG_ON(nr_merged > ps->current_committed); + + for (i = 0; i < nr_merged; i++) + clear_exception(ps, ps->current_committed - 1 - i); + + r = area_io(ps, WRITE); + if (r < 0) + return r; + + ps->current_committed -= nr_merged; + + /* + * At this stage, only persistent_usage() uses ps->next_free, so + * we make no attempt to keep ps->next_free strictly accurate + * as exceptions may have been committed out-of-order originally. + * Once a snapshot has become merging, we set it to the value it + * would have held had all the exceptions been committed in order. + * + * ps->current_area does not get reduced by prepare_merge() until + * after commit_merge() has removed the nr_merged previous exceptions. 
+ */ + ps->next_free = (area_location(ps, ps->current_area) - 1) + + (ps->current_committed + 1) + NUM_SNAPSHOT_HDR_CHUNKS; + + return 0; +} + static void persistent_drop_snapshot(struct dm_exception_store *store) { struct pstore *ps = get_info(store); @@ -697,7 +811,7 @@ static int persistent_ctr(struct dm_exception_store *store, ps->area = NULL; ps->zero_area = NULL; ps->header_area = NULL; - ps->next_free = 2; /* skipping the header and first area */ + ps->next_free = NUM_SNAPSHOT_HDR_CHUNKS + 1; /* header and 1st area */ ps->current_committed = 0; ps->callback_count = 0; @@ -726,8 +840,7 @@ static unsigned persistent_status(struct dm_exception_store *store, case STATUSTYPE_INFO: break; case STATUSTYPE_TABLE: - DMEMIT(" %s P %llu", store->cow->name, - (unsigned long long)store->chunk_size); + DMEMIT(" P %llu", (unsigned long long)store->chunk_size); } return sz; @@ -741,8 +854,10 @@ static struct dm_exception_store_type _persistent_type = { .read_metadata = persistent_read_metadata, .prepare_exception = persistent_prepare_exception, .commit_exception = persistent_commit_exception, + .prepare_merge = persistent_prepare_merge, + .commit_merge = persistent_commit_merge, .drop_snapshot = persistent_drop_snapshot, - .fraction_full = persistent_fraction_full, + .usage = persistent_usage, .status = persistent_status, }; @@ -754,8 +869,10 @@ static struct dm_exception_store_type _persistent_compat_type = { .read_metadata = persistent_read_metadata, .prepare_exception = persistent_prepare_exception, .commit_exception = persistent_commit_exception, + .prepare_merge = persistent_prepare_merge, + .commit_merge = persistent_commit_merge, .drop_snapshot = persistent_drop_snapshot, - .fraction_full = persistent_fraction_full, + .usage = persistent_usage, .status = persistent_status, }; diff --git a/drivers/md/dm-snap-transient.c b/drivers/md/dm-snap-transient.c index cde5aa558e6..a0898a66a2f 100644 --- a/drivers/md/dm-snap-transient.c +++ b/drivers/md/dm-snap-transient.c @@ -36,10 +36,10 @@ static int transient_read_metadata(struct dm_exception_store *store, } static int transient_prepare_exception(struct dm_exception_store *store, - struct dm_snap_exception *e) + struct dm_exception *e) { struct transient_c *tc = store->context; - sector_t size = get_dev_size(store->cow->bdev); + sector_t size = get_dev_size(dm_snap_cow(store->snap)->bdev); if (size < (tc->next_free + store->chunk_size)) return -1; @@ -51,7 +51,7 @@ static int transient_prepare_exception(struct dm_exception_store *store, } static void transient_commit_exception(struct dm_exception_store *store, - struct dm_snap_exception *e, + struct dm_exception *e, void (*callback) (void *, int success), void *callback_context) { @@ -59,11 +59,14 @@ static void transient_commit_exception(struct dm_exception_store *store, callback(callback_context, 1); } -static void transient_fraction_full(struct dm_exception_store *store, - sector_t *numerator, sector_t *denominator) +static void transient_usage(struct dm_exception_store *store, + sector_t *total_sectors, + sector_t *sectors_allocated, + sector_t *metadata_sectors) { - *numerator = ((struct transient_c *) store->context)->next_free; - *denominator = get_dev_size(store->cow->bdev); + *sectors_allocated = ((struct transient_c *) store->context)->next_free; + *total_sectors = get_dev_size(dm_snap_cow(store->snap)->bdev); + *metadata_sectors = 0; } static int transient_ctr(struct dm_exception_store *store, @@ -91,8 +94,7 @@ static unsigned transient_status(struct dm_exception_store *store, case 
STATUSTYPE_INFO: break; case STATUSTYPE_TABLE: - DMEMIT(" %s N %llu", store->cow->name, - (unsigned long long)store->chunk_size); + DMEMIT(" N %llu", (unsigned long long)store->chunk_size); } return sz; @@ -106,7 +108,7 @@ static struct dm_exception_store_type _transient_type = { .read_metadata = transient_read_metadata, .prepare_exception = transient_prepare_exception, .commit_exception = transient_commit_exception, - .fraction_full = transient_fraction_full, + .usage = transient_usage, .status = transient_status, }; @@ -118,7 +120,7 @@ static struct dm_exception_store_type _transient_compat_type = { .read_metadata = transient_read_metadata, .prepare_exception = transient_prepare_exception, .commit_exception = transient_commit_exception, - .fraction_full = transient_fraction_full, + .usage = transient_usage, .status = transient_status, }; diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c index 3a3ba46e6d4..ee8eb283650 100644 --- a/drivers/md/dm-snap.c +++ b/drivers/md/dm-snap.c @@ -25,6 +25,11 @@ #define DM_MSG_PREFIX "snapshots" +static const char dm_snapshot_merge_target_name[] = "snapshot-merge"; + +#define dm_target_is_snapshot_merge(ti) \ + ((ti)->type->name == dm_snapshot_merge_target_name) + /* * The percentage increment we will wake up users at */ @@ -49,7 +54,7 @@ #define DM_TRACKED_CHUNK_HASH(x) ((unsigned long)(x) & \ (DM_TRACKED_CHUNK_HASH_SIZE - 1)) -struct exception_table { +struct dm_exception_table { uint32_t hash_mask; unsigned hash_shift; struct list_head *table; @@ -59,22 +64,31 @@ struct dm_snapshot { struct rw_semaphore lock; struct dm_dev *origin; + struct dm_dev *cow; + + struct dm_target *ti; /* List of snapshots per Origin */ struct list_head list; - /* You can't use a snapshot if this is 0 (e.g. if full) */ + /* + * You can't use a snapshot if this is 0 (e.g. if full). + * A snapshot-merge target never clears this. + */ int valid; /* Origin writes don't trigger exceptions until this is set */ int active; + /* Whether or not owning mapped_device is suspended */ + int suspended; + mempool_t *pending_pool; atomic_t pending_exceptions_count; - struct exception_table pending; - struct exception_table complete; + struct dm_exception_table pending; + struct dm_exception_table complete; /* * pe_lock protects all pending_exception operations and access @@ -95,8 +109,51 @@ struct dm_snapshot { mempool_t *tracked_chunk_pool; spinlock_t tracked_chunk_lock; struct hlist_head tracked_chunk_hash[DM_TRACKED_CHUNK_HASH_SIZE]; + + /* + * The merge operation failed if this flag is set. + * Failure modes are handled as follows: + * - I/O error reading the header + * => don't load the target; abort. + * - Header does not have "valid" flag set + * => use the origin; forget about the snapshot. + * - I/O error when reading exceptions + * => don't load the target; abort. + * (We can't use the intermediate origin state.) + * - I/O error while merging + * => stop merging; set merge_failed; process I/O normally. + */ + int merge_failed; + + /* Wait for events based on state_bits */ + unsigned long state_bits; + + /* Range of chunks currently being merged. */ + chunk_t first_merging_chunk; + int num_merging_chunks; + + /* + * Incoming bios that overlap with chunks being merged must wait + * for them to be committed. + */ + struct bio_list bios_queued_during_merge; }; +/* + * state_bits: + * RUNNING_MERGE - Merge operation is in progress. + * SHUTDOWN_MERGE - Set to signal that merge needs to be stopped; + * cleared afterwards. 
+ */ +#define RUNNING_MERGE 0 +#define SHUTDOWN_MERGE 1 + +struct dm_dev *dm_snap_cow(struct dm_snapshot *s) +{ + return s->cow; +} +EXPORT_SYMBOL(dm_snap_cow); + static struct workqueue_struct *ksnapd; static void flush_queued_bios(struct work_struct *work); @@ -116,7 +173,7 @@ static int bdev_equal(struct block_device *lhs, struct block_device *rhs) } struct dm_snap_pending_exception { - struct dm_snap_exception e; + struct dm_exception e; /* * Origin buffers waiting for this to complete are held @@ -125,28 +182,6 @@ struct dm_snap_pending_exception { struct bio_list origin_bios; struct bio_list snapshot_bios; - /* - * Short-term queue of pending exceptions prior to submission. - */ - struct list_head list; - - /* - * The primary pending_exception is the one that holds - * the ref_count and the list of origin_bios for a - * group of pending_exceptions. It is always last to get freed. - * These fields get set up when writing to the origin. - */ - struct dm_snap_pending_exception *primary_pe; - - /* - * Number of pending_exceptions processing this chunk. - * When this drops to zero we must complete the origin bios. - * If incrementing or decrementing this, hold pe->snap->lock for - * the sibling concerned and not pe->primary_pe->snap->lock unless - * they are the same. - */ - atomic_t ref_count; - /* Pointer back to snapshot context */ struct dm_snapshot *snap; @@ -222,6 +257,16 @@ static int __chunk_is_tracked(struct dm_snapshot *s, chunk_t chunk) } /* + * This conflicting I/O is extremely improbable in the caller, + * so msleep(1) is sufficient and there is no need for a wait queue. + */ +static void __check_for_conflicting_io(struct dm_snapshot *s, chunk_t chunk) +{ + while (__chunk_is_tracked(s, chunk)) + msleep(1); +} + +/* * One of these per registered origin, held in the snapshot_origins hash */ struct origin { @@ -243,6 +288,10 @@ struct origin { static struct list_head *_origins; static struct rw_semaphore _origins_lock; +static DECLARE_WAIT_QUEUE_HEAD(_pending_exceptions_done); +static DEFINE_SPINLOCK(_pending_exceptions_done_spinlock); +static uint64_t _pending_exceptions_done_count; + static int init_origin_hash(void) { int i; @@ -291,22 +340,144 @@ static void __insert_origin(struct origin *o) } /* + * _origins_lock must be held when calling this function. + * Returns number of snapshots registered using the supplied cow device, plus: + * snap_src - a snapshot suitable for use as a source of exception handover + * snap_dest - a snapshot capable of receiving exception handover. + * snap_merge - an existing snapshot-merge target linked to the same origin. + * There can be at most one snapshot-merge target. The parameter is optional. + * + * Possible return values and states of snap_src and snap_dest. 
+ * 0: NULL, NULL - first new snapshot + * 1: snap_src, NULL - normal snapshot + * 2: snap_src, snap_dest - waiting for handover + * 2: snap_src, NULL - handed over, waiting for old to be deleted + * 1: NULL, snap_dest - source got destroyed without handover + */ +static int __find_snapshots_sharing_cow(struct dm_snapshot *snap, + struct dm_snapshot **snap_src, + struct dm_snapshot **snap_dest, + struct dm_snapshot **snap_merge) +{ + struct dm_snapshot *s; + struct origin *o; + int count = 0; + int active; + + o = __lookup_origin(snap->origin->bdev); + if (!o) + goto out; + + list_for_each_entry(s, &o->snapshots, list) { + if (dm_target_is_snapshot_merge(s->ti) && snap_merge) + *snap_merge = s; + if (!bdev_equal(s->cow->bdev, snap->cow->bdev)) + continue; + + down_read(&s->lock); + active = s->active; + up_read(&s->lock); + + if (active) { + if (snap_src) + *snap_src = s; + } else if (snap_dest) + *snap_dest = s; + + count++; + } + +out: + return count; +} + +/* + * On success, returns 1 if this snapshot is a handover destination, + * otherwise returns 0. + */ +static int __validate_exception_handover(struct dm_snapshot *snap) +{ + struct dm_snapshot *snap_src = NULL, *snap_dest = NULL; + struct dm_snapshot *snap_merge = NULL; + + /* Does snapshot need exceptions handed over to it? */ + if ((__find_snapshots_sharing_cow(snap, &snap_src, &snap_dest, + &snap_merge) == 2) || + snap_dest) { + snap->ti->error = "Snapshot cow pairing for exception " + "table handover failed"; + return -EINVAL; + } + + /* + * If no snap_src was found, snap cannot become a handover + * destination. + */ + if (!snap_src) + return 0; + + /* + * Non-snapshot-merge handover? + */ + if (!dm_target_is_snapshot_merge(snap->ti)) + return 1; + + /* + * Do not allow more than one merging snapshot. + */ + if (snap_merge) { + snap->ti->error = "A snapshot is already merging."; + return -EINVAL; + } + + if (!snap_src->store->type->prepare_merge || + !snap_src->store->type->commit_merge) { + snap->ti->error = "Snapshot exception store does not " + "support snapshot-merge."; + return -EINVAL; + } + + return 1; +} + +static void __insert_snapshot(struct origin *o, struct dm_snapshot *s) +{ + struct dm_snapshot *l; + + /* Sort the list according to chunk size, largest-first smallest-last */ + list_for_each_entry(l, &o->snapshots, list) + if (l->store->chunk_size < s->store->chunk_size) + break; + list_add_tail(&s->list, &l->list); +} + +/* * Make a note of the snapshot and its origin so we can look it * up when the origin has a write on it. + * + * Also validate snapshot exception store handovers. + * On success, returns 1 if this registration is a handover destination, + * otherwise returns 0. 
*/ static int register_snapshot(struct dm_snapshot *snap) { - struct dm_snapshot *l; - struct origin *o, *new_o; + struct origin *o, *new_o = NULL; struct block_device *bdev = snap->origin->bdev; + int r = 0; new_o = kmalloc(sizeof(*new_o), GFP_KERNEL); if (!new_o) return -ENOMEM; down_write(&_origins_lock); - o = __lookup_origin(bdev); + r = __validate_exception_handover(snap); + if (r < 0) { + kfree(new_o); + goto out; + } + + o = __lookup_origin(bdev); if (o) kfree(new_o); else { @@ -320,14 +491,27 @@ static int register_snapshot(struct dm_snapshot *snap) __insert_origin(o); } - /* Sort the list according to chunk size, largest-first smallest-last */ - list_for_each_entry(l, &o->snapshots, list) - if (l->store->chunk_size < snap->store->chunk_size) - break; - list_add_tail(&snap->list, &l->list); + __insert_snapshot(o, snap); + +out: + up_write(&_origins_lock); + + return r; +} + +/* + * Move snapshot to correct place in list according to chunk size. + */ +static void reregister_snapshot(struct dm_snapshot *s) +{ + struct block_device *bdev = s->origin->bdev; + + down_write(&_origins_lock); + + list_del(&s->list); + __insert_snapshot(__lookup_origin(bdev), s); up_write(&_origins_lock); - return 0; } static void unregister_snapshot(struct dm_snapshot *s) @@ -338,7 +522,7 @@ static void unregister_snapshot(struct dm_snapshot *s) o = __lookup_origin(s->origin->bdev); list_del(&s->list); - if (list_empty(&o->snapshots)) { + if (o && list_empty(&o->snapshots)) { list_del(&o->hash_list); kfree(o); } @@ -351,8 +535,8 @@ static void unregister_snapshot(struct dm_snapshot *s) * The lowest hash_shift bits of the chunk number are ignored, allowing * some consecutive chunks to be grouped together. */ -static int init_exception_table(struct exception_table *et, uint32_t size, - unsigned hash_shift) +static int dm_exception_table_init(struct dm_exception_table *et, + uint32_t size, unsigned hash_shift) { unsigned int i; @@ -368,10 +552,11 @@ static int init_exception_table(struct exception_table *et, uint32_t size, return 0; } -static void exit_exception_table(struct exception_table *et, struct kmem_cache *mem) +static void dm_exception_table_exit(struct dm_exception_table *et, + struct kmem_cache *mem) { struct list_head *slot; - struct dm_snap_exception *ex, *next; + struct dm_exception *ex, *next; int i, size; size = et->hash_mask + 1; @@ -385,19 +570,12 @@ static void exit_exception_table(struct exception_table *et, struct kmem_cache * vfree(et->table); } -static uint32_t exception_hash(struct exception_table *et, chunk_t chunk) +static uint32_t exception_hash(struct dm_exception_table *et, chunk_t chunk) { return (chunk >> et->hash_shift) & et->hash_mask; } -static void insert_exception(struct exception_table *eh, - struct dm_snap_exception *e) -{ - struct list_head *l = &eh->table[exception_hash(eh, e->old_chunk)]; - list_add(&e->hash_list, l); -} - -static void remove_exception(struct dm_snap_exception *e) +static void dm_remove_exception(struct dm_exception *e) { list_del(&e->hash_list); } @@ -406,11 +584,11 @@ static void remove_exception(struct dm_snap_exception *e) * Return the exception data for a sector, or NULL if not * remapped. 
*/ -static struct dm_snap_exception *lookup_exception(struct exception_table *et, - chunk_t chunk) +static struct dm_exception *dm_lookup_exception(struct dm_exception_table *et, + chunk_t chunk) { struct list_head *slot; - struct dm_snap_exception *e; + struct dm_exception *e; slot = &et->table[exception_hash(et, chunk)]; list_for_each_entry (e, slot, hash_list) @@ -421,9 +599,9 @@ static struct dm_snap_exception *lookup_exception(struct exception_table *et, return NULL; } -static struct dm_snap_exception *alloc_exception(void) +static struct dm_exception *alloc_completed_exception(void) { - struct dm_snap_exception *e; + struct dm_exception *e; e = kmem_cache_alloc(exception_cache, GFP_NOIO); if (!e) @@ -432,7 +610,7 @@ static struct dm_snap_exception *alloc_exception(void) return e; } -static void free_exception(struct dm_snap_exception *e) +static void free_completed_exception(struct dm_exception *e) { kmem_cache_free(exception_cache, e); } @@ -457,12 +635,11 @@ static void free_pending_exception(struct dm_snap_pending_exception *pe) atomic_dec(&s->pending_exceptions_count); } -static void insert_completed_exception(struct dm_snapshot *s, - struct dm_snap_exception *new_e) +static void dm_insert_exception(struct dm_exception_table *eh, + struct dm_exception *new_e) { - struct exception_table *eh = &s->complete; struct list_head *l; - struct dm_snap_exception *e = NULL; + struct dm_exception *e = NULL; l = &eh->table[exception_hash(eh, new_e->old_chunk)]; @@ -478,7 +655,7 @@ static void insert_completed_exception(struct dm_snapshot *s, new_e->new_chunk == (dm_chunk_number(e->new_chunk) + dm_consecutive_chunk_count(e) + 1)) { dm_consecutive_chunk_count_inc(e); - free_exception(new_e); + free_completed_exception(new_e); return; } @@ -488,7 +665,7 @@ static void insert_completed_exception(struct dm_snapshot *s, dm_consecutive_chunk_count_inc(e); e->old_chunk--; e->new_chunk--; - free_exception(new_e); + free_completed_exception(new_e); return; } @@ -507,9 +684,9 @@ out: static int dm_add_exception(void *context, chunk_t old, chunk_t new) { struct dm_snapshot *s = context; - struct dm_snap_exception *e; + struct dm_exception *e; - e = alloc_exception(); + e = alloc_completed_exception(); if (!e) return -ENOMEM; @@ -518,11 +695,30 @@ static int dm_add_exception(void *context, chunk_t old, chunk_t new) /* Consecutive_count is implicitly initialised to zero */ e->new_chunk = new; - insert_completed_exception(s, e); + dm_insert_exception(&s->complete, e); return 0; } +#define min_not_zero(l, r) (((l) == 0) ? (r) : (((r) == 0) ? (l) : min(l, r))) + +/* + * Return a minimum chunk size of all snapshots that have the specified origin. + * Return zero if the origin has no snapshots. + */ +static sector_t __minimum_chunk_size(struct origin *o) +{ + struct dm_snapshot *snap; + unsigned chunk_size = 0; + + if (o) + list_for_each_entry(snap, &o->snapshots, list) + chunk_size = min_not_zero(chunk_size, + snap->store->chunk_size); + + return chunk_size; +} + /* * Hard coded magic. */ @@ -546,16 +742,18 @@ static int init_hash_tables(struct dm_snapshot *s) * Calculate based on the size of the original volume or * the COW volume... 
*/ - cow_dev_size = get_dev_size(s->store->cow->bdev); + cow_dev_size = get_dev_size(s->cow->bdev); origin_dev_size = get_dev_size(s->origin->bdev); max_buckets = calc_max_buckets(); hash_size = min(origin_dev_size, cow_dev_size) >> s->store->chunk_shift; hash_size = min(hash_size, max_buckets); + if (hash_size < 64) + hash_size = 64; hash_size = rounddown_pow_of_two(hash_size); - if (init_exception_table(&s->complete, hash_size, - DM_CHUNK_CONSECUTIVE_BITS)) + if (dm_exception_table_init(&s->complete, hash_size, + DM_CHUNK_CONSECUTIVE_BITS)) return -ENOMEM; /* @@ -566,14 +764,284 @@ static int init_hash_tables(struct dm_snapshot *s) if (hash_size < 64) hash_size = 64; - if (init_exception_table(&s->pending, hash_size, 0)) { - exit_exception_table(&s->complete, exception_cache); + if (dm_exception_table_init(&s->pending, hash_size, 0)) { + dm_exception_table_exit(&s->complete, exception_cache); return -ENOMEM; } return 0; } +static void merge_shutdown(struct dm_snapshot *s) +{ + clear_bit_unlock(RUNNING_MERGE, &s->state_bits); + smp_mb__after_clear_bit(); + wake_up_bit(&s->state_bits, RUNNING_MERGE); +} + +static struct bio *__release_queued_bios_after_merge(struct dm_snapshot *s) +{ + s->first_merging_chunk = 0; + s->num_merging_chunks = 0; + + return bio_list_get(&s->bios_queued_during_merge); +} + +/* + * Remove one chunk from the index of completed exceptions. + */ +static int __remove_single_exception_chunk(struct dm_snapshot *s, + chunk_t old_chunk) +{ + struct dm_exception *e; + + e = dm_lookup_exception(&s->complete, old_chunk); + if (!e) { + DMERR("Corruption detected: exception for block %llu is " + "on disk but not in memory", + (unsigned long long)old_chunk); + return -EINVAL; + } + + /* + * If this is the only chunk using this exception, remove exception. + */ + if (!dm_consecutive_chunk_count(e)) { + dm_remove_exception(e); + free_completed_exception(e); + return 0; + } + + /* + * The chunk may be either at the beginning or the end of a + * group of consecutive chunks - never in the middle. We are + * removing chunks in the opposite order to that in which they + * were added, so this should always be true. + * Decrement the consecutive chunk counter and adjust the + * starting point if necessary. + */ + if (old_chunk == e->old_chunk) { + e->old_chunk++; + e->new_chunk++; + } else if (old_chunk != e->old_chunk + + dm_consecutive_chunk_count(e)) { + DMERR("Attempt to merge block %llu from the " + "middle of a chunk range [%llu - %llu]", + (unsigned long long)old_chunk, + (unsigned long long)e->old_chunk, + (unsigned long long) + e->old_chunk + dm_consecutive_chunk_count(e)); + return -EINVAL; + } + + dm_consecutive_chunk_count_dec(e); + + return 0; +} + +static void flush_bios(struct bio *bio); + +static int remove_single_exception_chunk(struct dm_snapshot *s) +{ + struct bio *b = NULL; + int r; + chunk_t old_chunk = s->first_merging_chunk + s->num_merging_chunks - 1; + + down_write(&s->lock); + + /* + * Process chunks (and associated exceptions) in reverse order + * so that dm_consecutive_chunk_count_dec() accounting works. 
+ */ + do { + r = __remove_single_exception_chunk(s, old_chunk); + if (r) + goto out; + } while (old_chunk-- > s->first_merging_chunk); + + b = __release_queued_bios_after_merge(s); + +out: + up_write(&s->lock); + if (b) + flush_bios(b); + + return r; +} + +static int origin_write_extent(struct dm_snapshot *merging_snap, + sector_t sector, unsigned chunk_size); + +static void merge_callback(int read_err, unsigned long write_err, + void *context); + +static uint64_t read_pending_exceptions_done_count(void) +{ + uint64_t pending_exceptions_done; + + spin_lock(&_pending_exceptions_done_spinlock); + pending_exceptions_done = _pending_exceptions_done_count; + spin_unlock(&_pending_exceptions_done_spinlock); + + return pending_exceptions_done; +} + +static void increment_pending_exceptions_done_count(void) +{ + spin_lock(&_pending_exceptions_done_spinlock); + _pending_exceptions_done_count++; + spin_unlock(&_pending_exceptions_done_spinlock); + + wake_up_all(&_pending_exceptions_done); +} + +static void snapshot_merge_next_chunks(struct dm_snapshot *s) +{ + int i, linear_chunks; + chunk_t old_chunk, new_chunk; + struct dm_io_region src, dest; + sector_t io_size; + uint64_t previous_count; + + BUG_ON(!test_bit(RUNNING_MERGE, &s->state_bits)); + if (unlikely(test_bit(SHUTDOWN_MERGE, &s->state_bits))) + goto shut; + + /* + * valid flag never changes during merge, so no lock required. + */ + if (!s->valid) { + DMERR("Snapshot is invalid: can't merge"); + goto shut; + } + + linear_chunks = s->store->type->prepare_merge(s->store, &old_chunk, + &new_chunk); + if (linear_chunks <= 0) { + if (linear_chunks < 0) { + DMERR("Read error in exception store: " + "shutting down merge"); + down_write(&s->lock); + s->merge_failed = 1; + up_write(&s->lock); + } + goto shut; + } + + /* Adjust old_chunk and new_chunk to reflect start of linear region */ + old_chunk = old_chunk + 1 - linear_chunks; + new_chunk = new_chunk + 1 - linear_chunks; + + /* + * Use one (potentially large) I/O to copy all 'linear_chunks' + * from the exception store to the origin + */ + io_size = linear_chunks * s->store->chunk_size; + + dest.bdev = s->origin->bdev; + dest.sector = chunk_to_sector(s->store, old_chunk); + dest.count = min(io_size, get_dev_size(dest.bdev) - dest.sector); + + src.bdev = s->cow->bdev; + src.sector = chunk_to_sector(s->store, new_chunk); + src.count = dest.count; + + /* + * Reallocate any exceptions needed in other snapshots then + * wait for the pending exceptions to complete. + * Each time any pending exception (globally on the system) + * completes we are woken and repeat the process to find out + * if we can proceed. While this may not seem a particularly + * efficient algorithm, it is not expected to have any + * significant impact on performance. + */ + previous_count = read_pending_exceptions_done_count(); + while (origin_write_extent(s, dest.sector, io_size)) { + wait_event(_pending_exceptions_done, + (read_pending_exceptions_done_count() != + previous_count)); + /* Retry after the wait, until all exceptions are done. 
*/ + previous_count = read_pending_exceptions_done_count(); + } + + down_write(&s->lock); + s->first_merging_chunk = old_chunk; + s->num_merging_chunks = linear_chunks; + up_write(&s->lock); + + /* Wait until writes to all 'linear_chunks' drain */ + for (i = 0; i < linear_chunks; i++) + __check_for_conflicting_io(s, old_chunk + i); + + dm_kcopyd_copy(s->kcopyd_client, &src, 1, &dest, 0, merge_callback, s); + return; + +shut: + merge_shutdown(s); +} + +static void error_bios(struct bio *bio); + +static void merge_callback(int read_err, unsigned long write_err, void *context) +{ + struct dm_snapshot *s = context; + struct bio *b = NULL; + + if (read_err || write_err) { + if (read_err) + DMERR("Read error: shutting down merge."); + else + DMERR("Write error: shutting down merge."); + goto shut; + } + + if (s->store->type->commit_merge(s->store, + s->num_merging_chunks) < 0) { + DMERR("Write error in exception store: shutting down merge"); + goto shut; + } + + if (remove_single_exception_chunk(s) < 0) + goto shut; + + snapshot_merge_next_chunks(s); + + return; + +shut: + down_write(&s->lock); + s->merge_failed = 1; + b = __release_queued_bios_after_merge(s); + up_write(&s->lock); + error_bios(b); + + merge_shutdown(s); +} + +static void start_merge(struct dm_snapshot *s) +{ + if (!test_and_set_bit(RUNNING_MERGE, &s->state_bits)) + snapshot_merge_next_chunks(s); +} + +static int wait_schedule(void *ptr) +{ + schedule(); + + return 0; +} + +/* + * Stop the merging process and wait until it finishes. + */ +static void stop_merge(struct dm_snapshot *s) +{ + set_bit(SHUTDOWN_MERGE, &s->state_bits); + wait_on_bit(&s->state_bits, RUNNING_MERGE, wait_schedule, + TASK_UNINTERRUPTIBLE); + clear_bit(SHUTDOWN_MERGE, &s->state_bits); +} + /* * Construct a snapshot mapping: <origin_dev> <COW-dev> <p/n> <chunk-size> */ @@ -582,50 +1050,73 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv) struct dm_snapshot *s; int i; int r = -EINVAL; - char *origin_path; - struct dm_exception_store *store; - unsigned args_used; + char *origin_path, *cow_path; + unsigned args_used, num_flush_requests = 1; + fmode_t origin_mode = FMODE_READ; if (argc != 4) { ti->error = "requires exactly 4 arguments"; r = -EINVAL; - goto bad_args; + goto bad; + } + + if (dm_target_is_snapshot_merge(ti)) { + num_flush_requests = 2; + origin_mode = FMODE_WRITE; } origin_path = argv[0]; argv++; argc--; - r = dm_exception_store_create(ti, argc, argv, &args_used, &store); + s = kmalloc(sizeof(*s), GFP_KERNEL); + if (!s) { + ti->error = "Cannot allocate snapshot context private " + "structure"; + r = -ENOMEM; + goto bad; + } + + cow_path = argv[0]; + argv++; + argc--; + + r = dm_get_device(ti, cow_path, 0, 0, + FMODE_READ | FMODE_WRITE, &s->cow); + if (r) { + ti->error = "Cannot get COW device"; + goto bad_cow; + } + + r = dm_exception_store_create(ti, argc, argv, s, &args_used, &s->store); if (r) { ti->error = "Couldn't create exception store"; r = -EINVAL; - goto bad_args; + goto bad_store; } argv += args_used; argc -= args_used; - s = kmalloc(sizeof(*s), GFP_KERNEL); - if (!s) { - ti->error = "Cannot allocate snapshot context private " - "structure"; - r = -ENOMEM; - goto bad_snap; - } - - r = dm_get_device(ti, origin_path, 0, ti->len, FMODE_READ, &s->origin); + r = dm_get_device(ti, origin_path, 0, ti->len, origin_mode, &s->origin); if (r) { ti->error = "Cannot get origin device"; goto bad_origin; } - s->store = store; + s->ti = ti; s->valid = 1; s->active = 0; + s->suspended = 0; 
atomic_set(&s->pending_exceptions_count, 0); init_rwsem(&s->lock); + INIT_LIST_HEAD(&s->list); spin_lock_init(&s->pe_lock); + s->state_bits = 0; + s->merge_failed = 0; + s->first_merging_chunk = 0; + s->num_merging_chunks = 0; + bio_list_init(&s->bios_queued_during_merge); /* Allocate hash table for COW data */ if (init_hash_tables(s)) { @@ -659,39 +1150,55 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv) spin_lock_init(&s->tracked_chunk_lock); - /* Metadata must only be loaded into one table at once */ + bio_list_init(&s->queued_bios); + INIT_WORK(&s->queued_bios_work, flush_queued_bios); + + ti->private = s; + ti->num_flush_requests = num_flush_requests; + + /* Add snapshot to the list of snapshots for this origin */ + /* Exceptions aren't triggered till snapshot_resume() is called */ + r = register_snapshot(s); + if (r == -ENOMEM) { + ti->error = "Snapshot origin struct allocation failed"; + goto bad_load_and_register; + } else if (r < 0) { + /* invalid handover, register_snapshot has set ti->error */ + goto bad_load_and_register; + } + + /* + * Metadata must only be loaded into one table at once, so skip this + * if metadata will be handed over during resume. + * Chunk size will be set during the handover - set it to zero to + * ensure it's ignored. + */ + if (r > 0) { + s->store->chunk_size = 0; + return 0; + } + r = s->store->type->read_metadata(s->store, dm_add_exception, (void *)s); if (r < 0) { ti->error = "Failed to read snapshot metadata"; - goto bad_load_and_register; + goto bad_read_metadata; } else if (r > 0) { s->valid = 0; DMWARN("Snapshot is marked invalid."); } - bio_list_init(&s->queued_bios); - INIT_WORK(&s->queued_bios_work, flush_queued_bios); - if (!s->store->chunk_size) { ti->error = "Chunk size not set"; - goto bad_load_and_register; - } - - /* Add snapshot to the list of snapshots for this origin */ - /* Exceptions aren't triggered till snapshot_resume() is called */ - if (register_snapshot(s)) { - r = -EINVAL; - ti->error = "Cannot register snapshot origin"; - goto bad_load_and_register; + goto bad_read_metadata; } - - ti->private = s; ti->split_io = s->store->chunk_size; - ti->num_flush_requests = 1; return 0; +bad_read_metadata: + unregister_snapshot(s); + bad_load_and_register: mempool_destroy(s->tracked_chunk_pool); @@ -702,19 +1209,22 @@ bad_pending_pool: dm_kcopyd_client_destroy(s->kcopyd_client); bad_kcopyd: - exit_exception_table(&s->pending, pending_cache); - exit_exception_table(&s->complete, exception_cache); + dm_exception_table_exit(&s->pending, pending_cache); + dm_exception_table_exit(&s->complete, exception_cache); bad_hash_tables: dm_put_device(ti, s->origin); bad_origin: - kfree(s); + dm_exception_store_destroy(s->store); -bad_snap: - dm_exception_store_destroy(store); +bad_store: + dm_put_device(ti, s->cow); + +bad_cow: + kfree(s); -bad_args: +bad: return r; } @@ -723,8 +1233,39 @@ static void __free_exceptions(struct dm_snapshot *s) dm_kcopyd_client_destroy(s->kcopyd_client); s->kcopyd_client = NULL; - exit_exception_table(&s->pending, pending_cache); - exit_exception_table(&s->complete, exception_cache); + dm_exception_table_exit(&s->pending, pending_cache); + dm_exception_table_exit(&s->complete, exception_cache); +} + +static void __handover_exceptions(struct dm_snapshot *snap_src, + struct dm_snapshot *snap_dest) +{ + union { + struct dm_exception_table table_swap; + struct dm_exception_store *store_swap; + } u; + + /* + * Swap all snapshot context information between the two instances. 
+ */ + u.table_swap = snap_dest->complete; + snap_dest->complete = snap_src->complete; + snap_src->complete = u.table_swap; + + u.store_swap = snap_dest->store; + snap_dest->store = snap_src->store; + snap_src->store = u.store_swap; + + snap_dest->store->snap = snap_dest; + snap_src->store->snap = snap_src; + + snap_dest->ti->split_io = snap_dest->store->chunk_size; + snap_dest->valid = snap_src->valid; + + /* + * Set source invalid to ensure it receives no further I/O. + */ + snap_src->valid = 0; } static void snapshot_dtr(struct dm_target *ti) @@ -733,9 +1274,24 @@ static void snapshot_dtr(struct dm_target *ti) int i; #endif struct dm_snapshot *s = ti->private; + struct dm_snapshot *snap_src = NULL, *snap_dest = NULL; flush_workqueue(ksnapd); + down_read(&_origins_lock); + /* Check whether exception handover must be cancelled */ + (void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest, NULL); + if (snap_src && snap_dest && (s == snap_src)) { + down_write(&snap_dest->lock); + snap_dest->valid = 0; + up_write(&snap_dest->lock); + DMERR("Cancelling snapshot handover."); + } + up_read(&_origins_lock); + + if (dm_target_is_snapshot_merge(ti)) + stop_merge(s); + /* Prevent further origin writes from using this snapshot. */ /* After this returns there can be no new kcopyd jobs. */ unregister_snapshot(s); @@ -763,6 +1319,8 @@ static void snapshot_dtr(struct dm_target *ti) dm_exception_store_destroy(s->store); + dm_put_device(ti, s->cow); + kfree(s); } @@ -795,6 +1353,26 @@ static void flush_queued_bios(struct work_struct *work) flush_bios(queued_bios); } +static int do_origin(struct dm_dev *origin, struct bio *bio); + +/* + * Flush a list of buffers. + */ +static void retry_origin_bios(struct dm_snapshot *s, struct bio *bio) +{ + struct bio *n; + int r; + + while (bio) { + n = bio->bi_next; + bio->bi_next = NULL; + r = do_origin(s->origin, bio); + if (r == DM_MAPIO_REMAPPED) + generic_make_request(bio); + bio = n; + } +} + /* * Error a list of buffers. */ @@ -825,45 +1403,12 @@ static void __invalidate_snapshot(struct dm_snapshot *s, int err) s->valid = 0; - dm_table_event(s->store->ti->table); -} - -static void get_pending_exception(struct dm_snap_pending_exception *pe) -{ - atomic_inc(&pe->ref_count); -} - -static struct bio *put_pending_exception(struct dm_snap_pending_exception *pe) -{ - struct dm_snap_pending_exception *primary_pe; - struct bio *origin_bios = NULL; - - primary_pe = pe->primary_pe; - - /* - * If this pe is involved in a write to the origin and - * it is the last sibling to complete then release - * the bios for the original write to the origin. - */ - if (primary_pe && - atomic_dec_and_test(&primary_pe->ref_count)) { - origin_bios = bio_list_get(&primary_pe->origin_bios); - free_pending_exception(primary_pe); - } - - /* - * Free the pe if it's not linked to an origin write or if - * it's not itself a primary pe. 
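__handover_exceptions() above moves everything the old snapshot instance owns, its completed-exception table and its exception store, to the new instance by swapping the two fields through a scratch union, re-pointing the store's snap back-reference, copying the chunk size into split_io and finally invalidating the source. The small stand-alone model below reproduces the shape of that handover with simplified structs invented for the example; only the store is swapped, using a plain temporary pointer instead of the union.

#include <assert.h>
#include <stdio.h>

struct store;			/* simplified stand-ins, not the dm types */
struct snap {
	int valid;
	struct store *store;
};
struct store {
	struct snap *snap;	/* back-pointer that must be fixed after a swap */
	int chunk_size;
};

static void handover(struct snap *src, struct snap *dest)
{
	struct store *tmp;

	/* Swap ownership of the store (the real code also swaps the
	 * completed-exception table the same way). */
	tmp = dest->store;
	dest->store = src->store;
	src->store = tmp;

	/* Re-point the back-references at their new owners. */
	dest->store->snap = dest;
	src->store->snap = src;

	dest->valid = src->valid;
	src->valid = 0;		/* the source must receive no further I/O */
}

int main(void)
{
	struct store old_store = { .chunk_size = 16 };
	struct store new_store = { .chunk_size = 0 };
	struct snap src = { .valid = 1, .store = &old_store };
	struct snap dest = { .valid = 0, .store = &new_store };

	old_store.snap = &src;
	new_store.snap = &dest;

	handover(&src, &dest);

	assert(dest.store == &old_store && dest.store->snap == &dest);
	assert(!src.valid && dest.valid);
	printf("dest now owns the store with chunk_size %d\n",
	       dest.store->chunk_size);
	return 0;
}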
- */ - if (!primary_pe || primary_pe != pe) - free_pending_exception(pe); - - return origin_bios; + dm_table_event(s->ti->table); } static void pending_complete(struct dm_snap_pending_exception *pe, int success) { - struct dm_snap_exception *e; + struct dm_exception *e; struct dm_snapshot *s = pe->snap; struct bio *origin_bios = NULL; struct bio *snapshot_bios = NULL; @@ -877,7 +1422,7 @@ static void pending_complete(struct dm_snap_pending_exception *pe, int success) goto out; } - e = alloc_exception(); + e = alloc_completed_exception(); if (!e) { down_write(&s->lock); __invalidate_snapshot(s, -ENOMEM); @@ -888,28 +1433,27 @@ static void pending_complete(struct dm_snap_pending_exception *pe, int success) down_write(&s->lock); if (!s->valid) { - free_exception(e); + free_completed_exception(e); error = 1; goto out; } - /* - * Check for conflicting reads. This is extremely improbable, - * so msleep(1) is sufficient and there is no need for a wait queue. - */ - while (__chunk_is_tracked(s, pe->e.old_chunk)) - msleep(1); + /* Check for conflicting reads */ + __check_for_conflicting_io(s, pe->e.old_chunk); /* * Add a proper exception, and remove the * in-flight exception from the list. */ - insert_completed_exception(s, e); + dm_insert_exception(&s->complete, e); out: - remove_exception(&pe->e); + dm_remove_exception(&pe->e); snapshot_bios = bio_list_get(&pe->snapshot_bios); - origin_bios = put_pending_exception(pe); + origin_bios = bio_list_get(&pe->origin_bios); + free_pending_exception(pe); + + increment_pending_exceptions_done_count(); up_write(&s->lock); @@ -919,7 +1463,7 @@ static void pending_complete(struct dm_snap_pending_exception *pe, int success) else flush_bios(snapshot_bios); - flush_bios(origin_bios); + retry_origin_bios(s, origin_bios); } static void commit_callback(void *context, int success) @@ -963,7 +1507,7 @@ static void start_copy(struct dm_snap_pending_exception *pe) src.sector = chunk_to_sector(s->store, pe->e.old_chunk); src.count = min((sector_t)s->store->chunk_size, dev_size - src.sector); - dest.bdev = s->store->cow->bdev; + dest.bdev = s->cow->bdev; dest.sector = chunk_to_sector(s->store, pe->e.new_chunk); dest.count = src.count; @@ -975,7 +1519,7 @@ static void start_copy(struct dm_snap_pending_exception *pe) static struct dm_snap_pending_exception * __lookup_pending_exception(struct dm_snapshot *s, chunk_t chunk) { - struct dm_snap_exception *e = lookup_exception(&s->pending, chunk); + struct dm_exception *e = dm_lookup_exception(&s->pending, chunk); if (!e) return NULL; @@ -1006,8 +1550,6 @@ __find_pending_exception(struct dm_snapshot *s, pe->e.old_chunk = chunk; bio_list_init(&pe->origin_bios); bio_list_init(&pe->snapshot_bios); - pe->primary_pe = NULL; - atomic_set(&pe->ref_count, 0); pe->started = 0; if (s->store->type->prepare_exception(s->store, &pe->e)) { @@ -1015,16 +1557,15 @@ __find_pending_exception(struct dm_snapshot *s, return NULL; } - get_pending_exception(pe); - insert_exception(&s->pending, &pe->e); + dm_insert_exception(&s->pending, &pe->e); return pe; } -static void remap_exception(struct dm_snapshot *s, struct dm_snap_exception *e, +static void remap_exception(struct dm_snapshot *s, struct dm_exception *e, struct bio *bio, chunk_t chunk) { - bio->bi_bdev = s->store->cow->bdev; + bio->bi_bdev = s->cow->bdev; bio->bi_sector = chunk_to_sector(s->store, dm_chunk_number(e->new_chunk) + (chunk - e->old_chunk)) + @@ -1035,14 +1576,14 @@ static void remap_exception(struct dm_snapshot *s, struct dm_snap_exception *e, static int snapshot_map(struct 
dm_target *ti, struct bio *bio, union map_info *map_context) { - struct dm_snap_exception *e; + struct dm_exception *e; struct dm_snapshot *s = ti->private; int r = DM_MAPIO_REMAPPED; chunk_t chunk; struct dm_snap_pending_exception *pe = NULL; if (unlikely(bio_empty_barrier(bio))) { - bio->bi_bdev = s->store->cow->bdev; + bio->bi_bdev = s->cow->bdev; return DM_MAPIO_REMAPPED; } @@ -1063,7 +1604,7 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio, } /* If the block is already remapped - use that, else remap it */ - e = lookup_exception(&s->complete, chunk); + e = dm_lookup_exception(&s->complete, chunk); if (e) { remap_exception(s, e, bio, chunk); goto out_unlock; @@ -1087,7 +1628,7 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio, goto out_unlock; } - e = lookup_exception(&s->complete, chunk); + e = dm_lookup_exception(&s->complete, chunk); if (e) { free_pending_exception(pe); remap_exception(s, e, bio, chunk); @@ -1125,6 +1666,78 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio, return r; } +/* + * A snapshot-merge target behaves like a combination of a snapshot + * target and a snapshot-origin target. It only generates new + * exceptions in other snapshots and not in the one that is being + * merged. + * + * For each chunk, if there is an existing exception, it is used to + * redirect I/O to the cow device. Otherwise I/O is sent to the origin, + * which in turn might generate exceptions in other snapshots. + * If merging is currently taking place on the chunk in question, the + * I/O is deferred by adding it to s->bios_queued_during_merge. + */ +static int snapshot_merge_map(struct dm_target *ti, struct bio *bio, + union map_info *map_context) +{ + struct dm_exception *e; + struct dm_snapshot *s = ti->private; + int r = DM_MAPIO_REMAPPED; + chunk_t chunk; + + if (unlikely(bio_empty_barrier(bio))) { + if (!map_context->flush_request) + bio->bi_bdev = s->origin->bdev; + else + bio->bi_bdev = s->cow->bdev; + map_context->ptr = NULL; + return DM_MAPIO_REMAPPED; + } + + chunk = sector_to_chunk(s->store, bio->bi_sector); + + down_write(&s->lock); + + /* Full merging snapshots are redirected to the origin */ + if (!s->valid) + goto redirect_to_origin; + + /* If the block is already remapped - use that */ + e = dm_lookup_exception(&s->complete, chunk); + if (e) { + /* Queue writes overlapping with chunks being merged */ + if (bio_rw(bio) == WRITE && + chunk >= s->first_merging_chunk && + chunk < (s->first_merging_chunk + + s->num_merging_chunks)) { + bio->bi_bdev = s->origin->bdev; + bio_list_add(&s->bios_queued_during_merge, bio); + r = DM_MAPIO_SUBMITTED; + goto out_unlock; + } + + remap_exception(s, e, bio, chunk); + + if (bio_rw(bio) == WRITE) + map_context->ptr = track_chunk(s, chunk); + goto out_unlock; + } + +redirect_to_origin: + bio->bi_bdev = s->origin->bdev; + + if (bio_rw(bio) == WRITE) { + up_write(&s->lock); + return do_origin(s->origin, bio); + } + +out_unlock: + up_write(&s->lock); + + return r; +} + static int snapshot_end_io(struct dm_target *ti, struct bio *bio, int error, union map_info *map_context) { @@ -1137,40 +1750,135 @@ static int snapshot_end_io(struct dm_target *ti, struct bio *bio, return 0; } +static void snapshot_merge_presuspend(struct dm_target *ti) +{ + struct dm_snapshot *s = ti->private; + + stop_merge(s); +} + +static void snapshot_postsuspend(struct dm_target *ti) +{ + struct dm_snapshot *s = ti->private; + + down_write(&s->lock); + s->suspended = 1; + up_write(&s->lock); +} + +static int 
snapshot_preresume(struct dm_target *ti) +{ + int r = 0; + struct dm_snapshot *s = ti->private; + struct dm_snapshot *snap_src = NULL, *snap_dest = NULL; + + down_read(&_origins_lock); + (void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest, NULL); + if (snap_src && snap_dest) { + down_read(&snap_src->lock); + if (s == snap_src) { + DMERR("Unable to resume snapshot source until " + "handover completes."); + r = -EINVAL; + } else if (!snap_src->suspended) { + DMERR("Unable to perform snapshot handover until " + "source is suspended."); + r = -EINVAL; + } + up_read(&snap_src->lock); + } + up_read(&_origins_lock); + + return r; +} + static void snapshot_resume(struct dm_target *ti) { struct dm_snapshot *s = ti->private; + struct dm_snapshot *snap_src = NULL, *snap_dest = NULL; + + down_read(&_origins_lock); + (void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest, NULL); + if (snap_src && snap_dest) { + down_write(&snap_src->lock); + down_write_nested(&snap_dest->lock, SINGLE_DEPTH_NESTING); + __handover_exceptions(snap_src, snap_dest); + up_write(&snap_dest->lock); + up_write(&snap_src->lock); + } + up_read(&_origins_lock); + + /* Now we have correct chunk size, reregister */ + reregister_snapshot(s); down_write(&s->lock); s->active = 1; + s->suspended = 0; up_write(&s->lock); } +static sector_t get_origin_minimum_chunksize(struct block_device *bdev) +{ + sector_t min_chunksize; + + down_read(&_origins_lock); + min_chunksize = __minimum_chunk_size(__lookup_origin(bdev)); + up_read(&_origins_lock); + + return min_chunksize; +} + +static void snapshot_merge_resume(struct dm_target *ti) +{ + struct dm_snapshot *s = ti->private; + + /* + * Handover exceptions from existing snapshot. + */ + snapshot_resume(ti); + + /* + * snapshot-merge acts as an origin, so set ti->split_io + */ + ti->split_io = get_origin_minimum_chunksize(s->origin->bdev); + + start_merge(s); +} + static int snapshot_status(struct dm_target *ti, status_type_t type, char *result, unsigned int maxlen) { unsigned sz = 0; struct dm_snapshot *snap = ti->private; - down_write(&snap->lock); - switch (type) { case STATUSTYPE_INFO: + + down_write(&snap->lock); + if (!snap->valid) DMEMIT("Invalid"); + else if (snap->merge_failed) + DMEMIT("Merge failed"); else { - if (snap->store->type->fraction_full) { - sector_t numerator, denominator; - snap->store->type->fraction_full(snap->store, - &numerator, - &denominator); - DMEMIT("%llu/%llu", - (unsigned long long)numerator, - (unsigned long long)denominator); + if (snap->store->type->usage) { + sector_t total_sectors, sectors_allocated, + metadata_sectors; + snap->store->type->usage(snap->store, + &total_sectors, + &sectors_allocated, + &metadata_sectors); + DMEMIT("%llu/%llu %llu", + (unsigned long long)sectors_allocated, + (unsigned long long)total_sectors, + (unsigned long long)metadata_sectors); } else DMEMIT("Unknown"); } + + up_write(&snap->lock); + break; case STATUSTYPE_TABLE: @@ -1179,14 +1887,12 @@ static int snapshot_status(struct dm_target *ti, status_type_t type, * to make private copies if the output is to * make sense.
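With the switch from fraction_full() to usage() in snapshot_status() above, the STATUSTYPE_INFO line now carries three numbers, "<sectors_allocated>/<total_sectors> <metadata_sectors>", instead of the old two-value fraction. A userspace consumer (for instance something scraping `dmsetup status`) could split the payload as sketched below; the dmsetup framing is an assumption, only the three-number format comes from the DMEMIT call above.

#include <stdio.h>

/* Parse the payload of a snapshot status line in the new format:
 *   "<sectors_allocated>/<total_sectors> <metadata_sectors>"
 * Returns 0 on success, -1 if the line does not match (for example
 * "Invalid", "Merge failed" or "Unknown").
 */
static int parse_snapshot_usage(const char *payload,
				unsigned long long *allocated,
				unsigned long long *total,
				unsigned long long *metadata)
{
	if (sscanf(payload, "%llu/%llu %llu", allocated, total, metadata) != 3)
		return -1;
	return 0;
}

int main(void)
{
	unsigned long long a, t, m;

	if (!parse_snapshot_usage("544/40960 16", &a, &t, &m))
		printf("%.1f%% of the COW space used, %llu metadata sectors\n",
		       100.0 * (double)a / (double)t, m);
	return 0;
}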
*/ - DMEMIT("%s", snap->origin->name); + DMEMIT("%s %s", snap->origin->name, snap->cow->name); snap->store->type->status(snap->store, type, result + sz, maxlen - sz); break; } - up_write(&snap->lock); - return 0; } @@ -1202,17 +1908,36 @@ static int snapshot_iterate_devices(struct dm_target *ti, /*----------------------------------------------------------------- * Origin methods *---------------------------------------------------------------*/ -static int __origin_write(struct list_head *snapshots, struct bio *bio) + +/* + * If no exceptions need creating, DM_MAPIO_REMAPPED is returned and any + * supplied bio was ignored. The caller may submit it immediately. + * (No remapping actually occurs as the origin is always a direct linear + * map.) + * + * If further exceptions are required, DM_MAPIO_SUBMITTED is returned + * and any supplied bio is added to a list to be submitted once all + * the necessary exceptions exist. + */ +static int __origin_write(struct list_head *snapshots, sector_t sector, + struct bio *bio) { - int r = DM_MAPIO_REMAPPED, first = 0; + int r = DM_MAPIO_REMAPPED; struct dm_snapshot *snap; - struct dm_snap_exception *e; - struct dm_snap_pending_exception *pe, *next_pe, *primary_pe = NULL; + struct dm_exception *e; + struct dm_snap_pending_exception *pe; + struct dm_snap_pending_exception *pe_to_start_now = NULL; + struct dm_snap_pending_exception *pe_to_start_last = NULL; chunk_t chunk; - LIST_HEAD(pe_queue); /* Do all the snapshots on this origin */ list_for_each_entry (snap, snapshots, list) { + /* + * Don't make new exceptions in a merging snapshot + * because it has effectively been deleted + */ + if (dm_target_is_snapshot_merge(snap->ti)) + continue; down_write(&snap->lock); @@ -1221,24 +1946,21 @@ static int __origin_write(struct list_head *snapshots, struct bio *bio) goto next_snapshot; /* Nothing to do if writing beyond end of snapshot */ - if (bio->bi_sector >= dm_table_get_size(snap->store->ti->table)) + if (sector >= dm_table_get_size(snap->ti->table)) goto next_snapshot; /* * Remember, different snapshots can have * different chunk sizes. */ - chunk = sector_to_chunk(snap->store, bio->bi_sector); + chunk = sector_to_chunk(snap->store, sector); /* * Check exception table to see if block * is already remapped in this snapshot * and trigger an exception if not. - * - * ref_count is initialised to 1 so pending_complete() - * won't destroy the primary_pe while we're inside this loop. */ - e = lookup_exception(&snap->complete, chunk); + e = dm_lookup_exception(&snap->complete, chunk); if (e) goto next_snapshot; @@ -1253,7 +1975,7 @@ static int __origin_write(struct list_head *snapshots, struct bio *bio) goto next_snapshot; } - e = lookup_exception(&snap->complete, chunk); + e = dm_lookup_exception(&snap->complete, chunk); if (e) { free_pending_exception(pe); goto next_snapshot; @@ -1266,59 +1988,43 @@ static int __origin_write(struct list_head *snapshots, struct bio *bio) } } - if (!primary_pe) { - /* - * Either every pe here has same - * primary_pe or none has one yet. - */ - if (pe->primary_pe) - primary_pe = pe->primary_pe; - else { - primary_pe = pe; - first = 1; - } - - bio_list_add(&primary_pe->origin_bios, bio); + r = DM_MAPIO_SUBMITTED; - r = DM_MAPIO_SUBMITTED; - } + /* + * If an origin bio was supplied, queue it to wait for the + * completion of this exception, and start this one last, + * at the end of the function. 
+ */ + if (bio) { + bio_list_add(&pe->origin_bios, bio); + bio = NULL; - if (!pe->primary_pe) { - pe->primary_pe = primary_pe; - get_pending_exception(primary_pe); + if (!pe->started) { + pe->started = 1; + pe_to_start_last = pe; + } } if (!pe->started) { pe->started = 1; - list_add_tail(&pe->list, &pe_queue); + pe_to_start_now = pe; } next_snapshot: up_write(&snap->lock); - } - if (!primary_pe) - return r; - - /* - * If this is the first time we're processing this chunk and - * ref_count is now 1 it means all the pending exceptions - * got completed while we were in the loop above, so it falls to - * us here to remove the primary_pe and submit any origin_bios. - */ - - if (first && atomic_dec_and_test(&primary_pe->ref_count)) { - flush_bios(bio_list_get(&primary_pe->origin_bios)); - free_pending_exception(primary_pe); - /* If we got here, pe_queue is necessarily empty. */ - return r; + if (pe_to_start_now) { + start_copy(pe_to_start_now); + pe_to_start_now = NULL; + } } /* - * Now that we have a complete pe list we can start the copying. + * Submit the exception against which the bio is queued last, + * to give the other exceptions a head start. */ - list_for_each_entry_safe(pe, next_pe, &pe_queue, list) - start_copy(pe); + if (pe_to_start_last) + start_copy(pe_to_start_last); return r; } @@ -1334,13 +2040,48 @@ static int do_origin(struct dm_dev *origin, struct bio *bio) down_read(&_origins_lock); o = __lookup_origin(origin->bdev); if (o) - r = __origin_write(&o->snapshots, bio); + r = __origin_write(&o->snapshots, bio->bi_sector, bio); up_read(&_origins_lock); return r; } /* + * Trigger exceptions in all non-merging snapshots. + * + * The chunk size of the merging snapshot may be larger than the chunk + * size of some other snapshot so we may need to reallocate multiple + * chunks in other snapshots. + * + * We scan all the overlapping exceptions in the other snapshots. + * Returns 1 if anything was reallocated and must be waited for, + * otherwise returns 0. + * + * size must be a multiple of merging_snap's chunk_size. + */ +static int origin_write_extent(struct dm_snapshot *merging_snap, + sector_t sector, unsigned size) +{ + int must_wait = 0; + sector_t n; + struct origin *o; + + /* + * The origin's __minimum_chunk_size() got stored in split_io + * by snapshot_merge_resume(). + */ + down_read(&_origins_lock); + o = __lookup_origin(merging_snap->origin->bdev); + for (n = 0; n < size; n += merging_snap->ti->split_io) + if (__origin_write(&o->snapshots, sector + n, NULL) == + DM_MAPIO_SUBMITTED) + must_wait = 1; + up_read(&_origins_lock); + + return must_wait; +} + +/* * Origin: maps a linear range of a device, with hooks for snapshotting. */ @@ -1391,8 +2132,6 @@ static int origin_map(struct dm_target *ti, struct bio *bio, return (bio_rw(bio) == WRITE) ? do_origin(dev, bio) : DM_MAPIO_REMAPPED; } -#define min_not_zero(l, r) (l == 0) ? r : ((r == 0) ? l : min(l, r)) - /* * Set the target "split_io" field to the minimum of all the snapshots' * chunk sizes. 
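origin_write_extent() above walks the written extent in steps of the merging snapshot's split_io (which, per the comment, was set to the origin's minimum chunk size by snapshot_merge_resume()), issuing one __origin_write() check per chunk-sized slice and reporting whether anything must be waited for. The tiny sketch below reproduces just that stepping with made-up numbers so the loop bounds are easy to verify; it mirrors only the arithmetic, not the exception logic.

#include <stdio.h>

typedef unsigned long long sector_t;

/* Report which chunk-aligned slices of [sector, sector + size) must be
 * checked, stepping by split_io exactly like origin_write_extent(). */
static int slices_to_check(sector_t sector, unsigned size, sector_t split_io)
{
	int slices = 0;

	for (sector_t n = 0; n < size; n += split_io) {
		printf("check slice starting at sector %llu\n", sector + n);
		slices++;
	}
	return slices;
}

int main(void)
{
	/* Example: 8 KiB chunks (16 sectors of 512 bytes); a 24 KiB write
	 * (48 sectors) starting at sector 1024 is checked in 3 slices. */
	int n = slices_to_check(1024, 48, 16);

	printf("%d slices checked\n", n);
	return 0;
}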
@@ -1400,19 +2139,8 @@ static int origin_map(struct dm_target *ti, struct bio *bio, static void origin_resume(struct dm_target *ti) { struct dm_dev *dev = ti->private; - struct dm_snapshot *snap; - struct origin *o; - unsigned chunk_size = 0; - - down_read(&_origins_lock); - o = __lookup_origin(dev->bdev); - if (o) - list_for_each_entry (snap, &o->snapshots, list) - chunk_size = min_not_zero(chunk_size, - snap->store->chunk_size); - up_read(&_origins_lock); - ti->split_io = chunk_size; + ti->split_io = get_origin_minimum_chunksize(dev->bdev); } static int origin_status(struct dm_target *ti, status_type_t type, char *result, @@ -1455,17 +2183,35 @@ static struct target_type origin_target = { static struct target_type snapshot_target = { .name = "snapshot", - .version = {1, 7, 0}, + .version = {1, 9, 0}, .module = THIS_MODULE, .ctr = snapshot_ctr, .dtr = snapshot_dtr, .map = snapshot_map, .end_io = snapshot_end_io, + .postsuspend = snapshot_postsuspend, + .preresume = snapshot_preresume, .resume = snapshot_resume, .status = snapshot_status, .iterate_devices = snapshot_iterate_devices, }; +static struct target_type merge_target = { + .name = dm_snapshot_merge_target_name, + .version = {1, 0, 0}, + .module = THIS_MODULE, + .ctr = snapshot_ctr, + .dtr = snapshot_dtr, + .map = snapshot_merge_map, + .end_io = snapshot_end_io, + .presuspend = snapshot_merge_presuspend, + .postsuspend = snapshot_postsuspend, + .preresume = snapshot_preresume, + .resume = snapshot_merge_resume, + .status = snapshot_status, + .iterate_devices = snapshot_iterate_devices, +}; + static int __init dm_snapshot_init(void) { int r; @@ -1477,7 +2223,7 @@ static int __init dm_snapshot_init(void) } r = dm_register_target(&snapshot_target); - if (r) { + if (r < 0) { DMERR("snapshot target register failed %d", r); goto bad_register_snapshot_target; } @@ -1485,34 +2231,40 @@ static int __init dm_snapshot_init(void) r = dm_register_target(&origin_target); if (r < 0) { DMERR("Origin target register failed %d", r); - goto bad1; + goto bad_register_origin_target; + } + + r = dm_register_target(&merge_target); + if (r < 0) { + DMERR("Merge target register failed %d", r); + goto bad_register_merge_target; } r = init_origin_hash(); if (r) { DMERR("init_origin_hash failed."); - goto bad2; + goto bad_origin_hash; } - exception_cache = KMEM_CACHE(dm_snap_exception, 0); + exception_cache = KMEM_CACHE(dm_exception, 0); if (!exception_cache) { DMERR("Couldn't create exception cache."); r = -ENOMEM; - goto bad3; + goto bad_exception_cache; } pending_cache = KMEM_CACHE(dm_snap_pending_exception, 0); if (!pending_cache) { DMERR("Couldn't create pending cache."); r = -ENOMEM; - goto bad4; + goto bad_pending_cache; } tracked_chunk_cache = KMEM_CACHE(dm_snap_tracked_chunk, 0); if (!tracked_chunk_cache) { DMERR("Couldn't create cache to track chunks in use."); r = -ENOMEM; - goto bad5; + goto bad_tracked_chunk_cache; } ksnapd = create_singlethread_workqueue("ksnapd"); @@ -1526,19 +2278,21 @@ static int __init dm_snapshot_init(void) bad_pending_pool: kmem_cache_destroy(tracked_chunk_cache); -bad5: +bad_tracked_chunk_cache: kmem_cache_destroy(pending_cache); -bad4: +bad_pending_cache: kmem_cache_destroy(exception_cache); -bad3: +bad_exception_cache: exit_origin_hash(); -bad2: +bad_origin_hash: + dm_unregister_target(&merge_target); +bad_register_merge_target: dm_unregister_target(&origin_target); -bad1: +bad_register_origin_target: dm_unregister_target(&snapshot_target); - bad_register_snapshot_target: dm_exception_store_exit(); + return r; } @@ 
-1548,6 +2302,7 @@ static void __exit dm_snapshot_exit(void) dm_unregister_target(&snapshot_target); dm_unregister_target(&origin_target); + dm_unregister_target(&merge_target); exit_origin_hash(); kmem_cache_destroy(pending_cache); diff --git a/drivers/md/dm-sysfs.c b/drivers/md/dm-sysfs.c index 4b045903a4e..f53392df7b9 100644 --- a/drivers/md/dm-sysfs.c +++ b/drivers/md/dm-sysfs.c @@ -59,7 +59,7 @@ static ssize_t dm_attr_uuid_show(struct mapped_device *md, char *buf) static ssize_t dm_attr_suspended_show(struct mapped_device *md, char *buf) { - sprintf(buf, "%d\n", dm_suspended(md)); + sprintf(buf, "%d\n", dm_suspended_md(md)); return strlen(buf); } @@ -80,12 +80,20 @@ static struct sysfs_ops dm_sysfs_ops = { }; /* + * The sysfs structure is embedded in md struct, nothing to do here + */ +static void dm_sysfs_release(struct kobject *kobj) +{ +} + +/* * dm kobject is embedded in mapped_device structure * no need to define release function here */ static struct kobj_type dm_ktype = { .sysfs_ops = &dm_sysfs_ops, .default_attrs = dm_attrs, + .release = dm_sysfs_release }; /* diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c index 1a6cb3c7822..be625475cf6 100644 --- a/drivers/md/dm-table.c +++ b/drivers/md/dm-table.c @@ -12,6 +12,7 @@ #include <linux/blkdev.h> #include <linux/namei.h> #include <linux/ctype.h> +#include <linux/string.h> #include <linux/slab.h> #include <linux/interrupt.h> #include <linux/mutex.h> @@ -237,6 +238,9 @@ void dm_table_destroy(struct dm_table *t) { unsigned int i; + if (!t) + return; + while (atomic_read(&t->holders)) msleep(1); smp_mb(); @@ -600,11 +604,8 @@ int dm_split_args(int *argc, char ***argvp, char *input) return -ENOMEM; while (1) { - start = end; - /* Skip whitespace */ - while (*start && isspace(*start)) - start++; + start = skip_spaces(end); if (!*start) break; /* success, we hit the end */ diff --git a/drivers/md/dm-uevent.c b/drivers/md/dm-uevent.c index 6f65883aef1..c7c555a8c7b 100644 --- a/drivers/md/dm-uevent.c +++ b/drivers/md/dm-uevent.c @@ -139,14 +139,13 @@ void dm_send_uevents(struct list_head *events, struct kobject *kobj) list_del_init(&event->elist); /* - * Need to call dm_copy_name_and_uuid from here for now. - * Context of previous var adds and locking used for - * hash_cell not compatable. + * When a device is being removed this copy fails and we + * discard these unsent events. */ if (dm_copy_name_and_uuid(event->md, event->name, event->uuid)) { - DMERR("%s: dm_copy_name_and_uuid() failed", - __func__); + DMINFO("%s: skipping sending uevent for lost device", + __func__); goto uevent_free; } diff --git a/drivers/md/dm.c b/drivers/md/dm.c index 724efc63904..3167480b532 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -143,9 +143,19 @@ struct mapped_device { int barrier_error; /* + * Protect barrier_error from concurrent endio processing + * in request-based dm. + */ + spinlock_t barrier_error_lock; + + /* * Processing queue (flush/barriers) */ struct workqueue_struct *wq; + struct work_struct barrier_work; + + /* A pointer to the currently processing pre/post flush request */ + struct request *flush_request; /* * The current mapping. 
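dm_io_init()/dm_io_exit() are added to the module's parallel _inits[]/_exits[] arrays above, the usual table-driven arrangement where start-up walks the array forward and tear-down (or a failed start-up) walks it backwards; the walking code itself is outside this hunk. Below is a generic, self-contained model of that pattern with invented subsystem names.

#include <stdio.h>

static int  io_init(void)     { puts("io init");     return 0; }
static void io_exit(void)     { puts("io exit"); }
static int  kcopyd_init(void) { puts("kcopyd init"); return -1; }	/* fails */
static void kcopyd_exit(void) { puts("kcopyd exit"); }

static int  (*inits[])(void) = { io_init, kcopyd_init };
static void (*exits[])(void) = { io_exit, kcopyd_exit };

#define NUM_SUBSYS (sizeof(inits) / sizeof(inits[0]))

static int start_all(void)
{
	unsigned i;
	int r;

	for (i = 0; i < NUM_SUBSYS; i++) {
		r = inits[i]();
		if (r) {
			/* Roll back only what already succeeded, in reverse. */
			while (i--)
				exits[i]();
			return r;
		}
	}
	return 0;
}

int main(void)
{
	printf("start_all() = %d\n", start_all());
	return 0;
}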
@@ -178,9 +188,6 @@ struct mapped_device { /* forced geometry settings */ struct hd_geometry geometry; - /* marker of flush suspend for request-based dm */ - struct request suspend_rq; - /* For saving the address of __make_request for request based dm */ make_request_fn *saved_make_request_fn; @@ -275,6 +282,7 @@ static int (*_inits[])(void) __initdata = { dm_target_init, dm_linear_init, dm_stripe_init, + dm_io_init, dm_kcopyd_init, dm_interface_init, }; @@ -284,6 +292,7 @@ static void (*_exits[])(void) = { dm_target_exit, dm_linear_exit, dm_stripe_exit, + dm_io_exit, dm_kcopyd_exit, dm_interface_exit, }; @@ -320,6 +329,11 @@ static void __exit dm_exit(void) /* * Block device functions */ +int dm_deleting_md(struct mapped_device *md) +{ + return test_bit(DMF_DELETING, &md->flags); +} + static int dm_blk_open(struct block_device *bdev, fmode_t mode) { struct mapped_device *md; @@ -331,7 +345,7 @@ static int dm_blk_open(struct block_device *bdev, fmode_t mode) goto out; if (test_bit(DMF_FREEING, &md->flags) || - test_bit(DMF_DELETING, &md->flags)) { + dm_deleting_md(md)) { md = NULL; goto out; } @@ -388,7 +402,7 @@ static int dm_blk_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd, unsigned long arg) { struct mapped_device *md = bdev->bd_disk->private_data; - struct dm_table *map = dm_get_table(md); + struct dm_table *map = dm_get_live_table(md); struct dm_target *tgt; int r = -ENOTTY; @@ -401,7 +415,7 @@ static int dm_blk_ioctl(struct block_device *bdev, fmode_t mode, tgt = dm_table_get_target(map, 0); - if (dm_suspended(md)) { + if (dm_suspended_md(md)) { r = -EAGAIN; goto out; } @@ -430,9 +444,10 @@ static void free_tio(struct mapped_device *md, struct dm_target_io *tio) mempool_free(tio, md->tio_pool); } -static struct dm_rq_target_io *alloc_rq_tio(struct mapped_device *md) +static struct dm_rq_target_io *alloc_rq_tio(struct mapped_device *md, + gfp_t gfp_mask) { - return mempool_alloc(md->tio_pool, GFP_ATOMIC); + return mempool_alloc(md->tio_pool, gfp_mask); } static void free_rq_tio(struct dm_rq_target_io *tio) @@ -450,6 +465,12 @@ static void free_bio_info(struct dm_rq_clone_bio_info *info) mempool_free(info, info->tio->md->io_pool); } +static int md_in_flight(struct mapped_device *md) +{ + return atomic_read(&md->pending[READ]) + + atomic_read(&md->pending[WRITE]); +} + static void start_io_acct(struct dm_io *io) { struct mapped_device *md = io->md; @@ -512,7 +533,7 @@ static void queue_io(struct mapped_device *md, struct bio *bio) * function to access the md->map field, and make sure they call * dm_table_put() when finished. */ -struct dm_table *dm_get_table(struct mapped_device *md) +struct dm_table *dm_get_live_table(struct mapped_device *md) { struct dm_table *t; unsigned long flags; @@ -716,28 +737,38 @@ static void end_clone_bio(struct bio *clone, int error) blk_update_request(tio->orig, 0, nr_bytes); } +static void store_barrier_error(struct mapped_device *md, int error) +{ + unsigned long flags; + + spin_lock_irqsave(&md->barrier_error_lock, flags); + /* + * Basically, the first error is taken, but: + * -EOPNOTSUPP supersedes any I/O error. + * Requeue request supersedes any I/O error but -EOPNOTSUPP. + */ + if (!md->barrier_error || error == -EOPNOTSUPP || + (md->barrier_error != -EOPNOTSUPP && + error == DM_ENDIO_REQUEUE)) + md->barrier_error = error; + spin_unlock_irqrestore(&md->barrier_error_lock, flags); +} + /* * Don't touch any member of the md after calling this function because * the md may be freed in dm_put() at the end of this function. 
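store_barrier_error() above folds per-clone failures into a single error for the whole barrier with a precedence rule: the first error recorded normally sticks, -EOPNOTSUPP overrides anything, and a requeue request overrides ordinary I/O errors but not -EOPNOTSUPP. The pure-function restatement below encodes the same rule and checks a few cases; DM_ENDIO_REQUEUE is defined locally for the sketch and assumed to match the kernel header.

#include <assert.h>
#include <errno.h>
#include <stdio.h>

#define DM_ENDIO_REQUEUE 2	/* assumed value, local to this sketch */

/* Fold a new per-clone error into the barrier-wide error, using the same
 * precedence as store_barrier_error():
 *   - the first error recorded wins by default,
 *   - -EOPNOTSUPP overrides any other error,
 *   - a requeue request overrides ordinary errors but not -EOPNOTSUPP.
 */
static int fold_barrier_error(int current, int error)
{
	if (!current || error == -EOPNOTSUPP ||
	    (current != -EOPNOTSUPP && error == DM_ENDIO_REQUEUE))
		return error;
	return current;
}

int main(void)
{
	assert(fold_barrier_error(0, -EIO) == -EIO);
	assert(fold_barrier_error(-EIO, -ENOMEM) == -EIO);		/* first wins */
	assert(fold_barrier_error(-EIO, -EOPNOTSUPP) == -EOPNOTSUPP);
	assert(fold_barrier_error(-EIO, DM_ENDIO_REQUEUE) == DM_ENDIO_REQUEUE);
	assert(fold_barrier_error(-EOPNOTSUPP, DM_ENDIO_REQUEUE) == -EOPNOTSUPP);
	puts("precedence rule holds");
	return 0;
}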
* Or do dm_get() before calling this function and dm_put() later. */ -static void rq_completed(struct mapped_device *md, int run_queue) +static void rq_completed(struct mapped_device *md, int rw, int run_queue) { - int wakeup_waiters = 0; - struct request_queue *q = md->queue; - unsigned long flags; - - spin_lock_irqsave(q->queue_lock, flags); - if (!queue_in_flight(q)) - wakeup_waiters = 1; - spin_unlock_irqrestore(q->queue_lock, flags); + atomic_dec(&md->pending[rw]); /* nudge anyone waiting on suspend queue */ - if (wakeup_waiters) + if (!md_in_flight(md)) wake_up(&md->wait); if (run_queue) - blk_run_queue(q); + blk_run_queue(md->queue); /* * dm_put() must be at the end of this function. See the comment above @@ -753,6 +784,44 @@ static void free_rq_clone(struct request *clone) free_rq_tio(tio); } +/* + * Complete the clone and the original request. + * Must be called without queue lock. + */ +static void dm_end_request(struct request *clone, int error) +{ + int rw = rq_data_dir(clone); + int run_queue = 1; + bool is_barrier = blk_barrier_rq(clone); + struct dm_rq_target_io *tio = clone->end_io_data; + struct mapped_device *md = tio->md; + struct request *rq = tio->orig; + + if (blk_pc_request(rq) && !is_barrier) { + rq->errors = clone->errors; + rq->resid_len = clone->resid_len; + + if (rq->sense) + /* + * We are using the sense buffer of the original + * request. + * So setting the length of the sense data is enough. + */ + rq->sense_len = clone->sense_len; + } + + free_rq_clone(clone); + + if (unlikely(is_barrier)) { + if (unlikely(error)) + store_barrier_error(md, error); + run_queue = 0; + } else + blk_end_request_all(rq, error); + + rq_completed(md, rw, run_queue); +} + static void dm_unprep_request(struct request *rq) { struct request *clone = rq->special; @@ -768,12 +837,23 @@ static void dm_unprep_request(struct request *rq) */ void dm_requeue_unmapped_request(struct request *clone) { + int rw = rq_data_dir(clone); struct dm_rq_target_io *tio = clone->end_io_data; struct mapped_device *md = tio->md; struct request *rq = tio->orig; struct request_queue *q = rq->q; unsigned long flags; + if (unlikely(blk_barrier_rq(clone))) { + /* + * Barrier clones share an original request. + * Leave it to dm_end_request(), which handles this special + * case. + */ + dm_end_request(clone, DM_ENDIO_REQUEUE); + return; + } + dm_unprep_request(rq); spin_lock_irqsave(q->queue_lock, flags); @@ -782,7 +862,7 @@ void dm_requeue_unmapped_request(struct request *clone) blk_requeue_request(q, rq); spin_unlock_irqrestore(q->queue_lock, flags); - rq_completed(md, 0); + rq_completed(md, rw, 0); } EXPORT_SYMBOL_GPL(dm_requeue_unmapped_request); @@ -815,34 +895,28 @@ static void start_queue(struct request_queue *q) spin_unlock_irqrestore(q->queue_lock, flags); } -/* - * Complete the clone and the original request. - * Must be called without queue lock. - */ -static void dm_end_request(struct request *clone, int error) +static void dm_done(struct request *clone, int error, bool mapped) { + int r = error; struct dm_rq_target_io *tio = clone->end_io_data; - struct mapped_device *md = tio->md; - struct request *rq = tio->orig; + dm_request_endio_fn rq_end_io = tio->ti->type->rq_end_io; - if (blk_pc_request(rq)) { - rq->errors = clone->errors; - rq->resid_len = clone->resid_len; + if (mapped && rq_end_io) + r = rq_end_io(tio->ti, clone, error, &tio->info); - if (rq->sense) - /* - * We are using the sense buffer of the original - * request. - * So setting the length of the sense data is enough. 
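rq_completed() above now drops a per-direction counter (md->pending[rw]) and wakes the suspend waiter only when md_in_flight(), the sum of the READ and WRITE counters, reaches zero, replacing the old queue_in_flight() test. The toy below models only that accounting with C11 atomics; it is a user-space illustration, not block-layer code.

#include <assert.h>
#include <stdatomic.h>
#include <stdio.h>

enum { READ, WRITE };

static atomic_int pending[2];

static int md_in_flight(void)
{
	return atomic_load(&pending[READ]) + atomic_load(&pending[WRITE]);
}

static void dispatch(int rw)	/* dm_request_fn(): count before mapping */
{
	atomic_fetch_add(&pending[rw], 1);
}

static void complete(int rw)	/* rq_completed(): uncount, maybe wake */
{
	atomic_fetch_sub(&pending[rw], 1);
	if (!md_in_flight())
		puts("all I/O drained - wake suspend waiter");
}

int main(void)
{
	dispatch(READ);
	dispatch(WRITE);
	complete(READ);
	assert(md_in_flight() == 1);
	complete(WRITE);
	assert(md_in_flight() == 0);
	return 0;
}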
- */ - rq->sense_len = clone->sense_len; + if (r <= 0) + /* The target wants to complete the I/O */ + dm_end_request(clone, r); + else if (r == DM_ENDIO_INCOMPLETE) + /* The target will handle the I/O */ + return; + else if (r == DM_ENDIO_REQUEUE) + /* The target wants to requeue the I/O */ + dm_requeue_unmapped_request(clone); + else { + DMWARN("unimplemented target endio return value: %d", r); + BUG(); } - - free_rq_clone(clone); - - blk_end_request_all(rq, error); - - rq_completed(md, 1); } /* @@ -850,27 +924,14 @@ static void dm_end_request(struct request *clone, int error) */ static void dm_softirq_done(struct request *rq) { + bool mapped = true; struct request *clone = rq->completion_data; struct dm_rq_target_io *tio = clone->end_io_data; - dm_request_endio_fn rq_end_io = tio->ti->type->rq_end_io; - int error = tio->error; - if (!(rq->cmd_flags & REQ_FAILED) && rq_end_io) - error = rq_end_io(tio->ti, clone, error, &tio->info); + if (rq->cmd_flags & REQ_FAILED) + mapped = false; - if (error <= 0) - /* The target wants to complete the I/O */ - dm_end_request(clone, error); - else if (error == DM_ENDIO_INCOMPLETE) - /* The target will handle the I/O */ - return; - else if (error == DM_ENDIO_REQUEUE) - /* The target wants to requeue the I/O */ - dm_requeue_unmapped_request(clone); - else { - DMWARN("unimplemented target endio return value: %d", error); - BUG(); - } + dm_done(clone, tio->error, mapped); } /* @@ -882,6 +943,19 @@ static void dm_complete_request(struct request *clone, int error) struct dm_rq_target_io *tio = clone->end_io_data; struct request *rq = tio->orig; + if (unlikely(blk_barrier_rq(clone))) { + /* + * Barrier clones share an original request. So can't use + * softirq_done with the original. + * Pass the clone to dm_done() directly in this special case. + * It is safe (even if clone->q->queue_lock is held here) + * because there is no I/O dispatching during the completion + * of barrier clone. + */ + dm_done(clone, error, true); + return; + } + tio->error = error; rq->completion_data = clone; blk_complete_request(rq); @@ -898,6 +972,17 @@ void dm_kill_unmapped_request(struct request *clone, int error) struct dm_rq_target_io *tio = clone->end_io_data; struct request *rq = tio->orig; + if (unlikely(blk_barrier_rq(clone))) { + /* + * Barrier clones share an original request. + * Leave it to dm_end_request(), which handles this special + * case. 
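dm_done() above centralises what happens to a target's rq_end_io() verdict: a value <= 0 completes the original request with that error, DM_ENDIO_INCOMPLETE means the target has taken ownership, DM_ENDIO_REQUEUE puts the request back, and anything else is a bug. The short restatement below only makes that contract explicit; the constant values are defined locally and assumed to match <linux/device-mapper.h>.

#include <stdio.h>

/* Assumed values, local to this sketch. */
#define DM_ENDIO_INCOMPLETE 1
#define DM_ENDIO_REQUEUE    2

enum action { COMPLETE, LEAVE_TO_TARGET, REQUEUE };

/* Mirror of the decision dm_done() makes on a target's rq_end_io() result. */
static enum action decide(int r)
{
	if (r <= 0)
		return COMPLETE;	/* finish the original request with error r */
	if (r == DM_ENDIO_INCOMPLETE)
		return LEAVE_TO_TARGET;	/* the target will complete it itself */
	if (r == DM_ENDIO_REQUEUE)
		return REQUEUE;		/* put the original request back */
	return COMPLETE;		/* unknown value: the kernel BUG()s here */
}

int main(void)
{
	printf("%d %d %d\n", decide(0), decide(DM_ENDIO_INCOMPLETE),
	       decide(DM_ENDIO_REQUEUE));
	return 0;
}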
+ */ + BUG_ON(error > 0); + dm_end_request(clone, error); + return; + } + rq->cmd_flags |= REQ_FAILED; dm_complete_request(clone, error); } @@ -1214,7 +1299,7 @@ static void __split_and_process_bio(struct mapped_device *md, struct bio *bio) struct clone_info ci; int error = 0; - ci.map = dm_get_table(md); + ci.map = dm_get_live_table(md); if (unlikely(!ci.map)) { if (!bio_rw_flagged(bio, BIO_RW_BARRIER)) bio_io_error(bio); @@ -1255,7 +1340,7 @@ static int dm_merge_bvec(struct request_queue *q, struct bio_vec *biovec) { struct mapped_device *md = q->queuedata; - struct dm_table *map = dm_get_table(md); + struct dm_table *map = dm_get_live_table(md); struct dm_target *ti; sector_t max_sectors; int max_size = 0; @@ -1352,11 +1437,6 @@ static int dm_make_request(struct request_queue *q, struct bio *bio) { struct mapped_device *md = q->queuedata; - if (unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER))) { - bio_endio(bio, -EOPNOTSUPP); - return 0; - } - return md->saved_make_request_fn(q, bio); /* call __make_request() */ } @@ -1375,6 +1455,25 @@ static int dm_request(struct request_queue *q, struct bio *bio) return _dm_request(q, bio); } +/* + * Mark this request as flush request, so that dm_request_fn() can + * recognize. + */ +static void dm_rq_prepare_flush(struct request_queue *q, struct request *rq) +{ + rq->cmd_type = REQ_TYPE_LINUX_BLOCK; + rq->cmd[0] = REQ_LB_OP_FLUSH; +} + +static bool dm_rq_is_flush_request(struct request *rq) +{ + if (rq->cmd_type == REQ_TYPE_LINUX_BLOCK && + rq->cmd[0] == REQ_LB_OP_FLUSH) + return true; + else + return false; +} + void dm_dispatch_request(struct request *rq) { int r; @@ -1420,25 +1519,54 @@ static int dm_rq_bio_constructor(struct bio *bio, struct bio *bio_orig, static int setup_clone(struct request *clone, struct request *rq, struct dm_rq_target_io *tio) { - int r = blk_rq_prep_clone(clone, rq, tio->md->bs, GFP_ATOMIC, - dm_rq_bio_constructor, tio); + int r; - if (r) - return r; + if (dm_rq_is_flush_request(rq)) { + blk_rq_init(NULL, clone); + clone->cmd_type = REQ_TYPE_FS; + clone->cmd_flags |= (REQ_HARDBARRIER | WRITE); + } else { + r = blk_rq_prep_clone(clone, rq, tio->md->bs, GFP_ATOMIC, + dm_rq_bio_constructor, tio); + if (r) + return r; + + clone->cmd = rq->cmd; + clone->cmd_len = rq->cmd_len; + clone->sense = rq->sense; + clone->buffer = rq->buffer; + } - clone->cmd = rq->cmd; - clone->cmd_len = rq->cmd_len; - clone->sense = rq->sense; - clone->buffer = rq->buffer; clone->end_io = end_clone_request; clone->end_io_data = tio; return 0; } -static int dm_rq_flush_suspending(struct mapped_device *md) +static struct request *clone_rq(struct request *rq, struct mapped_device *md, + gfp_t gfp_mask) { - return !md->suspend_rq.special; + struct request *clone; + struct dm_rq_target_io *tio; + + tio = alloc_rq_tio(md, gfp_mask); + if (!tio) + return NULL; + + tio->md = md; + tio->ti = NULL; + tio->orig = rq; + tio->error = 0; + memset(&tio->info, 0, sizeof(tio->info)); + + clone = &tio->clone; + if (setup_clone(clone, rq, tio)) { + /* -ENOMEM */ + free_rq_tio(tio); + return NULL; + } + + return clone; } /* @@ -1447,39 +1575,19 @@ static int dm_rq_flush_suspending(struct mapped_device *md) static int dm_prep_fn(struct request_queue *q, struct request *rq) { struct mapped_device *md = q->queuedata; - struct dm_rq_target_io *tio; struct request *clone; - if (unlikely(rq == &md->suspend_rq)) { - if (dm_rq_flush_suspending(md)) - return BLKPREP_OK; - else - /* The flush suspend was interrupted */ - return BLKPREP_KILL; - } + if 
(unlikely(dm_rq_is_flush_request(rq))) + return BLKPREP_OK; if (unlikely(rq->special)) { DMWARN("Already has something in rq->special."); return BLKPREP_KILL; } - tio = alloc_rq_tio(md); /* Only one for each original request */ - if (!tio) - /* -ENOMEM */ - return BLKPREP_DEFER; - - tio->md = md; - tio->ti = NULL; - tio->orig = rq; - tio->error = 0; - memset(&tio->info, 0, sizeof(tio->info)); - - clone = &tio->clone; - if (setup_clone(clone, rq, tio)) { - /* -ENOMEM */ - free_rq_tio(tio); + clone = clone_rq(rq, md, GFP_ATOMIC); + if (!clone) return BLKPREP_DEFER; - } rq->special = clone; rq->cmd_flags |= REQ_DONTPREP; @@ -1487,11 +1595,10 @@ static int dm_prep_fn(struct request_queue *q, struct request *rq) return BLKPREP_OK; } -static void map_request(struct dm_target *ti, struct request *rq, +static void map_request(struct dm_target *ti, struct request *clone, struct mapped_device *md) { int r; - struct request *clone = rq->special; struct dm_rq_target_io *tio = clone->end_io_data; /* @@ -1511,6 +1618,8 @@ static void map_request(struct dm_target *ti, struct request *rq, break; case DM_MAPIO_REMAPPED: /* The target has remapped the I/O so dispatch it */ + trace_block_rq_remap(clone->q, clone, disk_devt(dm_disk(md)), + blk_rq_pos(tio->orig)); dm_dispatch_request(clone); break; case DM_MAPIO_REQUEUE: @@ -1536,29 +1645,26 @@ static void map_request(struct dm_target *ti, struct request *rq, static void dm_request_fn(struct request_queue *q) { struct mapped_device *md = q->queuedata; - struct dm_table *map = dm_get_table(md); + struct dm_table *map = dm_get_live_table(md); struct dm_target *ti; - struct request *rq; + struct request *rq, *clone; /* - * For noflush suspend, check blk_queue_stopped() to immediately - * quit I/O dispatching. + * For suspend, check blk_queue_stopped() and increment + * ->pending within a single queue_lock not to increment the + * number of in-flight I/Os after the queue is stopped in + * dm_suspend(). */ while (!blk_queue_plugged(q) && !blk_queue_stopped(q)) { rq = blk_peek_request(q); if (!rq) goto plug_and_out; - if (unlikely(rq == &md->suspend_rq)) { /* Flush suspend maker */ - if (queue_in_flight(q)) - /* Not quiet yet. 
Wait more */ - goto plug_and_out; - - /* This device should be quiet now */ - __stop_queue(q); + if (unlikely(dm_rq_is_flush_request(rq))) { + BUG_ON(md->flush_request); + md->flush_request = rq; blk_start_request(rq); - __blk_end_request_all(rq, 0); - wake_up(&md->wait); + queue_work(md->wq, &md->barrier_work); goto out; } @@ -1567,8 +1673,11 @@ static void dm_request_fn(struct request_queue *q) goto plug_and_out; blk_start_request(rq); + clone = rq->special; + atomic_inc(&md->pending[rq_data_dir(clone)]); + spin_unlock(q->queue_lock); - map_request(ti, rq, md); + map_request(ti, clone, md); spin_lock_irq(q->queue_lock); } @@ -1595,7 +1704,7 @@ static int dm_lld_busy(struct request_queue *q) { int r; struct mapped_device *md = q->queuedata; - struct dm_table *map = dm_get_table(md); + struct dm_table *map = dm_get_live_table(md); if (!map || test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) r = 1; @@ -1610,7 +1719,7 @@ static int dm_lld_busy(struct request_queue *q) static void dm_unplug_all(struct request_queue *q) { struct mapped_device *md = q->queuedata; - struct dm_table *map = dm_get_table(md); + struct dm_table *map = dm_get_live_table(md); if (map) { if (dm_request_based(md)) @@ -1628,7 +1737,7 @@ static int dm_any_congested(void *congested_data, int bdi_bits) struct dm_table *map; if (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) { - map = dm_get_table(md); + map = dm_get_live_table(md); if (map) { /* * Request-based dm cares about only own queue for @@ -1725,6 +1834,7 @@ out: static const struct block_device_operations dm_blk_dops; static void dm_wq_work(struct work_struct *work); +static void dm_rq_barrier_work(struct work_struct *work); /* * Allocate and initialise a blank device with a given minor. @@ -1754,6 +1864,7 @@ static struct mapped_device *alloc_dev(int minor) init_rwsem(&md->io_lock); mutex_init(&md->suspend_lock); spin_lock_init(&md->deferred_lock); + spin_lock_init(&md->barrier_error_lock); rwlock_init(&md->map_lock); atomic_set(&md->holders, 1); atomic_set(&md->open_count, 0); @@ -1788,6 +1899,8 @@ static struct mapped_device *alloc_dev(int minor) blk_queue_softirq_done(md->queue, dm_softirq_done); blk_queue_prep_rq(md->queue, dm_prep_fn); blk_queue_lld_busy(md->queue, dm_lld_busy); + blk_queue_ordered(md->queue, QUEUE_ORDERED_DRAIN_FLUSH, + dm_rq_prepare_flush); md->disk = alloc_disk(1); if (!md->disk) @@ -1797,6 +1910,7 @@ static struct mapped_device *alloc_dev(int minor) atomic_set(&md->pending[1], 0); init_waitqueue_head(&md->wait); INIT_WORK(&md->work, dm_wq_work); + INIT_WORK(&md->barrier_work, dm_rq_barrier_work); init_waitqueue_head(&md->eventq); md->disk->major = _major; @@ -1921,9 +2035,13 @@ static void __set_size(struct mapped_device *md, sector_t size) mutex_unlock(&md->bdev->bd_inode->i_mutex); } -static int __bind(struct mapped_device *md, struct dm_table *t, - struct queue_limits *limits) +/* + * Returns old map, which caller must destroy. 
+ */ +static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t, + struct queue_limits *limits) { + struct dm_table *old_map; struct request_queue *q = md->queue; sector_t size; unsigned long flags; @@ -1938,11 +2056,6 @@ static int __bind(struct mapped_device *md, struct dm_table *t, __set_size(md, size); - if (!size) { - dm_table_destroy(t); - return 0; - } - dm_table_event_callback(t, event_callback, md); /* @@ -1958,26 +2071,31 @@ static int __bind(struct mapped_device *md, struct dm_table *t, __bind_mempools(md, t); write_lock_irqsave(&md->map_lock, flags); + old_map = md->map; md->map = t; dm_table_set_restrictions(t, q, limits); write_unlock_irqrestore(&md->map_lock, flags); - return 0; + return old_map; } -static void __unbind(struct mapped_device *md) +/* + * Returns unbound table for the caller to free. + */ +static struct dm_table *__unbind(struct mapped_device *md) { struct dm_table *map = md->map; unsigned long flags; if (!map) - return; + return NULL; dm_table_event_callback(map, NULL, NULL); write_lock_irqsave(&md->map_lock, flags); md->map = NULL; write_unlock_irqrestore(&md->map_lock, flags); - dm_table_destroy(map); + + return map; } /* @@ -2059,18 +2177,18 @@ void dm_put(struct mapped_device *md) BUG_ON(test_bit(DMF_FREEING, &md->flags)); if (atomic_dec_and_lock(&md->holders, &_minor_lock)) { - map = dm_get_table(md); + map = dm_get_live_table(md); idr_replace(&_minor_idr, MINOR_ALLOCED, MINOR(disk_devt(dm_disk(md)))); set_bit(DMF_FREEING, &md->flags); spin_unlock(&_minor_lock); - if (!dm_suspended(md)) { + if (!dm_suspended_md(md)) { dm_table_presuspend_targets(map); dm_table_postsuspend_targets(map); } dm_sysfs_exit(md); dm_table_put(map); - __unbind(md); + dm_table_destroy(__unbind(md)); free_dev(md); } } @@ -2080,8 +2198,6 @@ static int dm_wait_for_completion(struct mapped_device *md, int interruptible) { int r = 0; DECLARE_WAITQUEUE(wait, current); - struct request_queue *q = md->queue; - unsigned long flags; dm_unplug_all(md->queue); @@ -2091,15 +2207,7 @@ static int dm_wait_for_completion(struct mapped_device *md, int interruptible) set_current_state(interruptible); smp_mb(); - if (dm_request_based(md)) { - spin_lock_irqsave(q->queue_lock, flags); - if (!queue_in_flight(q) && blk_queue_stopped(q)) { - spin_unlock_irqrestore(q->queue_lock, flags); - break; - } - spin_unlock_irqrestore(q->queue_lock, flags); - } else if (!atomic_read(&md->pending[0]) && - !atomic_read(&md->pending[1])) + if (!md_in_flight(md)) break; if (interruptible == TASK_INTERRUPTIBLE && @@ -2194,98 +2302,106 @@ static void dm_queue_flush(struct mapped_device *md) queue_work(md->wq, &md->work); } -/* - * Swap in a new table (destroying old one). - */ -int dm_swap_table(struct mapped_device *md, struct dm_table *table) +static void dm_rq_set_flush_nr(struct request *clone, unsigned flush_nr) { - struct queue_limits limits; - int r = -EINVAL; + struct dm_rq_target_io *tio = clone->end_io_data; - mutex_lock(&md->suspend_lock); + tio->info.flush_request = flush_nr; +} - /* device must be suspended */ - if (!dm_suspended(md)) - goto out; +/* Issue barrier requests to targets and wait for their completion. 
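__bind() and __unbind() above no longer destroy anything themselves: they swap the live table pointer under md->map_lock and hand the displaced table back, so callers such as dm_put() can run dm_table_destroy() outside the lock. A minimal user-space rendering of that "swap under the lock, free outside it" pattern follows, with a pthread rwlock standing in for md->map_lock.

#include <pthread.h>
#include <stdlib.h>

struct table { int id; };

static pthread_rwlock_t map_lock = PTHREAD_RWLOCK_INITIALIZER;
static struct table *live_map;

/* Swap in a new table and return the old one for the caller to destroy. */
static struct table *bind_table(struct table *t)
{
	struct table *old;

	pthread_rwlock_wrlock(&map_lock);
	old = live_map;
	live_map = t;
	pthread_rwlock_unlock(&map_lock);

	return old;
}

static void table_destroy(struct table *t)
{
	/* May sleep or do heavy work, which is why it runs outside the lock. */
	free(t);
}

int main(void)
{
	struct table *first = malloc(sizeof(*first));
	struct table *second = malloc(sizeof(*second));

	first->id = 1;
	second->id = 2;

	table_destroy(bind_table(first));	/* old map was NULL: free(NULL) is fine */
	table_destroy(bind_table(second));	/* destroys table 1 outside the lock */
	table_destroy(bind_table(NULL));	/* unbind: destroys table 2 */
	return 0;
}

The same ownership rule shows up throughout this series: whoever receives the displaced object is responsible for destroying it.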
*/ +static int dm_rq_barrier(struct mapped_device *md) +{ + int i, j; + struct dm_table *map = dm_get_live_table(md); + unsigned num_targets = dm_table_get_num_targets(map); + struct dm_target *ti; + struct request *clone; - r = dm_calculate_queue_limits(table, &limits); - if (r) - goto out; + md->barrier_error = 0; - /* cannot change the device type, once a table is bound */ - if (md->map && - (dm_table_get_type(md->map) != dm_table_get_type(table))) { - DMWARN("can't change the device type after a table is bound"); - goto out; + for (i = 0; i < num_targets; i++) { + ti = dm_table_get_target(map, i); + for (j = 0; j < ti->num_flush_requests; j++) { + clone = clone_rq(md->flush_request, md, GFP_NOIO); + dm_rq_set_flush_nr(clone, j); + atomic_inc(&md->pending[rq_data_dir(clone)]); + map_request(ti, clone, md); + } } - __unbind(md); - r = __bind(md, table, &limits); - -out: - mutex_unlock(&md->suspend_lock); - return r; -} + dm_wait_for_completion(md, TASK_UNINTERRUPTIBLE); + dm_table_put(map); -static void dm_rq_invalidate_suspend_marker(struct mapped_device *md) -{ - md->suspend_rq.special = (void *)0x1; + return md->barrier_error; } -static void dm_rq_abort_suspend(struct mapped_device *md, int noflush) +static void dm_rq_barrier_work(struct work_struct *work) { + int error; + struct mapped_device *md = container_of(work, struct mapped_device, + barrier_work); struct request_queue *q = md->queue; + struct request *rq; unsigned long flags; - spin_lock_irqsave(q->queue_lock, flags); - if (!noflush) - dm_rq_invalidate_suspend_marker(md); - __start_queue(q); - spin_unlock_irqrestore(q->queue_lock, flags); -} + /* + * Hold the md reference here and leave it at the last part so that + * the md can't be deleted by device opener when the barrier request + * completes. + */ + dm_get(md); -static void dm_rq_start_suspend(struct mapped_device *md, int noflush) -{ - struct request *rq = &md->suspend_rq; - struct request_queue *q = md->queue; + error = dm_rq_barrier(md); - if (noflush) - stop_queue(q); - else { - blk_rq_init(q, rq); - blk_insert_request(q, rq, 0, NULL); - } + rq = md->flush_request; + md->flush_request = NULL; + + if (error == DM_ENDIO_REQUEUE) { + spin_lock_irqsave(q->queue_lock, flags); + blk_requeue_request(q, rq); + spin_unlock_irqrestore(q->queue_lock, flags); + } else + blk_end_request_all(rq, error); + + blk_run_queue(q); + + dm_put(md); } -static int dm_rq_suspend_available(struct mapped_device *md, int noflush) +/* + * Swap in a new table, returning the old one for the caller to destroy. + */ +struct dm_table *dm_swap_table(struct mapped_device *md, struct dm_table *table) { - int r = 1; - struct request *rq = &md->suspend_rq; - struct request_queue *q = md->queue; - unsigned long flags; + struct dm_table *map = ERR_PTR(-EINVAL); + struct queue_limits limits; + int r; - if (noflush) - return r; + mutex_lock(&md->suspend_lock); - /* The marker must be protected by queue lock if it is in use */ - spin_lock_irqsave(q->queue_lock, flags); - if (unlikely(rq->ref_count)) { - /* - * This can happen, when the previous flush suspend was - * interrupted, the marker is still in the queue and - * this flush suspend has been invoked, because we don't - * remove the marker at the time of suspend interruption. - * We have only one marker per mapped_device, so we can't - * start another flush suspend while it is in use. - */ - BUG_ON(!rq->special); /* The marker should be invalidated */ - DMWARN("Invalidating the previous flush suspend is still in" - " progress. 
Please retry later."); - r = 0; + /* device must be suspended */ + if (!dm_suspended_md(md)) + goto out; + + r = dm_calculate_queue_limits(table, &limits); + if (r) { + map = ERR_PTR(r); + goto out; } - spin_unlock_irqrestore(q->queue_lock, flags); - return r; + /* cannot change the device type, once a table is bound */ + if (md->map && + (dm_table_get_type(md->map) != dm_table_get_type(table))) { + DMWARN("can't change the device type after a table is bound"); + goto out; + } + + map = __bind(md, table, &limits); + +out: + mutex_unlock(&md->suspend_lock); + return map; } /* @@ -2330,49 +2446,11 @@ static void unlock_fs(struct mapped_device *md) /* * Suspend mechanism in request-based dm. * - * After the suspend starts, further incoming requests are kept in - * the request_queue and deferred. - * Remaining requests in the request_queue at the start of suspend are flushed - * if it is flush suspend. - * The suspend completes when the following conditions have been satisfied, - * so wait for it: - * 1. q->in_flight is 0 (which means no in_flight request) - * 2. queue has been stopped (which means no request dispatching) - * + * 1. Flush all I/Os by lock_fs() if needed. + * 2. Stop dispatching any I/O by stopping the request_queue. + * 3. Wait for all in-flight I/Os to be completed or requeued. * - * Noflush suspend - * --------------- - * Noflush suspend doesn't need to dispatch remaining requests. - * So stop the queue immediately. Then, wait for all in_flight requests - * to be completed or requeued. - * - * To abort noflush suspend, start the queue. - * - * - * Flush suspend - * ------------- - * Flush suspend needs to dispatch remaining requests. So stop the queue - * after the remaining requests are completed. (Requeued request must be also - * re-dispatched and completed. Until then, we can't stop the queue.) - * - * During flushing the remaining requests, further incoming requests are also - * inserted to the same queue. To distinguish which requests are to be - * flushed, we insert a marker request to the queue at the time of starting - * flush suspend, like a barrier. - * The dispatching is blocked when the marker is found on the top of the queue. - * And the queue is stopped when all in_flight requests are completed, since - * that means the remaining requests are completely flushed. - * Then, the marker is removed from the queue. - * - * To abort flush suspend, we also need to take care of the marker, not only - * starting the queue. - * We don't remove the marker forcibly from the queue since it's against - * the block-layer manner. Instead, we put a invalidated mark on the marker. - * When the invalidated marker is found on the top of the queue, it is - * immediately removed from the queue, so it doesn't block dispatching. - * Because we have only one marker per mapped_device, we can't start another - * flush suspend until the invalidated marker is removed from the queue. - * So fail and return with -EBUSY in such a case. + * To abort suspend, start the request_queue. */ int dm_suspend(struct mapped_device *md, unsigned suspend_flags) { @@ -2383,17 +2461,12 @@ int dm_suspend(struct mapped_device *md, unsigned suspend_flags) mutex_lock(&md->suspend_lock); - if (dm_suspended(md)) { + if (dm_suspended_md(md)) { r = -EINVAL; goto out_unlock; } - if (dm_request_based(md) && !dm_rq_suspend_available(md, noflush)) { - r = -EBUSY; - goto out_unlock; - } - - map = dm_get_table(md); + map = dm_get_live_table(md); /* * DMF_NOFLUSH_SUSPENDING must be set before presuspend. 
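The new dm_swap_table() above reports failure through the returned pointer itself, ERR_PTR(-EINVAL) or ERR_PTR(r), so a single return value carries either the displaced table or an encoded errno. The fragment below re-creates just enough of the ERR_PTR()/IS_ERR()/PTR_ERR() idiom in user space to show how such a caller checks the result; the helpers are simplified stand-ins, not the kernel headers.

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

/* Simplified user-space versions of the kernel's pointer-error helpers. */
#define MAX_ERRNO 4095
static void *ERR_PTR(long error)    { return (void *)(intptr_t)error; }
static long  PTR_ERR(const void *p) { return (long)(intptr_t)p; }
static int   IS_ERR(const void *p)
{
	return (uintptr_t)p >= (uintptr_t)-MAX_ERRNO;
}

struct table { int id; };

static struct table old_table = { .id = 1 };

/* Toy stand-in for dm_swap_table(): fail unless "suspended" is set. */
static struct table *swap_table(int suspended, struct table *new_table)
{
	(void)new_table;
	if (!suspended)
		return ERR_PTR(-EINVAL);	/* device must be suspended first */
	return &old_table;			/* caller must destroy the old map */
}

int main(void)
{
	struct table new_table = { .id = 2 };
	struct table *old = swap_table(0, &new_table);

	if (IS_ERR(old))
		printf("swap failed: %ld\n", PTR_ERR(old));

	old = swap_table(1, &new_table);
	if (!IS_ERR(old))
		printf("old table %d handed back for destruction\n", old->id);
	return 0;
}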
@@ -2406,8 +2479,10 @@ int dm_suspend(struct mapped_device *md, unsigned suspend_flags) dm_table_presuspend_targets(map); /* - * Flush I/O to the device. noflush supersedes do_lockfs, - * because lock_fs() needs to flush I/Os. + * Flush I/O to the device. + * Any I/O submitted after lock_fs() may not be flushed. + * noflush takes precedence over do_lockfs. + * (lock_fs() flushes I/Os and waits for them to complete.) */ if (!noflush && do_lockfs) { r = lock_fs(md); @@ -2436,10 +2511,15 @@ int dm_suspend(struct mapped_device *md, unsigned suspend_flags) set_bit(DMF_QUEUE_IO_TO_THREAD, &md->flags); up_write(&md->io_lock); - flush_workqueue(md->wq); - + /* + * Request-based dm uses md->wq for barrier (dm_rq_barrier_work) which + * can be kicked until md->queue is stopped. So stop md->queue before + * flushing md->wq. + */ if (dm_request_based(md)) - dm_rq_start_suspend(md, noflush); + stop_queue(md->queue); + + flush_workqueue(md->wq); /* * At this point no more requests are entering target request routines. @@ -2458,7 +2538,7 @@ int dm_suspend(struct mapped_device *md, unsigned suspend_flags) dm_queue_flush(md); if (dm_request_based(md)) - dm_rq_abort_suspend(md, noflush); + start_queue(md->queue); unlock_fs(md); goto out; /* pushback list is already flushed, so skip flush */ @@ -2470,10 +2550,10 @@ int dm_suspend(struct mapped_device *md, unsigned suspend_flags) * requests are being added to md->deferred list. */ - dm_table_postsuspend_targets(map); - set_bit(DMF_SUSPENDED, &md->flags); + dm_table_postsuspend_targets(map); + out: dm_table_put(map); @@ -2488,10 +2568,10 @@ int dm_resume(struct mapped_device *md) struct dm_table *map = NULL; mutex_lock(&md->suspend_lock); - if (!dm_suspended(md)) + if (!dm_suspended_md(md)) goto out; - map = dm_get_table(md); + map = dm_get_live_table(md); if (!map || !dm_table_get_size(map)) goto out; @@ -2592,18 +2672,29 @@ struct mapped_device *dm_get_from_kobject(struct kobject *kobj) return NULL; if (test_bit(DMF_FREEING, &md->flags) || - test_bit(DMF_DELETING, &md->flags)) + dm_deleting_md(md)) return NULL; dm_get(md); return md; } -int dm_suspended(struct mapped_device *md) +int dm_suspended_md(struct mapped_device *md) { return test_bit(DMF_SUSPENDED, &md->flags); } +int dm_suspended(struct dm_target *ti) +{ + struct mapped_device *md = dm_table_get_md(ti->table); + int r = dm_suspended_md(md); + + dm_put(md); + + return r; +} +EXPORT_SYMBOL_GPL(dm_suspended); + int dm_noflush_suspending(struct dm_target *ti) { struct mapped_device *md = dm_table_get_md(ti->table); diff --git a/drivers/md/dm.h b/drivers/md/dm.h index a7663eba17e..8dadaa5bc39 100644 --- a/drivers/md/dm.h +++ b/drivers/md/dm.h @@ -89,6 +89,16 @@ int dm_target_iterate(void (*iter_func)(struct target_type *tt, int dm_split_args(int *argc, char ***argvp, char *input); /* + * Is this mapped_device being deleted? + */ +int dm_deleting_md(struct mapped_device *md); + +/* + * Is this mapped_device suspended? + */ +int dm_suspended_md(struct mapped_device *md); + +/* * The device-mapper can be driven through one of two interfaces; * ioctl or filesystem, depending which patch you have applied. 
*/ @@ -118,6 +128,9 @@ int dm_lock_for_deletion(struct mapped_device *md); void dm_kobject_uevent(struct mapped_device *md, enum kobject_action action, unsigned cookie); +int dm_io_init(void); +void dm_io_exit(void); + int dm_kcopyd_init(void); void dm_kcopyd_exit(void); diff --git a/drivers/md/md.c b/drivers/md/md.c index e1f3c1715cc..f4f5f82f9f5 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -39,6 +39,7 @@ #include <linux/buffer_head.h> /* for invalidate_bdev */ #include <linux/poll.h> #include <linux/ctype.h> +#include <linux/string.h> #include <linux/hdreg.h> #include <linux/proc_fs.h> #include <linux/random.h> @@ -1935,15 +1936,11 @@ static void print_sb_1(struct mdp_superblock_1 *sb) uuid = sb->set_uuid; printk(KERN_INFO - "md: SB: (V:%u) (F:0x%08x) Array-ID:<%02x%02x%02x%02x" - ":%02x%02x:%02x%02x:%02x%02x:%02x%02x%02x%02x%02x%02x>\n" + "md: SB: (V:%u) (F:0x%08x) Array-ID:<%pU>\n" "md: Name: \"%s\" CT:%llu\n", le32_to_cpu(sb->major_version), le32_to_cpu(sb->feature_map), - uuid[0], uuid[1], uuid[2], uuid[3], - uuid[4], uuid[5], uuid[6], uuid[7], - uuid[8], uuid[9], uuid[10], uuid[11], - uuid[12], uuid[13], uuid[14], uuid[15], + uuid, sb->set_name, (unsigned long long)le64_to_cpu(sb->ctime) & MD_SUPERBLOCK_1_TIME_SEC_MASK); @@ -1952,8 +1949,7 @@ static void print_sb_1(struct mdp_superblock_1 *sb) printk(KERN_INFO "md: L%u SZ%llu RD:%u LO:%u CS:%u DO:%llu DS:%llu SO:%llu" " RO:%llu\n" - "md: Dev:%08x UUID: %02x%02x%02x%02x:%02x%02x:%02x%02x:%02x%02x" - ":%02x%02x%02x%02x%02x%02x\n" + "md: Dev:%08x UUID: %pU\n" "md: (F:0x%08x) UT:%llu Events:%llu ResyncOffset:%llu CSUM:0x%08x\n" "md: (MaxDev:%u) \n", le32_to_cpu(sb->level), @@ -1966,10 +1962,7 @@ static void print_sb_1(struct mdp_superblock_1 *sb) (unsigned long long)le64_to_cpu(sb->super_offset), (unsigned long long)le64_to_cpu(sb->recovery_offset), le32_to_cpu(sb->dev_number), - uuid[0], uuid[1], uuid[2], uuid[3], - uuid[4], uuid[5], uuid[6], uuid[7], - uuid[8], uuid[9], uuid[10], uuid[11], - uuid[12], uuid[13], uuid[14], uuid[15], + uuid, sb->devflags, (unsigned long long)le64_to_cpu(sb->utime) & MD_SUPERBLOCK_1_TIME_SEC_MASK, (unsigned long long)le64_to_cpu(sb->events), @@ -3439,8 +3432,7 @@ bitmap_store(mddev_t *mddev, const char *buf, size_t len) } if (*end && !isspace(*end)) break; bitmap_dirty_bits(mddev->bitmap, chunk, end_chunk); - buf = end; - while (isspace(*buf)) buf++; + buf = skip_spaces(end); } bitmap_unplug(mddev->bitmap); /* flush the bits to disk */ out: diff --git a/drivers/media/video/davinci/vpfe_capture.c b/drivers/media/video/davinci/vpfe_capture.c index 12a1b3d7132..c3916a42668 100644 --- a/drivers/media/video/davinci/vpfe_capture.c +++ b/drivers/media/video/davinci/vpfe_capture.c @@ -2127,7 +2127,7 @@ vpfe_resume(struct device *dev) return -1; } -static struct dev_pm_ops vpfe_dev_pm_ops = { +static const struct dev_pm_ops vpfe_dev_pm_ops = { .suspend = vpfe_suspend, .resume = vpfe_resume, }; diff --git a/drivers/media/video/davinci/vpif_capture.c b/drivers/media/video/davinci/vpif_capture.c index d947ee5e4eb..78130721f57 100644 --- a/drivers/media/video/davinci/vpif_capture.c +++ b/drivers/media/video/davinci/vpif_capture.c @@ -2107,7 +2107,7 @@ vpif_resume(struct device *dev) return -1; } -static struct dev_pm_ops vpif_dev_pm_ops = { +static const struct dev_pm_ops vpif_dev_pm_ops = { .suspend = vpif_suspend, .resume = vpif_resume, }; diff --git a/drivers/media/video/sh_mobile_ceu_camera.c b/drivers/media/video/sh_mobile_ceu_camera.c index a4f3472d4db..961e4484d72 100644 --- 
a/drivers/media/video/sh_mobile_ceu_camera.c +++ b/drivers/media/video/sh_mobile_ceu_camera.c @@ -1825,7 +1825,7 @@ static int sh_mobile_ceu_runtime_nop(struct device *dev) return 0; } -static struct dev_pm_ops sh_mobile_ceu_dev_pm_ops = { +static const struct dev_pm_ops sh_mobile_ceu_dev_pm_ops = { .runtime_suspend = sh_mobile_ceu_runtime_nop, .runtime_resume = sh_mobile_ceu_runtime_nop, }; diff --git a/drivers/mfd/wm831x-core.c b/drivers/mfd/wm831x-core.c index 223a90c7492..4b2021af1d9 100644 --- a/drivers/mfd/wm831x-core.c +++ b/drivers/mfd/wm831x-core.c @@ -809,6 +809,9 @@ static struct resource wm831x_wdt_resources[] = { static struct mfd_cell wm8310_devs[] = { { + .name = "wm831x-backup", + }, + { .name = "wm831x-buckv", .id = 1, .num_resources = ARRAY_SIZE(wm831x_dcdc1_resources), @@ -962,6 +965,9 @@ static struct mfd_cell wm8310_devs[] = { static struct mfd_cell wm8311_devs[] = { { + .name = "wm831x-backup", + }, + { .name = "wm831x-buckv", .id = 1, .num_resources = ARRAY_SIZE(wm831x_dcdc1_resources), @@ -1096,6 +1102,9 @@ static struct mfd_cell wm8311_devs[] = { static struct mfd_cell wm8312_devs[] = { { + .name = "wm831x-backup", + }, + { .name = "wm831x-buckv", .id = 1, .num_resources = ARRAY_SIZE(wm831x_dcdc1_resources), diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig index 2c16ca6501d..59f4ba1b703 100644 --- a/drivers/misc/Kconfig +++ b/drivers/misc/Kconfig @@ -13,6 +13,20 @@ menuconfig MISC_DEVICES if MISC_DEVICES +config AD525X_DPOT + tristate "Analog Devices AD525x Digital Potentiometers" + depends on I2C && SYSFS + help + If you say yes here, you get support for the Analog Devices + AD5258, AD5259, AD5251, AD5252, AD5253, AD5254 and AD5255 + digital potentiometer chips. + + See Documentation/misc-devices/ad525x_dpot.txt for the + userspace interface. + + This driver can also be built as a module. If so, the module + will be called ad525x_dpot. + config ATMEL_PWM tristate "Atmel AT32/AT91 PWM support" depends on AVR32 || ARCH_AT91SAM9263 || ARCH_AT91SAM9RL || ARCH_AT91CAP9 @@ -173,6 +187,30 @@ config SGI_XP this feature will allow for direct communication between SSIs based on a network adapter and DMA messaging. +config CS5535_MFGPT + tristate "CS5535/CS5536 Geode Multi-Function General Purpose Timer (MFGPT) support" + depends on PCI + depends on X86 + default n + help + This driver provides access to MFGPT functionality for other + drivers that need timers. MFGPTs are available in the CS5535 and + CS5536 companion chips that are found in AMD Geode and several + other platforms. They have a better resolution and max interval + than the generic PIT, and are suitable for use as high-res timers. + You probably don't want to enable this manually; other drivers that + make use of it should enable it. + +config CS5535_MFGPT_DEFAULT_IRQ + int + default 7 + help + MFGPTs on the CS5535 require an interrupt. The selected IRQ + can be overridden as a module option as well as by drivers that + use the cs5535_mfgpt_ API; however, different architectures might + want to use a different IRQ by default. This is here for + architectures to set as necessary. + config HP_ILO tristate "Channel interface driver for HP iLO/iLO2 processor" depends on PCI @@ -256,6 +294,16 @@ config DS1682 This driver can also be built as a module. If so, the module will be called ds1682. +config TI_DAC7512 + tristate "Texas Instruments DAC7512" + depends on SPI && SYSFS + help + If you say yes here you get support for the Texas Instruments + DAC7512 16-bit digital-to-analog converter. 
+ + This driver can also be built as a module. If so, the module + will be called ti_dac7512. + source "drivers/misc/c2port/Kconfig" source "drivers/misc/eeprom/Kconfig" source "drivers/misc/cb710/Kconfig" diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile index 906a0edcea4..049ff2482f3 100644 --- a/drivers/misc/Makefile +++ b/drivers/misc/Makefile @@ -4,6 +4,7 @@ obj-$(CONFIG_IBM_ASM) += ibmasm/ obj-$(CONFIG_HDPU_FEATURES) += hdpuftrs/ +obj-$(CONFIG_AD525X_DPOT) += ad525x_dpot.o obj-$(CONFIG_ATMEL_PWM) += atmel_pwm.o obj-$(CONFIG_ATMEL_SSC) += atmel-ssc.o obj-$(CONFIG_ATMEL_TCLIB) += atmel_tclib.o @@ -17,10 +18,12 @@ obj-$(CONFIG_ENCLOSURE_SERVICES) += enclosure.o obj-$(CONFIG_KGDB_TESTS) += kgdbts.o obj-$(CONFIG_SGI_XP) += sgi-xp/ obj-$(CONFIG_SGI_GRU) += sgi-gru/ +obj-$(CONFIG_CS5535_MFGPT) += cs5535-mfgpt.o obj-$(CONFIG_HP_ILO) += hpilo.o obj-$(CONFIG_ISL29003) += isl29003.o obj-$(CONFIG_EP93XX_PWM) += ep93xx_pwm.o obj-$(CONFIG_DS1682) += ds1682.o +obj-$(CONFIG_TI_DAC7512) += ti_dac7512.o obj-$(CONFIG_C2PORT) += c2port/ obj-$(CONFIG_IWMC3200TOP) += iwmc3200top/ obj-y += eeprom/ diff --git a/drivers/misc/ad525x_dpot.c b/drivers/misc/ad525x_dpot.c new file mode 100644 index 00000000000..30a59f2bacd --- /dev/null +++ b/drivers/misc/ad525x_dpot.c @@ -0,0 +1,666 @@ +/* + * ad525x_dpot: Driver for the Analog Devices AD525x digital potentiometers + * Copyright (c) 2009 Analog Devices, Inc. + * Author: Michael Hennerich <hennerich@blackfin.uclinux.org> + * + * DEVID #Wipers #Positions Resistor Options (kOhm) + * AD5258 1 64 1, 10, 50, 100 + * AD5259 1 256 5, 10, 50, 100 + * AD5251 2 64 1, 10, 50, 100 + * AD5252 2 256 1, 10, 50, 100 + * AD5255 3 512 25, 250 + * AD5253 4 64 1, 10, 50, 100 + * AD5254 4 256 1, 10, 50, 100 + * + * See Documentation/misc-devices/ad525x_dpot.txt for more info. + * + * derived from ad5258.c + * Copyright (c) 2009 Cyber Switching, Inc. + * Author: Chris Verges <chrisv@cyberswitching.com> + * + * derived from ad5252.c + * Copyright (c) 2006 Michael Hennerich <hennerich@blackfin.uclinux.org> + * + * Licensed under the GPL-2 or later. 
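As a usage illustration only (nothing below is part of the patch): the driver binds by I2C device name, so platform/board code would normally declare the part with i2c_board_info. The bus number and the 0x2e address are placeholders; the real address depends on the specific part and its address pins.

    #include <linux/i2c.h>
    #include <linux/init.h>

    static struct i2c_board_info example_dpot_info __initdata = {
            I2C_BOARD_INFO("ad5252", 0x2e),         /* name matches the id table below */
    };

    static int __init example_dpot_board_init(void)
    {
            /* bus 0 is just an example */
            return i2c_register_board_info(0, &example_dpot_info, 1);
    }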
+ */ + +#include <linux/module.h> +#include <linux/device.h> +#include <linux/kernel.h> +#include <linux/init.h> +#include <linux/slab.h> +#include <linux/i2c.h> +#include <linux/delay.h> + +#define DRIVER_NAME "ad525x_dpot" +#define DRIVER_VERSION "0.1" + +enum dpot_devid { + AD5258_ID, + AD5259_ID, + AD5251_ID, + AD5252_ID, + AD5253_ID, + AD5254_ID, + AD5255_ID, +}; + +#define AD5258_MAX_POSITION 64 +#define AD5259_MAX_POSITION 256 +#define AD5251_MAX_POSITION 64 +#define AD5252_MAX_POSITION 256 +#define AD5253_MAX_POSITION 64 +#define AD5254_MAX_POSITION 256 +#define AD5255_MAX_POSITION 512 + +#define AD525X_RDAC0 0 +#define AD525X_RDAC1 1 +#define AD525X_RDAC2 2 +#define AD525X_RDAC3 3 + +#define AD525X_REG_TOL 0x18 +#define AD525X_TOL_RDAC0 (AD525X_REG_TOL | AD525X_RDAC0) +#define AD525X_TOL_RDAC1 (AD525X_REG_TOL | AD525X_RDAC1) +#define AD525X_TOL_RDAC2 (AD525X_REG_TOL | AD525X_RDAC2) +#define AD525X_TOL_RDAC3 (AD525X_REG_TOL | AD525X_RDAC3) + +/* RDAC-to-EEPROM Interface Commands */ +#define AD525X_I2C_RDAC (0x00 << 5) +#define AD525X_I2C_EEPROM (0x01 << 5) +#define AD525X_I2C_CMD (0x80) + +#define AD525X_DEC_ALL_6DB (AD525X_I2C_CMD | (0x4 << 3)) +#define AD525X_INC_ALL_6DB (AD525X_I2C_CMD | (0x9 << 3)) +#define AD525X_DEC_ALL (AD525X_I2C_CMD | (0x6 << 3)) +#define AD525X_INC_ALL (AD525X_I2C_CMD | (0xB << 3)) + +static s32 ad525x_read(struct i2c_client *client, u8 reg); +static s32 ad525x_write(struct i2c_client *client, u8 reg, u8 value); + +/* + * Client data (each client gets its own) + */ + +struct dpot_data { + struct mutex update_lock; + unsigned rdac_mask; + unsigned max_pos; + unsigned devid; +}; + +/* sysfs functions */ + +static ssize_t sysfs_show_reg(struct device *dev, + struct device_attribute *attr, char *buf, u32 reg) +{ + struct i2c_client *client = to_i2c_client(dev); + struct dpot_data *data = i2c_get_clientdata(client); + s32 value; + + mutex_lock(&data->update_lock); + value = ad525x_read(client, reg); + mutex_unlock(&data->update_lock); + + if (value < 0) + return -EINVAL; + /* + * Let someone else deal with converting this ... + * the tolerance is a two-byte value where the MSB + * is a sign + integer value, and the LSB is a + * decimal value. See page 18 of the AD5258 + * datasheet (Rev. A) for more details. 
+ */ + + if (reg & AD525X_REG_TOL) + return sprintf(buf, "0x%04x\n", value & 0xFFFF); + else + return sprintf(buf, "%u\n", value & data->rdac_mask); +} + +static ssize_t sysfs_set_reg(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count, u32 reg) +{ + struct i2c_client *client = to_i2c_client(dev); + struct dpot_data *data = i2c_get_clientdata(client); + unsigned long value; + int err; + + err = strict_strtoul(buf, 10, &value); + if (err) + return err; + + if (value > data->rdac_mask) + value = data->rdac_mask; + + mutex_lock(&data->update_lock); + ad525x_write(client, reg, value); + if (reg & AD525X_I2C_EEPROM) + msleep(26); /* Sleep while the EEPROM updates */ + mutex_unlock(&data->update_lock); + + return count; +} + +static ssize_t sysfs_do_cmd(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count, u32 reg) +{ + struct i2c_client *client = to_i2c_client(dev); + struct dpot_data *data = i2c_get_clientdata(client); + + mutex_lock(&data->update_lock); + ad525x_write(client, reg, 0); + mutex_unlock(&data->update_lock); + + return count; +} + +/* ------------------------------------------------------------------------- */ + +static ssize_t show_rdac0(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return sysfs_show_reg(dev, attr, buf, AD525X_I2C_RDAC | AD525X_RDAC0); +} + +static ssize_t set_rdac0(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + return sysfs_set_reg(dev, attr, buf, count, + AD525X_I2C_RDAC | AD525X_RDAC0); +} + +static DEVICE_ATTR(rdac0, S_IWUSR | S_IRUGO, show_rdac0, set_rdac0); + +static ssize_t show_eeprom0(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return sysfs_show_reg(dev, attr, buf, AD525X_I2C_EEPROM | AD525X_RDAC0); +} + +static ssize_t set_eeprom0(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + return sysfs_set_reg(dev, attr, buf, count, + AD525X_I2C_EEPROM | AD525X_RDAC0); +} + +static DEVICE_ATTR(eeprom0, S_IWUSR | S_IRUGO, show_eeprom0, set_eeprom0); + +static ssize_t show_tolerance0(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return sysfs_show_reg(dev, attr, buf, + AD525X_I2C_EEPROM | AD525X_TOL_RDAC0); +} + +static DEVICE_ATTR(tolerance0, S_IRUGO, show_tolerance0, NULL); + +/* ------------------------------------------------------------------------- */ + +static ssize_t show_rdac1(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return sysfs_show_reg(dev, attr, buf, AD525X_I2C_RDAC | AD525X_RDAC1); +} + +static ssize_t set_rdac1(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + return sysfs_set_reg(dev, attr, buf, count, + AD525X_I2C_RDAC | AD525X_RDAC1); +} + +static DEVICE_ATTR(rdac1, S_IWUSR | S_IRUGO, show_rdac1, set_rdac1); + +static ssize_t show_eeprom1(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return sysfs_show_reg(dev, attr, buf, AD525X_I2C_EEPROM | AD525X_RDAC1); +} + +static ssize_t set_eeprom1(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + return sysfs_set_reg(dev, attr, buf, count, + AD525X_I2C_EEPROM | AD525X_RDAC1); +} + +static DEVICE_ATTR(eeprom1, S_IWUSR | S_IRUGO, show_eeprom1, set_eeprom1); + +static ssize_t show_tolerance1(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return sysfs_show_reg(dev, attr, buf, + AD525X_I2C_EEPROM | AD525X_TOL_RDAC1); +} + +static 
DEVICE_ATTR(tolerance1, S_IRUGO, show_tolerance1, NULL); + +/* ------------------------------------------------------------------------- */ + +static ssize_t show_rdac2(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return sysfs_show_reg(dev, attr, buf, AD525X_I2C_RDAC | AD525X_RDAC2); +} + +static ssize_t set_rdac2(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + return sysfs_set_reg(dev, attr, buf, count, + AD525X_I2C_RDAC | AD525X_RDAC2); +} + +static DEVICE_ATTR(rdac2, S_IWUSR | S_IRUGO, show_rdac2, set_rdac2); + +static ssize_t show_eeprom2(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return sysfs_show_reg(dev, attr, buf, AD525X_I2C_EEPROM | AD525X_RDAC2); +} + +static ssize_t set_eeprom2(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + return sysfs_set_reg(dev, attr, buf, count, + AD525X_I2C_EEPROM | AD525X_RDAC2); +} + +static DEVICE_ATTR(eeprom2, S_IWUSR | S_IRUGO, show_eeprom2, set_eeprom2); + +static ssize_t show_tolerance2(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return sysfs_show_reg(dev, attr, buf, + AD525X_I2C_EEPROM | AD525X_TOL_RDAC2); +} + +static DEVICE_ATTR(tolerance2, S_IRUGO, show_tolerance2, NULL); + +/* ------------------------------------------------------------------------- */ + +static ssize_t show_rdac3(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return sysfs_show_reg(dev, attr, buf, AD525X_I2C_RDAC | AD525X_RDAC3); +} + +static ssize_t set_rdac3(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + return sysfs_set_reg(dev, attr, buf, count, + AD525X_I2C_RDAC | AD525X_RDAC3); +} + +static DEVICE_ATTR(rdac3, S_IWUSR | S_IRUGO, show_rdac3, set_rdac3); + +static ssize_t show_eeprom3(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return sysfs_show_reg(dev, attr, buf, AD525X_I2C_EEPROM | AD525X_RDAC3); +} + +static ssize_t set_eeprom3(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + return sysfs_set_reg(dev, attr, buf, count, + AD525X_I2C_EEPROM | AD525X_RDAC3); +} + +static DEVICE_ATTR(eeprom3, S_IWUSR | S_IRUGO, show_eeprom3, set_eeprom3); + +static ssize_t show_tolerance3(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return sysfs_show_reg(dev, attr, buf, + AD525X_I2C_EEPROM | AD525X_TOL_RDAC3); +} + +static DEVICE_ATTR(tolerance3, S_IRUGO, show_tolerance3, NULL); + +static struct attribute *ad525x_attributes_wipers[4][4] = { + { + &dev_attr_rdac0.attr, + &dev_attr_eeprom0.attr, + &dev_attr_tolerance0.attr, + NULL + }, { + &dev_attr_rdac1.attr, + &dev_attr_eeprom1.attr, + &dev_attr_tolerance1.attr, + NULL + }, { + &dev_attr_rdac2.attr, + &dev_attr_eeprom2.attr, + &dev_attr_tolerance2.attr, + NULL + }, { + &dev_attr_rdac3.attr, + &dev_attr_eeprom3.attr, + &dev_attr_tolerance3.attr, + NULL + } +}; + +static const struct attribute_group ad525x_group_wipers[] = { + {.attrs = ad525x_attributes_wipers[AD525X_RDAC0]}, + {.attrs = ad525x_attributes_wipers[AD525X_RDAC1]}, + {.attrs = ad525x_attributes_wipers[AD525X_RDAC2]}, + {.attrs = ad525x_attributes_wipers[AD525X_RDAC3]}, +}; + +/* ------------------------------------------------------------------------- */ + +static ssize_t set_inc_all(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + return sysfs_do_cmd(dev, attr, buf, count, AD525X_INC_ALL); +} + +static DEVICE_ATTR(inc_all, 
S_IWUSR, NULL, set_inc_all); + +static ssize_t set_dec_all(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + return sysfs_do_cmd(dev, attr, buf, count, AD525X_DEC_ALL); +} + +static DEVICE_ATTR(dec_all, S_IWUSR, NULL, set_dec_all); + +static ssize_t set_inc_all_6db(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + return sysfs_do_cmd(dev, attr, buf, count, AD525X_INC_ALL_6DB); +} + +static DEVICE_ATTR(inc_all_6db, S_IWUSR, NULL, set_inc_all_6db); + +static ssize_t set_dec_all_6db(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + return sysfs_do_cmd(dev, attr, buf, count, AD525X_DEC_ALL_6DB); +} + +static DEVICE_ATTR(dec_all_6db, S_IWUSR, NULL, set_dec_all_6db); + +static struct attribute *ad525x_attributes_commands[] = { + &dev_attr_inc_all.attr, + &dev_attr_dec_all.attr, + &dev_attr_inc_all_6db.attr, + &dev_attr_dec_all_6db.attr, + NULL +}; + +static const struct attribute_group ad525x_group_commands = { + .attrs = ad525x_attributes_commands, +}; + +/* ------------------------------------------------------------------------- */ + +/* i2c device functions */ + +/** + * ad525x_read - return the value contained in the specified register + * on the AD5258 device. + * @client: value returned from i2c_new_device() + * @reg: the register to read + * + * If the tolerance register is specified, 2 bytes are returned. + * Otherwise, 1 byte is returned. A negative value indicates an error + * occurred while reading the register. + */ +static s32 ad525x_read(struct i2c_client *client, u8 reg) +{ + struct dpot_data *data = i2c_get_clientdata(client); + + if ((reg & AD525X_REG_TOL) || (data->max_pos > 256)) + return i2c_smbus_read_word_data(client, (reg & 0xF8) | + ((reg & 0x7) << 1)); + else + return i2c_smbus_read_byte_data(client, reg); +} + +/** + * ad525x_write - store the given value in the specified register on + * the AD5258 device. + * @client: value returned from i2c_new_device() + * @reg: the register to write + * @value: the byte to store in the register + * + * For certain instructions that do not require a data byte, "NULL" + * should be specified for the "value" parameter. These instructions + * include NOP, RESTORE_FROM_EEPROM, and STORE_TO_EEPROM. + * + * A negative return value indicates an error occurred while reading + * the register. 
+ */ +static s32 ad525x_write(struct i2c_client *client, u8 reg, u8 value) +{ + struct dpot_data *data = i2c_get_clientdata(client); + + /* Only write the instruction byte for certain commands */ + if (reg & AD525X_I2C_CMD) + return i2c_smbus_write_byte(client, reg); + + if (data->max_pos > 256) + return i2c_smbus_write_word_data(client, (reg & 0xF8) | + ((reg & 0x7) << 1), value); + else + /* All other registers require instruction + data bytes */ + return i2c_smbus_write_byte_data(client, reg, value); +} + +static int ad525x_probe(struct i2c_client *client, + const struct i2c_device_id *id) +{ + struct device *dev = &client->dev; + struct dpot_data *data; + int err = 0; + + dev_dbg(dev, "%s\n", __func__); + + if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE)) { + dev_err(dev, "missing I2C functionality for this driver\n"); + goto exit; + } + + data = kzalloc(sizeof(struct dpot_data), GFP_KERNEL); + if (!data) { + err = -ENOMEM; + goto exit; + } + + i2c_set_clientdata(client, data); + mutex_init(&data->update_lock); + + switch (id->driver_data) { + case AD5258_ID: + data->max_pos = AD5258_MAX_POSITION; + err = sysfs_create_group(&dev->kobj, + &ad525x_group_wipers[AD525X_RDAC0]); + break; + case AD5259_ID: + data->max_pos = AD5259_MAX_POSITION; + err = sysfs_create_group(&dev->kobj, + &ad525x_group_wipers[AD525X_RDAC0]); + break; + case AD5251_ID: + data->max_pos = AD5251_MAX_POSITION; + err = sysfs_create_group(&dev->kobj, + &ad525x_group_wipers[AD525X_RDAC1]); + err |= sysfs_create_group(&dev->kobj, + &ad525x_group_wipers[AD525X_RDAC3]); + err |= sysfs_create_group(&dev->kobj, &ad525x_group_commands); + break; + case AD5252_ID: + data->max_pos = AD5252_MAX_POSITION; + err = sysfs_create_group(&dev->kobj, + &ad525x_group_wipers[AD525X_RDAC1]); + err |= sysfs_create_group(&dev->kobj, + &ad525x_group_wipers[AD525X_RDAC3]); + err |= sysfs_create_group(&dev->kobj, &ad525x_group_commands); + break; + case AD5253_ID: + data->max_pos = AD5253_MAX_POSITION; + err = sysfs_create_group(&dev->kobj, + &ad525x_group_wipers[AD525X_RDAC0]); + err |= sysfs_create_group(&dev->kobj, + &ad525x_group_wipers[AD525X_RDAC1]); + err |= sysfs_create_group(&dev->kobj, + &ad525x_group_wipers[AD525X_RDAC2]); + err |= sysfs_create_group(&dev->kobj, + &ad525x_group_wipers[AD525X_RDAC3]); + err |= sysfs_create_group(&dev->kobj, &ad525x_group_commands); + break; + case AD5254_ID: + data->max_pos = AD5254_MAX_POSITION; + err = sysfs_create_group(&dev->kobj, + &ad525x_group_wipers[AD525X_RDAC0]); + err |= sysfs_create_group(&dev->kobj, + &ad525x_group_wipers[AD525X_RDAC1]); + err |= sysfs_create_group(&dev->kobj, + &ad525x_group_wipers[AD525X_RDAC2]); + err |= sysfs_create_group(&dev->kobj, + &ad525x_group_wipers[AD525X_RDAC3]); + err |= sysfs_create_group(&dev->kobj, &ad525x_group_commands); + break; + case AD5255_ID: + data->max_pos = AD5255_MAX_POSITION; + err = sysfs_create_group(&dev->kobj, + &ad525x_group_wipers[AD525X_RDAC0]); + err |= sysfs_create_group(&dev->kobj, + &ad525x_group_wipers[AD525X_RDAC1]); + err |= sysfs_create_group(&dev->kobj, + &ad525x_group_wipers[AD525X_RDAC2]); + err |= sysfs_create_group(&dev->kobj, &ad525x_group_commands); + break; + default: + err = -ENODEV; + goto exit_free; + } + + if (err) { + dev_err(dev, "failed to register sysfs hooks\n"); + goto exit_free; + } + + data->devid = id->driver_data; + data->rdac_mask = data->max_pos - 1; + + dev_info(dev, "%s %d-Position Digital Potentiometer registered\n", + id->name, data->max_pos); + + return 0; + +exit_free: + 
kfree(data); + i2c_set_clientdata(client, NULL); +exit: + dev_err(dev, "failed to create client\n"); + return err; +} + +static int __devexit ad525x_remove(struct i2c_client *client) +{ + struct dpot_data *data = i2c_get_clientdata(client); + struct device *dev = &client->dev; + + switch (data->devid) { + case AD5258_ID: + case AD5259_ID: + sysfs_remove_group(&dev->kobj, + &ad525x_group_wipers[AD525X_RDAC0]); + break; + case AD5251_ID: + case AD5252_ID: + sysfs_remove_group(&dev->kobj, + &ad525x_group_wipers[AD525X_RDAC1]); + sysfs_remove_group(&dev->kobj, + &ad525x_group_wipers[AD525X_RDAC3]); + sysfs_remove_group(&dev->kobj, &ad525x_group_commands); + break; + case AD5253_ID: + case AD5254_ID: + sysfs_remove_group(&dev->kobj, + &ad525x_group_wipers[AD525X_RDAC0]); + sysfs_remove_group(&dev->kobj, + &ad525x_group_wipers[AD525X_RDAC1]); + sysfs_remove_group(&dev->kobj, + &ad525x_group_wipers[AD525X_RDAC2]); + sysfs_remove_group(&dev->kobj, + &ad525x_group_wipers[AD525X_RDAC3]); + sysfs_remove_group(&dev->kobj, &ad525x_group_commands); + break; + case AD5255_ID: + sysfs_remove_group(&dev->kobj, + &ad525x_group_wipers[AD525X_RDAC0]); + sysfs_remove_group(&dev->kobj, + &ad525x_group_wipers[AD525X_RDAC1]); + sysfs_remove_group(&dev->kobj, + &ad525x_group_wipers[AD525X_RDAC2]); + sysfs_remove_group(&dev->kobj, &ad525x_group_commands); + break; + } + + i2c_set_clientdata(client, NULL); + kfree(data); + + return 0; +} + +static const struct i2c_device_id ad525x_idtable[] = { + {"ad5258", AD5258_ID}, + {"ad5259", AD5259_ID}, + {"ad5251", AD5251_ID}, + {"ad5252", AD5252_ID}, + {"ad5253", AD5253_ID}, + {"ad5254", AD5254_ID}, + {"ad5255", AD5255_ID}, + {} +}; + +MODULE_DEVICE_TABLE(i2c, ad525x_idtable); + +static struct i2c_driver ad525x_driver = { + .driver = { + .owner = THIS_MODULE, + .name = DRIVER_NAME, + }, + .id_table = ad525x_idtable, + .probe = ad525x_probe, + .remove = __devexit_p(ad525x_remove), +}; + +static int __init ad525x_init(void) +{ + return i2c_add_driver(&ad525x_driver); +} + +module_init(ad525x_init); + +static void __exit ad525x_exit(void) +{ + i2c_del_driver(&ad525x_driver); +} + +module_exit(ad525x_exit); + +MODULE_AUTHOR("Chris Verges <chrisv@cyberswitching.com>, " + "Michael Hennerich <hennerich@blackfin.uclinux.org>, "); +MODULE_DESCRIPTION("AD5258/9 digital potentiometer driver"); +MODULE_LICENSE("GPL"); +MODULE_VERSION(DRIVER_VERSION); diff --git a/drivers/misc/cs5535-mfgpt.c b/drivers/misc/cs5535-mfgpt.c new file mode 100644 index 00000000000..8110460558f --- /dev/null +++ b/drivers/misc/cs5535-mfgpt.c @@ -0,0 +1,370 @@ +/* + * Driver for the CS5535/CS5536 Multi-Function General Purpose Timers (MFGPT) + * + * Copyright (C) 2006, Advanced Micro Devices, Inc. + * Copyright (C) 2007 Andres Salomon <dilinger@debian.org> + * Copyright (C) 2009 Andres Salomon <dilinger@collabora.co.uk> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of version 2 of the GNU General Public License + * as published by the Free Software Foundation. + * + * The MFGPTs are documented in AMD Geode CS5536 Companion Device Data Book. 
+ */ + +#include <linux/kernel.h> +#include <linux/spinlock.h> +#include <linux/interrupt.h> +#include <linux/module.h> +#include <linux/pci.h> +#include <linux/cs5535.h> + +#define DRV_NAME "cs5535-mfgpt" +#define MFGPT_BAR 2 + +static int mfgpt_reset_timers; +module_param_named(mfgptfix, mfgpt_reset_timers, int, 0644); +MODULE_PARM_DESC(mfgptfix, "Reset the MFGPT timers during init; " + "required by some broken BIOSes (ie, TinyBIOS < 0.99)."); + +struct cs5535_mfgpt_timer { + struct cs5535_mfgpt_chip *chip; + int nr; +}; + +static struct cs5535_mfgpt_chip { + DECLARE_BITMAP(avail, MFGPT_MAX_TIMERS); + resource_size_t base; + + struct pci_dev *pdev; + spinlock_t lock; + int initialized; +} cs5535_mfgpt_chip; + +int cs5535_mfgpt_toggle_event(struct cs5535_mfgpt_timer *timer, int cmp, + int event, int enable) +{ + uint32_t msr, mask, value, dummy; + int shift = (cmp == MFGPT_CMP1) ? 0 : 8; + + if (!timer) { + WARN_ON(1); + return -EIO; + } + + /* + * The register maps for these are described in sections 6.17.1.x of + * the AMD Geode CS5536 Companion Device Data Book. + */ + switch (event) { + case MFGPT_EVENT_RESET: + /* + * XXX: According to the docs, we cannot reset timers above + * 6; that is, resets for 7 and 8 will be ignored. Is this + * a problem? -dilinger + */ + msr = MSR_MFGPT_NR; + mask = 1 << (timer->nr + 24); + break; + + case MFGPT_EVENT_NMI: + msr = MSR_MFGPT_NR; + mask = 1 << (timer->nr + shift); + break; + + case MFGPT_EVENT_IRQ: + msr = MSR_MFGPT_IRQ; + mask = 1 << (timer->nr + shift); + break; + + default: + return -EIO; + } + + rdmsr(msr, value, dummy); + + if (enable) + value |= mask; + else + value &= ~mask; + + wrmsr(msr, value, dummy); + return 0; +} +EXPORT_SYMBOL_GPL(cs5535_mfgpt_toggle_event); + +int cs5535_mfgpt_set_irq(struct cs5535_mfgpt_timer *timer, int cmp, int *irq, + int enable) +{ + uint32_t zsel, lpc, dummy; + int shift; + + if (!timer) { + WARN_ON(1); + return -EIO; + } + + /* + * Unfortunately, MFGPTs come in pairs sharing their IRQ lines. If VSA + * is using the same CMP of the timer's Siamese twin, the IRQ is set to + * 2, and we mustn't use nor change it. + * XXX: Likewise, 2 Linux drivers might clash if the 2nd overwrites the + * IRQ of the 1st. This can only happen if forcing an IRQ, calling this + * with *irq==0 is safe. Currently there _are_ no 2 drivers. + */ + rdmsr(MSR_PIC_ZSEL_LOW, zsel, dummy); + shift = ((cmp == MFGPT_CMP1 ? 
0 : 4) + timer->nr % 4) * 4; + if (((zsel >> shift) & 0xF) == 2) + return -EIO; + + /* Choose IRQ: if none supplied, keep IRQ already set or use default */ + if (!*irq) + *irq = (zsel >> shift) & 0xF; + if (!*irq) + *irq = CONFIG_CS5535_MFGPT_DEFAULT_IRQ; + + /* Can't use IRQ if it's 0 (=disabled), 2, or routed to LPC */ + if (*irq < 1 || *irq == 2 || *irq > 15) + return -EIO; + rdmsr(MSR_PIC_IRQM_LPC, lpc, dummy); + if (lpc & (1 << *irq)) + return -EIO; + + /* All chosen and checked - go for it */ + if (cs5535_mfgpt_toggle_event(timer, cmp, MFGPT_EVENT_IRQ, enable)) + return -EIO; + if (enable) { + zsel = (zsel & ~(0xF << shift)) | (*irq << shift); + wrmsr(MSR_PIC_ZSEL_LOW, zsel, dummy); + } + + return 0; +} +EXPORT_SYMBOL_GPL(cs5535_mfgpt_set_irq); + +struct cs5535_mfgpt_timer *cs5535_mfgpt_alloc_timer(int timer_nr, int domain) +{ + struct cs5535_mfgpt_chip *mfgpt = &cs5535_mfgpt_chip; + struct cs5535_mfgpt_timer *timer = NULL; + unsigned long flags; + int max; + + if (!mfgpt->initialized) + goto done; + + /* only allocate timers from the working domain if requested */ + if (domain == MFGPT_DOMAIN_WORKING) + max = 6; + else + max = MFGPT_MAX_TIMERS; + + if (timer_nr >= max) { + /* programmer error. silly programmers! */ + WARN_ON(1); + goto done; + } + + spin_lock_irqsave(&mfgpt->lock, flags); + if (timer_nr < 0) { + unsigned long t; + + /* try to find any available timer */ + t = find_first_bit(mfgpt->avail, max); + /* set timer_nr to -1 if no timers available */ + timer_nr = t < max ? (int) t : -1; + } else { + /* check if the requested timer's available */ + if (test_bit(timer_nr, mfgpt->avail)) + timer_nr = -1; + } + + if (timer_nr >= 0) + /* if timer_nr is not -1, it's an available timer */ + __clear_bit(timer_nr, mfgpt->avail); + spin_unlock_irqrestore(&mfgpt->lock, flags); + + if (timer_nr < 0) + goto done; + + timer = kmalloc(sizeof(*timer), GFP_KERNEL); + if (!timer) { + /* aw hell */ + spin_lock_irqsave(&mfgpt->lock, flags); + __set_bit(timer_nr, mfgpt->avail); + spin_unlock_irqrestore(&mfgpt->lock, flags); + goto done; + } + timer->chip = mfgpt; + timer->nr = timer_nr; + dev_info(&mfgpt->pdev->dev, "registered timer %d\n", timer_nr); + +done: + return timer; +} +EXPORT_SYMBOL_GPL(cs5535_mfgpt_alloc_timer); + +/* + * XXX: This frees the timer memory, but never resets the actual hardware + * timer. The old geode_mfgpt code did this; it would be good to figure + * out a way to actually release the hardware timer. See comments below. + */ +void cs5535_mfgpt_free_timer(struct cs5535_mfgpt_timer *timer) +{ + kfree(timer); +} +EXPORT_SYMBOL_GPL(cs5535_mfgpt_free_timer); + +uint16_t cs5535_mfgpt_read(struct cs5535_mfgpt_timer *timer, uint16_t reg) +{ + return inw(timer->chip->base + reg + (timer->nr * 8)); +} +EXPORT_SYMBOL_GPL(cs5535_mfgpt_read); + +void cs5535_mfgpt_write(struct cs5535_mfgpt_timer *timer, uint16_t reg, + uint16_t value) +{ + outw(value, timer->chip->base + reg + (timer->nr * 8)); +} +EXPORT_SYMBOL_GPL(cs5535_mfgpt_write); + +/* + * This is a sledgehammer that resets all MFGPT timers. This is required by + * some broken BIOSes which leave the system in an unstable state + * (TinyBIOS 0.98, for example; fixed in 0.99). It's uncertain as to + * whether or not this secret MSR can be used to release individual timers. + * Jordan tells me that he and Mitch once played w/ it, but it's unclear + * what the results of that were (and they experienced some instability). 
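Before the internal reset/scan helpers, a brief illustration (not part of the patch) of how a client driver is expected to consume the API exported above: allocate any free timer from the working domain, then route an IRQ for comparator 1. Passing irq = 0 keeps whatever routing is already set up, falling back to CONFIG_CS5535_MFGPT_DEFAULT_IRQ. The function and variable names are made up.

    #include <linux/cs5535.h>
    #include <linux/errno.h>

    static struct cs5535_mfgpt_timer *example_timer;

    static int example_grab_timer(void)
    {
            int irq = 0;    /* 0: keep current routing, else the Kconfig default */

            example_timer = cs5535_mfgpt_alloc_timer(-1, MFGPT_DOMAIN_WORKING);
            if (!example_timer)
                    return -ENODEV;

            if (cs5535_mfgpt_set_irq(example_timer, MFGPT_CMP1, &irq, 1)) {
                    cs5535_mfgpt_free_timer(example_timer);
                    return -EIO;
            }

            /* timer registers are then programmed with cs5535_mfgpt_write() */
            return 0;
    }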
+ */ +static void __init reset_all_timers(void) +{ + uint32_t val, dummy; + + /* The following undocumented bit resets the MFGPT timers */ + val = 0xFF; dummy = 0; + wrmsr(MSR_MFGPT_SETUP, val, dummy); +} + +/* + * Check whether any MFGPTs are available for the kernel to use. In most + * cases, firmware that uses AMD's VSA code will claim all timers during + * bootup; we certainly don't want to take them if they're already in use. + * In other cases (such as with VSAless OpenFirmware), the system firmware + * leaves timers available for us to use. + */ +static int __init scan_timers(struct cs5535_mfgpt_chip *mfgpt) +{ + struct cs5535_mfgpt_timer timer = { .chip = mfgpt }; + unsigned long flags; + int timers = 0; + uint16_t val; + int i; + + /* bios workaround */ + if (mfgpt_reset_timers) + reset_all_timers(); + + /* just to be safe, protect this section w/ lock */ + spin_lock_irqsave(&mfgpt->lock, flags); + for (i = 0; i < MFGPT_MAX_TIMERS; i++) { + timer.nr = i; + val = cs5535_mfgpt_read(&timer, MFGPT_REG_SETUP); + if (!(val & MFGPT_SETUP_SETUP)) { + __set_bit(i, mfgpt->avail); + timers++; + } + } + spin_unlock_irqrestore(&mfgpt->lock, flags); + + return timers; +} + +static int __init cs5535_mfgpt_probe(struct pci_dev *pdev, + const struct pci_device_id *pci_id) +{ + int err, t; + + /* There are two ways to get the MFGPT base address; one is by + * fetching it from MSR_LBAR_MFGPT, the other is by reading the + * PCI BAR info. The latter method is easier (especially across + * different architectures), so we'll stick with that for now. If + * it turns out to be unreliable in the face of crappy BIOSes, we + * can always go back to using MSRs.. */ + + err = pci_enable_device_io(pdev); + if (err) { + dev_err(&pdev->dev, "can't enable device IO\n"); + goto done; + } + + err = pci_request_region(pdev, MFGPT_BAR, DRV_NAME); + if (err) { + dev_err(&pdev->dev, "can't alloc PCI BAR #%d\n", MFGPT_BAR); + goto done; + } + + /* set up the driver-specific struct */ + cs5535_mfgpt_chip.base = pci_resource_start(pdev, MFGPT_BAR); + cs5535_mfgpt_chip.pdev = pdev; + spin_lock_init(&cs5535_mfgpt_chip.lock); + + dev_info(&pdev->dev, "allocated PCI BAR #%d: base 0x%llx\n", MFGPT_BAR, + (unsigned long long) cs5535_mfgpt_chip.base); + + /* detect the available timers */ + t = scan_timers(&cs5535_mfgpt_chip); + dev_info(&pdev->dev, DRV_NAME ": %d MFGPT timers available\n", t); + cs5535_mfgpt_chip.initialized = 1; + return 0; + +done: + return err; +} + +static struct pci_device_id cs5535_mfgpt_pci_tbl[] = { + { PCI_DEVICE(PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_CS5535_ISA) }, + { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CS5536_ISA) }, + { 0, }, +}; +MODULE_DEVICE_TABLE(pci, cs5535_mfgpt_pci_tbl); + +/* + * Just like with the cs5535-gpio driver, we can't use the standard PCI driver + * registration stuff. It allows only one driver to bind to each PCI + * device, and we want the GPIO and MFGPT drivers to be able to share a PCI + * device. Instead, we manually scan for the PCI device, request a single + * region, and keep track of the devices that we're using. 
+ */ + +static int __init cs5535_mfgpt_scan_pci(void) +{ + struct pci_dev *pdev; + int err = -ENODEV; + int i; + + for (i = 0; i < ARRAY_SIZE(cs5535_mfgpt_pci_tbl); i++) { + pdev = pci_get_device(cs5535_mfgpt_pci_tbl[i].vendor, + cs5535_mfgpt_pci_tbl[i].device, NULL); + if (pdev) { + err = cs5535_mfgpt_probe(pdev, + &cs5535_mfgpt_pci_tbl[i]); + if (err) + pci_dev_put(pdev); + + /* we only support a single CS5535/6 southbridge */ + break; + } + } + + return err; +} + +static int __init cs5535_mfgpt_init(void) +{ + return cs5535_mfgpt_scan_pci(); +} + +module_init(cs5535_mfgpt_init); + +MODULE_AUTHOR("Andres Salomon <dilinger@collabora.co.uk>"); +MODULE_DESCRIPTION("CS5535/CS5536 MFGPT timer driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/misc/eeprom/eeprom.c b/drivers/misc/eeprom/eeprom.c index 2c27193aeaa..f939ebc2507 100644 --- a/drivers/misc/eeprom/eeprom.c +++ b/drivers/misc/eeprom/eeprom.c @@ -32,9 +32,6 @@ static const unsigned short normal_i2c[] = { 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, I2C_CLIENT_END }; -/* Insmod parameters */ -I2C_CLIENT_INSMOD_1(eeprom); - /* Size of EEPROM in bytes */ #define EEPROM_SIZE 256 @@ -135,8 +132,7 @@ static struct bin_attribute eeprom_attr = { }; /* Return 0 if detection is successful, -ENODEV otherwise */ -static int eeprom_detect(struct i2c_client *client, int kind, - struct i2c_board_info *info) +static int eeprom_detect(struct i2c_client *client, struct i2c_board_info *info) { struct i2c_adapter *adapter = client->adapter; @@ -233,7 +229,7 @@ static struct i2c_driver eeprom_driver = { .class = I2C_CLASS_DDC | I2C_CLASS_SPD, .detect = eeprom_detect, - .address_data = &addr_data, + .address_list = normal_i2c, }; static int __init eeprom_init(void) diff --git a/drivers/misc/ics932s401.c b/drivers/misc/ics932s401.c index 4bb7a3af9ad..395a4ea64e9 100644 --- a/drivers/misc/ics932s401.c +++ b/drivers/misc/ics932s401.c @@ -30,9 +30,6 @@ /* Addresses to scan */ static const unsigned short normal_i2c[] = { 0x69, I2C_CLIENT_END }; -/* Insmod parameters */ -I2C_CLIENT_INSMOD_1(ics932s401); - /* ICS932S401 registers */ #define ICS932S401_REG_CFG2 0x01 #define ICS932S401_CFG1_SPREAD 0x01 @@ -106,12 +103,12 @@ struct ics932s401_data { static int ics932s401_probe(struct i2c_client *client, const struct i2c_device_id *id); -static int ics932s401_detect(struct i2c_client *client, int kind, +static int ics932s401_detect(struct i2c_client *client, struct i2c_board_info *info); static int ics932s401_remove(struct i2c_client *client); static const struct i2c_device_id ics932s401_id[] = { - { "ics932s401", ics932s401 }, + { "ics932s401", 0 }, { } }; MODULE_DEVICE_TABLE(i2c, ics932s401_id); @@ -125,7 +122,7 @@ static struct i2c_driver ics932s401_driver = { .remove = ics932s401_remove, .id_table = ics932s401_id, .detect = ics932s401_detect, - .address_data = &addr_data, + .address_list = normal_i2c, }; static struct ics932s401_data *ics932s401_update_device(struct device *dev) @@ -413,7 +410,7 @@ static ssize_t show_spread(struct device *dev, } /* Return 0 if detection is successful, -ENODEV otherwise */ -static int ics932s401_detect(struct i2c_client *client, int kind, +static int ics932s401_detect(struct i2c_client *client, struct i2c_board_info *info) { struct i2c_adapter *adapter = client->adapter; diff --git a/drivers/misc/ioc4.c b/drivers/misc/ioc4.c index 60b0b1a4fb3..09dcb699e66 100644 --- a/drivers/misc/ioc4.c +++ b/drivers/misc/ioc4.c @@ -138,7 +138,7 @@ ioc4_unregister_submodule(struct ioc4_submodule *is) * even though the following code 
utilizes external interrupt registers * to perform the speed calculation. */ -static void +static void __devinit ioc4_clock_calibrate(struct ioc4_driver_data *idd) { union ioc4_int_out int_out; @@ -230,7 +230,7 @@ ioc4_clock_calibrate(struct ioc4_driver_data *idd) * on the same PCI bus at slot number 3 to differentiate IO9 from IO10. * If neither is present, it's a PCI-RT. */ -static unsigned int +static unsigned int __devinit ioc4_variant(struct ioc4_driver_data *idd) { struct pci_dev *pdev = NULL; @@ -269,7 +269,7 @@ ioc4_variant(struct ioc4_driver_data *idd) return IOC4_VARIANT_PCI_RT; } -static void +static void __devinit ioc4_load_modules(struct work_struct *work) { /* arg just has to be freed */ @@ -280,7 +280,7 @@ ioc4_load_modules(struct work_struct *work) } /* Adds a new instance of an IOC4 card */ -static int +static int __devinit ioc4_probe(struct pci_dev *pdev, const struct pci_device_id *pci_id) { struct ioc4_driver_data *idd; @@ -425,7 +425,7 @@ out: } /* Removes a particular instance of an IOC4 card. */ -static void +static void __devexit ioc4_remove(struct pci_dev *pdev) { struct ioc4_submodule *is; @@ -476,7 +476,7 @@ static struct pci_driver ioc4_driver = { .name = "IOC4", .id_table = ioc4_id_table, .probe = ioc4_probe, - .remove = ioc4_remove, + .remove = __devexit_p(ioc4_remove), }; MODULE_DEVICE_TABLE(pci, ioc4_id_table); @@ -486,14 +486,14 @@ MODULE_DEVICE_TABLE(pci, ioc4_id_table); *********************/ /* Module load */ -static int __devinit +static int __init ioc4_init(void) { return pci_register_driver(&ioc4_driver); } /* Module unload */ -static void __devexit +static void __exit ioc4_exit(void) { /* Ensure ioc4_load_modules() has completed before exiting */ diff --git a/drivers/misc/ti_dac7512.c b/drivers/misc/ti_dac7512.c new file mode 100644 index 00000000000..d3f229a3a77 --- /dev/null +++ b/drivers/misc/ti_dac7512.c @@ -0,0 +1,101 @@ +/* + * dac7512.c - Linux kernel module for + * Texas Instruments DAC7512 + * + * Copyright (c) 2009 Daniel Mack <daniel@caiaq.de> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
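As a usage illustration only (not part of the patch): the driver below matches on the SPI modalias "dac7512", so board code would describe the chip with spi_board_info. Bus number, chip select and clock rate are placeholders.

    #include <linux/spi/spi.h>
    #include <linux/init.h>

    static struct spi_board_info example_dac_info __initdata = {
            .modalias       = "dac7512",    /* matches DAC7512_DRV_NAME below */
            .max_speed_hz   = 1000000,      /* placeholder; check the datasheet */
            .bus_num        = 0,
            .chip_select    = 0,
    };

    static int __init example_dac_board_init(void)
    {
            return spi_register_board_info(&example_dac_info, 1);
    }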
+ */ + +#include <linux/module.h> +#include <linux/init.h> +#include <linux/spi/spi.h> + +#define DAC7512_DRV_NAME "dac7512" +#define DRIVER_VERSION "1.0" + +static ssize_t dac7512_store_val(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct spi_device *spi = to_spi_device(dev); + unsigned char tmp[2]; + unsigned long val; + + if (strict_strtoul(buf, 10, &val) < 0) + return -EINVAL; + + tmp[0] = val >> 8; + tmp[1] = val & 0xff; + spi_write(spi, tmp, sizeof(tmp)); + return count; +} + +static DEVICE_ATTR(value, S_IWUSR, NULL, dac7512_store_val); + +static struct attribute *dac7512_attributes[] = { + &dev_attr_value.attr, + NULL +}; + +static const struct attribute_group dac7512_attr_group = { + .attrs = dac7512_attributes, +}; + +static int __devinit dac7512_probe(struct spi_device *spi) +{ + int ret; + + spi->bits_per_word = 8; + spi->mode = SPI_MODE_0; + ret = spi_setup(spi); + if (ret < 0) + return ret; + + return sysfs_create_group(&spi->dev.kobj, &dac7512_attr_group); +} + +static int __devexit dac7512_remove(struct spi_device *spi) +{ + sysfs_remove_group(&spi->dev.kobj, &dac7512_attr_group); + return 0; +} + +static struct spi_driver dac7512_driver = { + .driver = { + .name = DAC7512_DRV_NAME, + .owner = THIS_MODULE, + }, + .probe = dac7512_probe, + .remove = __devexit_p(dac7512_remove), +}; + +static int __init dac7512_init(void) +{ + return spi_register_driver(&dac7512_driver); +} + +static void __exit dac7512_exit(void) +{ + spi_unregister_driver(&dac7512_driver); +} + +MODULE_AUTHOR("Daniel Mack <daniel@caiaq.de>"); +MODULE_DESCRIPTION("DAC7512 16-bit DAC"); +MODULE_LICENSE("GPL v2"); +MODULE_VERSION(DRIVER_VERSION); + +module_init(dac7512_init); +module_exit(dac7512_exit); diff --git a/drivers/mmc/core/Kconfig b/drivers/mmc/core/Kconfig index ab37a6d9d32..bb22ffd76ef 100644 --- a/drivers/mmc/core/Kconfig +++ b/drivers/mmc/core/Kconfig @@ -3,7 +3,7 @@ # config MMC_UNSAFE_RESUME - bool "Allow unsafe resume (DANGEROUS)" + bool "Assume MMC/SD cards are non-removable (DANGEROUS)" help If you say Y here, the MMC layer will assume that all cards stayed in their respective slots during the suspend. The @@ -14,3 +14,5 @@ config MMC_UNSAFE_RESUME This option is usually just for embedded systems which use a MMC/SD card for rootfs. Most people should say N here. + This option sets a default which can be overridden by the + module parameter "removable=0" or "removable=1". diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c index 7dab2e5f4bc..30acd526582 100644 --- a/drivers/mmc/core/core.c +++ b/drivers/mmc/core/core.c @@ -48,6 +48,22 @@ int use_spi_crc = 1; module_param(use_spi_crc, bool, 0); /* + * We normally treat cards as removed during suspend if they are not + * known to be on a non-removable bus, to avoid the risk of writing + * back data to a different card after resume. Allow this to be + * overridden if necessary. + */ +#ifdef CONFIG_MMC_UNSAFE_RESUME +int mmc_assume_removable; +#else +int mmc_assume_removable = 1; +#endif +module_param_named(removable, mmc_assume_removable, bool, 0644); +MODULE_PARM_DESC( + removable, + "MMC/SD cards are removable and may be removed during suspend"); + +/* * Internal function. Schedule delayed work in the MMC work queue. 
*/ static int mmc_schedule_delayed_work(struct delayed_work *work, diff --git a/drivers/mmc/core/core.h b/drivers/mmc/core/core.h index 67ae6abc423..a811c52a165 100644 --- a/drivers/mmc/core/core.h +++ b/drivers/mmc/core/core.h @@ -54,7 +54,9 @@ int mmc_attach_mmc(struct mmc_host *host, u32 ocr); int mmc_attach_sd(struct mmc_host *host, u32 ocr); int mmc_attach_sdio(struct mmc_host *host, u32 ocr); +/* Module parameters */ extern int use_spi_crc; +extern int mmc_assume_removable; /* Debugfs information for hosts and cards */ void mmc_add_host_debugfs(struct mmc_host *host); diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c index bfefce365ae..c11189446a1 100644 --- a/drivers/mmc/core/mmc.c +++ b/drivers/mmc/core/mmc.c @@ -602,25 +602,6 @@ static int mmc_awake(struct mmc_host *host) return err; } -#ifdef CONFIG_MMC_UNSAFE_RESUME - -static const struct mmc_bus_ops mmc_ops = { - .awake = mmc_awake, - .sleep = mmc_sleep, - .remove = mmc_remove, - .detect = mmc_detect, - .suspend = mmc_suspend, - .resume = mmc_resume, - .power_restore = mmc_power_restore, -}; - -static void mmc_attach_bus_ops(struct mmc_host *host) -{ - mmc_attach_bus(host, &mmc_ops); -} - -#else - static const struct mmc_bus_ops mmc_ops = { .awake = mmc_awake, .sleep = mmc_sleep, @@ -645,15 +626,13 @@ static void mmc_attach_bus_ops(struct mmc_host *host) { const struct mmc_bus_ops *bus_ops; - if (host->caps & MMC_CAP_NONREMOVABLE) + if (host->caps & MMC_CAP_NONREMOVABLE || !mmc_assume_removable) bus_ops = &mmc_ops_unsafe; else bus_ops = &mmc_ops; mmc_attach_bus(host, bus_ops); } -#endif - /* * Starting point for MMC card init. */ diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c index 10b2a4d20f5..fdd414eded0 100644 --- a/drivers/mmc/core/sd.c +++ b/drivers/mmc/core/sd.c @@ -606,23 +606,6 @@ static void mmc_sd_power_restore(struct mmc_host *host) mmc_release_host(host); } -#ifdef CONFIG_MMC_UNSAFE_RESUME - -static const struct mmc_bus_ops mmc_sd_ops = { - .remove = mmc_sd_remove, - .detect = mmc_sd_detect, - .suspend = mmc_sd_suspend, - .resume = mmc_sd_resume, - .power_restore = mmc_sd_power_restore, -}; - -static void mmc_sd_attach_bus_ops(struct mmc_host *host) -{ - mmc_attach_bus(host, &mmc_sd_ops); -} - -#else - static const struct mmc_bus_ops mmc_sd_ops = { .remove = mmc_sd_remove, .detect = mmc_sd_detect, @@ -643,15 +626,13 @@ static void mmc_sd_attach_bus_ops(struct mmc_host *host) { const struct mmc_bus_ops *bus_ops; - if (host->caps & MMC_CAP_NONREMOVABLE) + if (host->caps & MMC_CAP_NONREMOVABLE || !mmc_assume_removable) bus_ops = &mmc_sd_ops_unsafe; else bus_ops = &mmc_sd_ops; mmc_attach_bus(host, bus_ops); } -#endif - /* * Starting point for SD card init. 
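One practical note, illustrated with a snippet that is not from this patch: the bus_ops selection above now consults the global mmc_assume_removable flag, but a host controller driver that knows its card is soldered down can still opt out per host by setting MMC_CAP_NONREMOVABLE, the other half of the condition.

    #include <linux/mmc/host.h>

    /* hypothetical fragment from a host controller probe routine */
    static void example_mark_nonremovable(struct mmc_host *mmc)
    {
            /* this slot cannot lose its card across suspend/resume */
            mmc->caps |= MMC_CAP_NONREMOVABLE;
    }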
*/ diff --git a/drivers/mmc/core/sdio_cis.c b/drivers/mmc/core/sdio_cis.c index f85dcd53650..9538389783c 100644 --- a/drivers/mmc/core/sdio_cis.c +++ b/drivers/mmc/core/sdio_cis.c @@ -97,26 +97,56 @@ static const unsigned char speed_val[16] = static const unsigned int speed_unit[8] = { 10000, 100000, 1000000, 10000000, 0, 0, 0, 0 }; -/* FUNCE tuples with these types get passed to SDIO drivers */ -static const unsigned char funce_type_whitelist[] = { - 4 /* CISTPL_FUNCE_LAN_NODE_ID used in Broadcom cards */ + +typedef int (tpl_parse_t)(struct mmc_card *, struct sdio_func *, + const unsigned char *, unsigned); + +struct cis_tpl { + unsigned char code; + unsigned char min_size; + tpl_parse_t *parse; }; -static int cistpl_funce_whitelisted(unsigned char type) +static int cis_tpl_parse(struct mmc_card *card, struct sdio_func *func, + const char *tpl_descr, + const struct cis_tpl *tpl, int tpl_count, + unsigned char code, + const unsigned char *buf, unsigned size) { - int i; + int i, ret; - for (i = 0; i < ARRAY_SIZE(funce_type_whitelist); i++) { - if (funce_type_whitelist[i] == type) - return 1; + /* look for a matching code in the table */ + for (i = 0; i < tpl_count; i++, tpl++) { + if (tpl->code == code) + break; } - return 0; + if (i < tpl_count) { + if (size >= tpl->min_size) { + if (tpl->parse) + ret = tpl->parse(card, func, buf, size); + else + ret = -EILSEQ; /* known tuple, not parsed */ + } else { + /* invalid tuple */ + ret = -EINVAL; + } + if (ret && ret != -EILSEQ && ret != -ENOENT) { + printk(KERN_ERR "%s: bad %s tuple 0x%02x (%u bytes)\n", + mmc_hostname(card->host), tpl_descr, code, size); + } + } else { + /* unknown tuple */ + ret = -ENOENT; + } + + return ret; } -static int cistpl_funce_common(struct mmc_card *card, +static int cistpl_funce_common(struct mmc_card *card, struct sdio_func *func, const unsigned char *buf, unsigned size) { - if (size < 0x04 || buf[0] != 0) + /* Only valid for the common CIS (function 0) */ + if (func) return -EINVAL; /* TPLFE_FN0_BLK_SIZE */ @@ -129,20 +159,24 @@ static int cistpl_funce_common(struct mmc_card *card, return 0; } -static int cistpl_funce_func(struct sdio_func *func, +static int cistpl_funce_func(struct mmc_card *card, struct sdio_func *func, const unsigned char *buf, unsigned size) { unsigned vsn; unsigned min_size; - /* let SDIO drivers take care of whitelisted FUNCE tuples */ - if (cistpl_funce_whitelisted(buf[0])) - return -EILSEQ; + /* Only valid for the individual function's CIS (1-7) */ + if (!func) + return -EINVAL; + /* + * This tuple has a different length depending on the SDIO spec + * version. + */ vsn = func->card->cccr.sdio_vsn; min_size = (vsn == SDIO_SDIO_REV_1_00) ? 28 : 42; - if (size < min_size || buf[0] != 1) + if (size < min_size) return -EINVAL; /* TPLFE_MAX_BLK_SIZE */ @@ -157,39 +191,32 @@ static int cistpl_funce_func(struct sdio_func *func, return 0; } +/* + * Known TPLFE_TYPEs table for CISTPL_FUNCE tuples. + * + * Note that, unlike PCMCIA, CISTPL_FUNCE tuples are not parsed depending + * on the TPLFID_FUNCTION value of the previous CISTPL_FUNCID as on SDIO + * TPLFID_FUNCTION is always hardcoded to 0x0C. 
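For context (sketch only, not from this patch): tuples that cis_tpl_parse() reports as -EILSEQ (known but intentionally unparsed) or -ENOENT (unknown) are queued by the sdio_read_cis() changes further down, so an SDIO function driver can walk them later. Field names follow struct sdio_func_tuple as used in this file.

    #include <linux/kernel.h>
    #include <linux/mmc/sdio_func.h>

    static void example_dump_tuples(struct sdio_func *func)
    {
            struct sdio_func_tuple *t;

            for (t = func->tuples; t; t = t->next)
                    pr_debug("queued CIS tuple 0x%02x, %u bytes\n",
                             t->code, t->size);
    }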
+ */ +static const struct cis_tpl cis_tpl_funce_list[] = { + { 0x00, 4, cistpl_funce_common }, + { 0x01, 0, cistpl_funce_func }, + { 0x04, 1+1+6, /* CISTPL_FUNCE_LAN_NODE_ID */ }, +}; + static int cistpl_funce(struct mmc_card *card, struct sdio_func *func, const unsigned char *buf, unsigned size) { - int ret; - - /* - * There should be two versions of the CISTPL_FUNCE tuple, - * one for the common CIS (function 0) and a version used by - * the individual function's CIS (1-7). Yet, the later has a - * different length depending on the SDIO spec version. - */ - if (func) - ret = cistpl_funce_func(func, buf, size); - else - ret = cistpl_funce_common(card, buf, size); - - if (ret && ret != -EILSEQ) { - printk(KERN_ERR "%s: bad CISTPL_FUNCE size %u " - "type %u\n", mmc_hostname(card->host), size, buf[0]); - } + if (size < 1) + return -EINVAL; - return ret; + return cis_tpl_parse(card, func, "CISTPL_FUNCE", + cis_tpl_funce_list, + ARRAY_SIZE(cis_tpl_funce_list), + buf[0], buf, size); } -typedef int (tpl_parse_t)(struct mmc_card *, struct sdio_func *, - const unsigned char *, unsigned); - -struct cis_tpl { - unsigned char code; - unsigned char min_size; - tpl_parse_t *parse; -}; - +/* Known TPL_CODEs table for CIS tuples */ static const struct cis_tpl cis_tpl_list[] = { { 0x15, 3, cistpl_vers_1 }, { 0x20, 4, cistpl_manfid }, @@ -268,46 +295,38 @@ static int sdio_read_cis(struct mmc_card *card, struct sdio_func *func) break; } - for (i = 0; i < ARRAY_SIZE(cis_tpl_list); i++) - if (cis_tpl_list[i].code == tpl_code) - break; - if (i < ARRAY_SIZE(cis_tpl_list)) { - const struct cis_tpl *tpl = cis_tpl_list + i; - if (tpl_link < tpl->min_size) { - printk(KERN_ERR - "%s: bad CIS tuple 0x%02x" - " (length = %u, expected >= %u)\n", - mmc_hostname(card->host), - tpl_code, tpl_link, tpl->min_size); - ret = -EINVAL; - } else if (tpl->parse) { - ret = tpl->parse(card, func, - this->data, tpl_link); - } + /* Try to parse the CIS tuple */ + ret = cis_tpl_parse(card, func, "CIS", + cis_tpl_list, ARRAY_SIZE(cis_tpl_list), + tpl_code, this->data, tpl_link); + if (ret == -EILSEQ || ret == -ENOENT) { /* - * We don't need the tuple anymore if it was - * successfully parsed by the SDIO core or if it is - * not going to be parsed by SDIO drivers. + * The tuple is unknown or known but not parsed. + * Queue the tuple for the function driver. */ - if (!ret || ret != -EILSEQ) - kfree(this); - } else { - /* unknown tuple */ - ret = -EILSEQ; - } - - if (ret == -EILSEQ) { - /* this tuple is unknown to the core or whitelisted */ this->next = NULL; this->code = tpl_code; this->size = tpl_link; *prev = this; prev = &this->next; - printk(KERN_DEBUG - "%s: queuing CIS tuple 0x%02x length %u\n", - mmc_hostname(card->host), tpl_code, tpl_link); + + if (ret == -ENOENT) { + /* warn about unknown tuples */ + printk(KERN_WARNING "%s: queuing unknown" + " CIS tuple 0x%02x (%u bytes)\n", + mmc_hostname(card->host), + tpl_code, tpl_link); + } + /* keep on analyzing tuples */ ret = 0; + } else { + /* + * We don't need the tuple anymore if it was + * successfully parsed by the SDIO core or if it is + * not going to be queued for a driver. + */ + kfree(this); } ptr += tpl_link; diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig index e04b751680d..9d405b18178 100644 --- a/drivers/mmc/host/Kconfig +++ b/drivers/mmc/host/Kconfig @@ -251,6 +251,14 @@ config MMC_MVSDIO To compile this driver as a module, choose M here: the module will be called mvsdio. 
+config MMC_DAVINCI + tristate "TI DAVINCI Multimedia Card Interface support" + depends on ARCH_DAVINCI + help + This selects the TI DAVINCI Multimedia Card Interface. + If you have a DAVINCI board with a Multimedia Card slot, + say Y or M here. If unsure, say N. + config MMC_SPI tristate "MMC/SD/SDIO over SPI" depends on SPI_MASTER && !HIGHMEM && HAS_DMA @@ -357,3 +365,22 @@ config MMC_VIA_SDMMC If you have a controller with this interface, say Y or M here. If unsure, say N. + +config SDH_BFIN + tristate "Blackfin Secure Digital Host support" + depends on MMC && ((BF54x && !BF544) || (BF51x && !BF512)) + help + If you say yes here you will get support for the Blackfin on-chip + Secure Digital Host interface. This includes support for MMC and + SD cards. + + To compile this driver as a module, choose M here: the + module will be called bfin_sdh. + + If unsure, say N. + +config SDH_BFIN_MISSING_CMD_PULLUP_WORKAROUND + bool "Blackfin EZkit Missing SDH_CMD Pull Up Resistor Workaround" + depends on SDH_BFIN + help + If you say yes here, SD-Cards may work on the EZkit. diff --git a/drivers/mmc/host/Makefile b/drivers/mmc/host/Makefile index abcb0400e06..ded4d8cdd9d 100644 --- a/drivers/mmc/host/Makefile +++ b/drivers/mmc/host/Makefile @@ -25,6 +25,7 @@ obj-$(CONFIG_MMC_ATMELMCI) += atmel-mci.o obj-$(CONFIG_MMC_TIFM_SD) += tifm_sd.o obj-$(CONFIG_MMC_MSM7X00A) += msm_sdcc.o obj-$(CONFIG_MMC_MVSDIO) += mvsdio.o +obj-$(CONFIG_MMC_DAVINCI) += davinci_mmc.o obj-$(CONFIG_MMC_SPI) += mmc_spi.o ifeq ($(CONFIG_OF),y) obj-$(CONFIG_MMC_SPI) += of_mmc_spi.o @@ -34,6 +35,7 @@ obj-$(CONFIG_MMC_SDRICOH_CS) += sdricoh_cs.o obj-$(CONFIG_MMC_TMIO) += tmio_mmc.o obj-$(CONFIG_MMC_CB710) += cb710-mmc.o obj-$(CONFIG_MMC_VIA_SDMMC) += via-sdmmc.o +obj-$(CONFIG_SDH_BFIN) += bfin_sdh.o ifeq ($(CONFIG_CB710_DEBUG),y) CFLAGS-cb710-mmc += -DDEBUG diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c index fc25586b7ee..8072128e933 100644 --- a/drivers/mmc/host/atmel-mci.c +++ b/drivers/mmc/host/atmel-mci.c @@ -25,6 +25,8 @@ #include <linux/stat.h> #include <linux/mmc/host.h> + +#include <mach/atmel-mci.h> #include <linux/atmel-mci.h> #include <asm/io.h> @@ -92,6 +94,7 @@ struct atmel_mci_dma { * @need_clock_update: Update the clock rate before the next request. * @need_reset: Reset controller before next request. * @mode_reg: Value of the MR register. + * @cfg_reg: Value of the CFG register. * @bus_hz: The rate of @mck in Hz. This forms the basis for MMC bus * rate and timeout calculations. * @mapbase: Physical address of the MMIO registers. @@ -155,6 +158,7 @@ struct atmel_mci { bool need_clock_update; bool need_reset; u32 mode_reg; + u32 cfg_reg; unsigned long bus_hz; unsigned long mapbase; struct clk *mck; @@ -223,6 +227,19 @@ static bool mci_has_rwproof(void) } /* + * The new MCI2 module isn't 100% compatible with the old MCI module, + * and it has a few nice features which we want to use... + */ +static inline bool atmci_is_mci2(void) +{ + if (cpu_is_at91sam9g45()) + return true; + + return false; +} + + +/* + * The debugfs stuff below is mostly optimized away when * CONFIG_DEBUG_FS is not set. 
*/ @@ -357,12 +374,33 @@ static int atmci_regs_show(struct seq_file *s, void *v) buf[MCI_BLKR / 4], buf[MCI_BLKR / 4] & 0xffff, (buf[MCI_BLKR / 4] >> 16) & 0xffff); + if (atmci_is_mci2()) + seq_printf(s, "CSTOR:\t0x%08x\n", buf[MCI_CSTOR / 4]); /* Don't read RSPR and RDR; it will consume the data there */ atmci_show_status_reg(s, "SR", buf[MCI_SR / 4]); atmci_show_status_reg(s, "IMR", buf[MCI_IMR / 4]); + if (atmci_is_mci2()) { + u32 val; + + val = buf[MCI_DMA / 4]; + seq_printf(s, "DMA:\t0x%08x OFFSET=%u CHKSIZE=%u%s\n", + val, val & 3, + ((val >> 4) & 3) ? + 1 << (((val >> 4) & 3) + 1) : 1, + val & MCI_DMAEN ? " DMAEN" : ""); + + val = buf[MCI_CFG / 4]; + seq_printf(s, "CFG:\t0x%08x%s%s%s%s\n", + val, + val & MCI_CFG_FIFOMODE_1DATA ? " FIFOMODE_ONE_DATA" : "", + val & MCI_CFG_FERRCTRL_COR ? " FERRCTRL_CLEAR_ON_READ" : "", + val & MCI_CFG_HSMODE ? " HSMODE" : "", + val & MCI_CFG_LSYNC ? " LSYNC" : ""); + } + kfree(buf); return 0; @@ -557,6 +595,10 @@ static void atmci_dma_complete(void *arg) dev_vdbg(&host->pdev->dev, "DMA complete\n"); + if (atmci_is_mci2()) + /* Disable DMA hardware handshaking on MCI */ + mci_writel(host, DMA, mci_readl(host, DMA) & ~MCI_DMAEN); + atmci_dma_cleanup(host); /* @@ -592,7 +634,7 @@ static void atmci_dma_complete(void *arg) } static int -atmci_submit_data_dma(struct atmel_mci *host, struct mmc_data *data) +atmci_prepare_data_dma(struct atmel_mci *host, struct mmc_data *data) { struct dma_chan *chan; struct dma_async_tx_descriptor *desc; @@ -624,6 +666,9 @@ atmci_submit_data_dma(struct atmel_mci *host, struct mmc_data *data) if (!chan) return -ENODEV; + if (atmci_is_mci2()) + mci_writel(host, DMA, MCI_DMA_CHKSIZE(3) | MCI_DMAEN); + if (data->flags & MMC_DATA_READ) direction = DMA_FROM_DEVICE; else @@ -641,10 +686,6 @@ atmci_submit_data_dma(struct atmel_mci *host, struct mmc_data *data) host->dma.data_desc = desc; desc->callback = atmci_dma_complete; desc->callback_param = host; - desc->tx_submit(desc); - - /* Go! */ - chan->device->device_issue_pending(chan); return 0; unmap_exit: @@ -652,13 +693,26 @@ unmap_exit: return -ENOMEM; } +static void atmci_submit_data(struct atmel_mci *host) +{ + struct dma_chan *chan = host->data_chan; + struct dma_async_tx_descriptor *desc = host->dma.data_desc; + + if (chan) { + desc->tx_submit(desc); + chan->device->device_issue_pending(chan); + } +} + #else /* CONFIG_MMC_ATMELMCI_DMA */ -static int atmci_submit_data_dma(struct atmel_mci *host, struct mmc_data *data) +static int atmci_prepare_data_dma(struct atmel_mci *host, struct mmc_data *data) { return -ENOSYS; } +static void atmci_submit_data(struct atmel_mci *host) {} + static void atmci_stop_dma(struct atmel_mci *host) { /* Data transfer was stopped by the interrupt handler */ @@ -672,7 +726,7 @@ static void atmci_stop_dma(struct atmel_mci *host) * Returns a mask of interrupt flags to be enabled after the whole * request has been prepared. 
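+ *
+ * Note that this only *prepares* the transfer: with the prepare/submit
+ * split, the DMA descriptor is handed to the engine later, by
+ * atmci_submit_data(), once the command has been started.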
*/ -static u32 atmci_submit_data(struct atmel_mci *host, struct mmc_data *data) +static u32 atmci_prepare_data(struct atmel_mci *host, struct mmc_data *data) { u32 iflags; @@ -683,7 +737,7 @@ static u32 atmci_submit_data(struct atmel_mci *host, struct mmc_data *data) host->data = data; iflags = ATMCI_DATA_ERROR_FLAGS; - if (atmci_submit_data_dma(host, data)) { + if (atmci_prepare_data_dma(host, data)) { host->data_chan = NULL; /* @@ -729,6 +783,8 @@ static void atmci_start_request(struct atmel_mci *host, mci_writel(host, CR, MCI_CR_SWRST); mci_writel(host, CR, MCI_CR_MCIEN); mci_writel(host, MR, host->mode_reg); + if (atmci_is_mci2()) + mci_writel(host, CFG, host->cfg_reg); host->need_reset = false; } mci_writel(host, SDCR, slot->sdc_reg); @@ -744,6 +800,7 @@ static void atmci_start_request(struct atmel_mci *host, while (!(mci_readl(host, SR) & MCI_CMDRDY)) cpu_relax(); } + iflags = 0; data = mrq->data; if (data) { atmci_set_timeout(host, slot, data); @@ -753,15 +810,17 @@ static void atmci_start_request(struct atmel_mci *host, | MCI_BLKLEN(data->blksz)); dev_vdbg(&slot->mmc->class_dev, "BLKR=0x%08x\n", MCI_BCNT(data->blocks) | MCI_BLKLEN(data->blksz)); + + iflags |= atmci_prepare_data(host, data); } - iflags = MCI_CMDRDY; + iflags |= MCI_CMDRDY; cmd = mrq->cmd; cmdflags = atmci_prepare_command(slot->mmc, cmd); atmci_start_command(host, cmd, cmdflags); if (data) - iflags |= atmci_submit_data(host, data); + atmci_submit_data(host); if (mrq->stop) { host->stop_cmdr = atmci_prepare_command(slot->mmc, mrq->stop); @@ -857,6 +916,8 @@ static void atmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) clk_enable(host->mck); mci_writel(host, CR, MCI_CR_SWRST); mci_writel(host, CR, MCI_CR_MCIEN); + if (atmci_is_mci2()) + mci_writel(host, CFG, host->cfg_reg); } /* @@ -1095,6 +1156,8 @@ static void atmci_detect_change(unsigned long data) mci_writel(host, CR, MCI_CR_SWRST); mci_writel(host, CR, MCI_CR_MCIEN); mci_writel(host, MR, host->mode_reg); + if (atmci_is_mci2()) + mci_writel(host, CFG, host->cfg_reg); host->data = NULL; host->cmd = NULL; @@ -1584,14 +1647,47 @@ static void __exit atmci_cleanup_slot(struct atmel_mci_slot *slot, #ifdef CONFIG_MMC_ATMELMCI_DMA static bool filter(struct dma_chan *chan, void *slave) { - struct dw_dma_slave *dws = slave; + struct mci_dma_data *sl = slave; - if (dws->dma_dev == chan->device->dev) { - chan->private = dws; + if (sl && find_slave_dev(sl) == chan->device->dev) { + chan->private = slave_data_ptr(sl); return true; - } else + } else { return false; + } } + +static void atmci_configure_dma(struct atmel_mci *host) +{ + struct mci_platform_data *pdata; + + if (host == NULL) + return; + + pdata = host->pdev->dev.platform_data; + + if (pdata && find_slave_dev(pdata->dma_slave)) { + dma_cap_mask_t mask; + + setup_dma_addr(pdata->dma_slave, + host->mapbase + MCI_TDR, + host->mapbase + MCI_RDR); + + /* Try to grab a DMA channel */ + dma_cap_zero(mask); + dma_cap_set(DMA_SLAVE, mask); + host->dma.chan = + dma_request_channel(mask, filter, pdata->dma_slave); + } + if (!host->dma.chan) + dev_notice(&host->pdev->dev, "DMA not available, using PIO\n"); + else + dev_info(&host->pdev->dev, + "Using %s for DMA transfers\n", + dma_chan_name(host->dma.chan)); +} +#else +static void atmci_configure_dma(struct atmel_mci *host) {} #endif static int __init atmci_probe(struct platform_device *pdev) @@ -1645,22 +1741,7 @@ static int __init atmci_probe(struct platform_device *pdev) if (ret) goto err_request_irq; -#ifdef CONFIG_MMC_ATMELMCI_DMA - if (pdata->dma_slave.dma_dev) { - 
struct dw_dma_slave *dws = &pdata->dma_slave; - dma_cap_mask_t mask; - - dws->tx_reg = regs->start + MCI_TDR; - dws->rx_reg = regs->start + MCI_RDR; - - /* Try to grab a DMA channel */ - dma_cap_zero(mask); - dma_cap_set(DMA_SLAVE, mask); - host->dma.chan = dma_request_channel(mask, filter, dws); - } - if (!host->dma.chan) - dev_notice(&pdev->dev, "DMA not available, using PIO\n"); -#endif /* CONFIG_MMC_ATMELMCI_DMA */ + atmci_configure_dma(host); platform_set_drvdata(pdev, host); diff --git a/drivers/mmc/host/bfin_sdh.c b/drivers/mmc/host/bfin_sdh.c new file mode 100644 index 00000000000..3343a57355c --- /dev/null +++ b/drivers/mmc/host/bfin_sdh.c @@ -0,0 +1,639 @@ +/* + * bfin_sdh.c - Analog Devices Blackfin SDH Controller + * + * Copyright (C) 2007-2009 Analog Device Inc. + * + * Licensed under the GPL-2 or later. + */ + +#define DRIVER_NAME "bfin-sdh" + +#include <linux/module.h> +#include <linux/init.h> +#include <linux/ioport.h> +#include <linux/platform_device.h> +#include <linux/delay.h> +#include <linux/interrupt.h> +#include <linux/dma-mapping.h> +#include <linux/mmc/host.h> +#include <linux/proc_fs.h> + +#include <asm/cacheflush.h> +#include <asm/dma.h> +#include <asm/portmux.h> +#include <asm/bfin_sdh.h> + +#if defined(CONFIG_BF51x) +#define bfin_read_SDH_PWR_CTL bfin_read_RSI_PWR_CTL +#define bfin_write_SDH_PWR_CTL bfin_write_RSI_PWR_CTL +#define bfin_read_SDH_CLK_CTL bfin_read_RSI_CLK_CTL +#define bfin_write_SDH_CLK_CTL bfin_write_RSI_CLK_CTL +#define bfin_write_SDH_ARGUMENT bfin_write_RSI_ARGUMENT +#define bfin_write_SDH_COMMAND bfin_write_RSI_COMMAND +#define bfin_write_SDH_DATA_TIMER bfin_write_RSI_DATA_TIMER +#define bfin_read_SDH_RESPONSE0 bfin_read_RSI_RESPONSE0 +#define bfin_read_SDH_RESPONSE1 bfin_read_RSI_RESPONSE1 +#define bfin_read_SDH_RESPONSE2 bfin_read_RSI_RESPONSE2 +#define bfin_read_SDH_RESPONSE3 bfin_read_RSI_RESPONSE3 +#define bfin_write_SDH_DATA_LGTH bfin_write_RSI_DATA_LGTH +#define bfin_read_SDH_DATA_CTL bfin_read_RSI_DATA_CTL +#define bfin_write_SDH_DATA_CTL bfin_write_RSI_DATA_CTL +#define bfin_read_SDH_DATA_CNT bfin_read_RSI_DATA_CNT +#define bfin_write_SDH_STATUS_CLR bfin_write_RSI_STATUS_CLR +#define bfin_read_SDH_E_STATUS bfin_read_RSI_E_STATUS +#define bfin_write_SDH_E_STATUS bfin_write_RSI_E_STATUS +#define bfin_read_SDH_STATUS bfin_read_RSI_STATUS +#define bfin_write_SDH_MASK0 bfin_write_RSI_MASK0 +#define bfin_read_SDH_CFG bfin_read_RSI_CFG +#define bfin_write_SDH_CFG bfin_write_RSI_CFG +#endif + +struct dma_desc_array { + unsigned long start_addr; + unsigned short cfg; + unsigned short x_count; + short x_modify; +} __packed; + +struct sdh_host { + struct mmc_host *mmc; + spinlock_t lock; + struct resource *res; + void __iomem *base; + int irq; + int stat_irq; + int dma_ch; + int dma_dir; + struct dma_desc_array *sg_cpu; + dma_addr_t sg_dma; + int dma_len; + + unsigned int imask; + unsigned int power_mode; + unsigned int clk_div; + + struct mmc_request *mrq; + struct mmc_command *cmd; + struct mmc_data *data; +}; + +static struct bfin_sd_host *get_sdh_data(struct platform_device *pdev) +{ + return pdev->dev.platform_data; +} + +static void sdh_stop_clock(struct sdh_host *host) +{ + bfin_write_SDH_CLK_CTL(bfin_read_SDH_CLK_CTL() & ~CLK_E); + SSYNC(); +} + +static void sdh_enable_stat_irq(struct sdh_host *host, unsigned int mask) +{ + unsigned long flags; + + spin_lock_irqsave(&host->lock, flags); + host->imask |= mask; + bfin_write_SDH_MASK0(mask); + SSYNC(); + spin_unlock_irqrestore(&host->lock, flags); +} + +static void 
sdh_disable_stat_irq(struct sdh_host *host, unsigned int mask) +{ + unsigned long flags; + + spin_lock_irqsave(&host->lock, flags); + host->imask &= ~mask; + bfin_write_SDH_MASK0(host->imask); + SSYNC(); + spin_unlock_irqrestore(&host->lock, flags); +} + +static int sdh_setup_data(struct sdh_host *host, struct mmc_data *data) +{ + unsigned int length; + unsigned int data_ctl; + unsigned int dma_cfg; + struct scatterlist *sg; + + dev_dbg(mmc_dev(host->mmc), "%s enter flags: 0x%x\n", __func__, data->flags); + host->data = data; + data_ctl = 0; + dma_cfg = 0; + + length = data->blksz * data->blocks; + bfin_write_SDH_DATA_LGTH(length); + + if (data->flags & MMC_DATA_STREAM) + data_ctl |= DTX_MODE; + + if (data->flags & MMC_DATA_READ) + data_ctl |= DTX_DIR; + /* Only supports power-of-2 block size */ + if (data->blksz & (data->blksz - 1)) + return -EINVAL; + data_ctl |= ((ffs(data->blksz) - 1) << 4); + + bfin_write_SDH_DATA_CTL(data_ctl); + + bfin_write_SDH_DATA_TIMER(0xFFFF); + SSYNC(); + + if (data->flags & MMC_DATA_READ) { + host->dma_dir = DMA_FROM_DEVICE; + dma_cfg |= WNR; + } else + host->dma_dir = DMA_TO_DEVICE; + + sdh_enable_stat_irq(host, (DAT_CRC_FAIL | DAT_TIME_OUT | DAT_END)); + host->dma_len = dma_map_sg(mmc_dev(host->mmc), data->sg, data->sg_len, host->dma_dir); +#if defined(CONFIG_BF54x) + dma_cfg |= DMAFLOW_ARRAY | NDSIZE_5 | RESTART | WDSIZE_32 | DMAEN; + { + int i; + for_each_sg(data->sg, sg, host->dma_len, i) { + host->sg_cpu[i].start_addr = sg_dma_address(sg); + host->sg_cpu[i].cfg = dma_cfg; + host->sg_cpu[i].x_count = sg_dma_len(sg) / 4; + host->sg_cpu[i].x_modify = 4; + dev_dbg(mmc_dev(host->mmc), "%d: start_addr:0x%lx, " + "cfg:0x%x, x_count:0x%x, x_modify:0x%x\n", + i, host->sg_cpu[i].start_addr, + host->sg_cpu[i].cfg, host->sg_cpu[i].x_count, + host->sg_cpu[i].x_modify); + } + } + flush_dcache_range((unsigned int)host->sg_cpu, + (unsigned int)host->sg_cpu + + host->dma_len * sizeof(struct dma_desc_array)); + /* Set the last descriptor to stop mode */ + host->sg_cpu[host->dma_len - 1].cfg &= ~(DMAFLOW | NDSIZE); + host->sg_cpu[host->dma_len - 1].cfg |= DI_EN; + + set_dma_curr_desc_addr(host->dma_ch, (unsigned long *)host->sg_dma); + set_dma_x_count(host->dma_ch, 0); + set_dma_x_modify(host->dma_ch, 0); + set_dma_config(host->dma_ch, dma_cfg); +#elif defined(CONFIG_BF51x) + /* RSI DMA doesn't work in array mode */ + dma_cfg |= WDSIZE_32 | DMAEN; + set_dma_start_addr(host->dma_ch, sg_dma_address(&data->sg[0])); + set_dma_x_count(host->dma_ch, length / 4); + set_dma_x_modify(host->dma_ch, 4); + set_dma_config(host->dma_ch, dma_cfg); +#endif + bfin_write_SDH_DATA_CTL(bfin_read_SDH_DATA_CTL() | DTX_DMA_E | DTX_E); + + SSYNC(); + + dev_dbg(mmc_dev(host->mmc), "%s exit\n", __func__); + return 0; +} + +static void sdh_start_cmd(struct sdh_host *host, struct mmc_command *cmd) +{ + unsigned int sdh_cmd; + unsigned int stat_mask; + + dev_dbg(mmc_dev(host->mmc), "%s enter cmd: 0x%p\n", __func__, cmd); + WARN_ON(host->cmd != NULL); + host->cmd = cmd; + + sdh_cmd = 0; + stat_mask = 0; + + sdh_cmd |= cmd->opcode; + + if (cmd->flags & MMC_RSP_PRESENT) { + sdh_cmd |= CMD_RSP; + stat_mask |= CMD_RESP_END; + } else { + stat_mask |= CMD_SENT; + } + + if (cmd->flags & MMC_RSP_136) + sdh_cmd |= CMD_L_RSP; + + stat_mask |= CMD_CRC_FAIL | CMD_TIME_OUT; + + sdh_enable_stat_irq(host, stat_mask); + + bfin_write_SDH_ARGUMENT(cmd->arg); + bfin_write_SDH_COMMAND(sdh_cmd | CMD_E); + bfin_write_SDH_CLK_CTL(bfin_read_SDH_CLK_CTL() | CLK_E); + SSYNC(); +} + +static void sdh_finish_request(struct 
sdh_host *host, struct mmc_request *mrq) +{ + dev_dbg(mmc_dev(host->mmc), "%s enter\n", __func__); + host->mrq = NULL; + host->cmd = NULL; + host->data = NULL; + mmc_request_done(host->mmc, mrq); +} + +static int sdh_cmd_done(struct sdh_host *host, unsigned int stat) +{ + struct mmc_command *cmd = host->cmd; + int ret = 0; + + dev_dbg(mmc_dev(host->mmc), "%s enter cmd: %p\n", __func__, cmd); + if (!cmd) + return 0; + + host->cmd = NULL; + + if (cmd->flags & MMC_RSP_PRESENT) { + cmd->resp[0] = bfin_read_SDH_RESPONSE0(); + if (cmd->flags & MMC_RSP_136) { + cmd->resp[1] = bfin_read_SDH_RESPONSE1(); + cmd->resp[2] = bfin_read_SDH_RESPONSE2(); + cmd->resp[3] = bfin_read_SDH_RESPONSE3(); + } + } + if (stat & CMD_TIME_OUT) + cmd->error = -ETIMEDOUT; + else if (stat & CMD_CRC_FAIL && cmd->flags & MMC_RSP_CRC) + cmd->error = -EILSEQ; + + sdh_disable_stat_irq(host, (CMD_SENT | CMD_RESP_END | CMD_TIME_OUT | CMD_CRC_FAIL)); + + if (host->data && !cmd->error) { + if (host->data->flags & MMC_DATA_WRITE) { + ret = sdh_setup_data(host, host->data); + if (ret) + return 0; + } + + sdh_enable_stat_irq(host, DAT_END | RX_OVERRUN | TX_UNDERRUN | DAT_TIME_OUT); + } else + sdh_finish_request(host, host->mrq); + + return 1; +} + +static int sdh_data_done(struct sdh_host *host, unsigned int stat) +{ + struct mmc_data *data = host->data; + + dev_dbg(mmc_dev(host->mmc), "%s enter stat: 0x%x\n", __func__, stat); + if (!data) + return 0; + + disable_dma(host->dma_ch); + dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len, + host->dma_dir); + + if (stat & DAT_TIME_OUT) + data->error = -ETIMEDOUT; + else if (stat & DAT_CRC_FAIL) + data->error = -EILSEQ; + else if (stat & (RX_OVERRUN | TX_UNDERRUN)) + data->error = -EIO; + + if (!data->error) + data->bytes_xfered = data->blocks * data->blksz; + else + data->bytes_xfered = 0; + + sdh_disable_stat_irq(host, DAT_END | DAT_TIME_OUT | DAT_CRC_FAIL | RX_OVERRUN | TX_UNDERRUN); + bfin_write_SDH_STATUS_CLR(DAT_END_STAT | DAT_TIMEOUT_STAT | \ + DAT_CRC_FAIL_STAT | DAT_BLK_END_STAT | RX_OVERRUN | TX_UNDERRUN); + bfin_write_SDH_DATA_CTL(0); + SSYNC(); + + host->data = NULL; + if (host->mrq->stop) { + sdh_stop_clock(host); + sdh_start_cmd(host, host->mrq->stop); + } else { + sdh_finish_request(host, host->mrq); + } + + return 1; +} + +static void sdh_request(struct mmc_host *mmc, struct mmc_request *mrq) +{ + struct sdh_host *host = mmc_priv(mmc); + int ret = 0; + + dev_dbg(mmc_dev(host->mmc), "%s enter, mrp:%p, cmd:%p\n", __func__, mrq, mrq->cmd); + WARN_ON(host->mrq != NULL); + + host->mrq = mrq; + host->data = mrq->data; + + if (mrq->data && mrq->data->flags & MMC_DATA_READ) { + ret = sdh_setup_data(host, mrq->data); + if (ret) + return; + } + + sdh_start_cmd(host, mrq->cmd); +} + +static void sdh_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) +{ + struct sdh_host *host; + unsigned long flags; + u16 clk_ctl = 0; + u16 pwr_ctl = 0; + u16 cfg; + host = mmc_priv(mmc); + + spin_lock_irqsave(&host->lock, flags); + if (ios->clock) { + unsigned long sys_clk, ios_clk; + unsigned char clk_div; + ios_clk = 2 * ios->clock; + sys_clk = get_sclk(); + clk_div = sys_clk / ios_clk; + if (sys_clk % ios_clk == 0) + clk_div -= 1; + clk_div = min_t(unsigned char, clk_div, 0xFF); + clk_ctl |= clk_div; + clk_ctl |= CLK_E; + host->clk_div = clk_div; + } else + sdh_stop_clock(host); + + if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN) +#ifdef CONFIG_SDH_BFIN_MISSING_CMD_PULLUP_WORKAROUND + pwr_ctl |= ROD_CTL; +#else + pwr_ctl |= SD_CMD_OD | ROD_CTL; +#endif + + if (ios->bus_width == MMC_BUS_WIDTH_4) 
{ + cfg = bfin_read_SDH_CFG(); + cfg &= ~PD_SDDAT3; + cfg |= PUP_SDDAT3; + /* Enable 4 bit SDIO */ + cfg |= (SD4E | MWE); + bfin_write_SDH_CFG(cfg); + clk_ctl |= WIDE_BUS; + } else { + cfg = bfin_read_SDH_CFG(); + cfg |= MWE; + bfin_write_SDH_CFG(cfg); + } + + bfin_write_SDH_CLK_CTL(clk_ctl); + + host->power_mode = ios->power_mode; + if (ios->power_mode == MMC_POWER_ON) + pwr_ctl |= PWR_ON; + + bfin_write_SDH_PWR_CTL(pwr_ctl); + SSYNC(); + + spin_unlock_irqrestore(&host->lock, flags); + + dev_dbg(mmc_dev(host->mmc), "SDH: clk_div = 0x%x actual clock:%ld expected clock:%d\n", + host->clk_div, + host->clk_div ? get_sclk() / (2 * (host->clk_div + 1)) : 0, + ios->clock); +} + +static const struct mmc_host_ops sdh_ops = { + .request = sdh_request, + .set_ios = sdh_set_ios, +}; + +static irqreturn_t sdh_dma_irq(int irq, void *devid) +{ + struct sdh_host *host = devid; + + dev_dbg(mmc_dev(host->mmc), "%s enter, irq_stat: 0x%04x\n", __func__, + get_dma_curr_irqstat(host->dma_ch)); + clear_dma_irqstat(host->dma_ch); + SSYNC(); + + return IRQ_HANDLED; +} + +static irqreturn_t sdh_stat_irq(int irq, void *devid) +{ + struct sdh_host *host = devid; + unsigned int status; + int handled = 0; + + dev_dbg(mmc_dev(host->mmc), "%s enter\n", __func__); + status = bfin_read_SDH_E_STATUS(); + if (status & SD_CARD_DET) { + mmc_detect_change(host->mmc, 0); + bfin_write_SDH_E_STATUS(SD_CARD_DET); + } + status = bfin_read_SDH_STATUS(); + if (status & (CMD_SENT | CMD_RESP_END | CMD_TIME_OUT | CMD_CRC_FAIL)) { + handled |= sdh_cmd_done(host, status); + bfin_write_SDH_STATUS_CLR(CMD_SENT_STAT | CMD_RESP_END_STAT | \ + CMD_TIMEOUT_STAT | CMD_CRC_FAIL_STAT); + SSYNC(); + } + + status = bfin_read_SDH_STATUS(); + if (status & (DAT_END | DAT_TIME_OUT | DAT_CRC_FAIL | RX_OVERRUN | TX_UNDERRUN)) + handled |= sdh_data_done(host, status); + + dev_dbg(mmc_dev(host->mmc), "%s exit\n\n", __func__); + + return IRQ_RETVAL(handled); +} + +static int __devinit sdh_probe(struct platform_device *pdev) +{ + struct mmc_host *mmc; + struct sdh_host *host; + struct bfin_sd_host *drv_data = get_sdh_data(pdev); + int ret; + + if (!drv_data) { + dev_err(&pdev->dev, "missing platform driver data\n"); + ret = -EINVAL; + goto out; + } + + mmc = mmc_alloc_host(sizeof(*mmc), &pdev->dev); + if (!mmc) { + ret = -ENOMEM; + goto out; + } + + mmc->ops = &sdh_ops; + mmc->max_phys_segs = 32; + mmc->max_seg_size = 1 << 16; + mmc->max_blk_size = 1 << 11; + mmc->max_blk_count = 1 << 11; + mmc->max_req_size = PAGE_SIZE; + mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34; + mmc->f_max = get_sclk(); + mmc->f_min = mmc->f_max >> 9; + mmc->caps = MMC_CAP_4_BIT_DATA | MMC_CAP_NEEDS_POLL; + host = mmc_priv(mmc); + host->mmc = mmc; + + spin_lock_init(&host->lock); + host->irq = drv_data->irq_int0; + host->dma_ch = drv_data->dma_chan; + + ret = request_dma(host->dma_ch, DRIVER_NAME "DMA"); + if (ret) { + dev_err(&pdev->dev, "unable to request DMA channel\n"); + goto out1; + } + + ret = set_dma_callback(host->dma_ch, sdh_dma_irq, host); + if (ret) { + dev_err(&pdev->dev, "unable to request DMA irq\n"); + goto out2; + } + + host->sg_cpu = dma_alloc_coherent(&pdev->dev, PAGE_SIZE, &host->sg_dma, GFP_KERNEL); + if (host->sg_cpu == NULL) { + ret = -ENOMEM; + goto out2; + } + + platform_set_drvdata(pdev, mmc); + mmc_add_host(mmc); + + ret = request_irq(host->irq, sdh_stat_irq, 0, "SDH Status IRQ", host); + if (ret) { + dev_err(&pdev->dev, "unable to request status irq\n"); + goto out3; + } + + ret = peripheral_request_list(drv_data->pin_req, DRIVER_NAME); + if (ret) { + 
dev_err(&pdev->dev, "unable to request peripheral pins\n"); + goto out4; + } +#if defined(CONFIG_BF54x) + /* Secure Digital Host shares DMA with Nand controller */ + bfin_write_DMAC1_PERIMUX(bfin_read_DMAC1_PERIMUX() | 0x1); +#endif + + bfin_write_SDH_CFG(bfin_read_SDH_CFG() | CLKS_EN); + SSYNC(); + + /* Disable card inserting detection pin. set MMC_CAP_NEES_POLL, and + * mmc stack will do the detection. + */ + bfin_write_SDH_CFG((bfin_read_SDH_CFG() & 0x1F) | (PUP_SDDAT | PUP_SDDAT3)); + SSYNC(); + + return 0; + +out4: + free_irq(host->irq, host); +out3: + mmc_remove_host(mmc); + dma_free_coherent(&pdev->dev, PAGE_SIZE, host->sg_cpu, host->sg_dma); +out2: + free_dma(host->dma_ch); +out1: + mmc_free_host(mmc); + out: + return ret; +} + +static int __devexit sdh_remove(struct platform_device *pdev) +{ + struct mmc_host *mmc = platform_get_drvdata(pdev); + + platform_set_drvdata(pdev, NULL); + + if (mmc) { + struct sdh_host *host = mmc_priv(mmc); + + mmc_remove_host(mmc); + + sdh_stop_clock(host); + free_irq(host->irq, host); + free_dma(host->dma_ch); + dma_free_coherent(&pdev->dev, PAGE_SIZE, host->sg_cpu, host->sg_dma); + + mmc_free_host(mmc); + } + + return 0; +} + +#ifdef CONFIG_PM +static int sdh_suspend(struct platform_device *dev, pm_message_t state) +{ + struct mmc_host *mmc = platform_get_drvdata(dev); + struct bfin_sd_host *drv_data = get_sdh_data(dev); + int ret = 0; + + if (mmc) + ret = mmc_suspend_host(mmc, state); + + bfin_write_SDH_PWR_CTL(bfin_read_SDH_PWR_CTL() & ~PWR_ON); + peripheral_free_list(drv_data->pin_req); + + return ret; +} + +static int sdh_resume(struct platform_device *dev) +{ + struct mmc_host *mmc = platform_get_drvdata(dev); + struct bfin_sd_host *drv_data = get_sdh_data(dev); + int ret = 0; + + ret = peripheral_request_list(drv_data->pin_req, DRIVER_NAME); + if (ret) { + dev_err(&dev->dev, "unable to request peripheral pins\n"); + return ret; + } + + bfin_write_SDH_PWR_CTL(bfin_read_SDH_PWR_CTL() | PWR_ON); +#if defined(CONFIG_BF54x) + /* Secure Digital Host shares DMA with Nand controller */ + bfin_write_DMAC1_PERIMUX(bfin_read_DMAC1_PERIMUX() | 0x1); +#endif + bfin_write_SDH_CFG(bfin_read_SDH_CFG() | CLKS_EN); + SSYNC(); + + bfin_write_SDH_CFG((bfin_read_SDH_CFG() & 0x1F) | (PUP_SDDAT | PUP_SDDAT3)); + SSYNC(); + + if (mmc) + ret = mmc_resume_host(mmc); + + return ret; +} +#else +# define sdh_suspend NULL +# define sdh_resume NULL +#endif + +static struct platform_driver sdh_driver = { + .probe = sdh_probe, + .remove = __devexit_p(sdh_remove), + .suspend = sdh_suspend, + .resume = sdh_resume, + .driver = { + .name = DRIVER_NAME, + }, +}; + +static int __init sdh_init(void) +{ + return platform_driver_register(&sdh_driver); +} +module_init(sdh_init); + +static void __exit sdh_exit(void) +{ + platform_driver_unregister(&sdh_driver); +} +module_exit(sdh_exit); + +MODULE_DESCRIPTION("Blackfin Secure Digital Host Driver"); +MODULE_AUTHOR("Cliff Cai, Roy Huang"); +MODULE_LICENSE("GPL"); diff --git a/drivers/mmc/host/davinci_mmc.c b/drivers/mmc/host/davinci_mmc.c new file mode 100644 index 00000000000..dd45e7c3517 --- /dev/null +++ b/drivers/mmc/host/davinci_mmc.c @@ -0,0 +1,1349 @@ +/* + * davinci_mmc.c - TI DaVinci MMC/SD/SDIO driver + * + * Copyright (C) 2006 Texas Instruments. 
+ * Original author: Purushotam Kumar + * Copyright (C) 2009 David Brownell + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +#include <linux/module.h> +#include <linux/ioport.h> +#include <linux/platform_device.h> +#include <linux/clk.h> +#include <linux/err.h> +#include <linux/cpufreq.h> +#include <linux/mmc/host.h> +#include <linux/io.h> +#include <linux/irq.h> +#include <linux/delay.h> +#include <linux/dma-mapping.h> +#include <linux/mmc/mmc.h> + +#include <mach/mmc.h> +#include <mach/edma.h> + +/* + * Register Definitions + */ +#define DAVINCI_MMCCTL 0x00 /* Control Register */ +#define DAVINCI_MMCCLK 0x04 /* Memory Clock Control Register */ +#define DAVINCI_MMCST0 0x08 /* Status Register 0 */ +#define DAVINCI_MMCST1 0x0C /* Status Register 1 */ +#define DAVINCI_MMCIM 0x10 /* Interrupt Mask Register */ +#define DAVINCI_MMCTOR 0x14 /* Response Time-Out Register */ +#define DAVINCI_MMCTOD 0x18 /* Data Read Time-Out Register */ +#define DAVINCI_MMCBLEN 0x1C /* Block Length Register */ +#define DAVINCI_MMCNBLK 0x20 /* Number of Blocks Register */ +#define DAVINCI_MMCNBLC 0x24 /* Number of Blocks Counter Register */ +#define DAVINCI_MMCDRR 0x28 /* Data Receive Register */ +#define DAVINCI_MMCDXR 0x2C /* Data Transmit Register */ +#define DAVINCI_MMCCMD 0x30 /* Command Register */ +#define DAVINCI_MMCARGHL 0x34 /* Argument Register */ +#define DAVINCI_MMCRSP01 0x38 /* Response Register 0 and 1 */ +#define DAVINCI_MMCRSP23 0x3C /* Response Register 0 and 1 */ +#define DAVINCI_MMCRSP45 0x40 /* Response Register 0 and 1 */ +#define DAVINCI_MMCRSP67 0x44 /* Response Register 0 and 1 */ +#define DAVINCI_MMCDRSP 0x48 /* Data Response Register */ +#define DAVINCI_MMCETOK 0x4C +#define DAVINCI_MMCCIDX 0x50 /* Command Index Register */ +#define DAVINCI_MMCCKC 0x54 +#define DAVINCI_MMCTORC 0x58 +#define DAVINCI_MMCTODC 0x5C +#define DAVINCI_MMCBLNC 0x60 +#define DAVINCI_SDIOCTL 0x64 +#define DAVINCI_SDIOST0 0x68 +#define DAVINCI_SDIOEN 0x6C +#define DAVINCI_SDIOST 0x70 +#define DAVINCI_MMCFIFOCTL 0x74 /* FIFO Control Register */ + +/* DAVINCI_MMCCTL definitions */ +#define MMCCTL_DATRST (1 << 0) +#define MMCCTL_CMDRST (1 << 1) +#define MMCCTL_WIDTH_4_BIT (1 << 2) +#define MMCCTL_DATEG_DISABLED (0 << 6) +#define MMCCTL_DATEG_RISING (1 << 6) +#define MMCCTL_DATEG_FALLING (2 << 6) +#define MMCCTL_DATEG_BOTH (3 << 6) +#define MMCCTL_PERMDR_LE (0 << 9) +#define MMCCTL_PERMDR_BE (1 << 9) +#define MMCCTL_PERMDX_LE (0 << 10) +#define MMCCTL_PERMDX_BE (1 << 10) + +/* DAVINCI_MMCCLK definitions */ +#define MMCCLK_CLKEN (1 << 8) +#define MMCCLK_CLKRT_MASK (0xFF << 0) + +/* IRQ bit definitions, for DAVINCI_MMCST0 and DAVINCI_MMCIM */ +#define MMCST0_DATDNE BIT(0) /* data done */ +#define MMCST0_BSYDNE BIT(1) /* busy done */ +#define MMCST0_RSPDNE BIT(2) /* command done */ +#define MMCST0_TOUTRD BIT(3) /* data read timeout */ +#define MMCST0_TOUTRS BIT(4) 
/* command response timeout */ +#define MMCST0_CRCWR BIT(5) /* data write CRC error */ +#define MMCST0_CRCRD BIT(6) /* data read CRC error */ +#define MMCST0_CRCRS BIT(7) /* command response CRC error */ +#define MMCST0_DXRDY BIT(9) /* data transmit ready (fifo empty) */ +#define MMCST0_DRRDY BIT(10) /* data receive ready (data in fifo)*/ +#define MMCST0_DATED BIT(11) /* DAT3 edge detect */ +#define MMCST0_TRNDNE BIT(12) /* transfer done */ + +/* DAVINCI_MMCST1 definitions */ +#define MMCST1_BUSY (1 << 0) + +/* DAVINCI_MMCCMD definitions */ +#define MMCCMD_CMD_MASK (0x3F << 0) +#define MMCCMD_PPLEN (1 << 7) +#define MMCCMD_BSYEXP (1 << 8) +#define MMCCMD_RSPFMT_MASK (3 << 9) +#define MMCCMD_RSPFMT_NONE (0 << 9) +#define MMCCMD_RSPFMT_R1456 (1 << 9) +#define MMCCMD_RSPFMT_R2 (2 << 9) +#define MMCCMD_RSPFMT_R3 (3 << 9) +#define MMCCMD_DTRW (1 << 11) +#define MMCCMD_STRMTP (1 << 12) +#define MMCCMD_WDATX (1 << 13) +#define MMCCMD_INITCK (1 << 14) +#define MMCCMD_DCLR (1 << 15) +#define MMCCMD_DMATRIG (1 << 16) + +/* DAVINCI_MMCFIFOCTL definitions */ +#define MMCFIFOCTL_FIFORST (1 << 0) +#define MMCFIFOCTL_FIFODIR_WR (1 << 1) +#define MMCFIFOCTL_FIFODIR_RD (0 << 1) +#define MMCFIFOCTL_FIFOLEV (1 << 2) /* 0 = 128 bits, 1 = 256 bits */ +#define MMCFIFOCTL_ACCWD_4 (0 << 3) /* access width of 4 bytes */ +#define MMCFIFOCTL_ACCWD_3 (1 << 3) /* access width of 3 bytes */ +#define MMCFIFOCTL_ACCWD_2 (2 << 3) /* access width of 2 bytes */ +#define MMCFIFOCTL_ACCWD_1 (3 << 3) /* access width of 1 byte */ + + +/* MMCSD Init clock in Hz in opendrain mode */ +#define MMCSD_INIT_CLOCK 200000 + +/* + * One scatterlist dma "segment" is at most MAX_CCNT rw_threshold units, + * and we handle up to NR_SG segments. MMC_BLOCK_BOUNCE kicks in only + * for drivers with max_hw_segs == 1, making the segments bigger (64KB) + * than the page or two that's otherwise typical. NR_SG == 16 gives at + * least the same throughput boost, using EDMA transfer linkage instead + * of spending CPU time copying pages. + */ +#define MAX_CCNT ((1 << 16) - 1) + +#define NR_SG 16 + +static unsigned rw_threshold = 32; +module_param(rw_threshold, uint, S_IRUGO); +MODULE_PARM_DESC(rw_threshold, + "Read/Write threshold. Default = 32"); + +static unsigned __initdata use_dma = 1; +module_param(use_dma, uint, 0); +MODULE_PARM_DESC(use_dma, "Whether to use DMA or not. Default = 1"); + +struct mmc_davinci_host { + struct mmc_command *cmd; + struct mmc_data *data; + struct mmc_host *mmc; + struct clk *clk; + unsigned int mmc_input_clk; + void __iomem *base; + struct resource *mem_res; + int irq; + unsigned char bus_mode; + +#define DAVINCI_MMC_DATADIR_NONE 0 +#define DAVINCI_MMC_DATADIR_READ 1 +#define DAVINCI_MMC_DATADIR_WRITE 2 + unsigned char data_dir; + + /* buffer is used during PIO of one scatterlist segment, and + * is updated along with buffer_bytes_left. bytes_left applies + * to all N blocks of the PIO transfer. + */ + u8 *buffer; + u32 buffer_bytes_left; + u32 bytes_left; + + u32 rxdma, txdma; + bool use_dma; + bool do_dma; + + /* Scatterlist DMA uses one or more parameter RAM entries: + * the main one (associated with rxdma or txdma) plus zero or + * more links. The entries for a given transfer differ only + * by memory buffer (address, length) and link field. + */ + struct edmacc_param tx_template; + struct edmacc_param rx_template; + unsigned n_link; + u32 links[NR_SG - 1]; + + /* For PIO we walk scatterlists one segment at a time. 
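+	 * sg points at the segment currently being copied, sg_len is the
+	 * number of segments in the transfer, and buffer/buffer_bytes_left
+	 * above track progress within the current segment.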
*/ + unsigned int sg_len; + struct scatterlist *sg; + + /* Version of the MMC/SD controller */ + u8 version; + /* for ns in one cycle calculation */ + unsigned ns_in_one_cycle; +#ifdef CONFIG_CPU_FREQ + struct notifier_block freq_transition; +#endif +}; + + +/* PIO only */ +static void mmc_davinci_sg_to_buf(struct mmc_davinci_host *host) +{ + host->buffer_bytes_left = sg_dma_len(host->sg); + host->buffer = sg_virt(host->sg); + if (host->buffer_bytes_left > host->bytes_left) + host->buffer_bytes_left = host->bytes_left; +} + +static void davinci_fifo_data_trans(struct mmc_davinci_host *host, + unsigned int n) +{ + u8 *p; + unsigned int i; + + if (host->buffer_bytes_left == 0) { + host->sg = sg_next(host->data->sg); + mmc_davinci_sg_to_buf(host); + } + + p = host->buffer; + if (n > host->buffer_bytes_left) + n = host->buffer_bytes_left; + host->buffer_bytes_left -= n; + host->bytes_left -= n; + + /* NOTE: we never transfer more than rw_threshold bytes + * to/from the fifo here; there's no I/O overlap. + * This also assumes that access width( i.e. ACCWD) is 4 bytes + */ + if (host->data_dir == DAVINCI_MMC_DATADIR_WRITE) { + for (i = 0; i < (n >> 2); i++) { + writel(*((u32 *)p), host->base + DAVINCI_MMCDXR); + p = p + 4; + } + if (n & 3) { + iowrite8_rep(host->base + DAVINCI_MMCDXR, p, (n & 3)); + p = p + (n & 3); + } + } else { + for (i = 0; i < (n >> 2); i++) { + *((u32 *)p) = readl(host->base + DAVINCI_MMCDRR); + p = p + 4; + } + if (n & 3) { + ioread8_rep(host->base + DAVINCI_MMCDRR, p, (n & 3)); + p = p + (n & 3); + } + } + host->buffer = p; +} + +static void mmc_davinci_start_command(struct mmc_davinci_host *host, + struct mmc_command *cmd) +{ + u32 cmd_reg = 0; + u32 im_val; + + dev_dbg(mmc_dev(host->mmc), "CMD%d, arg 0x%08x%s\n", + cmd->opcode, cmd->arg, + ({ char *s; + switch (mmc_resp_type(cmd)) { + case MMC_RSP_R1: + s = ", R1/R5/R6/R7 response"; + break; + case MMC_RSP_R1B: + s = ", R1b response"; + break; + case MMC_RSP_R2: + s = ", R2 response"; + break; + case MMC_RSP_R3: + s = ", R3/R4 response"; + break; + default: + s = ", (R? response)"; + break; + }; s; })); + host->cmd = cmd; + + switch (mmc_resp_type(cmd)) { + case MMC_RSP_R1B: + /* There's some spec confusion about when R1B is + * allowed, but if the card doesn't issue a BUSY + * then it's harmless for us to allow it. 
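+		 * MMCCMD_BSYEXP ("busy expected") below only tells the
+		 * controller that a busy phase may follow the response, so
+		 * allowing it here costs nothing if the card never drives busy.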
+ */ + cmd_reg |= MMCCMD_BSYEXP; + /* FALLTHROUGH */ + case MMC_RSP_R1: /* 48 bits, CRC */ + cmd_reg |= MMCCMD_RSPFMT_R1456; + break; + case MMC_RSP_R2: /* 136 bits, CRC */ + cmd_reg |= MMCCMD_RSPFMT_R2; + break; + case MMC_RSP_R3: /* 48 bits, no CRC */ + cmd_reg |= MMCCMD_RSPFMT_R3; + break; + default: + cmd_reg |= MMCCMD_RSPFMT_NONE; + dev_dbg(mmc_dev(host->mmc), "unknown resp_type %04x\n", + mmc_resp_type(cmd)); + break; + } + + /* Set command index */ + cmd_reg |= cmd->opcode; + + /* Enable EDMA transfer triggers */ + if (host->do_dma) + cmd_reg |= MMCCMD_DMATRIG; + + if (host->version == MMC_CTLR_VERSION_2 && host->data != NULL && + host->data_dir == DAVINCI_MMC_DATADIR_READ) + cmd_reg |= MMCCMD_DMATRIG; + + /* Setting whether command involves data transfer or not */ + if (cmd->data) + cmd_reg |= MMCCMD_WDATX; + + /* Setting whether stream or block transfer */ + if (cmd->flags & MMC_DATA_STREAM) + cmd_reg |= MMCCMD_STRMTP; + + /* Setting whether data read or write */ + if (host->data_dir == DAVINCI_MMC_DATADIR_WRITE) + cmd_reg |= MMCCMD_DTRW; + + if (host->bus_mode == MMC_BUSMODE_PUSHPULL) + cmd_reg |= MMCCMD_PPLEN; + + /* set Command timeout */ + writel(0x1FFF, host->base + DAVINCI_MMCTOR); + + /* Enable interrupt (calculate here, defer until FIFO is stuffed). */ + im_val = MMCST0_RSPDNE | MMCST0_CRCRS | MMCST0_TOUTRS; + if (host->data_dir == DAVINCI_MMC_DATADIR_WRITE) { + im_val |= MMCST0_DATDNE | MMCST0_CRCWR; + + if (!host->do_dma) + im_val |= MMCST0_DXRDY; + } else if (host->data_dir == DAVINCI_MMC_DATADIR_READ) { + im_val |= MMCST0_DATDNE | MMCST0_CRCRD | MMCST0_TOUTRD; + + if (!host->do_dma) + im_val |= MMCST0_DRRDY; + } + + /* + * Before non-DMA WRITE commands the controller needs priming: + * FIFO should be populated with 32 bytes i.e. whatever is the FIFO size + */ + if (!host->do_dma && (host->data_dir == DAVINCI_MMC_DATADIR_WRITE)) + davinci_fifo_data_trans(host, rw_threshold); + + writel(cmd->arg, host->base + DAVINCI_MMCARGHL); + writel(cmd_reg, host->base + DAVINCI_MMCCMD); + writel(im_val, host->base + DAVINCI_MMCIM); +} + +/*----------------------------------------------------------------------*/ + +/* DMA infrastructure */ + +static void davinci_abort_dma(struct mmc_davinci_host *host) +{ + int sync_dev; + + if (host->data_dir == DAVINCI_MMC_DATADIR_READ) + sync_dev = host->rxdma; + else + sync_dev = host->txdma; + + edma_stop(sync_dev); + edma_clean_channel(sync_dev); +} + +static void +mmc_davinci_xfer_done(struct mmc_davinci_host *host, struct mmc_data *data); + +static void mmc_davinci_dma_cb(unsigned channel, u16 ch_status, void *data) +{ + if (DMA_COMPLETE != ch_status) { + struct mmc_davinci_host *host = data; + + /* Currently means: DMA Event Missed, or "null" transfer + * request was seen. In the future, TC errors (like bad + * addresses) might be presented too. + */ + dev_warn(mmc_dev(host->mmc), "DMA %s error\n", + (host->data->flags & MMC_DATA_WRITE) + ? 
"write" : "read"); + host->data->error = -EIO; + mmc_davinci_xfer_done(host, host->data); + } +} + +/* Set up tx or rx template, to be modified and updated later */ +static void __init mmc_davinci_dma_setup(struct mmc_davinci_host *host, + bool tx, struct edmacc_param *template) +{ + unsigned sync_dev; + const u16 acnt = 4; + const u16 bcnt = rw_threshold >> 2; + const u16 ccnt = 0; + u32 src_port = 0; + u32 dst_port = 0; + s16 src_bidx, dst_bidx; + s16 src_cidx, dst_cidx; + + /* + * A-B Sync transfer: each DMA request is for one "frame" of + * rw_threshold bytes, broken into "acnt"-size chunks repeated + * "bcnt" times. Each segment needs "ccnt" such frames; since + * we tell the block layer our mmc->max_seg_size limit, we can + * trust (later) that it's within bounds. + * + * The FIFOs are read/written in 4-byte chunks (acnt == 4) and + * EDMA will optimize memory operations to use larger bursts. + */ + if (tx) { + sync_dev = host->txdma; + + /* src_prt, ccnt, and link to be set up later */ + src_bidx = acnt; + src_cidx = acnt * bcnt; + + dst_port = host->mem_res->start + DAVINCI_MMCDXR; + dst_bidx = 0; + dst_cidx = 0; + } else { + sync_dev = host->rxdma; + + src_port = host->mem_res->start + DAVINCI_MMCDRR; + src_bidx = 0; + src_cidx = 0; + + /* dst_prt, ccnt, and link to be set up later */ + dst_bidx = acnt; + dst_cidx = acnt * bcnt; + } + + /* + * We can't use FIFO mode for the FIFOs because MMC FIFO addresses + * are not 256-bit (32-byte) aligned. So we use INCR, and the W8BIT + * parameter is ignored. + */ + edma_set_src(sync_dev, src_port, INCR, W8BIT); + edma_set_dest(sync_dev, dst_port, INCR, W8BIT); + + edma_set_src_index(sync_dev, src_bidx, src_cidx); + edma_set_dest_index(sync_dev, dst_bidx, dst_cidx); + + edma_set_transfer_params(sync_dev, acnt, bcnt, ccnt, 8, ABSYNC); + + edma_read_slot(sync_dev, template); + + /* don't bother with irqs or chaining */ + template->opt |= EDMA_CHAN_SLOT(sync_dev) << 12; +} + +static void mmc_davinci_send_dma_request(struct mmc_davinci_host *host, + struct mmc_data *data) +{ + struct edmacc_param *template; + int channel, slot; + unsigned link; + struct scatterlist *sg; + unsigned sg_len; + unsigned bytes_left = host->bytes_left; + const unsigned shift = ffs(rw_threshold) - 1;; + + if (host->data_dir == DAVINCI_MMC_DATADIR_WRITE) { + template = &host->tx_template; + channel = host->txdma; + } else { + template = &host->rx_template; + channel = host->rxdma; + } + + /* We know sg_len and ccnt will never be out of range because + * we told the mmc layer which in turn tells the block layer + * to ensure that it only hands us one scatterlist segment + * per EDMA PARAM entry. Update the PARAM + * entries needed for each segment of this scatterlist. + */ + for (slot = channel, link = 0, sg = data->sg, sg_len = host->sg_len; + sg_len-- != 0 && bytes_left; + sg = sg_next(sg), slot = host->links[link++]) { + u32 buf = sg_dma_address(sg); + unsigned count = sg_dma_len(sg); + + template->link_bcntrld = sg_len + ? 
(EDMA_CHAN_SLOT(host->links[link]) << 5) + : 0xffff; + + if (count > bytes_left) + count = bytes_left; + bytes_left -= count; + + if (host->data_dir == DAVINCI_MMC_DATADIR_WRITE) + template->src = buf; + else + template->dst = buf; + template->ccnt = count >> shift; + + edma_write_slot(slot, template); + } + + if (host->version == MMC_CTLR_VERSION_2) + edma_clear_event(channel); + + edma_start(channel); +} + +static int mmc_davinci_start_dma_transfer(struct mmc_davinci_host *host, + struct mmc_data *data) +{ + int i; + int mask = rw_threshold - 1; + + host->sg_len = dma_map_sg(mmc_dev(host->mmc), data->sg, data->sg_len, + ((data->flags & MMC_DATA_WRITE) + ? DMA_TO_DEVICE + : DMA_FROM_DEVICE)); + + /* no individual DMA segment should need a partial FIFO */ + for (i = 0; i < host->sg_len; i++) { + if (sg_dma_len(data->sg + i) & mask) { + dma_unmap_sg(mmc_dev(host->mmc), + data->sg, data->sg_len, + (data->flags & MMC_DATA_WRITE) + ? DMA_TO_DEVICE + : DMA_FROM_DEVICE); + return -1; + } + } + + host->do_dma = 1; + mmc_davinci_send_dma_request(host, data); + + return 0; +} + +static void __init_or_module +davinci_release_dma_channels(struct mmc_davinci_host *host) +{ + unsigned i; + + if (!host->use_dma) + return; + + for (i = 0; i < host->n_link; i++) + edma_free_slot(host->links[i]); + + edma_free_channel(host->txdma); + edma_free_channel(host->rxdma); +} + +static int __init davinci_acquire_dma_channels(struct mmc_davinci_host *host) +{ + int r, i; + + /* Acquire master DMA write channel */ + r = edma_alloc_channel(host->txdma, mmc_davinci_dma_cb, host, + EVENTQ_DEFAULT); + if (r < 0) { + dev_warn(mmc_dev(host->mmc), "alloc %s channel err %d\n", + "tx", r); + return r; + } + mmc_davinci_dma_setup(host, true, &host->tx_template); + + /* Acquire master DMA read channel */ + r = edma_alloc_channel(host->rxdma, mmc_davinci_dma_cb, host, + EVENTQ_DEFAULT); + if (r < 0) { + dev_warn(mmc_dev(host->mmc), "alloc %s channel err %d\n", + "rx", r); + goto free_master_write; + } + mmc_davinci_dma_setup(host, false, &host->rx_template); + + /* Allocate parameter RAM slots, which will later be bound to a + * channel as needed to handle a scatterlist. + */ + for (i = 0; i < ARRAY_SIZE(host->links); i++) { + r = edma_alloc_slot(EDMA_CTLR(host->txdma), EDMA_SLOT_ANY); + if (r < 0) { + dev_dbg(mmc_dev(host->mmc), "dma PaRAM alloc --> %d\n", + r); + break; + } + host->links[i] = r; + } + host->n_link = i; + + return 0; + +free_master_write: + edma_free_channel(host->txdma); + + return r; +} + +/*----------------------------------------------------------------------*/ + +static void +mmc_davinci_prepare_data(struct mmc_davinci_host *host, struct mmc_request *req) +{ + int fifo_lev = (rw_threshold == 32) ? MMCFIFOCTL_FIFOLEV : 0; + int timeout; + struct mmc_data *data = req->data; + + if (host->version == MMC_CTLR_VERSION_2) + fifo_lev = (rw_threshold == 64) ? MMCFIFOCTL_FIFOLEV : 0; + + host->data = data; + if (data == NULL) { + host->data_dir = DAVINCI_MMC_DATADIR_NONE; + writel(0, host->base + DAVINCI_MMCBLEN); + writel(0, host->base + DAVINCI_MMCNBLK); + return; + } + + dev_dbg(mmc_dev(host->mmc), "%s %s, %d blocks of %d bytes\n", + (data->flags & MMC_DATA_STREAM) ? "stream" : "block", + (data->flags & MMC_DATA_WRITE) ? 
"write" : "read", + data->blocks, data->blksz); + dev_dbg(mmc_dev(host->mmc), " DTO %d cycles + %d ns\n", + data->timeout_clks, data->timeout_ns); + timeout = data->timeout_clks + + (data->timeout_ns / host->ns_in_one_cycle); + if (timeout > 0xffff) + timeout = 0xffff; + + writel(timeout, host->base + DAVINCI_MMCTOD); + writel(data->blocks, host->base + DAVINCI_MMCNBLK); + writel(data->blksz, host->base + DAVINCI_MMCBLEN); + + /* Configure the FIFO */ + switch (data->flags & MMC_DATA_WRITE) { + case MMC_DATA_WRITE: + host->data_dir = DAVINCI_MMC_DATADIR_WRITE; + writel(fifo_lev | MMCFIFOCTL_FIFODIR_WR | MMCFIFOCTL_FIFORST, + host->base + DAVINCI_MMCFIFOCTL); + writel(fifo_lev | MMCFIFOCTL_FIFODIR_WR, + host->base + DAVINCI_MMCFIFOCTL); + break; + + default: + host->data_dir = DAVINCI_MMC_DATADIR_READ; + writel(fifo_lev | MMCFIFOCTL_FIFODIR_RD | MMCFIFOCTL_FIFORST, + host->base + DAVINCI_MMCFIFOCTL); + writel(fifo_lev | MMCFIFOCTL_FIFODIR_RD, + host->base + DAVINCI_MMCFIFOCTL); + break; + } + + host->buffer = NULL; + host->bytes_left = data->blocks * data->blksz; + + /* For now we try to use DMA whenever we won't need partial FIFO + * reads or writes, either for the whole transfer (as tested here) + * or for any individual scatterlist segment (tested when we call + * start_dma_transfer). + * + * While we *could* change that, unusual block sizes are rarely + * used. The occasional fallback to PIO should't hurt. + */ + if (host->use_dma && (host->bytes_left & (rw_threshold - 1)) == 0 + && mmc_davinci_start_dma_transfer(host, data) == 0) { + /* zero this to ensure we take no PIO paths */ + host->bytes_left = 0; + } else { + /* Revert to CPU Copy */ + host->sg_len = data->sg_len; + host->sg = host->data->sg; + mmc_davinci_sg_to_buf(host); + } +} + +static void mmc_davinci_request(struct mmc_host *mmc, struct mmc_request *req) +{ + struct mmc_davinci_host *host = mmc_priv(mmc); + unsigned long timeout = jiffies + msecs_to_jiffies(900); + u32 mmcst1 = 0; + + /* Card may still be sending BUSY after a previous operation, + * typically some kind of write. If so, we can't proceed yet. + */ + while (time_before(jiffies, timeout)) { + mmcst1 = readl(host->base + DAVINCI_MMCST1); + if (!(mmcst1 & MMCST1_BUSY)) + break; + cpu_relax(); + } + if (mmcst1 & MMCST1_BUSY) { + dev_err(mmc_dev(host->mmc), "still BUSY? bad ... 
\n"); + req->cmd->error = -ETIMEDOUT; + mmc_request_done(mmc, req); + return; + } + + host->do_dma = 0; + mmc_davinci_prepare_data(host, req); + mmc_davinci_start_command(host, req->cmd); +} + +static unsigned int calculate_freq_for_card(struct mmc_davinci_host *host, + unsigned int mmc_req_freq) +{ + unsigned int mmc_freq = 0, mmc_pclk = 0, mmc_push_pull_divisor = 0; + + mmc_pclk = host->mmc_input_clk; + if (mmc_req_freq && mmc_pclk > (2 * mmc_req_freq)) + mmc_push_pull_divisor = ((unsigned int)mmc_pclk + / (2 * mmc_req_freq)) - 1; + else + mmc_push_pull_divisor = 0; + + mmc_freq = (unsigned int)mmc_pclk + / (2 * (mmc_push_pull_divisor + 1)); + + if (mmc_freq > mmc_req_freq) + mmc_push_pull_divisor = mmc_push_pull_divisor + 1; + /* Convert ns to clock cycles */ + if (mmc_req_freq <= 400000) + host->ns_in_one_cycle = (1000000) / (((mmc_pclk + / (2 * (mmc_push_pull_divisor + 1)))/1000)); + else + host->ns_in_one_cycle = (1000000) / (((mmc_pclk + / (2 * (mmc_push_pull_divisor + 1)))/1000000)); + + return mmc_push_pull_divisor; +} + +static void calculate_clk_divider(struct mmc_host *mmc, struct mmc_ios *ios) +{ + unsigned int open_drain_freq = 0, mmc_pclk = 0; + unsigned int mmc_push_pull_freq = 0; + struct mmc_davinci_host *host = mmc_priv(mmc); + + if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN) { + u32 temp; + + /* Ignoring the init clock value passed for fixing the inter + * operability with different cards. + */ + open_drain_freq = ((unsigned int)mmc_pclk + / (2 * MMCSD_INIT_CLOCK)) - 1; + + if (open_drain_freq > 0xFF) + open_drain_freq = 0xFF; + + temp = readl(host->base + DAVINCI_MMCCLK) & ~MMCCLK_CLKRT_MASK; + temp |= open_drain_freq; + writel(temp, host->base + DAVINCI_MMCCLK); + + /* Convert ns to clock cycles */ + host->ns_in_one_cycle = (1000000) / (MMCSD_INIT_CLOCK/1000); + } else { + u32 temp; + mmc_push_pull_freq = calculate_freq_for_card(host, ios->clock); + + if (mmc_push_pull_freq > 0xFF) + mmc_push_pull_freq = 0xFF; + + temp = readl(host->base + DAVINCI_MMCCLK) & ~MMCCLK_CLKEN; + writel(temp, host->base + DAVINCI_MMCCLK); + + udelay(10); + + temp = readl(host->base + DAVINCI_MMCCLK) & ~MMCCLK_CLKRT_MASK; + temp |= mmc_push_pull_freq; + writel(temp, host->base + DAVINCI_MMCCLK); + + writel(temp | MMCCLK_CLKEN, host->base + DAVINCI_MMCCLK); + + udelay(10); + } +} + +static void mmc_davinci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) +{ + unsigned int mmc_pclk = 0; + struct mmc_davinci_host *host = mmc_priv(mmc); + + mmc_pclk = host->mmc_input_clk; + dev_dbg(mmc_dev(host->mmc), + "clock %dHz busmode %d powermode %d Vdd %04x\n", + ios->clock, ios->bus_mode, ios->power_mode, + ios->vdd); + if (ios->bus_width == MMC_BUS_WIDTH_4) { + dev_dbg(mmc_dev(host->mmc), "Enabling 4 bit mode\n"); + writel(readl(host->base + DAVINCI_MMCCTL) | MMCCTL_WIDTH_4_BIT, + host->base + DAVINCI_MMCCTL); + } else { + dev_dbg(mmc_dev(host->mmc), "Disabling 4 bit mode\n"); + writel(readl(host->base + DAVINCI_MMCCTL) & ~MMCCTL_WIDTH_4_BIT, + host->base + DAVINCI_MMCCTL); + } + + calculate_clk_divider(mmc, ios); + + host->bus_mode = ios->bus_mode; + if (ios->power_mode == MMC_POWER_UP) { + unsigned long timeout = jiffies + msecs_to_jiffies(50); + bool lose = true; + + /* Send clock cycles, poll completion */ + writel(0, host->base + DAVINCI_MMCARGHL); + writel(MMCCMD_INITCK, host->base + DAVINCI_MMCCMD); + while (time_before(jiffies, timeout)) { + u32 tmp = readl(host->base + DAVINCI_MMCST0); + + if (tmp & MMCST0_RSPDNE) { + lose = false; + break; + } + cpu_relax(); + } + if (lose) + 
dev_warn(mmc_dev(host->mmc), "powerup timeout\n"); + } + + /* FIXME on power OFF, reset things ... */ +} + +static void +mmc_davinci_xfer_done(struct mmc_davinci_host *host, struct mmc_data *data) +{ + host->data = NULL; + + if (host->do_dma) { + davinci_abort_dma(host); + + dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len, + (data->flags & MMC_DATA_WRITE) + ? DMA_TO_DEVICE + : DMA_FROM_DEVICE); + host->do_dma = false; + } + host->data_dir = DAVINCI_MMC_DATADIR_NONE; + + if (!data->stop || (host->cmd && host->cmd->error)) { + mmc_request_done(host->mmc, data->mrq); + writel(0, host->base + DAVINCI_MMCIM); + } else + mmc_davinci_start_command(host, data->stop); +} + +static void mmc_davinci_cmd_done(struct mmc_davinci_host *host, + struct mmc_command *cmd) +{ + host->cmd = NULL; + + if (cmd->flags & MMC_RSP_PRESENT) { + if (cmd->flags & MMC_RSP_136) { + /* response type 2 */ + cmd->resp[3] = readl(host->base + DAVINCI_MMCRSP01); + cmd->resp[2] = readl(host->base + DAVINCI_MMCRSP23); + cmd->resp[1] = readl(host->base + DAVINCI_MMCRSP45); + cmd->resp[0] = readl(host->base + DAVINCI_MMCRSP67); + } else { + /* response types 1, 1b, 3, 4, 5, 6 */ + cmd->resp[0] = readl(host->base + DAVINCI_MMCRSP67); + } + } + + if (host->data == NULL || cmd->error) { + if (cmd->error == -ETIMEDOUT) + cmd->mrq->cmd->retries = 0; + mmc_request_done(host->mmc, cmd->mrq); + writel(0, host->base + DAVINCI_MMCIM); + } +} + +static void +davinci_abort_data(struct mmc_davinci_host *host, struct mmc_data *data) +{ + u32 temp; + + /* reset command and data state machines */ + temp = readl(host->base + DAVINCI_MMCCTL); + writel(temp | MMCCTL_CMDRST | MMCCTL_DATRST, + host->base + DAVINCI_MMCCTL); + + temp &= ~(MMCCTL_CMDRST | MMCCTL_DATRST); + udelay(10); + writel(temp, host->base + DAVINCI_MMCCTL); +} + +static irqreturn_t mmc_davinci_irq(int irq, void *dev_id) +{ + struct mmc_davinci_host *host = (struct mmc_davinci_host *)dev_id; + unsigned int status, qstatus; + int end_command = 0; + int end_transfer = 0; + struct mmc_data *data = host->data; + + if (host->cmd == NULL && host->data == NULL) { + status = readl(host->base + DAVINCI_MMCST0); + dev_dbg(mmc_dev(host->mmc), + "Spurious interrupt 0x%04x\n", status); + /* Disable the interrupt from mmcsd */ + writel(0, host->base + DAVINCI_MMCIM); + return IRQ_NONE; + } + + status = readl(host->base + DAVINCI_MMCST0); + qstatus = status; + + /* handle FIFO first when using PIO for data. + * bytes_left will decrease to zero as I/O progress and status will + * read zero over iteration because this controller status + * register(MMCST0) reports any status only once and it is cleared + * by read. So, it is not unbouned loop even in the case of + * non-dma. 
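+	 * In other words: MMCST0 is clear-on-read, so each pass of the loop
+	 * below sees only freshly raised DXRDY/DRRDY bits, and the loop stops
+	 * once bytes_left hits zero or no new ready bits are reported.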
+ */ + while (host->bytes_left && (status & (MMCST0_DXRDY | MMCST0_DRRDY))) { + davinci_fifo_data_trans(host, rw_threshold); + status = readl(host->base + DAVINCI_MMCST0); + if (!status) + break; + qstatus |= status; + } + + if (qstatus & MMCST0_DATDNE) { + /* All blocks sent/received, and CRC checks passed */ + if (data != NULL) { + if ((host->do_dma == 0) && (host->bytes_left > 0)) { + /* if datasize < rw_threshold + * no RX ints are generated + */ + davinci_fifo_data_trans(host, host->bytes_left); + } + end_transfer = 1; + data->bytes_xfered = data->blocks * data->blksz; + } else { + dev_err(mmc_dev(host->mmc), + "DATDNE with no host->data\n"); + } + } + + if (qstatus & MMCST0_TOUTRD) { + /* Read data timeout */ + data->error = -ETIMEDOUT; + end_transfer = 1; + + dev_dbg(mmc_dev(host->mmc), + "read data timeout, status %x\n", + qstatus); + + davinci_abort_data(host, data); + } + + if (qstatus & (MMCST0_CRCWR | MMCST0_CRCRD)) { + /* Data CRC error */ + data->error = -EILSEQ; + end_transfer = 1; + + /* NOTE: this controller uses CRCWR to report both CRC + * errors and timeouts (on writes). MMCDRSP values are + * only weakly documented, but 0x9f was clearly a timeout + * case and the two three-bit patterns in various SD specs + * (101, 010) aren't part of it ... + */ + if (qstatus & MMCST0_CRCWR) { + u32 temp = readb(host->base + DAVINCI_MMCDRSP); + + if (temp == 0x9f) + data->error = -ETIMEDOUT; + } + dev_dbg(mmc_dev(host->mmc), "data %s %s error\n", + (qstatus & MMCST0_CRCWR) ? "write" : "read", + (data->error == -ETIMEDOUT) ? "timeout" : "CRC"); + + davinci_abort_data(host, data); + } + + if (qstatus & MMCST0_TOUTRS) { + /* Command timeout */ + if (host->cmd) { + dev_dbg(mmc_dev(host->mmc), + "CMD%d timeout, status %x\n", + host->cmd->opcode, qstatus); + host->cmd->error = -ETIMEDOUT; + if (data) { + end_transfer = 1; + davinci_abort_data(host, data); + } else + end_command = 1; + } + } + + if (qstatus & MMCST0_CRCRS) { + /* Command CRC error */ + dev_dbg(mmc_dev(host->mmc), "Command CRC error\n"); + if (host->cmd) { + host->cmd->error = -EILSEQ; + end_command = 1; + } + } + + if (qstatus & MMCST0_RSPDNE) { + /* End of command phase */ + end_command = (int) host->cmd; + } + + if (end_command) + mmc_davinci_cmd_done(host, host->cmd); + if (end_transfer) + mmc_davinci_xfer_done(host, data); + return IRQ_HANDLED; +} + +static int mmc_davinci_get_cd(struct mmc_host *mmc) +{ + struct platform_device *pdev = to_platform_device(mmc->parent); + struct davinci_mmc_config *config = pdev->dev.platform_data; + + if (!config || !config->get_cd) + return -ENOSYS; + return config->get_cd(pdev->id); +} + +static int mmc_davinci_get_ro(struct mmc_host *mmc) +{ + struct platform_device *pdev = to_platform_device(mmc->parent); + struct davinci_mmc_config *config = pdev->dev.platform_data; + + if (!config || !config->get_ro) + return -ENOSYS; + return config->get_ro(pdev->id); +} + +static struct mmc_host_ops mmc_davinci_ops = { + .request = mmc_davinci_request, + .set_ios = mmc_davinci_set_ios, + .get_cd = mmc_davinci_get_cd, + .get_ro = mmc_davinci_get_ro, +}; + +/*----------------------------------------------------------------------*/ + +#ifdef CONFIG_CPU_FREQ +static int mmc_davinci_cpufreq_transition(struct notifier_block *nb, + unsigned long val, void *data) +{ + struct mmc_davinci_host *host; + unsigned int mmc_pclk; + struct mmc_host *mmc; + unsigned long flags; + + host = container_of(nb, struct mmc_davinci_host, freq_transition); + mmc = host->mmc; + mmc_pclk = clk_get_rate(host->clk); + + if 
(val == CPUFREQ_POSTCHANGE) { + spin_lock_irqsave(&mmc->lock, flags); + host->mmc_input_clk = mmc_pclk; + calculate_clk_divider(mmc, &mmc->ios); + spin_unlock_irqrestore(&mmc->lock, flags); + } + + return 0; +} + +static inline int mmc_davinci_cpufreq_register(struct mmc_davinci_host *host) +{ + host->freq_transition.notifier_call = mmc_davinci_cpufreq_transition; + + return cpufreq_register_notifier(&host->freq_transition, + CPUFREQ_TRANSITION_NOTIFIER); +} + +static inline void mmc_davinci_cpufreq_deregister(struct mmc_davinci_host *host) +{ + cpufreq_unregister_notifier(&host->freq_transition, + CPUFREQ_TRANSITION_NOTIFIER); +} +#else +static inline int mmc_davinci_cpufreq_register(struct mmc_davinci_host *host) +{ + return 0; +} + +static inline void mmc_davinci_cpufreq_deregister(struct mmc_davinci_host *host) +{ +} +#endif +static void __init init_mmcsd_host(struct mmc_davinci_host *host) +{ + /* DAT line portion is diabled and in reset state */ + writel(readl(host->base + DAVINCI_MMCCTL) | MMCCTL_DATRST, + host->base + DAVINCI_MMCCTL); + + /* CMD line portion is diabled and in reset state */ + writel(readl(host->base + DAVINCI_MMCCTL) | MMCCTL_CMDRST, + host->base + DAVINCI_MMCCTL); + + udelay(10); + + writel(0, host->base + DAVINCI_MMCCLK); + writel(MMCCLK_CLKEN, host->base + DAVINCI_MMCCLK); + + writel(0x1FFF, host->base + DAVINCI_MMCTOR); + writel(0xFFFF, host->base + DAVINCI_MMCTOD); + + writel(readl(host->base + DAVINCI_MMCCTL) & ~MMCCTL_DATRST, + host->base + DAVINCI_MMCCTL); + writel(readl(host->base + DAVINCI_MMCCTL) & ~MMCCTL_CMDRST, + host->base + DAVINCI_MMCCTL); + + udelay(10); +} + +static int __init davinci_mmcsd_probe(struct platform_device *pdev) +{ + struct davinci_mmc_config *pdata = pdev->dev.platform_data; + struct mmc_davinci_host *host = NULL; + struct mmc_host *mmc = NULL; + struct resource *r, *mem = NULL; + int ret = 0, irq = 0; + size_t mem_size; + + /* REVISIT: when we're fully converted, fail if pdata is NULL */ + + ret = -ENODEV; + r = platform_get_resource(pdev, IORESOURCE_MEM, 0); + irq = platform_get_irq(pdev, 0); + if (!r || irq == NO_IRQ) + goto out; + + ret = -EBUSY; + mem_size = resource_size(r); + mem = request_mem_region(r->start, mem_size, pdev->name); + if (!mem) + goto out; + + ret = -ENOMEM; + mmc = mmc_alloc_host(sizeof(struct mmc_davinci_host), &pdev->dev); + if (!mmc) + goto out; + + host = mmc_priv(mmc); + host->mmc = mmc; /* Important */ + + r = platform_get_resource(pdev, IORESOURCE_DMA, 0); + if (!r) + goto out; + host->rxdma = r->start; + + r = platform_get_resource(pdev, IORESOURCE_DMA, 1); + if (!r) + goto out; + host->txdma = r->start; + + host->mem_res = mem; + host->base = ioremap(mem->start, mem_size); + if (!host->base) + goto out; + + ret = -ENXIO; + host->clk = clk_get(&pdev->dev, "MMCSDCLK"); + if (IS_ERR(host->clk)) { + ret = PTR_ERR(host->clk); + goto out; + } + clk_enable(host->clk); + host->mmc_input_clk = clk_get_rate(host->clk); + + init_mmcsd_host(host); + + host->use_dma = use_dma; + host->irq = irq; + + if (host->use_dma && davinci_acquire_dma_channels(host) != 0) + host->use_dma = 0; + + /* REVISIT: someday, support IRQ-driven card detection. 
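+	 * Until then, MMC_CAP_NEEDS_POLL (set just below) has the core poll
+	 * the slot for card insertion and removal.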
*/ + mmc->caps |= MMC_CAP_NEEDS_POLL; + + if (!pdata || pdata->wires == 4 || pdata->wires == 0) + mmc->caps |= MMC_CAP_4_BIT_DATA; + + host->version = pdata->version; + + mmc->ops = &mmc_davinci_ops; + mmc->f_min = 312500; + mmc->f_max = 25000000; + if (pdata && pdata->max_freq) + mmc->f_max = pdata->max_freq; + if (pdata && pdata->caps) + mmc->caps |= pdata->caps; + mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34; + + /* With no iommu coalescing pages, each phys_seg is a hw_seg. + * Each hw_seg uses one EDMA parameter RAM slot, always one + * channel and then usually some linked slots. + */ + mmc->max_hw_segs = 1 + host->n_link; + mmc->max_phys_segs = mmc->max_hw_segs; + + /* EDMA limit per hw segment (one or two MBytes) */ + mmc->max_seg_size = MAX_CCNT * rw_threshold; + + /* MMC/SD controller limits for multiblock requests */ + mmc->max_blk_size = 4095; /* BLEN is 12 bits */ + mmc->max_blk_count = 65535; /* NBLK is 16 bits */ + mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count; + + dev_dbg(mmc_dev(host->mmc), "max_phys_segs=%d\n", mmc->max_phys_segs); + dev_dbg(mmc_dev(host->mmc), "max_hw_segs=%d\n", mmc->max_hw_segs); + dev_dbg(mmc_dev(host->mmc), "max_blk_size=%d\n", mmc->max_blk_size); + dev_dbg(mmc_dev(host->mmc), "max_req_size=%d\n", mmc->max_req_size); + dev_dbg(mmc_dev(host->mmc), "max_seg_size=%d\n", mmc->max_seg_size); + + platform_set_drvdata(pdev, host); + + ret = mmc_davinci_cpufreq_register(host); + if (ret) { + dev_err(&pdev->dev, "failed to register cpufreq\n"); + goto cpu_freq_fail; + } + + ret = mmc_add_host(mmc); + if (ret < 0) + goto out; + + ret = request_irq(irq, mmc_davinci_irq, 0, mmc_hostname(mmc), host); + if (ret) + goto out; + + rename_region(mem, mmc_hostname(mmc)); + + dev_info(mmc_dev(host->mmc), "Using %s, %d-bit mode\n", + host->use_dma ? "DMA" : "PIO", + (mmc->caps & MMC_CAP_4_BIT_DATA) ? 
4 : 1); + + return 0; + +out: + mmc_davinci_cpufreq_deregister(host); +cpu_freq_fail: + if (host) { + davinci_release_dma_channels(host); + + if (host->clk) { + clk_disable(host->clk); + clk_put(host->clk); + } + + if (host->base) + iounmap(host->base); + } + + if (mmc) + mmc_free_host(mmc); + + if (mem) + release_resource(mem); + + dev_dbg(&pdev->dev, "probe err %d\n", ret); + + return ret; +} + +static int __exit davinci_mmcsd_remove(struct platform_device *pdev) +{ + struct mmc_davinci_host *host = platform_get_drvdata(pdev); + + platform_set_drvdata(pdev, NULL); + if (host) { + mmc_davinci_cpufreq_deregister(host); + + mmc_remove_host(host->mmc); + free_irq(host->irq, host); + + davinci_release_dma_channels(host); + + clk_disable(host->clk); + clk_put(host->clk); + + iounmap(host->base); + + release_resource(host->mem_res); + + mmc_free_host(host->mmc); + } + + return 0; +} + +#ifdef CONFIG_PM +static int davinci_mmcsd_suspend(struct platform_device *pdev, pm_message_t msg) +{ + struct mmc_davinci_host *host = platform_get_drvdata(pdev); + + return mmc_suspend_host(host->mmc, msg); +} + +static int davinci_mmcsd_resume(struct platform_device *pdev) +{ + struct mmc_davinci_host *host = platform_get_drvdata(pdev); + + return mmc_resume_host(host->mmc); +} +#else +#define davinci_mmcsd_suspend NULL +#define davinci_mmcsd_resume NULL +#endif + +static struct platform_driver davinci_mmcsd_driver = { + .driver = { + .name = "davinci_mmc", + .owner = THIS_MODULE, + }, + .remove = __exit_p(davinci_mmcsd_remove), + .suspend = davinci_mmcsd_suspend, + .resume = davinci_mmcsd_resume, +}; + +static int __init davinci_mmcsd_init(void) +{ + return platform_driver_probe(&davinci_mmcsd_driver, + davinci_mmcsd_probe); +} +module_init(davinci_mmcsd_init); + +static void __exit davinci_mmcsd_exit(void) +{ + platform_driver_unregister(&davinci_mmcsd_driver); +} +module_exit(davinci_mmcsd_exit); + +MODULE_AUTHOR("Texas Instruments India"); +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("MMC/SD driver for Davinci MMC controller"); + diff --git a/drivers/mmc/host/mxcmmc.c b/drivers/mmc/host/mxcmmc.c index 88671529c45..60a2b69e54f 100644 --- a/drivers/mmc/host/mxcmmc.c +++ b/drivers/mmc/host/mxcmmc.c @@ -679,17 +679,17 @@ static int mxcmci_probe(struct platform_device *pdev) { struct mmc_host *mmc; struct mxcmci_host *host = NULL; - struct resource *r; + struct resource *iores, *r; int ret = 0, irq; printk(KERN_INFO "i.MX SDHC driver\n"); - r = platform_get_resource(pdev, IORESOURCE_MEM, 0); + iores = platform_get_resource(pdev, IORESOURCE_MEM, 0); irq = platform_get_irq(pdev, 0); - if (!r || irq < 0) + if (!iores || irq < 0) return -EINVAL; - r = request_mem_region(r->start, resource_size(r), pdev->name); + r = request_mem_region(iores->start, resource_size(iores), pdev->name); if (!r) return -EBUSY; @@ -809,7 +809,7 @@ out_iounmap: out_free: mmc_free_host(mmc); out_release_mem: - release_mem_region(host->res->start, resource_size(host->res)); + release_mem_region(iores->start, resource_size(iores)); return ret; } diff --git a/drivers/mmc/host/omap.c b/drivers/mmc/host/omap.c index 5f970e253e5..c6d7e8ecadb 100644 --- a/drivers/mmc/host/omap.c +++ b/drivers/mmc/host/omap.c @@ -1459,8 +1459,10 @@ static int __init mmc_omap_probe(struct platform_device *pdev) goto err_ioremap; host->iclk = clk_get(&pdev->dev, "ick"); - if (IS_ERR(host->iclk)) + if (IS_ERR(host->iclk)) { + ret = PTR_ERR(host->iclk); goto err_free_mmc_host; + } clk_enable(host->iclk); host->fclk = clk_get(&pdev->dev, "fck"); @@ -1500,10 +1502,8 @@ 
err_free_irq: err_free_fclk: clk_put(host->fclk); err_free_iclk: - if (host->iclk != NULL) { - clk_disable(host->iclk); - clk_put(host->iclk); - } + clk_disable(host->iclk); + clk_put(host->iclk); err_free_mmc_host: iounmap(host->virt_base); err_ioremap: diff --git a/drivers/mmc/host/pxamci.c b/drivers/mmc/host/pxamci.c index bb47ff465c0..0d783f3e79e 100644 --- a/drivers/mmc/host/pxamci.c +++ b/drivers/mmc/host/pxamci.c @@ -828,7 +828,7 @@ static int pxamci_resume(struct device *dev) return ret; } -static struct dev_pm_ops pxamci_pm_ops = { +static const struct dev_pm_ops pxamci_pm_ops = { .suspend = pxamci_suspend, .resume = pxamci_resume, }; diff --git a/drivers/mmc/host/s3cmci.c b/drivers/mmc/host/s3cmci.c index 941a4d35ef8..d96e1abf2d6 100644 --- a/drivers/mmc/host/s3cmci.c +++ b/drivers/mmc/host/s3cmci.c @@ -820,7 +820,7 @@ fail_request: static void finalize_request(struct s3cmci_host *host) { struct mmc_request *mrq = host->mrq; - struct mmc_command *cmd = host->cmd_is_stop ? mrq->stop : mrq->cmd; + struct mmc_command *cmd; int debug_as_failure = 0; if (host->complete_what != COMPLETION_FINALIZE) @@ -828,6 +828,7 @@ static void finalize_request(struct s3cmci_host *host) if (!mrq) return; + cmd = host->cmd_is_stop ? mrq->stop : mrq->cmd; if (cmd->data && (cmd->error == 0) && (cmd->data->error == 0)) { @@ -1302,10 +1303,8 @@ static int s3cmci_get_ro(struct mmc_host *mmc) if (pdata->no_wprotect) return 0; - ret = s3c2410_gpio_getpin(pdata->gpio_wprotect); - - if (pdata->wprotect_invert) - ret = !ret; + ret = gpio_get_value(pdata->gpio_wprotect) ? 1 : 0; + ret ^= pdata->wprotect_invert; return ret; } @@ -1654,7 +1653,7 @@ static int __devinit s3cmci_probe(struct platform_device *pdev) goto probe_free_irq; } - host->irq_cd = s3c2410_gpio_getirq(host->pdata->gpio_detect); + host->irq_cd = gpio_to_irq(host->pdata->gpio_detect); if (host->irq_cd >= 0) { if (request_irq(host->irq_cd, s3cmci_irq_cd, @@ -1892,7 +1891,7 @@ static int s3cmci_resume(struct device *dev) return mmc_resume_host(mmc); } -static struct dev_pm_ops s3cmci_pm = { +static const struct dev_pm_ops s3cmci_pm = { .suspend = s3cmci_suspend, .resume = s3cmci_resume, }; diff --git a/drivers/mmc/host/sdhci-pci.c b/drivers/mmc/host/sdhci-pci.c index e0356644d1a..5c3a1767770 100644 --- a/drivers/mmc/host/sdhci-pci.c +++ b/drivers/mmc/host/sdhci-pci.c @@ -285,6 +285,73 @@ static const struct sdhci_pci_fixes sdhci_jmicron = { .resume = jmicron_resume, }; +/* SysKonnect CardBus2SDIO extra registers */ +#define SYSKT_CTRL 0x200 +#define SYSKT_RDFIFO_STAT 0x204 +#define SYSKT_WRFIFO_STAT 0x208 +#define SYSKT_POWER_DATA 0x20c +#define SYSKT_POWER_330 0xef +#define SYSKT_POWER_300 0xf8 +#define SYSKT_POWER_184 0xcc +#define SYSKT_POWER_CMD 0x20d +#define SYSKT_POWER_START (1 << 7) +#define SYSKT_POWER_STATUS 0x20e +#define SYSKT_POWER_STATUS_OK (1 << 0) +#define SYSKT_BOARD_REV 0x210 +#define SYSKT_CHIP_REV 0x211 +#define SYSKT_CONF_DATA 0x212 +#define SYSKT_CONF_DATA_1V8 (1 << 2) +#define SYSKT_CONF_DATA_2V5 (1 << 1) +#define SYSKT_CONF_DATA_3V3 (1 << 0) + +static int syskt_probe(struct sdhci_pci_chip *chip) +{ + if ((chip->pdev->class & 0x0000FF) == PCI_SDHCI_IFVENDOR) { + chip->pdev->class &= ~0x0000FF; + chip->pdev->class |= PCI_SDHCI_IFDMA; + } + return 0; +} + +static int syskt_probe_slot(struct sdhci_pci_slot *slot) +{ + int tm, ps; + + u8 board_rev = readb(slot->host->ioaddr + SYSKT_BOARD_REV); + u8 chip_rev = readb(slot->host->ioaddr + SYSKT_CHIP_REV); + dev_info(&slot->chip->pdev->dev, "SysKonnect CardBus2SDIO, " + "board rev 
%d.%d, chip rev %d.%d\n", + board_rev >> 4, board_rev & 0xf, + chip_rev >> 4, chip_rev & 0xf); + if (chip_rev >= 0x20) + slot->host->quirks |= SDHCI_QUIRK_FORCE_DMA; + + writeb(SYSKT_POWER_330, slot->host->ioaddr + SYSKT_POWER_DATA); + writeb(SYSKT_POWER_START, slot->host->ioaddr + SYSKT_POWER_CMD); + udelay(50); + tm = 10; /* Wait max 1 ms */ + do { + ps = readw(slot->host->ioaddr + SYSKT_POWER_STATUS); + if (ps & SYSKT_POWER_STATUS_OK) + break; + udelay(100); + } while (--tm); + if (!tm) { + dev_err(&slot->chip->pdev->dev, + "power regulator never stabilized"); + writeb(0, slot->host->ioaddr + SYSKT_POWER_CMD); + return -ENODEV; + } + + return 0; +} + +static const struct sdhci_pci_fixes sdhci_syskt = { + .quirks = SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER, + .probe = syskt_probe, + .probe_slot = syskt_probe_slot, +}; + static int via_probe(struct sdhci_pci_chip *chip) { if (chip->pdev->revision == 0x10) @@ -363,6 +430,14 @@ static const struct pci_device_id pci_ids[] __devinitdata = { }, { + .vendor = PCI_VENDOR_ID_SYSKONNECT, + .device = 0x8000, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .driver_data = (kernel_ulong_t)&sdhci_syskt, + }, + + { .vendor = PCI_VENDOR_ID_VIA, .device = 0x95d0, .subvendor = PCI_ANY_ID, diff --git a/drivers/mmc/host/tmio_mmc.c b/drivers/mmc/host/tmio_mmc.c index 91991b460c4..7cccc852374 100644 --- a/drivers/mmc/host/tmio_mmc.c +++ b/drivers/mmc/host/tmio_mmc.c @@ -591,7 +591,7 @@ static int __devinit tmio_mmc_probe(struct platform_device *dev) disable_mmc_irqs(host, TMIO_MASK_ALL); ret = request_irq(host->irq, tmio_mmc_irq, IRQF_DISABLED | - IRQF_TRIGGER_FALLING, "tmio-mmc", host); + IRQF_TRIGGER_FALLING, dev_name(&dev->dev), host); if (ret) goto unmap_cnf; diff --git a/drivers/mtd/nand/nomadik_nand.c b/drivers/mtd/nand/nomadik_nand.c index 7c302d55910..66123419f65 100644 --- a/drivers/mtd/nand/nomadik_nand.c +++ b/drivers/mtd/nand/nomadik_nand.c @@ -216,7 +216,7 @@ static int nomadik_nand_resume(struct device *dev) return 0; } -static struct dev_pm_ops nomadik_nand_pm_ops = { +static const struct dev_pm_ops nomadik_nand_pm_ops = { .suspend = nomadik_nand_suspend, .resume = nomadik_nand_resume, }; diff --git a/drivers/net/3c59x.c b/drivers/net/3c59x.c index 78b7167a8ce..39db0e96815 100644 --- a/drivers/net/3c59x.c +++ b/drivers/net/3c59x.c @@ -837,7 +837,7 @@ static int vortex_resume(struct device *dev) return 0; } -static struct dev_pm_ops vortex_pm_ops = { +static const struct dev_pm_ops vortex_pm_ops = { .suspend = vortex_suspend, .resume = vortex_resume, .freeze = vortex_suspend, diff --git a/drivers/net/dm9000.c b/drivers/net/dm9000.c index 0cbe3c0e7c0..b3773006568 100644 --- a/drivers/net/dm9000.c +++ b/drivers/net/dm9000.c @@ -1646,7 +1646,7 @@ dm9000_drv_resume(struct device *dev) return 0; } -static struct dev_pm_ops dm9000_drv_pm_ops = { +static const struct dev_pm_ops dm9000_drv_pm_ops = { .suspend = dm9000_drv_suspend, .resume = dm9000_drv_resume, }; diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c index acfc5a3aa49..60f96c468a2 100644 --- a/drivers/net/r8169.c +++ b/drivers/net/r8169.c @@ -4859,7 +4859,7 @@ out: return 0; } -static struct dev_pm_ops rtl8169_pm_ops = { +static const struct dev_pm_ops rtl8169_pm_ops = { .suspend = rtl8169_suspend, .resume = rtl8169_resume, .freeze = rtl8169_suspend, diff --git a/drivers/net/smsc911x.c b/drivers/net/smsc911x.c index 20d6095cf41..494cd91ea39 100644 --- a/drivers/net/smsc911x.c +++ b/drivers/net/smsc911x.c @@ -2154,7 +2154,7 @@ static int smsc911x_resume(struct device *dev) return (to == 
0) ? -EIO : 0; } -static struct dev_pm_ops smsc911x_pm_ops = { +static const struct dev_pm_ops smsc911x_pm_ops = { .suspend = smsc911x_suspend, .resume = smsc911x_resume, }; diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c index 1ceb9d0f8b9..9cc438282d7 100644 --- a/drivers/net/vmxnet3/vmxnet3_drv.c +++ b/drivers/net/vmxnet3/vmxnet3_drv.c @@ -2689,7 +2689,7 @@ vmxnet3_resume(struct device *device) return 0; } -static struct dev_pm_ops vmxnet3_pm_ops = { +static const struct dev_pm_ops vmxnet3_pm_ops = { .suspend = vmxnet3_suspend, .resume = vmxnet3_resume, }; diff --git a/drivers/parisc/pdc_stable.c b/drivers/parisc/pdc_stable.c index 13a64bc081b..0bc5d474b16 100644 --- a/drivers/parisc/pdc_stable.c +++ b/drivers/parisc/pdc_stable.c @@ -779,12 +779,9 @@ static ssize_t pdcs_auto_write(struct kobject *kobj, read_unlock(&pathentry->rw_lock); DPRINTK("%s: flags before: 0x%X\n", __func__, flags); - - temp = in; - - while (*temp && isspace(*temp)) - temp++; - + + temp = skip_spaces(in); + c = *temp++ - '0'; if ((c != 0) && (c != 1)) goto parse_error; diff --git a/drivers/pci/pcie/portdrv_pci.c b/drivers/pci/pcie/portdrv_pci.c index ce52ea34fee..a49452e2aed 100644 --- a/drivers/pci/pcie/portdrv_pci.c +++ b/drivers/pci/pcie/portdrv_pci.c @@ -43,7 +43,7 @@ static int pcie_portdrv_restore_config(struct pci_dev *dev) } #ifdef CONFIG_PM -static struct dev_pm_ops pcie_portdrv_pm_ops = { +static const struct dev_pm_ops pcie_portdrv_pm_ops = { .suspend = pcie_port_device_suspend, .resume = pcie_port_device_resume, .freeze = pcie_port_device_suspend, diff --git a/drivers/pcmcia/pxa2xx_base.c b/drivers/pcmcia/pxa2xx_base.c index da346eb7e77..3aabf1e3798 100644 --- a/drivers/pcmcia/pxa2xx_base.c +++ b/drivers/pcmcia/pxa2xx_base.c @@ -336,7 +336,7 @@ static int pxa2xx_drv_pcmcia_resume(struct device *dev) return pcmcia_socket_dev_resume(dev); } -static struct dev_pm_ops pxa2xx_drv_pcmcia_pm_ops = { +static const struct dev_pm_ops pxa2xx_drv_pcmcia_pm_ops = { .suspend = pxa2xx_drv_pcmcia_suspend, .resume = pxa2xx_drv_pcmcia_resume, }; diff --git a/drivers/pcmcia/yenta_socket.c b/drivers/pcmcia/yenta_socket.c index fe02cfd4b5e..e4d12acdd52 100644 --- a/drivers/pcmcia/yenta_socket.c +++ b/drivers/pcmcia/yenta_socket.c @@ -1330,7 +1330,7 @@ static int yenta_dev_resume(struct device *dev) return 0; } -static struct dev_pm_ops yenta_pm_ops = { +static const struct dev_pm_ops yenta_pm_ops = { .suspend_noirq = yenta_dev_suspend_noirq, .resume_noirq = yenta_dev_resume_noirq, .resume = yenta_dev_resume, diff --git a/drivers/platform/x86/acerhdf.c b/drivers/platform/x86/acerhdf.c index ab64522aaa6..be27aa47e81 100644 --- a/drivers/platform/x86/acerhdf.c +++ b/drivers/platform/x86/acerhdf.c @@ -460,7 +460,7 @@ static int acerhdf_remove(struct platform_device *device) return 0; } -static struct dev_pm_ops acerhdf_pm_ops = { +static const struct dev_pm_ops acerhdf_pm_ops = { .suspend = acerhdf_suspend, .freeze = acerhdf_suspend, }; diff --git a/drivers/platform/x86/eeepc-laptop.c b/drivers/platform/x86/eeepc-laptop.c index 4226e535273..e647a856b9b 100644 --- a/drivers/platform/x86/eeepc-laptop.c +++ b/drivers/platform/x86/eeepc-laptop.c @@ -154,7 +154,7 @@ static struct eeepc_hotk *ehotk; static int eeepc_hotk_thaw(struct device *device); static int eeepc_hotk_restore(struct device *device); -static struct dev_pm_ops eeepc_pm_ops = { +static const struct dev_pm_ops eeepc_pm_ops = { .thaw = eeepc_hotk_thaw, .restore = eeepc_hotk_restore, }; diff --git a/drivers/platform/x86/hp-wmi.c 
b/drivers/platform/x86/hp-wmi.c index c2842171cec..f00a71c58e6 100644 --- a/drivers/platform/x86/hp-wmi.c +++ b/drivers/platform/x86/hp-wmi.c @@ -94,7 +94,7 @@ static struct rfkill *wifi_rfkill; static struct rfkill *bluetooth_rfkill; static struct rfkill *wwan_rfkill; -static struct dev_pm_ops hp_wmi_pm_ops = { +static const struct dev_pm_ops hp_wmi_pm_ops = { .resume = hp_wmi_resume_handler, .restore = hp_wmi_resume_handler, }; diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c index 0ed84806f8a..cf61d6a8ef6 100644 --- a/drivers/platform/x86/thinkpad_acpi.c +++ b/drivers/platform/x86/thinkpad_acpi.c @@ -1006,11 +1006,8 @@ static int parse_strtoul(const char *buf, { char *endp; - while (*buf && isspace(*buf)) - buf++; - *value = simple_strtoul(buf, &endp, 0); - while (*endp && isspace(*endp)) - endp++; + *value = simple_strtoul(skip_spaces(buf), &endp, 0); + endp = skip_spaces(endp); if (*endp || *value > max) return -EINVAL; diff --git a/drivers/pnp/interface.c b/drivers/pnp/interface.c index c3f1c8e9d25..68b0c04987e 100644 --- a/drivers/pnp/interface.c +++ b/drivers/pnp/interface.c @@ -310,8 +310,7 @@ static ssize_t pnp_set_current_resources(struct device *dmdev, goto done; } - while (isspace(*buf)) - ++buf; + buf = skip_spaces(buf); if (!strnicmp(buf, "disable", 7)) { retval = pnp_disable_dev(dev); goto done; @@ -353,19 +352,13 @@ static ssize_t pnp_set_current_resources(struct device *dmdev, pnp_init_resources(dev); mutex_lock(&pnp_res_mutex); while (1) { - while (isspace(*buf)) - ++buf; + buf = skip_spaces(buf); if (!strnicmp(buf, "io", 2)) { - buf += 2; - while (isspace(*buf)) - ++buf; + buf = skip_spaces(buf + 2); start = simple_strtoul(buf, &buf, 0); - while (isspace(*buf)) - ++buf; + buf = skip_spaces(buf); if (*buf == '-') { - buf += 1; - while (isspace(*buf)) - ++buf; + buf = skip_spaces(buf + 1); end = simple_strtoul(buf, &buf, 0); } else end = start; @@ -373,16 +366,11 @@ static ssize_t pnp_set_current_resources(struct device *dmdev, continue; } if (!strnicmp(buf, "mem", 3)) { - buf += 3; - while (isspace(*buf)) - ++buf; + buf = skip_spaces(buf + 3); start = simple_strtoul(buf, &buf, 0); - while (isspace(*buf)) - ++buf; + buf = skip_spaces(buf); if (*buf == '-') { - buf += 1; - while (isspace(*buf)) - ++buf; + buf = skip_spaces(buf + 1); end = simple_strtoul(buf, &buf, 0); } else end = start; @@ -390,17 +378,13 @@ static ssize_t pnp_set_current_resources(struct device *dmdev, continue; } if (!strnicmp(buf, "irq", 3)) { - buf += 3; - while (isspace(*buf)) - ++buf; + buf = skip_spaces(buf + 3); start = simple_strtoul(buf, &buf, 0); pnp_add_irq_resource(dev, start, 0); continue; } if (!strnicmp(buf, "dma", 3)) { - buf += 3; - while (isspace(*buf)) - ++buf; + buf = skip_spaces(buf + 3); start = simple_strtoul(buf, &buf, 0); pnp_add_dma_resource(dev, start, 0); continue; diff --git a/drivers/power/Kconfig b/drivers/power/Kconfig index 11867492551..d4b3d67f054 100644 --- a/drivers/power/Kconfig +++ b/drivers/power/Kconfig @@ -29,6 +29,13 @@ config APM_POWER Say Y here to enable support APM status emulation using battery class devices. +config WM831X_BACKUP + tristate "WM831X backup battery charger support" + depends on MFD_WM831X + help + Say Y here to enable support for the backup battery charger + in the Wolfson Microelectronics WM831x PMICs. 
+ config WM831X_POWER tristate "WM831X PMU support" depends on MFD_WM831X diff --git a/drivers/power/Makefile b/drivers/power/Makefile index 356cdfd3c8b..573597c683b 100644 --- a/drivers/power/Makefile +++ b/drivers/power/Makefile @@ -16,6 +16,7 @@ obj-$(CONFIG_POWER_SUPPLY) += power_supply.o obj-$(CONFIG_PDA_POWER) += pda_power.o obj-$(CONFIG_APM_POWER) += apm_power.o +obj-$(CONFIG_WM831X_BACKUP) += wm831x_backup.o obj-$(CONFIG_WM831X_POWER) += wm831x_power.o obj-$(CONFIG_WM8350_POWER) += wm8350_power.o diff --git a/drivers/power/pcf50633-charger.c b/drivers/power/pcf50633-charger.c index 6a84a8eb8d7..ea3fdfaca90 100644 --- a/drivers/power/pcf50633-charger.c +++ b/drivers/power/pcf50633-charger.c @@ -29,15 +29,12 @@ struct pcf50633_mbc { struct pcf50633 *pcf; - int adapter_active; int adapter_online; - int usb_active; int usb_online; struct power_supply usb; struct power_supply adapter; - - struct delayed_work charging_restart_work; + struct power_supply ac; }; int pcf50633_mbc_usb_curlim_set(struct pcf50633 *pcf, int ma) @@ -47,16 +44,21 @@ int pcf50633_mbc_usb_curlim_set(struct pcf50633 *pcf, int ma) u8 bits; int charging_start = 1; u8 mbcs2, chgmod; + unsigned int mbcc5; - if (ma >= 1000) + if (ma >= 1000) { bits = PCF50633_MBCC7_USB_1000mA; - else if (ma >= 500) + ma = 1000; + } else if (ma >= 500) { bits = PCF50633_MBCC7_USB_500mA; - else if (ma >= 100) + ma = 500; + } else if (ma >= 100) { bits = PCF50633_MBCC7_USB_100mA; - else { + ma = 100; + } else { bits = PCF50633_MBCC7_USB_SUSPEND; charging_start = 0; + ma = 0; } ret = pcf50633_reg_set_bit_mask(pcf, PCF50633_REG_MBCC7, @@ -66,21 +68,40 @@ int pcf50633_mbc_usb_curlim_set(struct pcf50633 *pcf, int ma) else dev_info(pcf->dev, "usb curlim to %d mA\n", ma); - /* Manual charging start */ - mbcs2 = pcf50633_reg_read(pcf, PCF50633_REG_MBCS2); + /* + * We limit the charging current to be the USB current limit. + * The reason is that on pcf50633, when it enters PMU Standby mode, + * which it does when the device goes "off", the USB current limit + * reverts to the variant default. In at least one common case, that + * default is 500mA. By setting the charging current to be the same + * as the USB limit we set here before PMU standby, we enforce it only + * using the correct amount of current even when the USB current limit + * gets reset to the wrong thing + */ + + if (mbc->pcf->pdata->charger_reference_current_ma) { + mbcc5 = (ma << 8) / mbc->pcf->pdata->charger_reference_current_ma; + if (mbcc5 > 255) + mbcc5 = 255; + pcf50633_reg_write(mbc->pcf, PCF50633_REG_MBCC5, mbcc5); + } + + mbcs2 = pcf50633_reg_read(mbc->pcf, PCF50633_REG_MBCS2); chgmod = (mbcs2 & PCF50633_MBCS2_MBC_MASK); /* If chgmod == BATFULL, setting chgena has no effect. - * We need to set resume instead. + * Datasheet says we need to set resume instead but when autoresume is + * used resume doesn't work. Clear and set chgena instead. 
*/ if (chgmod != PCF50633_MBCS2_MBC_BAT_FULL) pcf50633_reg_set_bit_mask(pcf, PCF50633_REG_MBCC1, PCF50633_MBCC1_CHGENA, PCF50633_MBCC1_CHGENA); - else + else { + pcf50633_reg_clear_bits(pcf, PCF50633_REG_MBCC1, + PCF50633_MBCC1_CHGENA); pcf50633_reg_set_bit_mask(pcf, PCF50633_REG_MBCC1, - PCF50633_MBCC1_RESUME, PCF50633_MBCC1_RESUME); - - mbc->usb_active = charging_start; + PCF50633_MBCC1_CHGENA, PCF50633_MBCC1_CHGENA); + } power_supply_changed(&mbc->usb); @@ -92,20 +113,44 @@ int pcf50633_mbc_get_status(struct pcf50633 *pcf) { struct pcf50633_mbc *mbc = platform_get_drvdata(pcf->mbc_pdev); int status = 0; + u8 chgmod; + + if (!mbc) + return 0; + + chgmod = pcf50633_reg_read(mbc->pcf, PCF50633_REG_MBCS2) + & PCF50633_MBCS2_MBC_MASK; if (mbc->usb_online) status |= PCF50633_MBC_USB_ONLINE; - if (mbc->usb_active) + if (chgmod == PCF50633_MBCS2_MBC_USB_PRE || + chgmod == PCF50633_MBCS2_MBC_USB_PRE_WAIT || + chgmod == PCF50633_MBCS2_MBC_USB_FAST || + chgmod == PCF50633_MBCS2_MBC_USB_FAST_WAIT) status |= PCF50633_MBC_USB_ACTIVE; if (mbc->adapter_online) status |= PCF50633_MBC_ADAPTER_ONLINE; - if (mbc->adapter_active) + if (chgmod == PCF50633_MBCS2_MBC_ADP_PRE || + chgmod == PCF50633_MBCS2_MBC_ADP_PRE_WAIT || + chgmod == PCF50633_MBCS2_MBC_ADP_FAST || + chgmod == PCF50633_MBCS2_MBC_ADP_FAST_WAIT) status |= PCF50633_MBC_ADAPTER_ACTIVE; return status; } EXPORT_SYMBOL_GPL(pcf50633_mbc_get_status); +int pcf50633_mbc_get_usb_online_status(struct pcf50633 *pcf) +{ + struct pcf50633_mbc *mbc = platform_get_drvdata(pcf->mbc_pdev); + + if (!mbc) + return 0; + + return mbc->usb_online; +} +EXPORT_SYMBOL_GPL(pcf50633_mbc_get_usb_online_status); + static ssize_t show_chgmode(struct device *dev, struct device_attribute *attr, char *buf) { @@ -156,9 +201,55 @@ static ssize_t set_usblim(struct device *dev, static DEVICE_ATTR(usb_curlim, S_IRUGO | S_IWUSR, show_usblim, set_usblim); +static ssize_t +show_chglim(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct pcf50633_mbc *mbc = dev_get_drvdata(dev); + u8 mbcc5 = pcf50633_reg_read(mbc->pcf, PCF50633_REG_MBCC5); + unsigned int ma; + + if (!mbc->pcf->pdata->charger_reference_current_ma) + return -ENODEV; + + ma = (mbc->pcf->pdata->charger_reference_current_ma * mbcc5) >> 8; + + return sprintf(buf, "%u\n", ma); +} + +static ssize_t set_chglim(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct pcf50633_mbc *mbc = dev_get_drvdata(dev); + unsigned long ma; + unsigned int mbcc5; + int ret; + + if (!mbc->pcf->pdata->charger_reference_current_ma) + return -ENODEV; + + ret = strict_strtoul(buf, 10, &ma); + if (ret) + return -EINVAL; + + mbcc5 = (ma << 8) / mbc->pcf->pdata->charger_reference_current_ma; + if (mbcc5 > 255) + mbcc5 = 255; + pcf50633_reg_write(mbc->pcf, PCF50633_REG_MBCC5, mbcc5); + + return count; +} + +/* + * This attribute allows to change MBC charging limit on the fly + * independently of usb current limit. It also gets set automatically every + * time usb current limit is changed. + */ +static DEVICE_ATTR(chg_curlim, S_IRUGO | S_IWUSR, show_chglim, set_chglim); + static struct attribute *pcf50633_mbc_sysfs_entries[] = { &dev_attr_chgmode.attr, &dev_attr_usb_curlim.attr, + &dev_attr_chg_curlim.attr, NULL, }; @@ -167,76 +258,26 @@ static struct attribute_group mbc_attr_group = { .attrs = pcf50633_mbc_sysfs_entries, }; -/* MBC state machine switches into charging mode when the battery voltage - * falls below 96% of a battery float voltage. 
But the voltage drop in Li-ion - * batteries is marginal(1~2 %) till about 80% of its capacity - which means, - * after a BATFULL, charging won't be restarted until 80%. - * - * This work_struct function restarts charging at regular intervals to make - * sure we don't discharge too much - */ - -static void pcf50633_mbc_charging_restart(struct work_struct *work) -{ - struct pcf50633_mbc *mbc; - u8 mbcs2, chgmod; - - mbc = container_of(work, struct pcf50633_mbc, - charging_restart_work.work); - - mbcs2 = pcf50633_reg_read(mbc->pcf, PCF50633_REG_MBCS2); - chgmod = (mbcs2 & PCF50633_MBCS2_MBC_MASK); - - if (chgmod != PCF50633_MBCS2_MBC_BAT_FULL) - return; - - /* Restart charging */ - pcf50633_reg_set_bit_mask(mbc->pcf, PCF50633_REG_MBCC1, - PCF50633_MBCC1_RESUME, PCF50633_MBCC1_RESUME); - mbc->usb_active = 1; - power_supply_changed(&mbc->usb); - - dev_info(mbc->pcf->dev, "Charging restarted\n"); -} - static void pcf50633_mbc_irq_handler(int irq, void *data) { struct pcf50633_mbc *mbc = data; - int chg_restart_interval = - mbc->pcf->pdata->charging_restart_interval; /* USB */ if (irq == PCF50633_IRQ_USBINS) { mbc->usb_online = 1; } else if (irq == PCF50633_IRQ_USBREM) { mbc->usb_online = 0; - mbc->usb_active = 0; pcf50633_mbc_usb_curlim_set(mbc->pcf, 0); - cancel_delayed_work_sync(&mbc->charging_restart_work); } /* Adapter */ - if (irq == PCF50633_IRQ_ADPINS) { + if (irq == PCF50633_IRQ_ADPINS) mbc->adapter_online = 1; - mbc->adapter_active = 1; - } else if (irq == PCF50633_IRQ_ADPREM) { + else if (irq == PCF50633_IRQ_ADPREM) mbc->adapter_online = 0; - mbc->adapter_active = 0; - } - - if (irq == PCF50633_IRQ_BATFULL) { - mbc->usb_active = 0; - mbc->adapter_active = 0; - - if (chg_restart_interval > 0) - schedule_delayed_work(&mbc->charging_restart_work, - chg_restart_interval); - } else if (irq == PCF50633_IRQ_USBLIMON) - mbc->usb_active = 0; - else if (irq == PCF50633_IRQ_USBLIMOFF) - mbc->usb_active = 1; + power_supply_changed(&mbc->ac); power_supply_changed(&mbc->usb); power_supply_changed(&mbc->adapter); @@ -269,10 +310,34 @@ static int usb_get_property(struct power_supply *psy, { struct pcf50633_mbc *mbc = container_of(psy, struct pcf50633_mbc, usb); int ret = 0; + u8 usblim = pcf50633_reg_read(mbc->pcf, PCF50633_REG_MBCC7) & + PCF50633_MBCC7_USB_MASK; switch (psp) { case POWER_SUPPLY_PROP_ONLINE: - val->intval = mbc->usb_online; + val->intval = mbc->usb_online && + (usblim <= PCF50633_MBCC7_USB_500mA); + break; + default: + ret = -EINVAL; + break; + } + return ret; +} + +static int ac_get_property(struct power_supply *psy, + enum power_supply_property psp, + union power_supply_propval *val) +{ + struct pcf50633_mbc *mbc = container_of(psy, struct pcf50633_mbc, ac); + int ret = 0; + u8 usblim = pcf50633_reg_read(mbc->pcf, PCF50633_REG_MBCC7) & + PCF50633_MBCC7_USB_MASK; + + switch (psp) { + case POWER_SUPPLY_PROP_ONLINE: + val->intval = mbc->usb_online && + (usblim == PCF50633_MBCC7_USB_1000mA); break; default: ret = -EINVAL; @@ -336,6 +401,14 @@ static int __devinit pcf50633_mbc_probe(struct platform_device *pdev) mbc->usb.supplied_to = mbc->pcf->pdata->batteries; mbc->usb.num_supplicants = mbc->pcf->pdata->num_batteries; + mbc->ac.name = "ac"; + mbc->ac.type = POWER_SUPPLY_TYPE_MAINS; + mbc->ac.properties = power_props; + mbc->ac.num_properties = ARRAY_SIZE(power_props); + mbc->ac.get_property = ac_get_property; + mbc->ac.supplied_to = mbc->pcf->pdata->batteries; + mbc->ac.num_supplicants = mbc->pcf->pdata->num_batteries; + ret = power_supply_register(&pdev->dev, &mbc->adapter); if (ret) 
{ dev_err(mbc->pcf->dev, "failed to register adapter\n"); @@ -351,8 +424,14 @@ static int __devinit pcf50633_mbc_probe(struct platform_device *pdev) return ret; } - INIT_DELAYED_WORK(&mbc->charging_restart_work, - pcf50633_mbc_charging_restart); + ret = power_supply_register(&pdev->dev, &mbc->ac); + if (ret) { + dev_err(mbc->pcf->dev, "failed to register ac\n"); + power_supply_unregister(&mbc->adapter); + power_supply_unregister(&mbc->usb); + kfree(mbc); + return ret; + } ret = sysfs_create_group(&pdev->dev.kobj, &mbc_attr_group); if (ret) @@ -378,8 +457,7 @@ static int __devexit pcf50633_mbc_remove(struct platform_device *pdev) power_supply_unregister(&mbc->usb); power_supply_unregister(&mbc->adapter); - - cancel_delayed_work_sync(&mbc->charging_restart_work); + power_supply_unregister(&mbc->ac); kfree(mbc); diff --git a/drivers/power/power_supply_sysfs.c b/drivers/power/power_supply_sysfs.c index 08144393d64..c790e0c77d4 100644 --- a/drivers/power/power_supply_sysfs.c +++ b/drivers/power/power_supply_sysfs.c @@ -65,7 +65,10 @@ static ssize_t power_supply_show_property(struct device *dev, ret = psy->get_property(psy, off, &value); if (ret < 0) { - if (ret != -ENODEV) + if (ret == -ENODATA) + dev_dbg(dev, "driver has no data for `%s' property\n", + attr->attr.name); + else if (ret != -ENODEV) dev_err(dev, "driver failed to report `%s' property\n", attr->attr.name); return ret; diff --git a/drivers/power/wm831x_backup.c b/drivers/power/wm831x_backup.c new file mode 100644 index 00000000000..bf4f387a800 --- /dev/null +++ b/drivers/power/wm831x_backup.c @@ -0,0 +1,233 @@ +/* + * Backup battery driver for Wolfson Microelectronics wm831x PMICs + * + * Copyright 2009 Wolfson Microelectronics PLC. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#include <linux/module.h> +#include <linux/err.h> +#include <linux/platform_device.h> +#include <linux/power_supply.h> + +#include <linux/mfd/wm831x/core.h> +#include <linux/mfd/wm831x/auxadc.h> +#include <linux/mfd/wm831x/pmu.h> +#include <linux/mfd/wm831x/pdata.h> + +struct wm831x_backup { + struct wm831x *wm831x; + struct power_supply backup; +}; + +static int wm831x_backup_read_voltage(struct wm831x *wm831x, + enum wm831x_auxadc src, + union power_supply_propval *val) +{ + int ret; + + ret = wm831x_auxadc_read_uv(wm831x, src); + if (ret >= 0) + val->intval = ret; + + return ret; +} + +/********************************************************************* + * Backup supply properties + *********************************************************************/ + +static void wm831x_config_backup(struct wm831x *wm831x) +{ + struct wm831x_pdata *wm831x_pdata = wm831x->dev->platform_data; + struct wm831x_backup_pdata *pdata; + int ret, reg; + + if (!wm831x_pdata || !wm831x_pdata->backup) { + dev_warn(wm831x->dev, + "No backup battery charger configuration\n"); + return; + } + + pdata = wm831x_pdata->backup; + + reg = 0; + + if (pdata->charger_enable) + reg |= WM831X_BKUP_CHG_ENA | WM831X_BKUP_BATT_DET_ENA; + if (pdata->no_constant_voltage) + reg |= WM831X_BKUP_CHG_MODE; + + switch (pdata->vlim) { + case 2500: + break; + case 3100: + reg |= WM831X_BKUP_CHG_VLIM; + break; + default: + dev_err(wm831x->dev, "Invalid backup voltage limit %dmV\n", + pdata->vlim); + } + + switch (pdata->ilim) { + case 100: + break; + case 200: + reg |= 1; + break; + case 300: + reg |= 2; + break; + case 400: + reg |= 3; + break; + default: + dev_err(wm831x->dev, "Invalid backup current limit %duA\n", + pdata->ilim); + } + + ret = wm831x_reg_unlock(wm831x); + if (ret != 0) { + dev_err(wm831x->dev, "Failed to unlock registers: %d\n", ret); + return; + } + + ret = wm831x_set_bits(wm831x, WM831X_BACKUP_CHARGER_CONTROL, + WM831X_BKUP_CHG_ENA_MASK | + WM831X_BKUP_CHG_MODE_MASK | + WM831X_BKUP_BATT_DET_ENA_MASK | + WM831X_BKUP_CHG_VLIM_MASK | + WM831X_BKUP_CHG_ILIM_MASK, + reg); + if (ret != 0) + dev_err(wm831x->dev, + "Failed to set backup charger config: %d\n", ret); + + wm831x_reg_lock(wm831x); +} + +static int wm831x_backup_get_prop(struct power_supply *psy, + enum power_supply_property psp, + union power_supply_propval *val) +{ + struct wm831x_backup *devdata = dev_get_drvdata(psy->dev->parent); + struct wm831x *wm831x = devdata->wm831x; + int ret = 0; + + ret = wm831x_reg_read(wm831x, WM831X_BACKUP_CHARGER_CONTROL); + if (ret < 0) + return ret; + + switch (psp) { + case POWER_SUPPLY_PROP_STATUS: + if (ret & WM831X_BKUP_CHG_STS) + val->intval = POWER_SUPPLY_STATUS_CHARGING; + else + val->intval = POWER_SUPPLY_STATUS_NOT_CHARGING; + break; + + case POWER_SUPPLY_PROP_VOLTAGE_NOW: + ret = wm831x_backup_read_voltage(wm831x, WM831X_AUX_BKUP_BATT, + val); + break; + + case POWER_SUPPLY_PROP_PRESENT: + if (ret & WM831X_BKUP_CHG_STS) + val->intval = 1; + else + val->intval = 0; + break; + + default: + ret = -EINVAL; + break; + } + + return ret; +} + +static enum power_supply_property wm831x_backup_props[] = { + POWER_SUPPLY_PROP_STATUS, + POWER_SUPPLY_PROP_VOLTAGE_NOW, + POWER_SUPPLY_PROP_PRESENT, +}; + +/********************************************************************* + * Initialisation + *********************************************************************/ + +static __devinit int wm831x_backup_probe(struct platform_device *pdev) +{ + struct wm831x *wm831x = dev_get_drvdata(pdev->dev.parent); + struct 
wm831x_backup *devdata; + struct power_supply *backup; + int ret; + + devdata = kzalloc(sizeof(struct wm831x_backup), GFP_KERNEL); + if (devdata == NULL) + return -ENOMEM; + + devdata->wm831x = wm831x; + platform_set_drvdata(pdev, devdata); + + backup = &devdata->backup; + + /* We ignore configuration failures since we can still read + * back the status without enabling the charger (which may + * already be enabled anyway). + */ + wm831x_config_backup(wm831x); + + backup->name = "wm831x-backup"; + backup->type = POWER_SUPPLY_TYPE_BATTERY; + backup->properties = wm831x_backup_props; + backup->num_properties = ARRAY_SIZE(wm831x_backup_props); + backup->get_property = wm831x_backup_get_prop; + ret = power_supply_register(&pdev->dev, backup); + if (ret) + goto err_kmalloc; + + return ret; + +err_kmalloc: + kfree(devdata); + return ret; +} + +static __devexit int wm831x_backup_remove(struct platform_device *pdev) +{ + struct wm831x_backup *devdata = platform_get_drvdata(pdev); + + power_supply_unregister(&devdata->backup); + kfree(devdata); + + return 0; +} + +static struct platform_driver wm831x_backup_driver = { + .probe = wm831x_backup_probe, + .remove = __devexit_p(wm831x_backup_remove), + .driver = { + .name = "wm831x-backup", + }, +}; + +static int __init wm831x_backup_init(void) +{ + return platform_driver_register(&wm831x_backup_driver); +} +module_init(wm831x_backup_init); + +static void __exit wm831x_backup_exit(void) +{ + platform_driver_unregister(&wm831x_backup_driver); +} +module_exit(wm831x_backup_exit); + +MODULE_DESCRIPTION("Backup battery charger driver for WM831x PMICs"); +MODULE_AUTHOR("Mark Brown <broonie@opensource.wolfsonmicro.com>"); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:wm831x-backup"); diff --git a/drivers/power/wm831x_power.c b/drivers/power/wm831x_power.c index 2a4c8b0b829..f85e80b1b40 100644 --- a/drivers/power/wm831x_power.c +++ b/drivers/power/wm831x_power.c @@ -21,7 +21,6 @@ struct wm831x_power { struct wm831x *wm831x; struct power_supply wall; - struct power_supply backup; struct power_supply usb; struct power_supply battery; }; @@ -454,125 +453,6 @@ static irqreturn_t wm831x_bat_irq(int irq, void *data) /********************************************************************* - * Backup supply properties - *********************************************************************/ - -static void wm831x_config_backup(struct wm831x *wm831x) -{ - struct wm831x_pdata *wm831x_pdata = wm831x->dev->platform_data; - struct wm831x_backup_pdata *pdata; - int ret, reg; - - if (!wm831x_pdata || !wm831x_pdata->backup) { - dev_warn(wm831x->dev, - "No backup battery charger configuration\n"); - return; - } - - pdata = wm831x_pdata->backup; - - reg = 0; - - if (pdata->charger_enable) - reg |= WM831X_BKUP_CHG_ENA | WM831X_BKUP_BATT_DET_ENA; - if (pdata->no_constant_voltage) - reg |= WM831X_BKUP_CHG_MODE; - - switch (pdata->vlim) { - case 2500: - break; - case 3100: - reg |= WM831X_BKUP_CHG_VLIM; - break; - default: - dev_err(wm831x->dev, "Invalid backup voltage limit %dmV\n", - pdata->vlim); - } - - switch (pdata->ilim) { - case 100: - break; - case 200: - reg |= 1; - break; - case 300: - reg |= 2; - break; - case 400: - reg |= 3; - break; - default: - dev_err(wm831x->dev, "Invalid backup current limit %duA\n", - pdata->ilim); - } - - ret = wm831x_reg_unlock(wm831x); - if (ret != 0) { - dev_err(wm831x->dev, "Failed to unlock registers: %d\n", ret); - return; - } - - ret = wm831x_set_bits(wm831x, WM831X_BACKUP_CHARGER_CONTROL, - WM831X_BKUP_CHG_ENA_MASK | - 
WM831X_BKUP_CHG_MODE_MASK | - WM831X_BKUP_BATT_DET_ENA_MASK | - WM831X_BKUP_CHG_VLIM_MASK | - WM831X_BKUP_CHG_ILIM_MASK, - reg); - if (ret != 0) - dev_err(wm831x->dev, - "Failed to set backup charger config: %d\n", ret); - - wm831x_reg_lock(wm831x); -} - -static int wm831x_backup_get_prop(struct power_supply *psy, - enum power_supply_property psp, - union power_supply_propval *val) -{ - struct wm831x_power *wm831x_power = dev_get_drvdata(psy->dev->parent); - struct wm831x *wm831x = wm831x_power->wm831x; - int ret = 0; - - ret = wm831x_reg_read(wm831x, WM831X_BACKUP_CHARGER_CONTROL); - if (ret < 0) - return ret; - - switch (psp) { - case POWER_SUPPLY_PROP_STATUS: - if (ret & WM831X_BKUP_CHG_STS) - val->intval = POWER_SUPPLY_STATUS_CHARGING; - else - val->intval = POWER_SUPPLY_STATUS_NOT_CHARGING; - break; - - case POWER_SUPPLY_PROP_VOLTAGE_NOW: - ret = wm831x_power_read_voltage(wm831x, WM831X_AUX_BKUP_BATT, - val); - break; - - case POWER_SUPPLY_PROP_PRESENT: - if (ret & WM831X_BKUP_CHG_STS) - val->intval = 1; - else - val->intval = 0; - break; - - default: - ret = -EINVAL; - break; - } - - return ret; -} - -static enum power_supply_property wm831x_backup_props[] = { - POWER_SUPPLY_PROP_STATUS, - POWER_SUPPLY_PROP_VOLTAGE_NOW, - POWER_SUPPLY_PROP_PRESENT, -}; - -/********************************************************************* * Initialisation *********************************************************************/ @@ -595,10 +475,7 @@ static irqreturn_t wm831x_pwr_src_irq(int irq, void *data) dev_dbg(wm831x->dev, "Power source changed\n"); - /* Just notify for everything - little harm in overnotifying. - * The backup battery is not a power source while the system - * is running so skip that. - */ + /* Just notify for everything - little harm in overnotifying. */ power_supply_changed(&wm831x_power->battery); power_supply_changed(&wm831x_power->usb); power_supply_changed(&wm831x_power->wall); @@ -613,7 +490,6 @@ static __devinit int wm831x_power_probe(struct platform_device *pdev) struct power_supply *usb; struct power_supply *battery; struct power_supply *wall; - struct power_supply *backup; int ret, irq, i; power = kzalloc(sizeof(struct wm831x_power), GFP_KERNEL); @@ -626,13 +502,11 @@ static __devinit int wm831x_power_probe(struct platform_device *pdev) usb = &power->usb; battery = &power->battery; wall = &power->wall; - backup = &power->backup; /* We ignore configuration failures since we can still read back - * the status without enabling either of the chargers. + * the status without enabling the charger. 
*/ wm831x_config_battery(wm831x); - wm831x_config_backup(wm831x); wall->name = "wm831x-wall"; wall->type = POWER_SUPPLY_TYPE_MAINS; @@ -661,15 +535,6 @@ static __devinit int wm831x_power_probe(struct platform_device *pdev) if (ret) goto err_battery; - backup->name = "wm831x-backup"; - backup->type = POWER_SUPPLY_TYPE_BATTERY; - backup->properties = wm831x_backup_props; - backup->num_properties = ARRAY_SIZE(wm831x_backup_props); - backup->get_property = wm831x_backup_get_prop; - ret = power_supply_register(&pdev->dev, backup); - if (ret) - goto err_usb; - irq = platform_get_irq_byname(pdev, "SYSLO"); ret = wm831x_request_irq(wm831x, irq, wm831x_syslo_irq, IRQF_TRIGGER_RISING, "SYSLO", @@ -677,7 +542,7 @@ static __devinit int wm831x_power_probe(struct platform_device *pdev) if (ret != 0) { dev_err(&pdev->dev, "Failed to request SYSLO IRQ %d: %d\n", irq, ret); - goto err_backup; + goto err_usb; } irq = platform_get_irq_byname(pdev, "PWR SRC"); @@ -716,8 +581,6 @@ err_bat_irq: err_syslo: irq = platform_get_irq_byname(pdev, "SYSLO"); wm831x_free_irq(wm831x, irq, power); -err_backup: - power_supply_unregister(backup); err_usb: power_supply_unregister(usb); err_battery: @@ -746,7 +609,6 @@ static __devexit int wm831x_power_remove(struct platform_device *pdev) irq = platform_get_irq_byname(pdev, "SYSLO"); wm831x_free_irq(wm831x, irq, wm831x_power); - power_supply_unregister(&wm831x_power->backup); power_supply_unregister(&wm831x_power->battery); power_supply_unregister(&wm831x_power->wall); power_supply_unregister(&wm831x_power->usb); diff --git a/drivers/power/wm97xx_battery.c b/drivers/power/wm97xx_battery.c index f2bfd296dba..fa39e759a27 100644 --- a/drivers/power/wm97xx_battery.c +++ b/drivers/power/wm97xx_battery.c @@ -157,7 +157,7 @@ static int wm97xx_bat_resume(struct device *dev) return 0; } -static struct dev_pm_ops wm97xx_bat_pm_ops = { +static const struct dev_pm_ops wm97xx_bat_pm_ops = { .suspend = wm97xx_bat_suspend, .resume = wm97xx_bat_resume, }; diff --git a/drivers/rtc/rtc-pxa.c b/drivers/rtc/rtc-pxa.c index 747ca194fad..e6351b743da 100644 --- a/drivers/rtc/rtc-pxa.c +++ b/drivers/rtc/rtc-pxa.c @@ -456,7 +456,7 @@ static int pxa_rtc_resume(struct device *dev) return 0; } -static struct dev_pm_ops pxa_rtc_pm_ops = { +static const struct dev_pm_ops pxa_rtc_pm_ops = { .suspend = pxa_rtc_suspend, .resume = pxa_rtc_resume, }; diff --git a/drivers/rtc/rtc-sa1100.c b/drivers/rtc/rtc-sa1100.c index 29f98a70586..e4a44b64170 100644 --- a/drivers/rtc/rtc-sa1100.c +++ b/drivers/rtc/rtc-sa1100.c @@ -407,7 +407,7 @@ static int sa1100_rtc_resume(struct device *dev) return 0; } -static struct dev_pm_ops sa1100_rtc_pm_ops = { +static const struct dev_pm_ops sa1100_rtc_pm_ops = { .suspend = sa1100_rtc_suspend, .resume = sa1100_rtc_resume, }; diff --git a/drivers/rtc/rtc-sh.c b/drivers/rtc/rtc-sh.c index e6ed5404bca..e95cc6f8d61 100644 --- a/drivers/rtc/rtc-sh.c +++ b/drivers/rtc/rtc-sh.c @@ -826,7 +826,7 @@ static int sh_rtc_resume(struct device *dev) return 0; } -static struct dev_pm_ops sh_rtc_dev_pm_ops = { +static const struct dev_pm_ops sh_rtc_dev_pm_ops = { .suspend = sh_rtc_suspend, .resume = sh_rtc_resume, }; diff --git a/drivers/rtc/rtc-wm831x.c b/drivers/rtc/rtc-wm831x.c index 79795cdf6ed..000c7e481e5 100644 --- a/drivers/rtc/rtc-wm831x.c +++ b/drivers/rtc/rtc-wm831x.c @@ -485,7 +485,7 @@ static int __devexit wm831x_rtc_remove(struct platform_device *pdev) return 0; } -static struct dev_pm_ops wm831x_rtc_pm_ops = { +static const struct dev_pm_ops wm831x_rtc_pm_ops = { .suspend = 
wm831x_rtc_suspend, .resume = wm831x_rtc_resume, diff --git a/drivers/s390/block/dasd_proc.c b/drivers/s390/block/dasd_proc.c index 5f23eca8280..6315fbd8e68 100644 --- a/drivers/s390/block/dasd_proc.c +++ b/drivers/s390/block/dasd_proc.c @@ -14,6 +14,7 @@ #define KMSG_COMPONENT "dasd" #include <linux/ctype.h> +#include <linux/string.h> #include <linux/seq_file.h> #include <linux/vmalloc.h> #include <linux/proc_fs.h> @@ -272,10 +273,10 @@ dasd_statistics_write(struct file *file, const char __user *user_buf, DBF_EVENT(DBF_DEBUG, "/proc/dasd/statictics: '%s'\n", buffer); /* check for valid verbs */ - for (str = buffer; isspace(*str); str++); + str = skip_spaces(buffer); if (strncmp(str, "set", 3) == 0 && isspace(str[3])) { /* 'set xxx' was given */ - for (str = str + 4; isspace(*str); str++); + str = skip_spaces(str + 4); if (strcmp(str, "on") == 0) { /* switch on statistics profiling */ dasd_profile_level = DASD_PROFILE_ON; diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c index f76f4bd82b9..9b43ae94beb 100644 --- a/drivers/s390/block/dcssblk.c +++ b/drivers/s390/block/dcssblk.c @@ -1005,7 +1005,7 @@ static int dcssblk_thaw(struct device *dev) return 0; } -static struct dev_pm_ops dcssblk_pm_ops = { +static const struct dev_pm_ops dcssblk_pm_ops = { .freeze = dcssblk_freeze, .thaw = dcssblk_thaw, .restore = dcssblk_restore, diff --git a/drivers/s390/block/xpram.c b/drivers/s390/block/xpram.c index 116d1b3eeb1..118de392af6 100644 --- a/drivers/s390/block/xpram.c +++ b/drivers/s390/block/xpram.c @@ -407,7 +407,7 @@ static int xpram_restore(struct device *dev) return 0; } -static struct dev_pm_ops xpram_pm_ops = { +static const struct dev_pm_ops xpram_pm_ops = { .restore = xpram_restore, }; diff --git a/drivers/s390/char/monreader.c b/drivers/s390/char/monreader.c index 60473f86e1f..33e96484d54 100644 --- a/drivers/s390/char/monreader.c +++ b/drivers/s390/char/monreader.c @@ -529,7 +529,7 @@ static int monreader_restore(struct device *dev) return monreader_thaw(dev); } -static struct dev_pm_ops monreader_pm_ops = { +static const struct dev_pm_ops monreader_pm_ops = { .freeze = monreader_freeze, .thaw = monreader_thaw, .restore = monreader_restore, diff --git a/drivers/s390/char/monwriter.c b/drivers/s390/char/monwriter.c index 6532ed8b4af..668a0579b26 100644 --- a/drivers/s390/char/monwriter.c +++ b/drivers/s390/char/monwriter.c @@ -323,7 +323,7 @@ static int monwriter_thaw(struct device *dev) return monwriter_restore(dev); } -static struct dev_pm_ops monwriter_pm_ops = { +static const struct dev_pm_ops monwriter_pm_ops = { .freeze = monwriter_freeze, .thaw = monwriter_thaw, .restore = monwriter_restore, diff --git a/drivers/s390/char/sclp.c b/drivers/s390/char/sclp.c index a983f508678..ec88c59842e 100644 --- a/drivers/s390/char/sclp.c +++ b/drivers/s390/char/sclp.c @@ -1019,7 +1019,7 @@ static int sclp_restore(struct device *dev) return sclp_undo_suspend(SCLP_PM_EVENT_RESTORE); } -static struct dev_pm_ops sclp_pm_ops = { +static const struct dev_pm_ops sclp_pm_ops = { .freeze = sclp_freeze, .thaw = sclp_thaw, .restore = sclp_restore, diff --git a/drivers/s390/char/sclp_cmd.c b/drivers/s390/char/sclp_cmd.c index 28b5afc129c..b3beab610da 100644 --- a/drivers/s390/char/sclp_cmd.c +++ b/drivers/s390/char/sclp_cmd.c @@ -547,7 +547,7 @@ struct read_storage_sccb { u32 entries[0]; } __packed; -static struct dev_pm_ops sclp_mem_pm_ops = { +static const struct dev_pm_ops sclp_mem_pm_ops = { .freeze = sclp_mem_freeze, }; diff --git a/drivers/s390/char/vmlogrdr.c 
b/drivers/s390/char/vmlogrdr.c index 899aa795bf3..7dfa5412d5a 100644 --- a/drivers/s390/char/vmlogrdr.c +++ b/drivers/s390/char/vmlogrdr.c @@ -675,7 +675,7 @@ static int vmlogrdr_pm_prepare(struct device *dev) } -static struct dev_pm_ops vmlogrdr_pm_ops = { +static const struct dev_pm_ops vmlogrdr_pm_ops = { .prepare = vmlogrdr_pm_prepare, }; diff --git a/drivers/s390/cio/ccwgroup.c b/drivers/s390/cio/ccwgroup.c index a5a62f1f774..5f97ea2ee6b 100644 --- a/drivers/s390/cio/ccwgroup.c +++ b/drivers/s390/cio/ccwgroup.c @@ -560,7 +560,7 @@ static int ccwgroup_pm_restore(struct device *dev) return gdrv->restore ? gdrv->restore(gdev) : 0; } -static struct dev_pm_ops ccwgroup_pm_ops = { +static const struct dev_pm_ops ccwgroup_pm_ops = { .prepare = ccwgroup_pm_prepare, .complete = ccwgroup_pm_complete, .freeze = ccwgroup_pm_freeze, diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c index 92ff88ac110..7679aee6fa1 100644 --- a/drivers/s390/cio/css.c +++ b/drivers/s390/cio/css.c @@ -1148,7 +1148,7 @@ static int css_pm_restore(struct device *dev) return drv->restore ? drv->restore(sch) : 0; } -static struct dev_pm_ops css_pm_ops = { +static const struct dev_pm_ops css_pm_ops = { .prepare = css_pm_prepare, .complete = css_pm_complete, .freeze = css_pm_freeze, diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c index 9fecfb4223a..73901c9e260 100644 --- a/drivers/s390/cio/device.c +++ b/drivers/s390/cio/device.c @@ -1904,7 +1904,7 @@ out_unlock: return ret; } -static struct dev_pm_ops ccw_pm_ops = { +static const struct dev_pm_ops ccw_pm_ops = { .prepare = ccw_device_pm_prepare, .complete = ccw_device_pm_complete, .freeze = ccw_device_pm_freeze, diff --git a/drivers/s390/net/netiucv.c b/drivers/s390/net/netiucv.c index 98c04cac43c..65ebee0a326 100644 --- a/drivers/s390/net/netiucv.c +++ b/drivers/s390/net/netiucv.c @@ -159,7 +159,7 @@ static void netiucv_pm_complete(struct device *); static int netiucv_pm_freeze(struct device *); static int netiucv_pm_restore_thaw(struct device *); -static struct dev_pm_ops netiucv_pm_ops = { +static const struct dev_pm_ops netiucv_pm_ops = { .prepare = netiucv_pm_prepare, .complete = netiucv_pm_complete, .freeze = netiucv_pm_freeze, diff --git a/drivers/s390/net/smsgiucv.c b/drivers/s390/net/smsgiucv.c index 3012355f830..67f2485d237 100644 --- a/drivers/s390/net/smsgiucv.c +++ b/drivers/s390/net/smsgiucv.c @@ -168,7 +168,7 @@ static int smsg_pm_restore_thaw(struct device *dev) return 0; } -static struct dev_pm_ops smsg_pm_ops = { +static const struct dev_pm_ops smsg_pm_ops = { .freeze = smsg_pm_freeze, .thaw = smsg_pm_restore_thaw, .restore = smsg_pm_restore_thaw, diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c index 206c2fa8c1b..8643f508936 100644 --- a/drivers/scsi/ipr.c +++ b/drivers/scsi/ipr.c @@ -1333,7 +1333,7 @@ static void ipr_log_enhanced_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg, error = &hostrcb->hcam.u.error.u.type_17_error; error->failure_reason[sizeof(error->failure_reason) - 1] = '\0'; - strstrip(error->failure_reason); + strim(error->failure_reason); ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason, be32_to_cpu(hostrcb->hcam.u.error.prc)); @@ -1359,7 +1359,7 @@ static void ipr_log_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg, error = &hostrcb->hcam.u.error.u.type_07_error; error->failure_reason[sizeof(error->failure_reason) - 1] = '\0'; - strstrip(error->failure_reason); + strim(error->failure_reason); ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason, be32_to_cpu(hostrcb->hcam.u.error.prc)); 
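Several of the cleanups in the hunks above (dasd_proc.c, ipr.c, and the earlier pnp/interface.c and thinkpad_acpi changes) replace open-coded isspace() loops and strstrip() calls with the skip_spaces() and strim() helpers from lib/string.c. The following is only a minimal userspace sketch of those helpers' semantics for readers following the conversion pattern; the *_demo names are illustrative, not kernel API, and the in-kernel versions live in include/linux/string.h and lib/string.c.

/* Sketch of skip_spaces()/strim() semantics, standard C only. */
#include <ctype.h>
#include <stdio.h>
#include <string.h>

/* Return a pointer to the first non-whitespace character. */
static char *skip_spaces_demo(const char *str)
{
	while (isspace((unsigned char)*str))
		++str;
	return (char *)str;
}

/* Trim trailing whitespace in place, then skip leading whitespace. */
static char *strim_demo(char *s)
{
	size_t size = strlen(s);

	while (size && isspace((unsigned char)s[size - 1]))
		size--;
	s[size] = '\0';

	return skip_spaces_demo(s);
}

int main(void)
{
	char buf[] = "  set on  \n";

	printf("'%s'\n", strim_demo(buf));	/* prints 'set on' */
	return 0;
}

Since strim() behaves like the old strstrip() (trailing trim in place, return value pointing past leading whitespace), the ipr.c hunks read as a straight rename, while the dasd_proc.c and pnp changes simply collapse their hand-rolled isspace() loops into skip_spaces() calls.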
diff --git a/drivers/scsi/sym53c8xx_2/sym_glue.c b/drivers/scsi/sym53c8xx_2/sym_glue.c index 2b38f6ad6e1..8b955b534a3 100644 --- a/drivers/scsi/sym53c8xx_2/sym_glue.c +++ b/drivers/scsi/sym53c8xx_2/sym_glue.c @@ -984,7 +984,7 @@ static void sym_exec_user_command (struct sym_hcb *np, struct sym_usrcmd *uc) } } -static int skip_spaces(char *ptr, int len) +static int sym_skip_spaces(char *ptr, int len) { int cnt, c; @@ -1012,7 +1012,7 @@ static int is_keyword(char *ptr, int len, char *verb) } #define SKIP_SPACES(ptr, len) \ - if ((arg_len = skip_spaces(ptr, len)) < 1) \ + if ((arg_len = sym_skip_spaces(ptr, len)) < 1) \ return -EINVAL; \ ptr += arg_len; len -= arg_len; diff --git a/drivers/serial/ioc3_serial.c b/drivers/serial/ioc3_serial.c index d8983dd5c4b..85dc0410ac1 100644 --- a/drivers/serial/ioc3_serial.c +++ b/drivers/serial/ioc3_serial.c @@ -2162,7 +2162,7 @@ static struct ioc3_submodule ioc3uart_ops = { /** * ioc3_detect - module init called, */ -static int __devinit ioc3uart_init(void) +static int __init ioc3uart_init(void) { int ret; @@ -2179,7 +2179,7 @@ static int __devinit ioc3uart_init(void) return ret; } -static void __devexit ioc3uart_exit(void) +static void __exit ioc3uart_exit(void) { ioc3_unregister_submodule(&ioc3uart_ops); uart_unregister_driver(&ioc3_uart); diff --git a/drivers/serial/ioc4_serial.c b/drivers/serial/ioc4_serial.c index 2e02c3026d2..836d9ab4f72 100644 --- a/drivers/serial/ioc4_serial.c +++ b/drivers/serial/ioc4_serial.c @@ -2904,7 +2904,7 @@ static struct ioc4_submodule ioc4_serial_submodule = { /** * ioc4_serial_init - module init */ -int ioc4_serial_init(void) +static int __init ioc4_serial_init(void) { int ret; @@ -2913,20 +2913,30 @@ int ioc4_serial_init(void) printk(KERN_WARNING "%s: Couldn't register rs232 IOC4 serial driver\n", __func__); - return ret; + goto out; } if ((ret = uart_register_driver(&ioc4_uart_rs422)) < 0) { printk(KERN_WARNING "%s: Couldn't register rs422 IOC4 serial driver\n", __func__); - return ret; + goto out_uart_rs232; } /* register with IOC4 main module */ - return ioc4_register_submodule(&ioc4_serial_submodule); + ret = ioc4_register_submodule(&ioc4_serial_submodule); + if (ret) + goto out_uart_rs422; + return 0; + +out_uart_rs422: + uart_unregister_driver(&ioc4_uart_rs422); +out_uart_rs232: + uart_unregister_driver(&ioc4_uart_rs232); +out: + return ret; } -static void __devexit ioc4_serial_exit(void) +static void __exit ioc4_serial_exit(void) { ioc4_unregister_submodule(&ioc4_serial_submodule); uart_unregister_driver(&ioc4_uart_rs232); diff --git a/drivers/serial/pxa.c b/drivers/serial/pxa.c index 4a821046baa..56ee082157a 100644 --- a/drivers/serial/pxa.c +++ b/drivers/serial/pxa.c @@ -756,7 +756,7 @@ static int serial_pxa_resume(struct device *dev) return 0; } -static struct dev_pm_ops serial_pxa_pm_ops = { +static const struct dev_pm_ops serial_pxa_pm_ops = { .suspend = serial_pxa_suspend, .resume = serial_pxa_resume, }; diff --git a/drivers/serial/sh-sci.c b/drivers/serial/sh-sci.c index ff38dbdb5c6..7e3f4ff58cf 100644 --- a/drivers/serial/sh-sci.c +++ b/drivers/serial/sh-sci.c @@ -1312,7 +1312,7 @@ static int sci_resume(struct device *dev) return 0; } -static struct dev_pm_ops sci_dev_pm_ops = { +static const struct dev_pm_ops sci_dev_pm_ops = { .suspend = sci_suspend, .resume = sci_resume, }; diff --git a/drivers/sn/ioc3.c b/drivers/sn/ioc3.c index 816d4c592a3..66802a4390c 100644 --- a/drivers/sn/ioc3.c +++ b/drivers/sn/ioc3.c @@ -574,11 +574,11 @@ void ioc3_unregister_submodule(struct ioc3_submodule *is) * Device 
management * *********************/ -static char * +static char * __devinitdata ioc3_class_names[]={"unknown", "IP27 BaseIO", "IP30 system", "MENET 1/2/3", "MENET 4", "CADduo", "Altix Serial"}; -static int ioc3_class(struct ioc3_driver_data *idd) +static int __devinit ioc3_class(struct ioc3_driver_data *idd) { int res = IOC3_CLASS_NONE; /* NIC-based logic */ @@ -601,7 +601,8 @@ static int ioc3_class(struct ioc3_driver_data *idd) return res; } /* Adds a new instance of an IOC3 card */ -static int ioc3_probe(struct pci_dev *pdev, const struct pci_device_id *pci_id) +static int __devinit +ioc3_probe(struct pci_dev *pdev, const struct pci_device_id *pci_id) { struct ioc3_driver_data *idd; uint32_t pcmd; @@ -753,7 +754,7 @@ out: } /* Removes a particular instance of an IOC3 card. */ -static void ioc3_remove(struct pci_dev *pdev) +static void __devexit ioc3_remove(struct pci_dev *pdev) { int id; struct ioc3_driver_data *idd; @@ -805,7 +806,7 @@ static struct pci_driver ioc3_driver = { .name = "IOC3", .id_table = ioc3_id_table, .probe = ioc3_probe, - .remove = ioc3_remove, + .remove = __devexit_p(ioc3_remove), }; MODULE_DEVICE_TABLE(pci, ioc3_id_table); @@ -815,15 +816,15 @@ MODULE_DEVICE_TABLE(pci, ioc3_id_table); *********************/ /* Module load */ -static int __devinit ioc3_init(void) +static int __init ioc3_init(void) { if (ia64_platform_is("sn2")) return pci_register_driver(&ioc3_driver); - return 0; + return -ENODEV; } /* Module unload */ -static void __devexit ioc3_exit(void) +static void __exit ioc3_exit(void) { pci_unregister_driver(&ioc3_driver); } diff --git a/drivers/spi/pxa2xx_spi.c b/drivers/spi/pxa2xx_spi.c index c8c2b693ffa..c2f707e5ce7 100644 --- a/drivers/spi/pxa2xx_spi.c +++ b/drivers/spi/pxa2xx_spi.c @@ -1709,7 +1709,7 @@ static int pxa2xx_spi_resume(struct device *dev) return 0; } -static struct dev_pm_ops pxa2xx_spi_pm_ops = { +static const struct dev_pm_ops pxa2xx_spi_pm_ops = { .suspend = pxa2xx_spi_suspend, .resume = pxa2xx_spi_resume, }; diff --git a/drivers/spi/spi_s3c24xx.c b/drivers/spi/spi_s3c24xx.c index 33d94f76b9e..276591569c8 100644 --- a/drivers/spi/spi_s3c24xx.c +++ b/drivers/spi/spi_s3c24xx.c @@ -489,7 +489,7 @@ static int s3c24xx_spi_resume(struct device *dev) return 0; } -static struct dev_pm_ops s3c24xx_spi_pmops = { +static const struct dev_pm_ops s3c24xx_spi_pmops = { .suspend = s3c24xx_spi_suspend, .resume = s3c24xx_spi_resume, }; diff --git a/drivers/uio/uio_pdrv_genirq.c b/drivers/uio/uio_pdrv_genirq.c index aa53db9f2e8..1ef3b8fc50b 100644 --- a/drivers/uio/uio_pdrv_genirq.c +++ b/drivers/uio/uio_pdrv_genirq.c @@ -210,7 +210,7 @@ static int uio_pdrv_genirq_runtime_nop(struct device *dev) return 0; } -static struct dev_pm_ops uio_pdrv_genirq_dev_pm_ops = { +static const struct dev_pm_ops uio_pdrv_genirq_dev_pm_ops = { .runtime_suspend = uio_pdrv_genirq_runtime_nop, .runtime_resume = uio_pdrv_genirq_runtime_nop, }; diff --git a/drivers/usb/core/hcd-pci.c b/drivers/usb/core/hcd-pci.c index 91f2885b6ee..2dcf906df56 100644 --- a/drivers/usb/core/hcd-pci.c +++ b/drivers/usb/core/hcd-pci.c @@ -363,7 +363,7 @@ static int hcd_pci_restore(struct device *dev) return resume_common(dev, true); } -struct dev_pm_ops usb_hcd_pci_pm_ops = { +const struct dev_pm_ops usb_hcd_pci_pm_ops = { .suspend = hcd_pci_suspend, .suspend_noirq = hcd_pci_suspend_noirq, .resume_noirq = hcd_pci_resume_noirq, diff --git a/drivers/usb/core/hcd.h b/drivers/usb/core/hcd.h index d8b43aee581..bbe2b924aae 100644 --- a/drivers/usb/core/hcd.h +++ b/drivers/usb/core/hcd.h @@ -330,7 +330,7 
@@ extern void usb_hcd_pci_remove(struct pci_dev *dev); extern void usb_hcd_pci_shutdown(struct pci_dev *dev); #ifdef CONFIG_PM_SLEEP -extern struct dev_pm_ops usb_hcd_pci_pm_ops; +extern const struct dev_pm_ops usb_hcd_pci_pm_ops; #endif #endif /* CONFIG_PCI */ diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c index 4e2c6df8d3c..2fb42043b30 100644 --- a/drivers/usb/core/usb.c +++ b/drivers/usb/core/usb.c @@ -167,18 +167,23 @@ struct usb_host_interface *usb_altnum_to_altsetting( } EXPORT_SYMBOL_GPL(usb_altnum_to_altsetting); +struct find_interface_arg { + int minor; + struct device_driver *drv; +}; + static int __find_interface(struct device *dev, void *data) { - int *minor = data; + struct find_interface_arg *arg = data; struct usb_interface *intf; if (!is_usb_interface(dev)) return 0; + if (dev->driver != arg->drv) + return 0; intf = to_usb_interface(dev); - if (intf->minor != -1 && intf->minor == *minor) - return 1; - return 0; + return intf->minor == arg->minor; } /** @@ -187,14 +192,18 @@ static int __find_interface(struct device *dev, void *data) * @minor: the minor number of the desired device * * This walks the bus device list and returns a pointer to the interface - * with the matching minor. Note, this only works for devices that share the - * USB major number. + * with the matching minor and driver. Note, this only works for devices + * that share the USB major number. */ struct usb_interface *usb_find_interface(struct usb_driver *drv, int minor) { + struct find_interface_arg argb; struct device *dev; - dev = bus_find_device(&usb_bus_type, NULL, &minor, __find_interface); + argb.minor = minor; + argb.drv = &drv->drvwrap.driver; + + dev = bus_find_device(&usb_bus_type, NULL, &argb, __find_interface); /* Drop reference count from bus_find_device */ put_device(dev); @@ -320,7 +329,7 @@ static int usb_dev_restore(struct device *dev) return usb_resume(dev, PMSG_RESTORE); } -static struct dev_pm_ops usb_device_pm_ops = { +static const struct dev_pm_ops usb_device_pm_ops = { .prepare = usb_dev_prepare, .complete = usb_dev_complete, .suspend = usb_dev_suspend, diff --git a/drivers/usb/host/ehci-au1xxx.c b/drivers/usb/host/ehci-au1xxx.c index ed77be76d6b..dbfb482a94e 100644 --- a/drivers/usb/host/ehci-au1xxx.c +++ b/drivers/usb/host/ehci-au1xxx.c @@ -297,7 +297,7 @@ static int ehci_hcd_au1xxx_drv_resume(struct device *dev) return 0; } -static struct dev_pm_ops au1xxx_ehci_pmops = { +static const struct dev_pm_ops au1xxx_ehci_pmops = { .suspend = ehci_hcd_au1xxx_drv_suspend, .resume = ehci_hcd_au1xxx_drv_resume, }; diff --git a/drivers/usb/host/ohci-au1xxx.c b/drivers/usb/host/ohci-au1xxx.c index e4380082ebb..17a6043c1fa 100644 --- a/drivers/usb/host/ohci-au1xxx.c +++ b/drivers/usb/host/ohci-au1xxx.c @@ -294,7 +294,7 @@ static int ohci_hcd_au1xxx_drv_resume(struct device *dev) return 0; } -static struct dev_pm_ops au1xxx_ohci_pmops = { +static const struct dev_pm_ops au1xxx_ohci_pmops = { .suspend = ohci_hcd_au1xxx_drv_suspend, .resume = ohci_hcd_au1xxx_drv_resume, }; diff --git a/drivers/usb/host/ohci-pxa27x.c b/drivers/usb/host/ohci-pxa27x.c index f1c06202fdf..a18debdd79b 100644 --- a/drivers/usb/host/ohci-pxa27x.c +++ b/drivers/usb/host/ohci-pxa27x.c @@ -518,7 +518,7 @@ static int ohci_hcd_pxa27x_drv_resume(struct device *dev) return 0; } -static struct dev_pm_ops ohci_hcd_pxa27x_pm_ops = { +static const struct dev_pm_ops ohci_hcd_pxa27x_pm_ops = { .suspend = ohci_hcd_pxa27x_drv_suspend, .resume = ohci_hcd_pxa27x_drv_resume, }; diff --git a/drivers/usb/host/r8a66597-hcd.c 
b/drivers/usb/host/r8a66597-hcd.c index 41dbc70ae75..b7a661c02bc 100644 --- a/drivers/usb/host/r8a66597-hcd.c +++ b/drivers/usb/host/r8a66597-hcd.c @@ -2353,7 +2353,7 @@ static int r8a66597_resume(struct device *dev) return 0; } -static struct dev_pm_ops r8a66597_dev_pm_ops = { +static const struct dev_pm_ops r8a66597_dev_pm_ops = { .suspend = r8a66597_suspend, .resume = r8a66597_resume, .poweroff = r8a66597_suspend, diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c index 49f2346afad..bfe08f4975a 100644 --- a/drivers/usb/musb/musb_core.c +++ b/drivers/usb/musb/musb_core.c @@ -2214,7 +2214,7 @@ static int musb_resume_noirq(struct device *dev) return 0; } -static struct dev_pm_ops musb_dev_pm_ops = { +static const struct dev_pm_ops musb_dev_pm_ops = { .suspend = musb_suspend, .resume_noirq = musb_resume_noirq, }; diff --git a/drivers/video/backlight/da903x_bl.c b/drivers/video/backlight/da903x_bl.c index 7fcb0eb54c6..f2d76dae1eb 100644 --- a/drivers/video/backlight/da903x_bl.c +++ b/drivers/video/backlight/da903x_bl.c @@ -177,7 +177,7 @@ static int da903x_backlight_resume(struct device *dev) return 0; } -static struct dev_pm_ops da903x_backlight_pm_ops = { +static const struct dev_pm_ops da903x_backlight_pm_ops = { .suspend = da903x_backlight_suspend, .resume = da903x_backlight_resume, }; diff --git a/drivers/video/backlight/lcd.c b/drivers/video/backlight/lcd.c index a482dd7b031..9b3be74cee5 100644 --- a/drivers/video/backlight/lcd.c +++ b/drivers/video/backlight/lcd.c @@ -101,7 +101,7 @@ static ssize_t lcd_store_power(struct device *dev, int power = simple_strtoul(buf, &endp, 0); size_t size = endp - buf; - if (*endp && isspace(*endp)) + if (isspace(*endp)) size++; if (size != count) return -EINVAL; @@ -140,7 +140,7 @@ static ssize_t lcd_store_contrast(struct device *dev, int contrast = simple_strtoul(buf, &endp, 0); size_t size = endp - buf; - if (*endp && isspace(*endp)) + if (isspace(*endp)) size++; if (size != count) return -EINVAL; diff --git a/drivers/video/display/display-sysfs.c b/drivers/video/display/display-sysfs.c index 4830b1bf51e..80abbf323b9 100644 --- a/drivers/video/display/display-sysfs.c +++ b/drivers/video/display/display-sysfs.c @@ -67,7 +67,7 @@ static ssize_t display_store_contrast(struct device *dev, contrast = simple_strtoul(buf, &endp, 0); size = endp - buf; - if (*endp && isspace(*endp)) + if (isspace(*endp)) size++; if (size != count) diff --git a/drivers/video/geode/display_gx.c b/drivers/video/geode/display_gx.c index e759895bf3d..f0af911a096 100644 --- a/drivers/video/geode/display_gx.c +++ b/drivers/video/geode/display_gx.c @@ -17,7 +17,7 @@ #include <asm/io.h> #include <asm/div64.h> #include <asm/delay.h> -#include <asm/geode.h> +#include <linux/cs5535.h> #include "gxfb.h" @@ -25,7 +25,7 @@ unsigned int gx_frame_buffer_size(void) { unsigned int val; - if (!geode_has_vsa2()) { + if (!cs5535_has_vsa2()) { uint32_t hi, lo; /* The number of pages is (PMAX - PMIN)+1 */ diff --git a/drivers/video/geode/gxfb.h b/drivers/video/geode/gxfb.h index 16a96f8fd8c..d19e9378b0c 100644 --- a/drivers/video/geode/gxfb.h +++ b/drivers/video/geode/gxfb.h @@ -340,7 +340,7 @@ static inline void write_fp(struct gxfb_par *par, int reg, uint32_t val) } -/* MSRs are defined in asm/geode.h; their bitfields are here */ +/* MSRs are defined in linux/cs5535.h; their bitfields are here */ #define MSR_GLCP_SYS_RSTPLL_DOTPOSTDIV3 (1 << 3) #define MSR_GLCP_SYS_RSTPLL_DOTPREMULT2 (1 << 2) diff --git a/drivers/video/geode/gxfb_core.c b/drivers/video/geode/gxfb_core.c 
index 2552cac39e1..b3e639d1e12 100644 --- a/drivers/video/geode/gxfb_core.c +++ b/drivers/video/geode/gxfb_core.c @@ -32,7 +32,7 @@ #include <linux/suspend.h> #include <linux/init.h> #include <linux/pci.h> -#include <asm/geode.h> +#include <linux/cs5535.h> #include "gxfb.h" diff --git a/drivers/video/geode/lxfb.h b/drivers/video/geode/lxfb.h index 6a51448fd3f..fc68a8b0a14 100644 --- a/drivers/video/geode/lxfb.h +++ b/drivers/video/geode/lxfb.h @@ -409,7 +409,7 @@ static inline void write_fp(struct lxfb_par *par, int reg, uint32_t val) } -/* MSRs are defined in asm/geode.h; their bitfields are here */ +/* MSRs are defined in linux/cs5535.h; their bitfields are here */ #define MSR_GLCP_DOTPLL_LOCK (1 << 25) /* r/o */ #define MSR_GLCP_DOTPLL_HALFPIX (1 << 24) diff --git a/drivers/video/geode/lxfb_ops.c b/drivers/video/geode/lxfb_ops.c index b1cd49c9935..0e5d8c7c3eb 100644 --- a/drivers/video/geode/lxfb_ops.c +++ b/drivers/video/geode/lxfb_ops.c @@ -13,7 +13,7 @@ #include <linux/fb.h> #include <linux/uaccess.h> #include <linux/delay.h> -#include <asm/geode.h> +#include <linux/cs5535.h> #include "lxfb.h" @@ -307,7 +307,7 @@ unsigned int lx_framebuffer_size(void) { unsigned int val; - if (!geode_has_vsa2()) { + if (!cs5535_has_vsa2()) { uint32_t hi, lo; /* The number of pages is (PMAX - PMIN)+1 */ diff --git a/drivers/video/geode/suspend_gx.c b/drivers/video/geode/suspend_gx.c index 9aff32ef8bb..1bb043d70c6 100644 --- a/drivers/video/geode/suspend_gx.c +++ b/drivers/video/geode/suspend_gx.c @@ -10,7 +10,7 @@ #include <linux/fb.h> #include <asm/io.h> #include <asm/msr.h> -#include <asm/geode.h> +#include <linux/cs5535.h> #include <asm/delay.h> #include "gxfb.h" diff --git a/drivers/video/geode/video_gx.c b/drivers/video/geode/video_gx.c index b8d52a8360d..6082f653c68 100644 --- a/drivers/video/geode/video_gx.c +++ b/drivers/video/geode/video_gx.c @@ -16,7 +16,7 @@ #include <asm/io.h> #include <asm/delay.h> #include <asm/msr.h> -#include <asm/geode.h> +#include <linux/cs5535.h> #include "gxfb.h" diff --git a/drivers/video/hitfb.c b/drivers/video/hitfb.c index e7116a6d82d..73c83a8de2d 100644 --- a/drivers/video/hitfb.c +++ b/drivers/video/hitfb.c @@ -456,7 +456,7 @@ static int hitfb_resume(struct device *dev) return 0; } -static struct dev_pm_ops hitfb_dev_pm_ops = { +static const struct dev_pm_ops hitfb_dev_pm_ops = { .suspend = hitfb_suspend, .resume = hitfb_resume, }; diff --git a/drivers/video/output.c b/drivers/video/output.c index 5e6439ae739..5137aa016b8 100644 --- a/drivers/video/output.c +++ b/drivers/video/output.c @@ -50,7 +50,7 @@ static ssize_t video_output_store_state(struct device *dev, int request_state = simple_strtoul(buf,&endp,0); size_t size = endp - buf; - if (*endp && isspace(*endp)) + if (isspace(*endp)) size++; if (size != count) return -EINVAL; diff --git a/drivers/video/pxafb.c b/drivers/video/pxafb.c index f58a3aae6ea..b7e58059b59 100644 --- a/drivers/video/pxafb.c +++ b/drivers/video/pxafb.c @@ -1667,7 +1667,7 @@ static int pxafb_resume(struct device *dev) return 0; } -static struct dev_pm_ops pxafb_pm_ops = { +static const struct dev_pm_ops pxafb_pm_ops = { .suspend = pxafb_suspend, .resume = pxafb_resume, }; diff --git a/drivers/video/sh_mobile_lcdcfb.c b/drivers/video/sh_mobile_lcdcfb.c index b4b5de930cf..8a65fb6648a 100644 --- a/drivers/video/sh_mobile_lcdcfb.c +++ b/drivers/video/sh_mobile_lcdcfb.c @@ -890,7 +890,7 @@ static int sh_mobile_lcdc_runtime_resume(struct device *dev) return 0; } -static struct dev_pm_ops sh_mobile_lcdc_dev_pm_ops = { +static const struct 
dev_pm_ops sh_mobile_lcdc_dev_pm_ops = { .suspend = sh_mobile_lcdc_suspend, .resume = sh_mobile_lcdc_resume, .runtime_suspend = sh_mobile_lcdc_runtime_suspend, diff --git a/drivers/watchdog/adx_wdt.c b/drivers/watchdog/adx_wdt.c index 77afb0acc50..9c6594473d3 100644 --- a/drivers/watchdog/adx_wdt.c +++ b/drivers/watchdog/adx_wdt.c @@ -314,7 +314,7 @@ static int adx_wdt_resume(struct device *dev) return 0; } -static struct dev_pm_ops adx_wdt_pm_ops = { +static const struct dev_pm_ops adx_wdt_pm_ops = { .suspend = adx_wdt_suspend, .resume = adx_wdt_resume, }; diff --git a/fs/Kconfig b/fs/Kconfig index 64d44efad7a..f8fccaaad62 100644 --- a/fs/Kconfig +++ b/fs/Kconfig @@ -6,6 +6,10 @@ menu "File systems" if BLOCK +config FS_JOURNAL_INFO + bool + default n + source "fs/ext2/Kconfig" source "fs/ext3/Kconfig" source "fs/ext4/Kconfig" diff --git a/fs/binfmt_elf_fdpic.c b/fs/binfmt_elf_fdpic.c index 38502c67987..79d2b1aa389 100644 --- a/fs/binfmt_elf_fdpic.c +++ b/fs/binfmt_elf_fdpic.c @@ -380,7 +380,8 @@ static int load_elf_fdpic_binary(struct linux_binprm *bprm, down_write(&current->mm->mmap_sem); current->mm->start_brk = do_mmap(NULL, 0, stack_size, PROT_READ | PROT_WRITE | PROT_EXEC, - MAP_PRIVATE | MAP_ANONYMOUS | MAP_GROWSDOWN, + MAP_PRIVATE | MAP_ANONYMOUS | + MAP_UNINITIALIZED | MAP_GROWSDOWN, 0); if (IS_ERR_VALUE(current->mm->start_brk)) { diff --git a/fs/btrfs/Kconfig b/fs/btrfs/Kconfig index 7bb3c020e57..402afe0a0bf 100644 --- a/fs/btrfs/Kconfig +++ b/fs/btrfs/Kconfig @@ -4,6 +4,7 @@ config BTRFS_FS select LIBCRC32C select ZLIB_INFLATE select ZLIB_DEFLATE + select FS_JOURNAL_INFO help Btrfs is a new filesystem with extents, writable snapshotting, support for multiple devices and many more features. diff --git a/fs/cachefiles/daemon.c b/fs/cachefiles/daemon.c index 4618516dd99..c2413561ea7 100644 --- a/fs/cachefiles/daemon.c +++ b/fs/cachefiles/daemon.c @@ -21,6 +21,7 @@ #include <linux/mount.h> #include <linux/statfs.h> #include <linux/ctype.h> +#include <linux/string.h> #include <linux/fs_struct.h> #include "internal.h" @@ -257,8 +258,7 @@ static ssize_t cachefiles_daemon_write(struct file *file, if (args == data) goto error; *args = '\0'; - for (args++; isspace(*args); args++) - continue; + args = skip_spaces(++args); } /* run the appropriate command handler */ diff --git a/fs/exec.c b/fs/exec.c index c0c636e34f6..623a5cc3076 100644 --- a/fs/exec.c +++ b/fs/exec.c @@ -923,6 +923,15 @@ char *get_task_comm(char *buf, struct task_struct *tsk) void set_task_comm(struct task_struct *tsk, char *buf) { task_lock(tsk); + + /* + * Threads may access current->comm without holding + * the task lock, so write the string carefully. + * Readers without a lock may see incomplete new + * names but are safe from non-terminating string reads. + */ + memset(tsk->comm, 0, TASK_COMM_LEN); + wmb(); strlcpy(tsk->comm, buf, sizeof(tsk->comm)); task_unlock(tsk); perf_event_comm(tsk); diff --git a/fs/ext4/Kconfig b/fs/ext4/Kconfig index 9acf7e80813..e5f6774846e 100644 --- a/fs/ext4/Kconfig +++ b/fs/ext4/Kconfig @@ -2,6 +2,7 @@ config EXT4_FS tristate "The Extended 4 (ext4) filesystem" select JBD2 select CRC16 + select FS_JOURNAL_INFO help This is the next generation of the ext3 filesystem.
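Note: the cachefiles hunk above and the ext4 parse_strtoul() hunk that follows replace open-coded isspace() loops with the skip_spaces() helper, and the earlier isspace(*endp) simplifications in the backlight/lcd and video output hunks rely on isspace('\0') being false (see the ctype.h comment added later in this series). As a rough, illustrative sketch only (not code from the patch), the helper behaves like this:

	/*
	 * Illustrative sketch of skip_spaces() semantics; the real helper is
	 * declared in <linux/string.h> and returns a pointer to the first
	 * non-whitespace character of the string.
	 */
	#include <linux/ctype.h>
	#include <linux/string.h>

	static char *skip_spaces_sketch(const char *str)
	{
		while (isspace(*str))
			++str;
		return (char *)str;
	}
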
diff --git a/fs/ext4/super.c b/fs/ext4/super.c index 768c111a77e..827bde1f259 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c @@ -2137,11 +2137,8 @@ static int parse_strtoul(const char *buf, { char *endp; - while (*buf && isspace(*buf)) - buf++; - *value = simple_strtoul(buf, &endp, 0); - while (*endp && isspace(*endp)) - endp++; + *value = simple_strtoul(skip_spaces(buf), &endp, 0); + endp = skip_spaces(endp); if (*endp || *value > max) return -EINVAL; diff --git a/fs/gfs2/Kconfig b/fs/gfs2/Kconfig index 4dcddf83326..b192c661caa 100644 --- a/fs/gfs2/Kconfig +++ b/fs/gfs2/Kconfig @@ -10,6 +10,7 @@ config GFS2_FS select SLOW_WORK select QUOTA select QUOTACTL + select FS_JOURNAL_INFO help A cluster filesystem. diff --git a/fs/gfs2/sys.c b/fs/gfs2/sys.c index c5dad1eb7b9..0dc34621f6a 100644 --- a/fs/gfs2/sys.c +++ b/fs/gfs2/sys.c @@ -85,11 +85,7 @@ static ssize_t uuid_show(struct gfs2_sbd *sdp, char *buf) buf[0] = '\0'; if (!gfs2_uuid_valid(uuid)) return 0; - return snprintf(buf, PAGE_SIZE, "%02X%02X%02X%02X-%02X%02X-" - "%02X%02X-%02X%02X-%02X%02X%02X%02X%02X%02X\n", - uuid[0], uuid[1], uuid[2], uuid[3], uuid[4], uuid[5], - uuid[6], uuid[7], uuid[8], uuid[9], uuid[10], uuid[11], - uuid[12], uuid[13], uuid[14], uuid[15]); + return snprintf(buf, PAGE_SIZE, "%pUB\n", uuid); } static ssize_t freeze_show(struct gfs2_sbd *sdp, char *buf) @@ -575,14 +571,8 @@ static int gfs2_uevent(struct kset *kset, struct kobject *kobj, add_uevent_var(env, "LOCKPROTO=%s", sdp->sd_proto_name); if (!sdp->sd_args.ar_spectator) add_uevent_var(env, "JOURNALID=%u", sdp->sd_lockstruct.ls_jid); - if (gfs2_uuid_valid(uuid)) { - add_uevent_var(env, "UUID=%02X%02X%02X%02X-%02X%02X-%02X%02X-" - "%02X%02X-%02X%02X%02X%02X%02X%02X", - uuid[0], uuid[1], uuid[2], uuid[3], uuid[4], - uuid[5], uuid[6], uuid[7], uuid[8], uuid[9], - uuid[10], uuid[11], uuid[12], uuid[13], - uuid[14], uuid[15]); - } + if (gfs2_uuid_valid(uuid)) + add_uevent_var(env, "UUID=%pUB", uuid); return 0; } diff --git a/fs/hfs/catalog.c b/fs/hfs/catalog.c index 6d98f116ca0..424b0337f52 100644 --- a/fs/hfs/catalog.c +++ b/fs/hfs/catalog.c @@ -289,6 +289,10 @@ int hfs_cat_move(u32 cnid, struct inode *src_dir, struct qstr *src_name, err = hfs_brec_find(&src_fd); if (err) goto out; + if (src_fd.entrylength > sizeof(entry) || src_fd.entrylength < 0) { + err = -EIO; + goto out; + } hfs_bnode_read(src_fd.bnode, &entry, src_fd.entryoffset, src_fd.entrylength); diff --git a/fs/hfs/dir.c b/fs/hfs/dir.c index 7c69b98a2e4..2b3b8611b41 100644 --- a/fs/hfs/dir.c +++ b/fs/hfs/dir.c @@ -79,6 +79,11 @@ static int hfs_readdir(struct file *filp, void *dirent, filldir_t filldir) filp->f_pos++; /* fall through */ case 1: + if (fd.entrylength > sizeof(entry) || fd.entrylength < 0) { + err = -EIO; + goto out; + } + hfs_bnode_read(fd.bnode, &entry, fd.entryoffset, fd.entrylength); if (entry.type != HFS_CDR_THD) { printk(KERN_ERR "hfs: bad catalog folder thread\n"); @@ -109,6 +114,12 @@ static int hfs_readdir(struct file *filp, void *dirent, filldir_t filldir) err = -EIO; goto out; } + + if (fd.entrylength > sizeof(entry) || fd.entrylength < 0) { + err = -EIO; + goto out; + } + hfs_bnode_read(fd.bnode, &entry, fd.entryoffset, fd.entrylength); type = entry.type; len = hfs_mac2asc(sb, strbuf, &fd.key->cat.CName); diff --git a/fs/hfs/super.c b/fs/hfs/super.c index f7fcbe49da7..5ed7252b7b2 100644 --- a/fs/hfs/super.c +++ b/fs/hfs/super.c @@ -409,8 +409,13 @@ static int hfs_fill_super(struct super_block *sb, void *data, int silent) /* try to get the root inode */ 
hfs_find_init(HFS_SB(sb)->cat_tree, &fd); res = hfs_cat_find_brec(sb, HFS_ROOT_CNID, &fd); - if (!res) + if (!res) { + if (fd.entrylength > sizeof(rec) || fd.entrylength < 0) { + res = -EIO; + goto bail; + } hfs_bnode_read(fd.bnode, &rec, fd.entryoffset, fd.entrylength); + } if (res) { hfs_find_exit(&fd); goto bail_no_root; diff --git a/fs/jbd/Kconfig b/fs/jbd/Kconfig index 4e28beeed15..a8408983abd 100644 --- a/fs/jbd/Kconfig +++ b/fs/jbd/Kconfig @@ -1,5 +1,6 @@ config JBD tristate + select FS_JOURNAL_INFO help This is a generic journalling layer for block devices. It is currently used by the ext3 file system, but it could also be diff --git a/fs/jbd2/Kconfig b/fs/jbd2/Kconfig index f32f346f4b0..0f7d1ceafdf 100644 --- a/fs/jbd2/Kconfig +++ b/fs/jbd2/Kconfig @@ -1,6 +1,7 @@ config JBD2 tristate select CRC32 + select FS_JOURNAL_INFO help This is a generic journaling layer for block devices that support both 32-bit and 64-bit block numbers. It is currently used by diff --git a/fs/nilfs2/Kconfig b/fs/nilfs2/Kconfig index 251da07b2a1..1225af7b216 100644 --- a/fs/nilfs2/Kconfig +++ b/fs/nilfs2/Kconfig @@ -2,6 +2,7 @@ config NILFS2_FS tristate "NILFS2 file system support (EXPERIMENTAL)" depends on EXPERIMENTAL select CRC32 + select FS_JOURNAL_INFO help NILFS2 is a log-structured file system (LFS) supporting continuous snapshotting. In addition to versioning capability of the entire diff --git a/fs/proc/base.c b/fs/proc/base.c index af643b5aefe..4df4a464a91 100644 --- a/fs/proc/base.c +++ b/fs/proc/base.c @@ -1265,6 +1265,72 @@ static const struct file_operations proc_pid_sched_operations = { #endif +static ssize_t comm_write(struct file *file, const char __user *buf, + size_t count, loff_t *offset) +{ + struct inode *inode = file->f_path.dentry->d_inode; + struct task_struct *p; + char buffer[TASK_COMM_LEN]; + + memset(buffer, 0, sizeof(buffer)); + if (count > sizeof(buffer) - 1) + count = sizeof(buffer) - 1; + if (copy_from_user(buffer, buf, count)) + return -EFAULT; + + p = get_proc_task(inode); + if (!p) + return -ESRCH; + + if (same_thread_group(current, p)) + set_task_comm(p, buffer); + else + count = -EINVAL; + + put_task_struct(p); + + return count; +} + +static int comm_show(struct seq_file *m, void *v) +{ + struct inode *inode = m->private; + struct task_struct *p; + + p = get_proc_task(inode); + if (!p) + return -ESRCH; + + task_lock(p); + seq_printf(m, "%s\n", p->comm); + task_unlock(p); + + put_task_struct(p); + + return 0; +} + +static int comm_open(struct inode *inode, struct file *filp) +{ + int ret; + + ret = single_open(filp, comm_show, NULL); + if (!ret) { + struct seq_file *m = filp->private_data; + + m->private = inode; + } + return ret; +} + +static const struct file_operations proc_pid_set_comm_operations = { + .open = comm_open, + .read = seq_read, + .write = comm_write, + .llseek = seq_lseek, + .release = single_release, +}; + /* * We added or removed a vma mapping the executable. The vmas are only mapped * during exec and are not mapped with the mmap system call. 
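The comm_write()/comm_show() handlers added above are wired into the per-tgid and per-tid /proc directories in the hunks that follow. A minimal user-space sketch of how the resulting /proc/<pid>/comm file could be exercised (illustrative only; the file name comes from the patch, everything else here is invented for the example):

	/*
	 * Illustrative user-space sketch: rename the calling task via the new
	 * /proc/self/comm file and read the name back.  Per comm_write(), a
	 * writer outside the target's thread group gets -EINVAL, and names are
	 * truncated to TASK_COMM_LEN - 1 (15) bytes.
	 */
	#include <fcntl.h>
	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>

	int main(void)
	{
		char name[32];
		ssize_t n;
		int fd = open("/proc/self/comm", O_RDWR);

		if (fd < 0)
			return 1;
		if (write(fd, "demo-task", strlen("demo-task")) < 0)
			return 1;
		lseek(fd, 0, SEEK_SET);
		n = read(fd, name, sizeof(name) - 1);
		if (n > 0) {
			name[n] = '\0';
			printf("comm: %s", name);
		}
		close(fd);
		return 0;
	}
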
@@ -2504,6 +2570,7 @@ static const struct pid_entry tgid_base_stuff[] = { #ifdef CONFIG_SCHED_DEBUG REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations), #endif + REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations), #ifdef CONFIG_HAVE_ARCH_TRACEHOOK INF("syscall", S_IRUSR, proc_pid_syscall), #endif @@ -2838,6 +2905,7 @@ static const struct pid_entry tid_base_stuff[] = { #ifdef CONFIG_SCHED_DEBUG REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations), #endif + REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations), #ifdef CONFIG_HAVE_ARCH_TRACEHOOK INF("syscall", S_IRUSR, proc_pid_syscall), #endif diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c index 2a1bef9203c..47c03f4336b 100644 --- a/fs/proc/task_mmu.c +++ b/fs/proc/task_mmu.c @@ -650,6 +650,50 @@ static int pagemap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end, return err; } +static u64 huge_pte_to_pagemap_entry(pte_t pte, int offset) +{ + u64 pme = 0; + if (pte_present(pte)) + pme = PM_PFRAME(pte_pfn(pte) + offset) + | PM_PSHIFT(PAGE_SHIFT) | PM_PRESENT; + return pme; +} + +static int pagemap_hugetlb_range(pte_t *pte, unsigned long addr, + unsigned long end, struct mm_walk *walk) +{ + struct vm_area_struct *vma; + struct pagemapread *pm = walk->private; + struct hstate *hs = NULL; + int err = 0; + + vma = find_vma(walk->mm, addr); + if (vma) + hs = hstate_vma(vma); + for (; addr != end; addr += PAGE_SIZE) { + u64 pfn = PM_NOT_PRESENT; + + if (vma && (addr >= vma->vm_end)) { + vma = find_vma(walk->mm, addr); + if (vma) + hs = hstate_vma(vma); + } + + if (vma && (vma->vm_start <= addr) && is_vm_hugetlb_page(vma)) { + /* calculate pfn of the "raw" page in the hugepage. */ + int offset = (addr & ~huge_page_mask(hs)) >> PAGE_SHIFT; + pfn = huge_pte_to_pagemap_entry(*pte, offset); + } + err = add_to_pagemap(addr, pfn, pm); + if (err) + return err; + } + + cond_resched(); + + return err; +} + /* * /proc/pid/pagemap - an array mapping virtual pages to pfns * @@ -742,6 +786,7 @@ static ssize_t pagemap_read(struct file *file, char __user *buf, pagemap_walk.pmd_entry = pagemap_pte_range; pagemap_walk.pte_hole = pagemap_pte_hole; + pagemap_walk.hugetlb_entry = pagemap_hugetlb_range; pagemap_walk.mm = mm; pagemap_walk.private = &pm; diff --git a/fs/proc/task_nommu.c b/fs/proc/task_nommu.c index 8f5c05d3dbd..5d9fd64ef81 100644 --- a/fs/proc/task_nommu.c +++ b/fs/proc/task_nommu.c @@ -110,9 +110,13 @@ int task_statm(struct mm_struct *mm, int *shared, int *text, } } - size += (*text = mm->end_code - mm->start_code); - size += (*data = mm->start_stack - mm->start_data); + *text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK)) + >> PAGE_SHIFT; + *data = (PAGE_ALIGN(mm->start_stack) - (mm->start_data & PAGE_MASK)) + >> PAGE_SHIFT; up_read(&mm->mmap_sem); + size >>= PAGE_SHIFT; + size += *text + *data; *resident = size; return size; } diff --git a/fs/reiserfs/Kconfig b/fs/reiserfs/Kconfig index 513f431038f..ac7cd75c86f 100644 --- a/fs/reiserfs/Kconfig +++ b/fs/reiserfs/Kconfig @@ -1,6 +1,7 @@ config REISERFS_FS tristate "Reiserfs support" select CRC32 + select FS_JOURNAL_INFO help Stores not just filenames but the files themselves in a balanced tree. Uses journalling.
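The gfs2 hunks earlier and the ubifs/xfs hunks that follow drop the hand-rolled 16-byte UUID formatting in favour of the %pU printk extension. A hedged sketch of the call style (the function name and uuid parameter are invented for illustration):

	/*
	 * Illustrative sketch: %pUB prints a 16-byte UUID big-endian and
	 * upper-case with dashes, while %pUl (used by efi_guid_unparse()
	 * further below) prints it little-endian and lower-case.
	 */
	#include <linux/kernel.h>
	#include <linux/types.h>

	static void print_uuid_sketch(const u8 uuid[16])
	{
		pr_info("volume UUID: %pUB\n", uuid);
	}
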
diff --git a/fs/ubifs/debug.c b/fs/ubifs/debug.c index 8a771c59ac3..90492327b38 100644 --- a/fs/ubifs/debug.c +++ b/fs/ubifs/debug.c @@ -350,13 +350,8 @@ void dbg_dump_node(const struct ubifs_info *c, const void *node) le32_to_cpu(sup->fmt_version)); printk(KERN_DEBUG "\ttime_gran %u\n", le32_to_cpu(sup->time_gran)); - printk(KERN_DEBUG "\tUUID %02X%02X%02X%02X-%02X%02X" - "-%02X%02X-%02X%02X-%02X%02X%02X%02X%02X%02X\n", - sup->uuid[0], sup->uuid[1], sup->uuid[2], sup->uuid[3], - sup->uuid[4], sup->uuid[5], sup->uuid[6], sup->uuid[7], - sup->uuid[8], sup->uuid[9], sup->uuid[10], sup->uuid[11], - sup->uuid[12], sup->uuid[13], sup->uuid[14], - sup->uuid[15]); + printk(KERN_DEBUG "\tUUID %pUB\n", + sup->uuid); break; } case UBIFS_MST_NODE: diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c index 943ad562453..43f9d19a6f3 100644 --- a/fs/ubifs/super.c +++ b/fs/ubifs/super.c @@ -1393,12 +1393,7 @@ static int mount_ubifs(struct ubifs_info *c) c->leb_size, c->leb_size >> 10); dbg_msg("data journal heads: %d", c->jhead_cnt - NONDATA_JHEADS_CNT); - dbg_msg("UUID: %02X%02X%02X%02X-%02X%02X" - "-%02X%02X-%02X%02X-%02X%02X%02X%02X%02X%02X", - c->uuid[0], c->uuid[1], c->uuid[2], c->uuid[3], - c->uuid[4], c->uuid[5], c->uuid[6], c->uuid[7], - c->uuid[8], c->uuid[9], c->uuid[10], c->uuid[11], - c->uuid[12], c->uuid[13], c->uuid[14], c->uuid[15]); + dbg_msg("UUID: %pUB", c->uuid); dbg_msg("big_lpt %d", c->big_lpt); dbg_msg("log LEBs: %d (%d - %d)", c->log_lebs, UBIFS_LOG_LNUM, c->log_last); diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c index abc2034d83e..69ac2e5ef20 100644 --- a/fs/xfs/xfs_log_recover.c +++ b/fs/xfs/xfs_log_recover.c @@ -226,16 +226,10 @@ xlog_header_check_dump( xfs_mount_t *mp, xlog_rec_header_t *head) { - int b; - - cmn_err(CE_DEBUG, "%s: SB : uuid = ", __func__); - for (b = 0; b < 16; b++) - cmn_err(CE_DEBUG, "%02x", ((__uint8_t *)&mp->m_sb.sb_uuid)[b]); - cmn_err(CE_DEBUG, ", fmt = %d\n", XLOG_FMT); - cmn_err(CE_DEBUG, " log : uuid = "); - for (b = 0; b < 16; b++) - cmn_err(CE_DEBUG, "%02x", ((__uint8_t *)&head->h_fs_uuid)[b]); - cmn_err(CE_DEBUG, ", fmt = %d\n", be32_to_cpu(head->h_fmt)); + cmn_err(CE_DEBUG, "%s: SB : uuid = %pU, fmt = %d\n", + __func__, &mp->m_sb.sb_uuid, XLOG_FMT); + cmn_err(CE_DEBUG, " log : uuid = %pU, fmt = %d\n", + &head->h_fs_uuid, be32_to_cpu(head->h_fmt)); } #else #define xlog_header_check_dump(mp, head) diff --git a/include/asm-generic/bitops/atomic.h b/include/asm-generic/bitops/atomic.h index c8946465e63..ecc44a8e2b4 100644 --- a/include/asm-generic/bitops/atomic.h +++ b/include/asm-generic/bitops/atomic.h @@ -15,19 +15,19 @@ # define ATOMIC_HASH_SIZE 4 # define ATOMIC_HASH(a) (&(__atomic_hash[ (((unsigned long) a)/L1_CACHE_BYTES) & (ATOMIC_HASH_SIZE-1) ])) -extern raw_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned; +extern arch_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned; /* Can't use raw_spin_lock_irq because of #include problems, so * this is the substitute */ #define _atomic_spin_lock_irqsave(l,f) do { \ - raw_spinlock_t *s = ATOMIC_HASH(l); \ + arch_spinlock_t *s = ATOMIC_HASH(l); \ local_irq_save(f); \ - __raw_spin_lock(s); \ + arch_spin_lock(s); \ } while(0) #define _atomic_spin_unlock_irqrestore(l,f) do { \ - raw_spinlock_t *s = ATOMIC_HASH(l); \ - __raw_spin_unlock(s); \ + arch_spinlock_t *s = ATOMIC_HASH(l); \ + arch_spin_unlock(s); \ local_irq_restore(f); \ } while(0) diff --git a/include/asm-generic/bug.h b/include/asm-generic/bug.h index 4b6755984d2..18c435d7c08 100644 --- a/include/asm-generic/bug.h 
+++ b/include/asm-generic/bug.h @@ -113,22 +113,22 @@ extern void warn_slowpath_null(const char *file, const int line); #endif #define WARN_ON_ONCE(condition) ({ \ - static int __warned; \ + static bool __warned; \ int __ret_warn_once = !!(condition); \ \ if (unlikely(__ret_warn_once)) \ if (WARN_ON(!__warned)) \ - __warned = 1; \ + __warned = true; \ unlikely(__ret_warn_once); \ }) #define WARN_ONCE(condition, format...) ({ \ - static int __warned; \ + static bool __warned; \ int __ret_warn_once = !!(condition); \ \ if (unlikely(__ret_warn_once)) \ if (WARN(!__warned, format)) \ - __warned = 1; \ + __warned = true; \ unlikely(__ret_warn_once); \ }) diff --git a/include/asm-generic/mman-common.h b/include/asm-generic/mman-common.h index 5ee13b2fd22..20111265afd 100644 --- a/include/asm-generic/mman-common.h +++ b/include/asm-generic/mman-common.h @@ -19,6 +19,11 @@ #define MAP_TYPE 0x0f /* Mask for type of mapping */ #define MAP_FIXED 0x10 /* Interpret addr exactly */ #define MAP_ANONYMOUS 0x20 /* don't use a file */ +#ifdef CONFIG_MMAP_ALLOW_UNINITIALIZED +# define MAP_UNINITIALIZED 0x4000000 /* For anonymous mmap, memory could be uninitialized */ +#else +# define MAP_UNINITIALIZED 0x0 /* Don't support this flag */ +#endif #define MS_ASYNC 1 /* sync memory asynchronously */ #define MS_INVALIDATE 2 /* invalidate the caches */ diff --git a/include/linux/atmel-mci.h b/include/linux/atmel-mci.h index 57b1846a3c8..3e09b345f4d 100644 --- a/include/linux/atmel-mci.h +++ b/include/linux/atmel-mci.h @@ -3,8 +3,6 @@ #define ATMEL_MCI_MAX_NR_SLOTS 2 -#include <linux/dw_dmac.h> - /** * struct mci_slot_pdata - board-specific per-slot configuration * @bus_width: Number of data lines wired up the slot @@ -34,7 +32,7 @@ struct mci_slot_pdata { * @slot: Per-slot configuration data. */ struct mci_platform_data { - struct dw_dma_slave dma_slave; + struct mci_dma_data *dma_slave; struct mci_slot_pdata slot[ATMEL_MCI_MAX_NR_SLOTS]; }; diff --git a/include/linux/cs5535.h b/include/linux/cs5535.h new file mode 100644 index 00000000000..d5a1d4810b8 --- /dev/null +++ b/include/linux/cs5535.h @@ -0,0 +1,172 @@ +/* + * AMD CS5535/CS5536 definitions + * Copyright (C) 2006 Advanced Micro Devices, Inc. + * Copyright (C) 2009 Andres Salomon <dilinger@collabora.co.uk> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of version 2 of the GNU General Public License + * as published by the Free Software Foundation. 
+ */ + +#ifndef _CS5535_H +#define _CS5535_H + +/* MSRs */ +#define MSR_GLIU_P2D_RO0 0x10000029 + +#define MSR_LX_GLD_MSR_CONFIG 0x48002001 +#define MSR_LX_MSR_PADSEL 0x48002011 /* NOT 0x48000011; the data + * sheet has the wrong value */ +#define MSR_GLCP_SYS_RSTPLL 0x4C000014 +#define MSR_GLCP_DOTPLL 0x4C000015 + +#define MSR_LBAR_SMB 0x5140000B +#define MSR_LBAR_GPIO 0x5140000C +#define MSR_LBAR_MFGPT 0x5140000D +#define MSR_LBAR_ACPI 0x5140000E +#define MSR_LBAR_PMS 0x5140000F + +#define MSR_DIVIL_SOFT_RESET 0x51400017 + +#define MSR_PIC_YSEL_LOW 0x51400020 +#define MSR_PIC_YSEL_HIGH 0x51400021 +#define MSR_PIC_ZSEL_LOW 0x51400022 +#define MSR_PIC_ZSEL_HIGH 0x51400023 +#define MSR_PIC_IRQM_LPC 0x51400025 + +#define MSR_MFGPT_IRQ 0x51400028 +#define MSR_MFGPT_NR 0x51400029 +#define MSR_MFGPT_SETUP 0x5140002B + +#define MSR_LX_SPARE_MSR 0x80000011 /* DC-specific */ + +#define MSR_GX_GLD_MSR_CONFIG 0xC0002001 +#define MSR_GX_MSR_PADSEL 0xC0002011 + +/* resource sizes */ +#define LBAR_GPIO_SIZE 0xFF +#define LBAR_MFGPT_SIZE 0x40 +#define LBAR_ACPI_SIZE 0x40 +#define LBAR_PMS_SIZE 0x80 + +/* VSA2 magic values */ +#define VSA_VRC_INDEX 0xAC1C +#define VSA_VRC_DATA 0xAC1E +#define VSA_VR_UNLOCK 0xFC53 /* unlock virtual register */ +#define VSA_VR_SIGNATURE 0x0003 +#define VSA_VR_MEM_SIZE 0x0200 +#define AMD_VSA_SIG 0x4132 /* signature is ascii 'VSA2' */ +#define GSW_VSA_SIG 0x534d /* General Software signature */ + +#include <linux/io.h> + +static inline int cs5535_has_vsa2(void) +{ + static int has_vsa2 = -1; + + if (has_vsa2 == -1) { + uint16_t val; + + /* + * The VSA has virtual registers that we can query for a + * signature. + */ + outw(VSA_VR_UNLOCK, VSA_VRC_INDEX); + outw(VSA_VR_SIGNATURE, VSA_VRC_INDEX); + + val = inw(VSA_VRC_DATA); + has_vsa2 = (val == AMD_VSA_SIG || val == GSW_VSA_SIG); + } + + return has_vsa2; +} + +/* GPIOs */ +#define GPIO_OUTPUT_VAL 0x00 +#define GPIO_OUTPUT_ENABLE 0x04 +#define GPIO_OUTPUT_OPEN_DRAIN 0x08 +#define GPIO_OUTPUT_INVERT 0x0C +#define GPIO_OUTPUT_AUX1 0x10 +#define GPIO_OUTPUT_AUX2 0x14 +#define GPIO_PULL_UP 0x18 +#define GPIO_PULL_DOWN 0x1C +#define GPIO_INPUT_ENABLE 0x20 +#define GPIO_INPUT_INVERT 0x24 +#define GPIO_INPUT_FILTER 0x28 +#define GPIO_INPUT_EVENT_COUNT 0x2C +#define GPIO_READ_BACK 0x30 +#define GPIO_INPUT_AUX1 0x34 +#define GPIO_EVENTS_ENABLE 0x38 +#define GPIO_LOCK_ENABLE 0x3C +#define GPIO_POSITIVE_EDGE_EN 0x40 +#define GPIO_NEGATIVE_EDGE_EN 0x44 +#define GPIO_POSITIVE_EDGE_STS 0x48 +#define GPIO_NEGATIVE_EDGE_STS 0x4C + +#define GPIO_MAP_X 0xE0 +#define GPIO_MAP_Y 0xE4 +#define GPIO_MAP_Z 0xE8 +#define GPIO_MAP_W 0xEC + +void cs5535_gpio_set(unsigned offset, unsigned int reg); +void cs5535_gpio_clear(unsigned offset, unsigned int reg); +int cs5535_gpio_isset(unsigned offset, unsigned int reg); + +/* MFGPTs */ + +#define MFGPT_MAX_TIMERS 8 +#define MFGPT_TIMER_ANY (-1) + +#define MFGPT_DOMAIN_WORKING 1 +#define MFGPT_DOMAIN_STANDBY 2 +#define MFGPT_DOMAIN_ANY (MFGPT_DOMAIN_WORKING | MFGPT_DOMAIN_STANDBY) + +#define MFGPT_CMP1 0 +#define MFGPT_CMP2 1 + +#define MFGPT_EVENT_IRQ 0 +#define MFGPT_EVENT_NMI 1 +#define MFGPT_EVENT_RESET 3 + +#define MFGPT_REG_CMP1 0 +#define MFGPT_REG_CMP2 2 +#define MFGPT_REG_COUNTER 4 +#define MFGPT_REG_SETUP 6 + +#define MFGPT_SETUP_CNTEN (1 << 15) +#define MFGPT_SETUP_CMP2 (1 << 14) +#define MFGPT_SETUP_CMP1 (1 << 13) +#define MFGPT_SETUP_SETUP (1 << 12) +#define MFGPT_SETUP_STOPEN (1 << 11) +#define MFGPT_SETUP_EXTEN (1 << 10) +#define MFGPT_SETUP_REVEN (1 << 5) +#define MFGPT_SETUP_CLKSEL (1 << 4) + 
+struct cs5535_mfgpt_timer; + +extern uint16_t cs5535_mfgpt_read(struct cs5535_mfgpt_timer *timer, + uint16_t reg); +extern void cs5535_mfgpt_write(struct cs5535_mfgpt_timer *timer, uint16_t reg, + uint16_t value); + +extern int cs5535_mfgpt_toggle_event(struct cs5535_mfgpt_timer *timer, int cmp, + int event, int enable); +extern int cs5535_mfgpt_set_irq(struct cs5535_mfgpt_timer *timer, int cmp, + int *irq, int enable); +extern struct cs5535_mfgpt_timer *cs5535_mfgpt_alloc_timer(int timer, + int domain); +extern void cs5535_mfgpt_free_timer(struct cs5535_mfgpt_timer *timer); + +static inline int cs5535_mfgpt_setup_irq(struct cs5535_mfgpt_timer *timer, + int cmp, int *irq) +{ + return cs5535_mfgpt_set_irq(timer, cmp, irq, 1); +} + +static inline int cs5535_mfgpt_release_irq(struct cs5535_mfgpt_timer *timer, + int cmp, int *irq) +{ + return cs5535_mfgpt_set_irq(timer, cmp, irq, 0); +} + +#endif diff --git a/include/linux/ctype.h b/include/linux/ctype.h index afa36392297..a3d6ee0044f 100644 --- a/include/linux/ctype.h +++ b/include/linux/ctype.h @@ -15,7 +15,7 @@ #define _X 0x40 /* hex digit */ #define _SP 0x80 /* hard space (0x20) */ -extern unsigned char _ctype[]; +extern const unsigned char _ctype[]; #define __ismask(x) (_ctype[(int)(unsigned char)(x)]) @@ -27,6 +27,7 @@ extern unsigned char _ctype[]; #define islower(c) ((__ismask(c)&(_L)) != 0) #define isprint(c) ((__ismask(c)&(_P|_U|_L|_D|_SP)) != 0) #define ispunct(c) ((__ismask(c)&(_P)) != 0) +/* Note: isspace() must return false for %NUL-terminator */ #define isspace(c) ((__ismask(c)&(_S)) != 0) #define isupper(c) ((__ismask(c)&(_U)) != 0) #define isxdigit(c) ((__ismask(c)&(_D|_X)) != 0) diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h index df7607e6dce..d4c9c0b88ad 100644 --- a/include/linux/device-mapper.h +++ b/include/linux/device-mapper.h @@ -235,7 +235,7 @@ void dm_uevent_add(struct mapped_device *md, struct list_head *elist); const char *dm_device_name(struct mapped_device *md); int dm_copy_name_and_uuid(struct mapped_device *md, char *name, char *uuid); struct gendisk *dm_disk(struct mapped_device *md); -int dm_suspended(struct mapped_device *md); +int dm_suspended(struct dm_target *ti); int dm_noflush_suspending(struct dm_target *ti); union map_info *dm_get_mapinfo(struct bio *bio); union map_info *dm_get_rq_mapinfo(struct request *rq); @@ -276,7 +276,7 @@ void dm_table_unplug_all(struct dm_table *t); /* * Table reference counting. */ -struct dm_table *dm_get_table(struct mapped_device *md); +struct dm_table *dm_get_live_table(struct mapped_device *md); void dm_table_get(struct dm_table *t); void dm_table_put(struct dm_table *t); @@ -295,8 +295,10 @@ void dm_table_event(struct dm_table *t); /* * The device must be suspended before calling this method. + * Returns the previous table, which the caller must destroy. */ -int dm_swap_table(struct mapped_device *md, struct dm_table *t); +struct dm_table *dm_swap_table(struct mapped_device *md, + struct dm_table *t); /* * A wrapper around vmalloc. diff --git a/include/linux/dm-dirty-log.h b/include/linux/dm-dirty-log.h index 5e8b11d88f6..7084503c340 100644 --- a/include/linux/dm-dirty-log.h +++ b/include/linux/dm-dirty-log.h @@ -21,6 +21,7 @@ struct dm_dirty_log_type; struct dm_dirty_log { struct dm_dirty_log_type *type; + int (*flush_callback_fn)(struct dm_target *ti); void *context; }; @@ -136,8 +137,9 @@ int dm_dirty_log_type_unregister(struct dm_dirty_log_type *type); * type->constructor/destructor() directly. 
*/ struct dm_dirty_log *dm_dirty_log_create(const char *type_name, - struct dm_target *ti, - unsigned argc, char **argv); + struct dm_target *ti, + int (*flush_callback_fn)(struct dm_target *ti), + unsigned argc, char **argv); void dm_dirty_log_destroy(struct dm_dirty_log *log); #endif /* __KERNEL__ */ diff --git a/include/linux/dm-ioctl.h b/include/linux/dm-ioctl.h index 2ab84c83c31..aa95508d2f9 100644 --- a/include/linux/dm-ioctl.h +++ b/include/linux/dm-ioctl.h @@ -1,6 +1,6 @@ /* * Copyright (C) 2001 - 2003 Sistina Software (UK) Limited. - * Copyright (C) 2004 - 2005 Red Hat, Inc. All rights reserved. + * Copyright (C) 2004 - 2009 Red Hat, Inc. All rights reserved. * * This file is released under the LGPL. */ @@ -266,9 +266,9 @@ enum { #define DM_DEV_SET_GEOMETRY _IOWR(DM_IOCTL, DM_DEV_SET_GEOMETRY_CMD, struct dm_ioctl) #define DM_VERSION_MAJOR 4 -#define DM_VERSION_MINOR 15 +#define DM_VERSION_MINOR 16 #define DM_VERSION_PATCHLEVEL 0 -#define DM_VERSION_EXTRA "-ioctl (2009-04-01)" +#define DM_VERSION_EXTRA "-ioctl (2009-11-05)" /* Status bits */ #define DM_READONLY_FLAG (1 << 0) /* In/Out */ @@ -309,4 +309,11 @@ enum { */ #define DM_NOFLUSH_FLAG (1 << 11) /* In */ +/* + * If set, any table information returned will relate to the inactive + * table instead of the live one. Always check DM_INACTIVE_PRESENT_FLAG + * is set before using the data returned. + */ +#define DM_QUERY_INACTIVE_TABLE_FLAG (1 << 12) /* In */ + #endif /* _LINUX_DM_IOCTL_H */ diff --git a/include/linux/dm-region-hash.h b/include/linux/dm-region-hash.h index a9e652a4137..9e2a7a401df 100644 --- a/include/linux/dm-region-hash.h +++ b/include/linux/dm-region-hash.h @@ -78,8 +78,7 @@ void dm_rh_dec(struct dm_region_hash *rh, region_t region); /* Delay bios on regions. */ void dm_rh_delay(struct dm_region_hash *rh, struct bio *bio); -void dm_rh_mark_nosync(struct dm_region_hash *rh, - struct bio *bio, unsigned done, int error); +void dm_rh_mark_nosync(struct dm_region_hash *rh, struct bio *bio); /* * Region recovery control. diff --git a/include/linux/dynamic_debug.h b/include/linux/dynamic_debug.h index a0d9422a156..f8c2e176750 100644 --- a/include/linux/dynamic_debug.h +++ b/include/linux/dynamic_debug.h @@ -57,8 +57,7 @@ extern int ddebug_remove_module(char *mod_name); { KBUILD_MODNAME, __func__, __FILE__, fmt, DEBUG_HASH, \ DEBUG_HASH2, __LINE__, _DPRINTK_FLAGS_DEFAULT }; \ if (__dynamic_dbg_enabled(descriptor)) \ - printk(KERN_DEBUG KBUILD_MODNAME ":" pr_fmt(fmt), \ - ##__VA_ARGS__); \ + printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__); \ } while (0) @@ -69,9 +68,7 @@ extern int ddebug_remove_module(char *mod_name); { KBUILD_MODNAME, __func__, __FILE__, fmt, DEBUG_HASH, \ DEBUG_HASH2, __LINE__, _DPRINTK_FLAGS_DEFAULT }; \ if (__dynamic_dbg_enabled(descriptor)) \ - dev_printk(KERN_DEBUG, dev, \ - KBUILD_MODNAME ": " fmt, \ - ##__VA_ARGS__); \ + dev_printk(KERN_DEBUG, dev, fmt, ##__VA_ARGS__); \ } while (0) #else @@ -81,8 +78,10 @@ static inline int ddebug_remove_module(char *mod) return 0; } -#define dynamic_pr_debug(fmt, ...) do { } while (0) -#define dynamic_dev_dbg(dev, format, ...) do { } while (0) +#define dynamic_pr_debug(fmt, ...) \ + do { if (0) printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__); } while (0) +#define dynamic_dev_dbg(dev, format, ...) 
\ + do { if (0) dev_printk(KERN_DEBUG, dev, fmt, ##__VA_ARGS__); } while (0) #endif #endif diff --git a/include/linux/efi.h b/include/linux/efi.h index ce4581fbc08..fb737bc19a8 100644 --- a/include/linux/efi.h +++ b/include/linux/efi.h @@ -280,11 +280,7 @@ efi_guidcmp (efi_guid_t left, efi_guid_t right) static inline char * efi_guid_unparse(efi_guid_t *guid, char *out) { - sprintf(out, "%02x%02x%02x%02x-%02x%02x-%02x%02x-%02x%02x-%02x%02x%02x%02x%02x%02x", - guid->b[3], guid->b[2], guid->b[1], guid->b[0], - guid->b[5], guid->b[4], guid->b[7], guid->b[6], - guid->b[8], guid->b[9], guid->b[10], guid->b[11], - guid->b[12], guid->b[13], guid->b[14], guid->b[15]); + sprintf(out, "%pUl", guid->b); return out; } diff --git a/include/linux/err.h b/include/linux/err.h index ec87f3142bf..1b12642636c 100644 --- a/include/linux/err.h +++ b/include/linux/err.h @@ -34,6 +34,11 @@ static inline long IS_ERR(const void *ptr) return IS_ERR_VALUE((unsigned long)ptr); } +static inline long IS_ERR_OR_NULL(const void *ptr) +{ + return !ptr || IS_ERR_VALUE((unsigned long)ptr); +} + /** * ERR_CAST - Explicitly cast an error-valued pointer to another pointer type * @ptr: The pointer to cast. diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h index af634e95871..5d86fb2309d 100644 --- a/include/linux/hrtimer.h +++ b/include/linux/hrtimer.h @@ -169,7 +169,7 @@ struct hrtimer_clock_base { * @max_hang_time: Maximum time spent in hrtimer_interrupt */ struct hrtimer_cpu_base { - spinlock_t lock; + raw_spinlock_t lock; struct hrtimer_clock_base clock_base[HRTIMER_MAX_CLOCK_BASES]; #ifdef CONFIG_HIGH_RES_TIMERS ktime_t expires_next; diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h index 41a59afc70f..78b4bc64c00 100644 --- a/include/linux/hugetlb.h +++ b/include/linux/hugetlb.h @@ -23,6 +23,12 @@ void reset_vma_resv_huge_pages(struct vm_area_struct *vma); int hugetlb_sysctl_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *); int hugetlb_overcommit_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *); int hugetlb_treat_movable_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *); + +#ifdef CONFIG_NUMA +int hugetlb_mempolicy_sysctl_handler(struct ctl_table *, int, + void __user *, size_t *, loff_t *); +#endif + int copy_hugetlb_page_range(struct mm_struct *, struct mm_struct *, struct vm_area_struct *); int follow_hugetlb_page(struct mm_struct *, struct vm_area_struct *, struct page **, struct vm_area_struct **, diff --git a/include/linux/i2c.h b/include/linux/i2c.h index 419ab546b26..02fc617782e 100644 --- a/include/linux/i2c.h +++ b/include/linux/i2c.h @@ -110,7 +110,7 @@ extern s32 i2c_smbus_write_i2c_block_data(struct i2c_client *client, * @driver: Device driver model driver * @id_table: List of I2C devices supported by this driver * @detect: Callback for device detection - * @address_data: The I2C addresses to probe (for detect) + * @address_list: The I2C addresses to probe (for detect) * @clients: List of detected clients we created (for i2c-core use only) * * The driver.owner field should be set to the module owner of this driver. 
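The i2c.h hunks around this point rework device detection: detect() loses its 'kind' argument, the i2c_client_address_data wrapper becomes a bare I2C_CLIENT_END-terminated address_list, and the I2C_CLIENT_INSMOD* macros are removed in the hunk that follows. A hedged sketch of a converted driver (the "foo" chip and its addresses are invented for illustration):

	/*
	 * Illustrative only: a hypothetical "foo" chip driver using the new
	 * detect()/address_list interface from this patch.
	 */
	#include <linux/i2c.h>
	#include <linux/string.h>

	static const unsigned short foo_addresses[] = { 0x48, 0x49, I2C_CLIENT_END };

	static int foo_detect(struct i2c_client *client, struct i2c_board_info *info)
	{
		/* probe the hardware here; on success report the device type */
		strlcpy(info->type, "foo", I2C_NAME_SIZE);
		return 0;
	}

	static struct i2c_driver foo_driver = {
		.driver		= { .name = "foo" },
		.class		= I2C_CLASS_HWMON,
		.detect		= foo_detect,
		.address_list	= foo_addresses,
	};
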
@@ -161,8 +161,8 @@ struct i2c_driver { const struct i2c_device_id *id_table; /* Device detection callback for automatic device creation */ - int (*detect)(struct i2c_client *, int kind, struct i2c_board_info *); - const struct i2c_client_address_data *address_data; + int (*detect)(struct i2c_client *, struct i2c_board_info *); + const unsigned short *address_list; struct list_head clients; }; #define to_i2c_driver(d) container_of(d, struct i2c_driver, driver) @@ -391,14 +391,6 @@ static inline void i2c_unlock_adapter(struct i2c_adapter *adapter) #define I2C_CLASS_DDC (1<<3) /* DDC bus on graphics adapters */ #define I2C_CLASS_SPD (1<<7) /* SPD EEPROMs and similar */ -/* i2c_client_address_data is the struct for holding default client - * addresses for a driver and for the parameters supplied on the - * command line - */ -struct i2c_client_address_data { - const unsigned short *normal_i2c; -}; - /* Internal numbers to terminate lists */ #define I2C_CLIENT_END 0xfffeU @@ -576,82 +568,4 @@ union i2c_smbus_data { #define I2C_SMBUS_BLOCK_PROC_CALL 7 /* SMBus 2.0 */ #define I2C_SMBUS_I2C_BLOCK_DATA 8 - -#ifdef __KERNEL__ - -/* These defines are used for probing i2c client addresses */ -/* The length of the option lists */ -#define I2C_CLIENT_MAX_OPTS 48 - -/* Default fill of many variables */ -#define I2C_CLIENT_DEFAULTS {I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \ - I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \ - I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \ - I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \ - I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \ - I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \ - I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \ - I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \ - I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \ - I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \ - I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \ - I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \ - I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \ - I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \ - I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \ - I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END} - -/* I2C_CLIENT_MODULE_PARM creates a module parameter, and puts it in the - module header */ - -#define I2C_CLIENT_MODULE_PARM(var,desc) \ - static unsigned short var[I2C_CLIENT_MAX_OPTS] = I2C_CLIENT_DEFAULTS; \ - static unsigned int var##_num; \ - module_param_array(var, short, &var##_num, 0); \ - MODULE_PARM_DESC(var, desc) - -#define I2C_CLIENT_INSMOD_COMMON \ -static const struct i2c_client_address_data addr_data = { \ - .normal_i2c = normal_i2c, \ -} - -/* These are the ones you want to use in your own drivers. Pick the one - which matches the number of devices the driver differenciates between. 
*/ -#define I2C_CLIENT_INSMOD \ -I2C_CLIENT_INSMOD_COMMON - -#define I2C_CLIENT_INSMOD_1(chip1) \ -enum chips { any_chip, chip1 }; \ -I2C_CLIENT_INSMOD_COMMON - -#define I2C_CLIENT_INSMOD_2(chip1, chip2) \ -enum chips { any_chip, chip1, chip2 }; \ -I2C_CLIENT_INSMOD_COMMON - -#define I2C_CLIENT_INSMOD_3(chip1, chip2, chip3) \ -enum chips { any_chip, chip1, chip2, chip3 }; \ -I2C_CLIENT_INSMOD_COMMON - -#define I2C_CLIENT_INSMOD_4(chip1, chip2, chip3, chip4) \ -enum chips { any_chip, chip1, chip2, chip3, chip4 }; \ -I2C_CLIENT_INSMOD_COMMON - -#define I2C_CLIENT_INSMOD_5(chip1, chip2, chip3, chip4, chip5) \ -enum chips { any_chip, chip1, chip2, chip3, chip4, chip5 }; \ -I2C_CLIENT_INSMOD_COMMON - -#define I2C_CLIENT_INSMOD_6(chip1, chip2, chip3, chip4, chip5, chip6) \ -enum chips { any_chip, chip1, chip2, chip3, chip4, chip5, chip6 }; \ -I2C_CLIENT_INSMOD_COMMON - -#define I2C_CLIENT_INSMOD_7(chip1, chip2, chip3, chip4, chip5, chip6, chip7) \ -enum chips { any_chip, chip1, chip2, chip3, chip4, chip5, chip6, \ - chip7 }; \ -I2C_CLIENT_INSMOD_COMMON - -#define I2C_CLIENT_INSMOD_8(chip1, chip2, chip3, chip4, chip5, chip6, chip7, chip8) \ -enum chips { any_chip, chip1, chip2, chip3, chip4, chip5, chip6, \ - chip7, chip8 }; \ -I2C_CLIENT_INSMOD_COMMON -#endif /* __KERNEL__ */ #endif /* _LINUX_I2C_H */ diff --git a/include/linux/init_task.h b/include/linux/init_task.h index 8d10aa7fd4c..5ed8b9c5035 100644 --- a/include/linux/init_task.h +++ b/include/linux/init_task.h @@ -111,6 +111,12 @@ extern struct cred init_cred; # define INIT_PERF_EVENTS(tsk) #endif +#ifdef CONFIG_FS_JOURNAL_INFO +#define INIT_JOURNAL_INFO .journal_info = NULL, +#else +#define INIT_JOURNAL_INFO +#endif + /* * INIT_TASK is used to set up the first task table, touch at * your own risk!. Base=0, limit=0x1fffff (=2MB) @@ -162,10 +168,9 @@ extern struct cred init_cred; .signal = {{0}}}, \ .blocked = {{0}}, \ .alloc_lock = __SPIN_LOCK_UNLOCKED(tsk.alloc_lock), \ - .journal_info = NULL, \ .cpu_timers = INIT_CPU_TIMERS(tsk.cpu_timers), \ .fs_excl = ATOMIC_INIT(0), \ - .pi_lock = __SPIN_LOCK_UNLOCKED(tsk.pi_lock), \ + .pi_lock = __RAW_SPIN_LOCK_UNLOCKED(tsk.pi_lock), \ .timer_slack_ns = 50000, /* 50 usec default slack */ \ .pids = { \ [PIDTYPE_PID] = INIT_PID_LINK(PIDTYPE_PID), \ @@ -173,6 +178,7 @@ extern struct cred init_cred; [PIDTYPE_SID] = INIT_PID_LINK(PIDTYPE_SID), \ }, \ .dirties = INIT_PROP_LOCAL_SINGLE(dirties), \ + INIT_JOURNAL_INFO \ INIT_IDS \ INIT_PERF_EVENTS(tsk) \ INIT_TRACE_IRQFLAGS \ diff --git a/include/linux/irq.h b/include/linux/irq.h index a287cfc0b1a..451481c082b 100644 --- a/include/linux/irq.h +++ b/include/linux/irq.h @@ -192,7 +192,7 @@ struct irq_desc { unsigned int irq_count; /* For detecting broken IRQs */ unsigned long last_unhandled; /* Aging timer for unhandled count */ unsigned int irqs_unhandled; - spinlock_t lock; + raw_spinlock_t lock; #ifdef CONFIG_SMP cpumask_var_t affinity; unsigned int node; diff --git a/include/linux/kallsyms.h b/include/linux/kallsyms.h index 792274269f2..d8e9b3d1c23 100644 --- a/include/linux/kallsyms.h +++ b/include/linux/kallsyms.h @@ -107,18 +107,6 @@ static inline void print_symbol(const char *fmt, unsigned long addr) __builtin_extract_return_addr((void *)addr)); } -/* - * Pretty-print a function pointer. This function is deprecated. - * Please use the "%pF" vsprintf format instead. 
- */ -static inline void __deprecated print_fn_descriptor_symbol(const char *fmt, void *addr) -{ -#if defined(CONFIG_IA64) || defined(CONFIG_PPC64) - addr = *(void **)addr; -#endif - print_symbol(fmt, (unsigned long)addr); -} - static inline void print_ip_sym(unsigned long ip) { printk("[<%p>] %pS\n", (void *) ip, (void *) ip); diff --git a/include/linux/kernel.h b/include/linux/kernel.h index 3fa4c590cf1..4d9c916d06d 100644 --- a/include/linux/kernel.h +++ b/include/linux/kernel.h @@ -251,10 +251,10 @@ extern int printk_delay_msec; * Print a one-time message (analogous to WARN_ONCE() et al): */ #define printk_once(x...) ({ \ - static bool __print_once = true; \ + static bool __print_once; \ \ - if (__print_once) { \ - __print_once = false; \ + if (!__print_once) { \ + __print_once = true; \ printk(x); \ } \ }) @@ -397,15 +397,58 @@ static inline char *pack_hex_byte(char *buf, u8 byte) printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__) #elif defined(CONFIG_DYNAMIC_DEBUG) /* dynamic_pr_debug() uses pr_fmt() internally so we don't need it here */ -#define pr_debug(fmt, ...) do { \ - dynamic_pr_debug(fmt, ##__VA_ARGS__); \ - } while (0) +#define pr_debug(fmt, ...) \ + dynamic_pr_debug(fmt, ##__VA_ARGS__) #else #define pr_debug(fmt, ...) \ ({ if (0) printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__); 0; }) #endif /* + * ratelimited messages with local ratelimit_state, + * no local ratelimit_state used in the !PRINTK case + */ +#ifdef CONFIG_PRINTK +#define printk_ratelimited(fmt, ...) ({ \ + static struct ratelimit_state _rs = { \ + .interval = DEFAULT_RATELIMIT_INTERVAL, \ + .burst = DEFAULT_RATELIMIT_BURST, \ + }; \ + \ + if (!__ratelimit(&_rs)) \ + printk(fmt, ##__VA_ARGS__); \ +}) +#else +/* No effect, but we still get type checking even in the !PRINTK case: */ +#define printk_ratelimited printk +#endif + +#define pr_emerg_ratelimited(fmt, ...) \ + printk_ratelimited(KERN_EMERG pr_fmt(fmt), ##__VA_ARGS__) +#define pr_alert_ratelimited(fmt, ...) \ + printk_ratelimited(KERN_ALERT pr_fmt(fmt), ##__VA_ARGS__) +#define pr_crit_ratelimited(fmt, ...) \ + printk_ratelimited(KERN_CRIT pr_fmt(fmt), ##__VA_ARGS__) +#define pr_err_ratelimited(fmt, ...) \ + printk_ratelimited(KERN_ERR pr_fmt(fmt), ##__VA_ARGS__) +#define pr_warning_ratelimited(fmt, ...) \ + printk_ratelimited(KERN_WARNING pr_fmt(fmt), ##__VA_ARGS__) +#define pr_notice_ratelimited(fmt, ...) \ + printk_ratelimited(KERN_NOTICE pr_fmt(fmt), ##__VA_ARGS__) +#define pr_info_ratelimited(fmt, ...) \ + printk_ratelimited(KERN_INFO pr_fmt(fmt), ##__VA_ARGS__) +/* no pr_cont_ratelimited, don't do that... */ +/* If you are writing a driver, please use dev_dbg instead */ +#if defined(DEBUG) +#define pr_debug_ratelimited(fmt, ...) \ + printk_ratelimited(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__) +#else +#define pr_debug_ratelimited(fmt, ...) 
\ + ({ if (0) printk_ratelimited(KERN_DEBUG pr_fmt(fmt), \ + ##__VA_ARGS__); 0; }) +#endif + +/* * General tracing related utility functions - trace_printk(), * tracing_on/tracing_off and tracing_start()/tracing_stop * diff --git a/include/linux/ksm.h b/include/linux/ksm.h index a485c14ecd5..bed5f16ba82 100644 --- a/include/linux/ksm.h +++ b/include/linux/ksm.h @@ -9,8 +9,12 @@ #include <linux/bitops.h> #include <linux/mm.h> +#include <linux/pagemap.h> +#include <linux/rmap.h> #include <linux/sched.h> -#include <linux/vmstat.h> + +struct stable_node; +struct mem_cgroup; #ifdef CONFIG_KSM int ksm_madvise(struct vm_area_struct *vma, unsigned long start, @@ -34,23 +38,60 @@ static inline void ksm_exit(struct mm_struct *mm) /* * A KSM page is one of those write-protected "shared pages" or "merged pages" * which KSM maps into multiple mms, wherever identical anonymous page content - * is found in VM_MERGEABLE vmas. It's a PageAnon page, with NULL anon_vma. + * is found in VM_MERGEABLE vmas. It's a PageAnon page, pointing not to any + * anon_vma, but to that page's node of the stable tree. */ static inline int PageKsm(struct page *page) { - return ((unsigned long)page->mapping == PAGE_MAPPING_ANON); + return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) == + (PAGE_MAPPING_ANON | PAGE_MAPPING_KSM); +} + +static inline struct stable_node *page_stable_node(struct page *page) +{ + return PageKsm(page) ? page_rmapping(page) : NULL; +} + +static inline void set_page_stable_node(struct page *page, + struct stable_node *stable_node) +{ + page->mapping = (void *)stable_node + + (PAGE_MAPPING_ANON | PAGE_MAPPING_KSM); } /* - * But we have to avoid the checking which page_add_anon_rmap() performs. + * When do_swap_page() first faults in from swap what used to be a KSM page, + * no problem, it will be assigned to this vma's anon_vma; but thereafter, + * it might be faulted into a different anon_vma (or perhaps to a different + * offset in the same anon_vma). do_swap_page() cannot do all the locking + * needed to reconstitute a cross-anon_vma KSM page: for now it has to make + * a copy, and leave remerging the pages to a later pass of ksmd. + * + * We'd like to make this conditional on vma->vm_flags & VM_MERGEABLE, + * but what if the vma was unmerged while the page was swapped out? 
*/ -static inline void page_add_ksm_rmap(struct page *page) +struct page *ksm_does_need_to_copy(struct page *page, + struct vm_area_struct *vma, unsigned long address); +static inline struct page *ksm_might_need_to_copy(struct page *page, + struct vm_area_struct *vma, unsigned long address) { - if (atomic_inc_and_test(&page->_mapcount)) { - page->mapping = (void *) PAGE_MAPPING_ANON; - __inc_zone_page_state(page, NR_ANON_PAGES); - } + struct anon_vma *anon_vma = page_anon_vma(page); + + if (!anon_vma || + (anon_vma == vma->anon_vma && + page->index == linear_page_index(vma, address))) + return page; + + return ksm_does_need_to_copy(page, vma, address); } + +int page_referenced_ksm(struct page *page, + struct mem_cgroup *memcg, unsigned long *vm_flags); +int try_to_unmap_ksm(struct page *page, enum ttu_flags flags); +int rmap_walk_ksm(struct page *page, int (*rmap_one)(struct page *, + struct vm_area_struct *, unsigned long, void *), void *arg); +void ksm_migrate_page(struct page *newpage, struct page *oldpage); + #else /* !CONFIG_KSM */ static inline int ksm_madvise(struct vm_area_struct *vma, unsigned long start, @@ -73,7 +114,32 @@ static inline int PageKsm(struct page *page) return 0; } -/* No stub required for page_add_ksm_rmap(page) */ +static inline struct page *ksm_might_need_to_copy(struct page *page, + struct vm_area_struct *vma, unsigned long address) +{ + return page; +} + +static inline int page_referenced_ksm(struct page *page, + struct mem_cgroup *memcg, unsigned long *vm_flags) +{ + return 0; +} + +static inline int try_to_unmap_ksm(struct page *page, enum ttu_flags flags) +{ + return 0; +} + +static inline int rmap_walk_ksm(struct page *page, int (*rmap_one)(struct page*, + struct vm_area_struct *, unsigned long, void *), void *arg) +{ + return 0; +} + +static inline void ksm_migrate_page(struct page *newpage, struct page *oldpage) +{ +} #endif /* !CONFIG_KSM */ -#endif +#endif /* __LINUX_KSM_H */ diff --git a/include/linux/lis3lv02d.h b/include/linux/lis3lv02d.h index 3cc2f2c53e4..f1ca0dcc162 100644 --- a/include/linux/lis3lv02d.h +++ b/include/linux/lis3lv02d.h @@ -43,6 +43,21 @@ struct lis3lv02d_platform_data { #define LIS3_WAKEUP_Z_HI (1 << 5) unsigned char wakeup_flags; unsigned char wakeup_thresh; +#define LIS3_NO_MAP 0 +#define LIS3_DEV_X 1 +#define LIS3_DEV_Y 2 +#define LIS3_DEV_Z 3 +#define LIS3_INV_DEV_X -1 +#define LIS3_INV_DEV_Y -2 +#define LIS3_INV_DEV_Z -3 + s8 axis_x; + s8 axis_y; + s8 axis_z; + int (*setup_resources)(void); + int (*release_resources)(void); + /* Limits for selftest are specified in chip data sheet */ + s16 st_min_limits[3]; /* min pass limit x, y, z */ + s16 st_max_limits[3]; /* max pass limit x, y, z */ }; #endif /* __LIS3LV02D_H_ */ diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h index fed969281a4..35b07b773e6 100644 --- a/include/linux/memory_hotplug.h +++ b/include/linux/memory_hotplug.h @@ -69,7 +69,6 @@ extern void online_page(struct page *page); /* VM interface that may be used by firmware interface */ extern int online_pages(unsigned long, unsigned long); extern void __offline_isolated_pages(unsigned long, unsigned long); -extern int offline_pages(unsigned long, unsigned long, unsigned long); /* reasonably generic interface to expand the physical pages in a zone */ extern int __add_pages(int nid, struct zone *zone, unsigned long start_pfn, diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h index 085c903fe0f..1cc966cd3e5 100644 --- a/include/linux/mempolicy.h +++ b/include/linux/mempolicy.h 
@@ -201,6 +201,7 @@ extern void mpol_fix_fork_child_flag(struct task_struct *p); extern struct zonelist *huge_zonelist(struct vm_area_struct *vma, unsigned long addr, gfp_t gfp_flags, struct mempolicy **mpol, nodemask_t **nodemask); +extern bool init_nodemask_of_mempolicy(nodemask_t *mask); extern unsigned slab_node(struct mempolicy *policy); extern enum zone_type policy_zone; @@ -328,6 +329,8 @@ static inline struct zonelist *huge_zonelist(struct vm_area_struct *vma, return node_zonelist(0, gfp_flags); } +static inline bool init_nodemask_of_mempolicy(nodemask_t *m) { return false; } + static inline int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from_nodes, const nodemask_t *to_nodes, int flags) diff --git a/include/linux/mfd/pcf50633/core.h b/include/linux/mfd/pcf50633/core.h index d9034cc87f1..3398bd9aab1 100644 --- a/include/linux/mfd/pcf50633/core.h +++ b/include/linux/mfd/pcf50633/core.h @@ -29,7 +29,12 @@ struct pcf50633_platform_data { char **batteries; int num_batteries; - int charging_restart_interval; + /* + * Should be set accordingly to the reference resistor used, see + * I_{ch(ref)} charger reference current in the pcf50633 User + * Manual. + */ + int charger_reference_current_ma; /* Callbacks */ void (*probe_done)(struct pcf50633 *); diff --git a/include/linux/mfd/pcf50633/mbc.h b/include/linux/mfd/pcf50633/mbc.h index 4119579acf2..df4f5fa88de 100644 --- a/include/linux/mfd/pcf50633/mbc.h +++ b/include/linux/mfd/pcf50633/mbc.h @@ -128,6 +128,7 @@ enum pcf50633_reg_mbcs3 { int pcf50633_mbc_usb_curlim_set(struct pcf50633 *pcf, int ma); int pcf50633_mbc_get_status(struct pcf50633 *); +int pcf50633_mbc_get_usb_online_status(struct pcf50633 *); #endif diff --git a/include/linux/migrate.h b/include/linux/migrate.h index 527602cdea1..7f085c97c79 100644 --- a/include/linux/migrate.h +++ b/include/linux/migrate.h @@ -12,7 +12,8 @@ typedef struct page *new_page_t(struct page *, unsigned long private, int **); extern int putback_lru_pages(struct list_head *l); extern int migrate_page(struct address_space *, struct page *, struct page *); -extern int migrate_pages(struct list_head *l, new_page_t x, unsigned long); +extern int migrate_pages(struct list_head *l, new_page_t x, + unsigned long private, int offlining); extern int fail_migrate_page(struct address_space *, struct page *, struct page *); @@ -26,10 +27,7 @@ extern int migrate_vmas(struct mm_struct *mm, static inline int putback_lru_pages(struct list_head *l) { return 0; } static inline int migrate_pages(struct list_head *l, new_page_t x, - unsigned long private) { return -ENOSYS; } - -static inline int migrate_pages_to(struct list_head *pagelist, - struct vm_area_struct *vma, int dest) { return 0; } + unsigned long private, int offlining) { return -ENOSYS; } static inline int migrate_prep(void) { return -ENOSYS; } diff --git a/include/linux/mm.h b/include/linux/mm.h index 24c395694f4..9d65ae4ba0e 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -620,13 +620,22 @@ void page_address_init(void); /* * On an anonymous page mapped into a user virtual memory area, * page->mapping points to its anon_vma, not to a struct address_space; - * with the PAGE_MAPPING_ANON bit set to distinguish it. + * with the PAGE_MAPPING_ANON bit set to distinguish it. See rmap.h. 
+ * + * On an anonymous page in a VM_MERGEABLE area, if CONFIG_KSM is enabled, + * the PAGE_MAPPING_KSM bit may be set along with the PAGE_MAPPING_ANON bit; + * and then page->mapping points, not to an anon_vma, but to a private + * structure which KSM associates with that merged page. See ksm.h. + * + * PAGE_MAPPING_KSM without PAGE_MAPPING_ANON is currently never used. * * Please note that, confusingly, "page_mapping" refers to the inode * address_space which maps the page from disk; whereas "page_mapped" * refers to user virtual address space into which the page is mapped. */ #define PAGE_MAPPING_ANON 1 +#define PAGE_MAPPING_KSM 2 +#define PAGE_MAPPING_FLAGS (PAGE_MAPPING_ANON | PAGE_MAPPING_KSM) extern struct address_space swapper_space; static inline struct address_space *page_mapping(struct page *page) @@ -634,16 +643,19 @@ static inline struct address_space *page_mapping(struct page *page) struct address_space *mapping = page->mapping; VM_BUG_ON(PageSlab(page)); -#ifdef CONFIG_SWAP if (unlikely(PageSwapCache(page))) mapping = &swapper_space; - else -#endif - if (unlikely((unsigned long)mapping & PAGE_MAPPING_ANON)) + else if (unlikely((unsigned long)mapping & PAGE_MAPPING_ANON)) mapping = NULL; return mapping; } +/* Neutral page->mapping pointer to address_space or anon_vma or other */ +static inline void *page_rmapping(struct page *page) +{ + return (void *)((unsigned long)page->mapping & ~PAGE_MAPPING_FLAGS); +} + static inline int PageAnon(struct page *page) { return ((unsigned long)page->mapping & PAGE_MAPPING_ANON) != 0; @@ -758,6 +770,7 @@ unsigned long unmap_vmas(struct mmu_gather **tlb, * @pmd_entry: if set, called for each non-empty PMD (3rd-level) entry * @pte_entry: if set, called for each non-empty PTE (4th-level) entry * @pte_hole: if set, called for each hole at all levels + * @hugetlb_entry: if set, called for each hugetlb entry * * (see walk_page_range for more details) */ @@ -767,6 +780,8 @@ struct mm_walk { int (*pmd_entry)(pmd_t *, unsigned long, unsigned long, struct mm_walk *); int (*pte_entry)(pte_t *, unsigned long, unsigned long, struct mm_walk *); int (*pte_hole)(unsigned long, unsigned long, struct mm_walk *); + int (*hugetlb_entry)(pte_t *, unsigned long, unsigned long, + struct mm_walk *); struct mm_struct *mm; void *private; }; diff --git a/include/linux/node.h b/include/linux/node.h index 681a697b9a8..06292dac3ea 100644 --- a/include/linux/node.h +++ b/include/linux/node.h @@ -21,13 +21,19 @@ #include <linux/sysdev.h> #include <linux/cpumask.h> +#include <linux/workqueue.h> struct node { struct sys_device sysdev; + +#if defined(CONFIG_MEMORY_HOTPLUG_SPARSE) && defined(CONFIG_HUGETLBFS) + struct work_struct node_work; +#endif }; struct memory_block; extern struct node node_devices[]; +typedef void (*node_registration_func_t)(struct node *); extern int register_node(struct node *, int, struct node *); extern void unregister_node(struct node *node); @@ -39,6 +45,11 @@ extern int unregister_cpu_under_node(unsigned int cpu, unsigned int nid); extern int register_mem_sect_under_node(struct memory_block *mem_blk, int nid); extern int unregister_mem_sect_under_nodes(struct memory_block *mem_blk); + +#ifdef CONFIG_HUGETLBFS +extern void register_hugetlbfs_with_node(node_registration_func_t doregister, + node_registration_func_t unregister); +#endif #else static inline int register_one_node(int nid) { @@ -65,6 +76,11 @@ static inline int unregister_mem_sect_under_nodes(struct memory_block *mem_blk) { return 0; } + +static inline void 
register_hugetlbfs_with_node(node_registration_func_t reg, + node_registration_func_t unreg) +{ +} #endif #define to_node(sys_device) container_of(sys_device, struct node, sysdev) diff --git a/include/linux/nodemask.h b/include/linux/nodemask.h index b359c4a9ec9..454997cccbd 100644 --- a/include/linux/nodemask.h +++ b/include/linux/nodemask.h @@ -245,14 +245,19 @@ static inline int __next_node(int n, const nodemask_t *srcp) return min_t(int,MAX_NUMNODES,find_next_bit(srcp->bits, MAX_NUMNODES, n+1)); } +static inline void init_nodemask_of_node(nodemask_t *mask, int node) +{ + nodes_clear(*mask); + node_set(node, *mask); +} + #define nodemask_of_node(node) \ ({ \ typeof(_unused_nodemask_arg_) m; \ if (sizeof(m) == sizeof(unsigned long)) { \ - m.bits[0] = 1UL<<(node); \ + m.bits[0] = 1UL << (node); \ } else { \ - nodes_clear(m); \ - node_set((node), m); \ + init_nodemask_of_node(&m, (node)); \ } \ m; \ }) @@ -480,15 +485,17 @@ static inline int num_node_state(enum node_states state) #define for_each_online_node(node) for_each_node_state(node, N_ONLINE) /* - * For nodemask scrach area.(See CPUMASK_ALLOC() in cpumask.h) + * For nodemask scrach area. + * NODEMASK_ALLOC(type, name) allocates an object with a specified type and + * name. */ - -#if NODES_SHIFT > 8 /* nodemask_t > 64 bytes */ -#define NODEMASK_ALLOC(x, m) struct x *m = kmalloc(sizeof(*m), GFP_KERNEL) -#define NODEMASK_FREE(m) kfree(m) +#if NODES_SHIFT > 8 /* nodemask_t > 256 bytes */ +#define NODEMASK_ALLOC(type, name, gfp_flags) \ + type *name = kmalloc(sizeof(*name), gfp_flags) +#define NODEMASK_FREE(m) kfree(m) #else -#define NODEMASK_ALLOC(x, m) struct x _m, *m = &_m -#define NODEMASK_FREE(m) +#define NODEMASK_ALLOC(type, name, gfp_flags) type _name, *name = &_name +#define NODEMASK_FREE(m) do {} while (0) #endif /* A example struture for using NODEMASK_ALLOC, used in mempolicy. 
*/ @@ -497,8 +504,10 @@ struct nodemask_scratch { nodemask_t mask2; }; -#define NODEMASK_SCRATCH(x) NODEMASK_ALLOC(nodemask_scratch, x) -#define NODEMASK_SCRATCH_FREE(x) NODEMASK_FREE(x) +#define NODEMASK_SCRATCH(x) \ + NODEMASK_ALLOC(struct nodemask_scratch, x, \ + GFP_KERNEL | __GFP_NORETRY) +#define NODEMASK_SCRATCH_FREE(x) NODEMASK_FREE(x) #endif /* __LINUX_NODEMASK_H */ diff --git a/include/linux/numa.h b/include/linux/numa.h index a31a7301b15..3aaa31603a8 100644 --- a/include/linux/numa.h +++ b/include/linux/numa.h @@ -10,4 +10,6 @@ #define MAX_NUMNODES (1 << NODES_SHIFT) +#define NUMA_NO_NODE (-1) + #endif /* _LINUX_NUMA_H */ diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h index 6b202b17395..49e907bd067 100644 --- a/include/linux/page-flags.h +++ b/include/linux/page-flags.h @@ -99,7 +99,7 @@ enum pageflags { PG_buddy, /* Page is free, on buddy lists */ PG_swapbacked, /* Page is backed by RAM/swap */ PG_unevictable, /* Page is "unevictable" */ -#ifdef CONFIG_HAVE_MLOCKED_PAGE_BIT +#ifdef CONFIG_MMU PG_mlocked, /* Page is vma mlocked */ #endif #ifdef CONFIG_ARCH_USES_PG_UNCACHED @@ -259,12 +259,10 @@ PAGEFLAG_FALSE(SwapCache) PAGEFLAG(Unevictable, unevictable) __CLEARPAGEFLAG(Unevictable, unevictable) TESTCLEARFLAG(Unevictable, unevictable) -#ifdef CONFIG_HAVE_MLOCKED_PAGE_BIT -#define MLOCK_PAGES 1 +#ifdef CONFIG_MMU PAGEFLAG(Mlocked, mlocked) __CLEARPAGEFLAG(Mlocked, mlocked) TESTSCFLAG(Mlocked, mlocked) __TESTCLEARFLAG(Mlocked, mlocked) #else -#define MLOCK_PAGES 0 PAGEFLAG_FALSE(Mlocked) SETPAGEFLAG_NOOP(Mlocked) TESTCLEARFLAG_FALSE(Mlocked) __TESTCLEARFLAG_FALSE(Mlocked) #endif @@ -393,7 +391,7 @@ static inline void __ClearPageTail(struct page *page) #endif /* !PAGEFLAGS_EXTENDED */ -#ifdef CONFIG_HAVE_MLOCKED_PAGE_BIT +#ifdef CONFIG_MMU #define __PG_MLOCKED (1 << PG_mlocked) #else #define __PG_MLOCKED 0 diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index 64a53f74c9a..da7bdc23f27 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -681,7 +681,7 @@ struct perf_event_context { * Protect the states of the events in the list, * nr_active, and the list: */ - spinlock_t lock; + raw_spinlock_t lock; /* * Protect the list of events. 
Locking either mutex or lock * is sufficient to ensure the list doesn't change; to change diff --git a/include/linux/plist.h b/include/linux/plist.h index 45926d77d6a..8227f717c70 100644 --- a/include/linux/plist.h +++ b/include/linux/plist.h @@ -81,7 +81,8 @@ struct plist_head { struct list_head prio_list; struct list_head node_list; #ifdef CONFIG_DEBUG_PI_LIST - spinlock_t *lock; + raw_spinlock_t *rawlock; + spinlock_t *spinlock; #endif }; @@ -91,9 +92,11 @@ struct plist_node { }; #ifdef CONFIG_DEBUG_PI_LIST -# define PLIST_HEAD_LOCK_INIT(_lock) .lock = _lock +# define PLIST_HEAD_LOCK_INIT(_lock) .spinlock = _lock +# define PLIST_HEAD_LOCK_INIT_RAW(_lock) .rawlock = _lock #else # define PLIST_HEAD_LOCK_INIT(_lock) +# define PLIST_HEAD_LOCK_INIT_RAW(_lock) #endif #define _PLIST_HEAD_INIT(head) \ @@ -107,11 +110,22 @@ struct plist_node { */ #define PLIST_HEAD_INIT(head, _lock) \ { \ - _PLIST_HEAD_INIT(head), \ + _PLIST_HEAD_INIT(head), \ PLIST_HEAD_LOCK_INIT(&(_lock)) \ } /** + * PLIST_HEAD_INIT_RAW - static struct plist_head initializer + * @head: struct plist_head variable name + * @_lock: lock to initialize for this list + */ +#define PLIST_HEAD_INIT_RAW(head, _lock) \ +{ \ + _PLIST_HEAD_INIT(head), \ + PLIST_HEAD_LOCK_INIT_RAW(&(_lock)) \ +} + +/** * PLIST_NODE_INIT - static struct plist_node initializer * @node: struct plist_node variable name * @__prio: initial node priority @@ -119,13 +133,13 @@ struct plist_node { #define PLIST_NODE_INIT(node, __prio) \ { \ .prio = (__prio), \ - .plist = { _PLIST_HEAD_INIT((node).plist) }, \ + .plist = { _PLIST_HEAD_INIT((node).plist) }, \ } /** * plist_head_init - dynamic struct plist_head initializer * @head: &struct plist_head pointer - * @lock: list spinlock, remembered for debugging + * @lock: spinlock protecting the list (debugging) */ static inline void plist_head_init(struct plist_head *head, spinlock_t *lock) @@ -133,7 +147,24 @@ plist_head_init(struct plist_head *head, spinlock_t *lock) INIT_LIST_HEAD(&head->prio_list); INIT_LIST_HEAD(&head->node_list); #ifdef CONFIG_DEBUG_PI_LIST - head->lock = lock; + head->spinlock = lock; + head->rawlock = NULL; +#endif +} + +/** + * plist_head_init_raw - dynamic struct plist_head initializer + * @head: &struct plist_head pointer + * @lock: raw_spinlock protecting the list (debugging) + */ +static inline void +plist_head_init_raw(struct plist_head *head, raw_spinlock_t *lock) +{ + INIT_LIST_HEAD(&head->prio_list); + INIT_LIST_HEAD(&head->node_list); +#ifdef CONFIG_DEBUG_PI_LIST + head->rawlock = lock; + head->spinlock = NULL; #endif } diff --git a/include/linux/pm.h b/include/linux/pm.h index 0d65934246a..198b8f9fe05 100644 --- a/include/linux/pm.h +++ b/include/linux/pm.h @@ -219,7 +219,7 @@ struct dev_pm_ops { * to RAM and hibernation. */ #define SIMPLE_DEV_PM_OPS(name, suspend_fn, resume_fn) \ -struct dev_pm_ops name = { \ +const struct dev_pm_ops name = { \ .suspend = suspend_fn, \ .resume = resume_fn, \ .freeze = suspend_fn, \ diff --git a/include/linux/rmap.h b/include/linux/rmap.h index cb0ba703260..b019ae64e2a 100644 --- a/include/linux/rmap.h +++ b/include/linux/rmap.h @@ -26,6 +26,9 @@ */ struct anon_vma { spinlock_t lock; /* Serialize access to vma list */ +#ifdef CONFIG_KSM + atomic_t ksm_refcount; +#endif /* * NOTE: the LSB of the head.next is set by * mm_take_all_locks() _after_ taking the above lock. 
So the @@ -38,6 +41,34 @@ struct anon_vma { }; #ifdef CONFIG_MMU +#ifdef CONFIG_KSM +static inline void ksm_refcount_init(struct anon_vma *anon_vma) +{ + atomic_set(&anon_vma->ksm_refcount, 0); +} + +static inline int ksm_refcount(struct anon_vma *anon_vma) +{ + return atomic_read(&anon_vma->ksm_refcount); +} +#else +static inline void ksm_refcount_init(struct anon_vma *anon_vma) +{ +} + +static inline int ksm_refcount(struct anon_vma *anon_vma) +{ + return 0; +} +#endif /* CONFIG_KSM */ + +static inline struct anon_vma *page_anon_vma(struct page *page) +{ + if (((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) != + PAGE_MAPPING_ANON) + return NULL; + return page_rmapping(page); +} static inline void anon_vma_lock(struct vm_area_struct *vma) { @@ -62,6 +93,7 @@ void __anon_vma_merge(struct vm_area_struct *, struct vm_area_struct *); void anon_vma_unlink(struct vm_area_struct *); void anon_vma_link(struct vm_area_struct *); void __anon_vma_link(struct vm_area_struct *); +void anon_vma_free(struct anon_vma *); /* * rmap interfaces called when adding or removing pte of page @@ -81,6 +113,9 @@ static inline void page_dup_rmap(struct page *page) */ int page_referenced(struct page *, int is_locked, struct mem_cgroup *cnt, unsigned long *vm_flags); +int page_referenced_one(struct page *, struct vm_area_struct *, + unsigned long address, unsigned int *mapcount, unsigned long *vm_flags); + enum ttu_flags { TTU_UNMAP = 0, /* unmap mode */ TTU_MIGRATION = 1, /* migration mode */ @@ -94,6 +129,8 @@ enum ttu_flags { #define TTU_ACTION(x) ((x) & TTU_ACTION_MASK) int try_to_unmap(struct page *, enum ttu_flags flags); +int try_to_unmap_one(struct page *, struct vm_area_struct *, + unsigned long address, enum ttu_flags flags); /* * Called from mm/filemap_xip.c to unmap empty zero page @@ -127,6 +164,12 @@ struct anon_vma *page_lock_anon_vma(struct page *page); void page_unlock_anon_vma(struct anon_vma *anon_vma); int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma); +/* + * Called by migrate.c to remove migration ptes, but might be used more later. 
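The rmap_walk() declaration that follows exposes the reverse-map walk as a generic iterator: it invokes the rmap_one() callback for each VMA that maps the page, passing through an opaque arg. Below is a minimal sketch of a conforming callback; the counting structure, both function names, and the assumption that returning SWAP_AGAIN keeps the walk going are illustrative, not taken from this patch:

#include <linux/mm.h>
#include <linux/rmap.h>

struct mapping_count {			/* illustrative only */
	int nr;
};

/* Matches the rmap_one callback signature expected by rmap_walk() */
static int count_one_mapping(struct page *page, struct vm_area_struct *vma,
			     unsigned long address, void *arg)
{
	struct mapping_count *mc = arg;

	mc->nr++;
	return SWAP_AGAIN;	/* assumed "keep walking" convention */
}

static int count_mappings(struct page *page)
{
	struct mapping_count mc = { .nr = 0 };

	/* caller is assumed to hold the page lock */
	rmap_walk(page, count_one_mapping, &mc);
	return mc.nr;
}
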
+ */ +int rmap_walk(struct page *page, int (*rmap_one)(struct page *, + struct vm_area_struct *, unsigned long, void *), void *arg); + #else /* !CONFIG_MMU */ #define anon_vma_init() do {} while (0) diff --git a/include/linux/rtmutex.h b/include/linux/rtmutex.h index f19b00b7d53..281d8fd775e 100644 --- a/include/linux/rtmutex.h +++ b/include/linux/rtmutex.h @@ -24,7 +24,7 @@ * @owner: the mutex owner */ struct rt_mutex { - spinlock_t wait_lock; + raw_spinlock_t wait_lock; struct plist_head wait_list; struct task_struct *owner; #ifdef CONFIG_DEBUG_RT_MUTEXES @@ -63,8 +63,8 @@ struct hrtimer_sleeper; #endif #define __RT_MUTEX_INITIALIZER(mutexname) \ - { .wait_lock = __SPIN_LOCK_UNLOCKED(mutexname.wait_lock) \ - , .wait_list = PLIST_HEAD_INIT(mutexname.wait_list, mutexname.wait_lock) \ + { .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(mutexname.wait_lock) \ + , .wait_list = PLIST_HEAD_INIT_RAW(mutexname.wait_list, mutexname.wait_lock) \ , .owner = NULL \ __DEBUG_RT_MUTEX_INITIALIZER(mutexname)} diff --git a/include/linux/rwlock.h b/include/linux/rwlock.h new file mode 100644 index 00000000000..71e0b00b6f2 --- /dev/null +++ b/include/linux/rwlock.h @@ -0,0 +1,125 @@ +#ifndef __LINUX_RWLOCK_H +#define __LINUX_RWLOCK_H + +#ifndef __LINUX_SPINLOCK_H +# error "please don't include this file directly" +#endif + +/* + * rwlock related methods + * + * split out from spinlock.h + * + * portions Copyright 2005, Red Hat, Inc., Ingo Molnar + * Released under the General Public License (GPL). + */ + +#ifdef CONFIG_DEBUG_SPINLOCK + extern void __rwlock_init(rwlock_t *lock, const char *name, + struct lock_class_key *key); +# define rwlock_init(lock) \ +do { \ + static struct lock_class_key __key; \ + \ + __rwlock_init((lock), #lock, &__key); \ +} while (0) +#else +# define rwlock_init(lock) \ + do { *(lock) = __RW_LOCK_UNLOCKED(lock); } while (0) +#endif + +#ifdef CONFIG_DEBUG_SPINLOCK + extern void do_raw_read_lock(rwlock_t *lock); +#define do_raw_read_lock_flags(lock, flags) do_raw_read_lock(lock) + extern int do_raw_read_trylock(rwlock_t *lock); + extern void do_raw_read_unlock(rwlock_t *lock); + extern void do_raw_write_lock(rwlock_t *lock); +#define do_raw_write_lock_flags(lock, flags) do_raw_write_lock(lock) + extern int do_raw_write_trylock(rwlock_t *lock); + extern void do_raw_write_unlock(rwlock_t *lock); +#else +# define do_raw_read_lock(rwlock) arch_read_lock(&(rwlock)->raw_lock) +# define do_raw_read_lock_flags(lock, flags) \ + arch_read_lock_flags(&(lock)->raw_lock, *(flags)) +# define do_raw_read_trylock(rwlock) arch_read_trylock(&(rwlock)->raw_lock) +# define do_raw_read_unlock(rwlock) arch_read_unlock(&(rwlock)->raw_lock) +# define do_raw_write_lock(rwlock) arch_write_lock(&(rwlock)->raw_lock) +# define do_raw_write_lock_flags(lock, flags) \ + arch_write_lock_flags(&(lock)->raw_lock, *(flags)) +# define do_raw_write_trylock(rwlock) arch_write_trylock(&(rwlock)->raw_lock) +# define do_raw_write_unlock(rwlock) arch_write_unlock(&(rwlock)->raw_lock) +#endif + +#define read_can_lock(rwlock) arch_read_can_lock(&(rwlock)->raw_lock) +#define write_can_lock(rwlock) arch_write_can_lock(&(rwlock)->raw_lock) + +/* + * Define the various rw_lock methods. Note we define these + * regardless of whether CONFIG_SMP or CONFIG_PREEMPT are set. The various + * methods are defined as nops in the case they are not required. 
+ */ +#define read_trylock(lock) __cond_lock(lock, _raw_read_trylock(lock)) +#define write_trylock(lock) __cond_lock(lock, _raw_write_trylock(lock)) + +#define write_lock(lock) _raw_write_lock(lock) +#define read_lock(lock) _raw_read_lock(lock) + +#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) + +#define read_lock_irqsave(lock, flags) \ + do { \ + typecheck(unsigned long, flags); \ + flags = _raw_read_lock_irqsave(lock); \ + } while (0) +#define write_lock_irqsave(lock, flags) \ + do { \ + typecheck(unsigned long, flags); \ + flags = _raw_write_lock_irqsave(lock); \ + } while (0) + +#else + +#define read_lock_irqsave(lock, flags) \ + do { \ + typecheck(unsigned long, flags); \ + _raw_read_lock_irqsave(lock, flags); \ + } while (0) +#define write_lock_irqsave(lock, flags) \ + do { \ + typecheck(unsigned long, flags); \ + _raw_write_lock_irqsave(lock, flags); \ + } while (0) + +#endif + +#define read_lock_irq(lock) _raw_read_lock_irq(lock) +#define read_lock_bh(lock) _raw_read_lock_bh(lock) +#define write_lock_irq(lock) _raw_write_lock_irq(lock) +#define write_lock_bh(lock) _raw_write_lock_bh(lock) +#define read_unlock(lock) _raw_read_unlock(lock) +#define write_unlock(lock) _raw_write_unlock(lock) +#define read_unlock_irq(lock) _raw_read_unlock_irq(lock) +#define write_unlock_irq(lock) _raw_write_unlock_irq(lock) + +#define read_unlock_irqrestore(lock, flags) \ + do { \ + typecheck(unsigned long, flags); \ + _raw_read_unlock_irqrestore(lock, flags); \ + } while (0) +#define read_unlock_bh(lock) _raw_read_unlock_bh(lock) + +#define write_unlock_irqrestore(lock, flags) \ + do { \ + typecheck(unsigned long, flags); \ + _raw_write_unlock_irqrestore(lock, flags); \ + } while (0) +#define write_unlock_bh(lock) _raw_write_unlock_bh(lock) + +#define write_trylock_irqsave(lock, flags) \ +({ \ + local_irq_save(flags); \ + write_trylock(lock) ? \ + 1 : ({ local_irq_restore(flags); 0; }); \ +}) + +#endif /* __LINUX_RWLOCK_H */ diff --git a/include/linux/rwlock_api_smp.h b/include/linux/rwlock_api_smp.h new file mode 100644 index 00000000000..9c9f0495d37 --- /dev/null +++ b/include/linux/rwlock_api_smp.h @@ -0,0 +1,282 @@ +#ifndef __LINUX_RWLOCK_API_SMP_H +#define __LINUX_RWLOCK_API_SMP_H + +#ifndef __LINUX_SPINLOCK_API_SMP_H +# error "please don't include this file directly" +#endif + +/* + * include/linux/rwlock_api_smp.h + * + * spinlock API declarations on SMP (and debug) + * (implemented in kernel/spinlock.c) + * + * portions Copyright 2005, Red Hat, Inc., Ingo Molnar + * Released under the General Public License (GPL). 
+ */ + +void __lockfunc _raw_read_lock(rwlock_t *lock) __acquires(lock); +void __lockfunc _raw_write_lock(rwlock_t *lock) __acquires(lock); +void __lockfunc _raw_read_lock_bh(rwlock_t *lock) __acquires(lock); +void __lockfunc _raw_write_lock_bh(rwlock_t *lock) __acquires(lock); +void __lockfunc _raw_read_lock_irq(rwlock_t *lock) __acquires(lock); +void __lockfunc _raw_write_lock_irq(rwlock_t *lock) __acquires(lock); +unsigned long __lockfunc _raw_read_lock_irqsave(rwlock_t *lock) + __acquires(lock); +unsigned long __lockfunc _raw_write_lock_irqsave(rwlock_t *lock) + __acquires(lock); +int __lockfunc _raw_read_trylock(rwlock_t *lock); +int __lockfunc _raw_write_trylock(rwlock_t *lock); +void __lockfunc _raw_read_unlock(rwlock_t *lock) __releases(lock); +void __lockfunc _raw_write_unlock(rwlock_t *lock) __releases(lock); +void __lockfunc _raw_read_unlock_bh(rwlock_t *lock) __releases(lock); +void __lockfunc _raw_write_unlock_bh(rwlock_t *lock) __releases(lock); +void __lockfunc _raw_read_unlock_irq(rwlock_t *lock) __releases(lock); +void __lockfunc _raw_write_unlock_irq(rwlock_t *lock) __releases(lock); +void __lockfunc +_raw_read_unlock_irqrestore(rwlock_t *lock, unsigned long flags) + __releases(lock); +void __lockfunc +_raw_write_unlock_irqrestore(rwlock_t *lock, unsigned long flags) + __releases(lock); + +#ifdef CONFIG_INLINE_READ_LOCK +#define _raw_read_lock(lock) __raw_read_lock(lock) +#endif + +#ifdef CONFIG_INLINE_WRITE_LOCK +#define _raw_write_lock(lock) __raw_write_lock(lock) +#endif + +#ifdef CONFIG_INLINE_READ_LOCK_BH +#define _raw_read_lock_bh(lock) __raw_read_lock_bh(lock) +#endif + +#ifdef CONFIG_INLINE_WRITE_LOCK_BH +#define _raw_write_lock_bh(lock) __raw_write_lock_bh(lock) +#endif + +#ifdef CONFIG_INLINE_READ_LOCK_IRQ +#define _raw_read_lock_irq(lock) __raw_read_lock_irq(lock) +#endif + +#ifdef CONFIG_INLINE_WRITE_LOCK_IRQ +#define _raw_write_lock_irq(lock) __raw_write_lock_irq(lock) +#endif + +#ifdef CONFIG_INLINE_READ_LOCK_IRQSAVE +#define _raw_read_lock_irqsave(lock) __raw_read_lock_irqsave(lock) +#endif + +#ifdef CONFIG_INLINE_WRITE_LOCK_IRQSAVE +#define _raw_write_lock_irqsave(lock) __raw_write_lock_irqsave(lock) +#endif + +#ifdef CONFIG_INLINE_READ_TRYLOCK +#define _raw_read_trylock(lock) __raw_read_trylock(lock) +#endif + +#ifdef CONFIG_INLINE_WRITE_TRYLOCK +#define _raw_write_trylock(lock) __raw_write_trylock(lock) +#endif + +#ifdef CONFIG_INLINE_READ_UNLOCK +#define _raw_read_unlock(lock) __raw_read_unlock(lock) +#endif + +#ifdef CONFIG_INLINE_WRITE_UNLOCK +#define _raw_write_unlock(lock) __raw_write_unlock(lock) +#endif + +#ifdef CONFIG_INLINE_READ_UNLOCK_BH +#define _raw_read_unlock_bh(lock) __raw_read_unlock_bh(lock) +#endif + +#ifdef CONFIG_INLINE_WRITE_UNLOCK_BH +#define _raw_write_unlock_bh(lock) __raw_write_unlock_bh(lock) +#endif + +#ifdef CONFIG_INLINE_READ_UNLOCK_IRQ +#define _raw_read_unlock_irq(lock) __raw_read_unlock_irq(lock) +#endif + +#ifdef CONFIG_INLINE_WRITE_UNLOCK_IRQ +#define _raw_write_unlock_irq(lock) __raw_write_unlock_irq(lock) +#endif + +#ifdef CONFIG_INLINE_READ_UNLOCK_IRQRESTORE +#define _raw_read_unlock_irqrestore(lock, flags) \ + __raw_read_unlock_irqrestore(lock, flags) +#endif + +#ifdef CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE +#define _raw_write_unlock_irqrestore(lock, flags) \ + __raw_write_unlock_irqrestore(lock, flags) +#endif + +static inline int __raw_read_trylock(rwlock_t *lock) +{ + preempt_disable(); + if (do_raw_read_trylock(lock)) { + rwlock_acquire_read(&lock->dep_map, 0, 1, _RET_IP_); + return 1; + } + preempt_enable(); + 
return 0; +} + +static inline int __raw_write_trylock(rwlock_t *lock) +{ + preempt_disable(); + if (do_raw_write_trylock(lock)) { + rwlock_acquire(&lock->dep_map, 0, 1, _RET_IP_); + return 1; + } + preempt_enable(); + return 0; +} + +/* + * If lockdep is enabled then we use the non-preemption spin-ops + * even on CONFIG_PREEMPT, because lockdep assumes that interrupts are + * not re-enabled during lock-acquire (which the preempt-spin-ops do): + */ +#if !defined(CONFIG_GENERIC_LOCKBREAK) || defined(CONFIG_DEBUG_LOCK_ALLOC) + +static inline void __raw_read_lock(rwlock_t *lock) +{ + preempt_disable(); + rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_); + LOCK_CONTENDED(lock, do_raw_read_trylock, do_raw_read_lock); +} + +static inline unsigned long __raw_read_lock_irqsave(rwlock_t *lock) +{ + unsigned long flags; + + local_irq_save(flags); + preempt_disable(); + rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_); + LOCK_CONTENDED_FLAGS(lock, do_raw_read_trylock, do_raw_read_lock, + do_raw_read_lock_flags, &flags); + return flags; +} + +static inline void __raw_read_lock_irq(rwlock_t *lock) +{ + local_irq_disable(); + preempt_disable(); + rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_); + LOCK_CONTENDED(lock, do_raw_read_trylock, do_raw_read_lock); +} + +static inline void __raw_read_lock_bh(rwlock_t *lock) +{ + local_bh_disable(); + preempt_disable(); + rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_); + LOCK_CONTENDED(lock, do_raw_read_trylock, do_raw_read_lock); +} + +static inline unsigned long __raw_write_lock_irqsave(rwlock_t *lock) +{ + unsigned long flags; + + local_irq_save(flags); + preempt_disable(); + rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_); + LOCK_CONTENDED_FLAGS(lock, do_raw_write_trylock, do_raw_write_lock, + do_raw_write_lock_flags, &flags); + return flags; +} + +static inline void __raw_write_lock_irq(rwlock_t *lock) +{ + local_irq_disable(); + preempt_disable(); + rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_); + LOCK_CONTENDED(lock, do_raw_write_trylock, do_raw_write_lock); +} + +static inline void __raw_write_lock_bh(rwlock_t *lock) +{ + local_bh_disable(); + preempt_disable(); + rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_); + LOCK_CONTENDED(lock, do_raw_write_trylock, do_raw_write_lock); +} + +static inline void __raw_write_lock(rwlock_t *lock) +{ + preempt_disable(); + rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_); + LOCK_CONTENDED(lock, do_raw_write_trylock, do_raw_write_lock); +} + +#endif /* CONFIG_PREEMPT */ + +static inline void __raw_write_unlock(rwlock_t *lock) +{ + rwlock_release(&lock->dep_map, 1, _RET_IP_); + do_raw_write_unlock(lock); + preempt_enable(); +} + +static inline void __raw_read_unlock(rwlock_t *lock) +{ + rwlock_release(&lock->dep_map, 1, _RET_IP_); + do_raw_read_unlock(lock); + preempt_enable(); +} + +static inline void +__raw_read_unlock_irqrestore(rwlock_t *lock, unsigned long flags) +{ + rwlock_release(&lock->dep_map, 1, _RET_IP_); + do_raw_read_unlock(lock); + local_irq_restore(flags); + preempt_enable(); +} + +static inline void __raw_read_unlock_irq(rwlock_t *lock) +{ + rwlock_release(&lock->dep_map, 1, _RET_IP_); + do_raw_read_unlock(lock); + local_irq_enable(); + preempt_enable(); +} + +static inline void __raw_read_unlock_bh(rwlock_t *lock) +{ + rwlock_release(&lock->dep_map, 1, _RET_IP_); + do_raw_read_unlock(lock); + preempt_enable_no_resched(); + local_bh_enable_ip((unsigned long)__builtin_return_address(0)); +} + +static inline void __raw_write_unlock_irqrestore(rwlock_t *lock, + unsigned long flags) +{ + 
rwlock_release(&lock->dep_map, 1, _RET_IP_); + do_raw_write_unlock(lock); + local_irq_restore(flags); + preempt_enable(); +} + +static inline void __raw_write_unlock_irq(rwlock_t *lock) +{ + rwlock_release(&lock->dep_map, 1, _RET_IP_); + do_raw_write_unlock(lock); + local_irq_enable(); + preempt_enable(); +} + +static inline void __raw_write_unlock_bh(rwlock_t *lock) +{ + rwlock_release(&lock->dep_map, 1, _RET_IP_); + do_raw_write_unlock(lock); + preempt_enable_no_resched(); + local_bh_enable_ip((unsigned long)__builtin_return_address(0)); +} + +#endif /* __LINUX_RWLOCK_API_SMP_H */ diff --git a/include/linux/rwlock_types.h b/include/linux/rwlock_types.h new file mode 100644 index 00000000000..bd31808c7d8 --- /dev/null +++ b/include/linux/rwlock_types.h @@ -0,0 +1,56 @@ +#ifndef __LINUX_RWLOCK_TYPES_H +#define __LINUX_RWLOCK_TYPES_H + +/* + * include/linux/rwlock_types.h - generic rwlock type definitions + * and initializers + * + * portions Copyright 2005, Red Hat, Inc., Ingo Molnar + * Released under the General Public License (GPL). + */ +typedef struct { + arch_rwlock_t raw_lock; +#ifdef CONFIG_GENERIC_LOCKBREAK + unsigned int break_lock; +#endif +#ifdef CONFIG_DEBUG_SPINLOCK + unsigned int magic, owner_cpu; + void *owner; +#endif +#ifdef CONFIG_DEBUG_LOCK_ALLOC + struct lockdep_map dep_map; +#endif +} rwlock_t; + +#define RWLOCK_MAGIC 0xdeaf1eed + +#ifdef CONFIG_DEBUG_LOCK_ALLOC +# define RW_DEP_MAP_INIT(lockname) .dep_map = { .name = #lockname } +#else +# define RW_DEP_MAP_INIT(lockname) +#endif + +#ifdef CONFIG_DEBUG_SPINLOCK +#define __RW_LOCK_UNLOCKED(lockname) \ + (rwlock_t) { .raw_lock = __ARCH_RW_LOCK_UNLOCKED, \ + .magic = RWLOCK_MAGIC, \ + .owner = SPINLOCK_OWNER_INIT, \ + .owner_cpu = -1, \ + RW_DEP_MAP_INIT(lockname) } +#else +#define __RW_LOCK_UNLOCKED(lockname) \ + (rwlock_t) { .raw_lock = __ARCH_RW_LOCK_UNLOCKED, \ + RW_DEP_MAP_INIT(lockname) } +#endif + +/* + * RW_LOCK_UNLOCKED defeat lockdep state tracking and is hence + * deprecated. + * + * Please use DEFINE_RWLOCK() or __RW_LOCK_UNLOCKED() as appropriate. 
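As the deprecation note above says, RW_LOCK_UNLOCKED defeats lockdep's per-lock state tracking, so new rwlock users should declare their lock with DEFINE_RWLOCK() (or use __RW_LOCK_UNLOCKED() inside an initializer). A minimal sketch using only the accessors defined in the new rwlock.h; the lock, list, and function names are illustrative:

#include <linux/spinlock.h>	/* pulls in rwlock.h and rwlock_types.h */
#include <linux/list.h>

static DEFINE_RWLOCK(example_lock);	/* lockdep sees a distinct lock class */
static LIST_HEAD(example_list);

static void example_reader(void)
{
	read_lock(&example_lock);
	/* walk example_list; concurrent readers are allowed */
	read_unlock(&example_lock);
}

static void example_writer(struct list_head *entry)
{
	write_lock(&example_lock);
	list_add(entry, &example_list);	/* exclusive against readers and writers */
	write_unlock(&example_lock);
}
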
+ */ +#define RW_LOCK_UNLOCKED __RW_LOCK_UNLOCKED(old_style_rw_init) + +#define DEFINE_RWLOCK(x) rwlock_t x = __RW_LOCK_UNLOCKED(x) + +#endif /* __LINUX_RWLOCK_TYPES_H */ diff --git a/include/linux/rwsem-spinlock.h b/include/linux/rwsem-spinlock.h index 6c3c0f6c261..bdfcc252797 100644 --- a/include/linux/rwsem-spinlock.h +++ b/include/linux/rwsem-spinlock.h @@ -68,11 +68,7 @@ extern int __down_write_trylock(struct rw_semaphore *sem); extern void __up_read(struct rw_semaphore *sem); extern void __up_write(struct rw_semaphore *sem); extern void __downgrade_write(struct rw_semaphore *sem); - -static inline int rwsem_is_locked(struct rw_semaphore *sem) -{ - return (sem->activity != 0); -} +extern int rwsem_is_locked(struct rw_semaphore *sem); #endif /* __KERNEL__ */ #endif /* _LINUX_RWSEM_SPINLOCK_H */ diff --git a/include/linux/sched.h b/include/linux/sched.h index 294eb2f8014..5c858f38e81 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -1409,7 +1409,7 @@ struct task_struct { #endif /* Protection of the PI data structures: */ - spinlock_t pi_lock; + raw_spinlock_t pi_lock; #ifdef CONFIG_RT_MUTEXES /* PI waiters blocked on a rt_mutex held by this task */ @@ -1446,8 +1446,10 @@ struct task_struct { gfp_t lockdep_reclaim_gfp; #endif +#ifdef CONFIG_FS_JOURNAL_INFO /* journalling filesystem info */ void *journal_info; +#endif /* stacked block device info */ struct bio *bio_list, **bio_tail; diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h index 71dccfeb0d8..86088213334 100644 --- a/include/linux/spinlock.h +++ b/include/linux/spinlock.h @@ -8,13 +8,13 @@ * * on SMP builds: * - * asm/spinlock_types.h: contains the raw_spinlock_t/raw_rwlock_t and the + * asm/spinlock_types.h: contains the arch_spinlock_t/arch_rwlock_t and the * initializers * * linux/spinlock_types.h: * defines the generic type and initializers * - * asm/spinlock.h: contains the __raw_spin_*()/etc. lowlevel + * asm/spinlock.h: contains the arch_spin_*()/etc. lowlevel * implementations, mostly inline assembly code * * (also included on UP-debug builds:) @@ -34,7 +34,7 @@ * defines the generic type and initializers * * linux/spinlock_up.h: - * contains the __raw_spin_*()/etc. version of UP + * contains the arch_spin_*()/etc. version of UP * builds. 
(which are NOPs on non-debug, non-preempt * builds) * @@ -75,12 +75,12 @@ #define __lockfunc __attribute__((section(".spinlock.text"))) /* - * Pull the raw_spinlock_t and raw_rwlock_t definitions: + * Pull the arch_spinlock_t and arch_rwlock_t definitions: */ #include <linux/spinlock_types.h> /* - * Pull the __raw*() functions/declarations (UP-nondebug doesnt need them): + * Pull the arch_spin*() functions/declarations (UP-nondebug doesnt need them): */ #ifdef CONFIG_SMP # include <asm/spinlock.h> @@ -89,45 +89,31 @@ #endif #ifdef CONFIG_DEBUG_SPINLOCK - extern void __spin_lock_init(spinlock_t *lock, const char *name, - struct lock_class_key *key); -# define spin_lock_init(lock) \ + extern void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name, + struct lock_class_key *key); +# define raw_spin_lock_init(lock) \ do { \ static struct lock_class_key __key; \ \ - __spin_lock_init((lock), #lock, &__key); \ + __raw_spin_lock_init((lock), #lock, &__key); \ } while (0) #else -# define spin_lock_init(lock) \ - do { *(lock) = __SPIN_LOCK_UNLOCKED(lock); } while (0) +# define raw_spin_lock_init(lock) \ + do { *(lock) = __RAW_SPIN_LOCK_UNLOCKED(lock); } while (0) #endif -#ifdef CONFIG_DEBUG_SPINLOCK - extern void __rwlock_init(rwlock_t *lock, const char *name, - struct lock_class_key *key); -# define rwlock_init(lock) \ -do { \ - static struct lock_class_key __key; \ - \ - __rwlock_init((lock), #lock, &__key); \ -} while (0) -#else -# define rwlock_init(lock) \ - do { *(lock) = __RW_LOCK_UNLOCKED(lock); } while (0) -#endif - -#define spin_is_locked(lock) __raw_spin_is_locked(&(lock)->raw_lock) +#define raw_spin_is_locked(lock) arch_spin_is_locked(&(lock)->raw_lock) #ifdef CONFIG_GENERIC_LOCKBREAK -#define spin_is_contended(lock) ((lock)->break_lock) +#define raw_spin_is_contended(lock) ((lock)->break_lock) #else -#ifdef __raw_spin_is_contended -#define spin_is_contended(lock) __raw_spin_is_contended(&(lock)->raw_lock) +#ifdef arch_spin_is_contended +#define raw_spin_is_contended(lock) arch_spin_is_contended(&(lock)->raw_lock) #else -#define spin_is_contended(lock) (((void)(lock), 0)) -#endif /*__raw_spin_is_contended*/ +#define raw_spin_is_contended(lock) (((void)(lock), 0)) +#endif /*arch_spin_is_contended*/ #endif /* The lock does not imply full memory barrier. */ @@ -136,182 +122,260 @@ static inline void smp_mb__after_lock(void) { smp_mb(); } #endif /** - * spin_unlock_wait - wait until the spinlock gets unlocked + * raw_spin_unlock_wait - wait until the spinlock gets unlocked * @lock: the spinlock in question. 
*/ -#define spin_unlock_wait(lock) __raw_spin_unlock_wait(&(lock)->raw_lock) +#define raw_spin_unlock_wait(lock) arch_spin_unlock_wait(&(lock)->raw_lock) #ifdef CONFIG_DEBUG_SPINLOCK - extern void _raw_spin_lock(spinlock_t *lock); -#define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock) - extern int _raw_spin_trylock(spinlock_t *lock); - extern void _raw_spin_unlock(spinlock_t *lock); - extern void _raw_read_lock(rwlock_t *lock); -#define _raw_read_lock_flags(lock, flags) _raw_read_lock(lock) - extern int _raw_read_trylock(rwlock_t *lock); - extern void _raw_read_unlock(rwlock_t *lock); - extern void _raw_write_lock(rwlock_t *lock); -#define _raw_write_lock_flags(lock, flags) _raw_write_lock(lock) - extern int _raw_write_trylock(rwlock_t *lock); - extern void _raw_write_unlock(rwlock_t *lock); + extern void do_raw_spin_lock(raw_spinlock_t *lock); +#define do_raw_spin_lock_flags(lock, flags) do_raw_spin_lock(lock) + extern int do_raw_spin_trylock(raw_spinlock_t *lock); + extern void do_raw_spin_unlock(raw_spinlock_t *lock); #else -# define _raw_spin_lock(lock) __raw_spin_lock(&(lock)->raw_lock) -# define _raw_spin_lock_flags(lock, flags) \ - __raw_spin_lock_flags(&(lock)->raw_lock, *(flags)) -# define _raw_spin_trylock(lock) __raw_spin_trylock(&(lock)->raw_lock) -# define _raw_spin_unlock(lock) __raw_spin_unlock(&(lock)->raw_lock) -# define _raw_read_lock(rwlock) __raw_read_lock(&(rwlock)->raw_lock) -# define _raw_read_lock_flags(lock, flags) \ - __raw_read_lock_flags(&(lock)->raw_lock, *(flags)) -# define _raw_read_trylock(rwlock) __raw_read_trylock(&(rwlock)->raw_lock) -# define _raw_read_unlock(rwlock) __raw_read_unlock(&(rwlock)->raw_lock) -# define _raw_write_lock(rwlock) __raw_write_lock(&(rwlock)->raw_lock) -# define _raw_write_lock_flags(lock, flags) \ - __raw_write_lock_flags(&(lock)->raw_lock, *(flags)) -# define _raw_write_trylock(rwlock) __raw_write_trylock(&(rwlock)->raw_lock) -# define _raw_write_unlock(rwlock) __raw_write_unlock(&(rwlock)->raw_lock) +static inline void do_raw_spin_lock(raw_spinlock_t *lock) +{ + arch_spin_lock(&lock->raw_lock); +} + +static inline void +do_raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long *flags) +{ + arch_spin_lock_flags(&lock->raw_lock, *flags); +} + +static inline int do_raw_spin_trylock(raw_spinlock_t *lock) +{ + return arch_spin_trylock(&(lock)->raw_lock); +} + +static inline void do_raw_spin_unlock(raw_spinlock_t *lock) +{ + arch_spin_unlock(&lock->raw_lock); +} #endif -#define read_can_lock(rwlock) __raw_read_can_lock(&(rwlock)->raw_lock) -#define write_can_lock(rwlock) __raw_write_can_lock(&(rwlock)->raw_lock) - /* - * Define the various spin_lock and rw_lock methods. Note we define these - * regardless of whether CONFIG_SMP or CONFIG_PREEMPT are set. The various - * methods are defined as nops in the case they are not required. + * Define the various spin_lock methods. Note we define these + * regardless of whether CONFIG_SMP or CONFIG_PREEMPT are set. The + * various methods are defined as nops in the case they are not + * required. 
*/ -#define spin_trylock(lock) __cond_lock(lock, _spin_trylock(lock)) -#define read_trylock(lock) __cond_lock(lock, _read_trylock(lock)) -#define write_trylock(lock) __cond_lock(lock, _write_trylock(lock)) +#define raw_spin_trylock(lock) __cond_lock(lock, _raw_spin_trylock(lock)) -#define spin_lock(lock) _spin_lock(lock) +#define raw_spin_lock(lock) _raw_spin_lock(lock) #ifdef CONFIG_DEBUG_LOCK_ALLOC -# define spin_lock_nested(lock, subclass) _spin_lock_nested(lock, subclass) -# define spin_lock_nest_lock(lock, nest_lock) \ +# define raw_spin_lock_nested(lock, subclass) \ + _raw_spin_lock_nested(lock, subclass) + +# define raw_spin_lock_nest_lock(lock, nest_lock) \ do { \ typecheck(struct lockdep_map *, &(nest_lock)->dep_map);\ - _spin_lock_nest_lock(lock, &(nest_lock)->dep_map); \ + _raw_spin_lock_nest_lock(lock, &(nest_lock)->dep_map); \ } while (0) #else -# define spin_lock_nested(lock, subclass) _spin_lock(lock) -# define spin_lock_nest_lock(lock, nest_lock) _spin_lock(lock) +# define raw_spin_lock_nested(lock, subclass) _raw_spin_lock(lock) +# define raw_spin_lock_nest_lock(lock, nest_lock) _raw_spin_lock(lock) #endif -#define write_lock(lock) _write_lock(lock) -#define read_lock(lock) _read_lock(lock) - #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) -#define spin_lock_irqsave(lock, flags) \ +#define raw_spin_lock_irqsave(lock, flags) \ do { \ typecheck(unsigned long, flags); \ - flags = _spin_lock_irqsave(lock); \ - } while (0) -#define read_lock_irqsave(lock, flags) \ - do { \ - typecheck(unsigned long, flags); \ - flags = _read_lock_irqsave(lock); \ - } while (0) -#define write_lock_irqsave(lock, flags) \ - do { \ - typecheck(unsigned long, flags); \ - flags = _write_lock_irqsave(lock); \ + flags = _raw_spin_lock_irqsave(lock); \ } while (0) #ifdef CONFIG_DEBUG_LOCK_ALLOC -#define spin_lock_irqsave_nested(lock, flags, subclass) \ +#define raw_spin_lock_irqsave_nested(lock, flags, subclass) \ do { \ typecheck(unsigned long, flags); \ - flags = _spin_lock_irqsave_nested(lock, subclass); \ + flags = _raw_spin_lock_irqsave_nested(lock, subclass); \ } while (0) #else -#define spin_lock_irqsave_nested(lock, flags, subclass) \ +#define raw_spin_lock_irqsave_nested(lock, flags, subclass) \ do { \ typecheck(unsigned long, flags); \ - flags = _spin_lock_irqsave(lock); \ + flags = _raw_spin_lock_irqsave(lock); \ } while (0) #endif #else -#define spin_lock_irqsave(lock, flags) \ - do { \ - typecheck(unsigned long, flags); \ - _spin_lock_irqsave(lock, flags); \ - } while (0) -#define read_lock_irqsave(lock, flags) \ - do { \ - typecheck(unsigned long, flags); \ - _read_lock_irqsave(lock, flags); \ - } while (0) -#define write_lock_irqsave(lock, flags) \ +#define raw_spin_lock_irqsave(lock, flags) \ do { \ typecheck(unsigned long, flags); \ - _write_lock_irqsave(lock, flags); \ + _raw_spin_lock_irqsave(lock, flags); \ } while (0) -#define spin_lock_irqsave_nested(lock, flags, subclass) \ - spin_lock_irqsave(lock, flags) -#endif +#define raw_spin_lock_irqsave_nested(lock, flags, subclass) \ + raw_spin_lock_irqsave(lock, flags) -#define spin_lock_irq(lock) _spin_lock_irq(lock) -#define spin_lock_bh(lock) _spin_lock_bh(lock) -#define read_lock_irq(lock) _read_lock_irq(lock) -#define read_lock_bh(lock) _read_lock_bh(lock) -#define write_lock_irq(lock) _write_lock_irq(lock) -#define write_lock_bh(lock) _write_lock_bh(lock) -#define spin_unlock(lock) _spin_unlock(lock) -#define read_unlock(lock) _read_unlock(lock) -#define write_unlock(lock) _write_unlock(lock) -#define 
spin_unlock_irq(lock) _spin_unlock_irq(lock) -#define read_unlock_irq(lock) _read_unlock_irq(lock) -#define write_unlock_irq(lock) _write_unlock_irq(lock) - -#define spin_unlock_irqrestore(lock, flags) \ - do { \ - typecheck(unsigned long, flags); \ - _spin_unlock_irqrestore(lock, flags); \ - } while (0) -#define spin_unlock_bh(lock) _spin_unlock_bh(lock) +#endif -#define read_unlock_irqrestore(lock, flags) \ - do { \ - typecheck(unsigned long, flags); \ - _read_unlock_irqrestore(lock, flags); \ - } while (0) -#define read_unlock_bh(lock) _read_unlock_bh(lock) +#define raw_spin_lock_irq(lock) _raw_spin_lock_irq(lock) +#define raw_spin_lock_bh(lock) _raw_spin_lock_bh(lock) +#define raw_spin_unlock(lock) _raw_spin_unlock(lock) +#define raw_spin_unlock_irq(lock) _raw_spin_unlock_irq(lock) -#define write_unlock_irqrestore(lock, flags) \ - do { \ - typecheck(unsigned long, flags); \ - _write_unlock_irqrestore(lock, flags); \ +#define raw_spin_unlock_irqrestore(lock, flags) \ + do { \ + typecheck(unsigned long, flags); \ + _raw_spin_unlock_irqrestore(lock, flags); \ } while (0) -#define write_unlock_bh(lock) _write_unlock_bh(lock) +#define raw_spin_unlock_bh(lock) _raw_spin_unlock_bh(lock) -#define spin_trylock_bh(lock) __cond_lock(lock, _spin_trylock_bh(lock)) +#define raw_spin_trylock_bh(lock) \ + __cond_lock(lock, _raw_spin_trylock_bh(lock)) -#define spin_trylock_irq(lock) \ +#define raw_spin_trylock_irq(lock) \ ({ \ local_irq_disable(); \ - spin_trylock(lock) ? \ + raw_spin_trylock(lock) ? \ 1 : ({ local_irq_enable(); 0; }); \ }) -#define spin_trylock_irqsave(lock, flags) \ +#define raw_spin_trylock_irqsave(lock, flags) \ ({ \ local_irq_save(flags); \ - spin_trylock(lock) ? \ + raw_spin_trylock(lock) ? \ 1 : ({ local_irq_restore(flags); 0; }); \ }) -#define write_trylock_irqsave(lock, flags) \ -({ \ - local_irq_save(flags); \ - write_trylock(lock) ? \ - 1 : ({ local_irq_restore(flags); 0; }); \ +/** + * raw_spin_can_lock - would raw_spin_trylock() succeed? + * @lock: the spinlock in question. 
+ */ +#define raw_spin_can_lock(lock) (!raw_spin_is_locked(lock)) + +/* Include rwlock functions */ +#include <linux/rwlock.h> + +/* + * Pull the _spin_*()/_read_*()/_write_*() functions/declarations: + */ +#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) +# include <linux/spinlock_api_smp.h> +#else +# include <linux/spinlock_api_up.h> +#endif + +/* + * Map the spin_lock functions to the raw variants for PREEMPT_RT=n + */ + +static inline raw_spinlock_t *spinlock_check(spinlock_t *lock) +{ + return &lock->rlock; +} + +#define spin_lock_init(_lock) \ +do { \ + spinlock_check(_lock); \ + raw_spin_lock_init(&(_lock)->rlock); \ +} while (0) + +static inline void spin_lock(spinlock_t *lock) +{ + raw_spin_lock(&lock->rlock); +} + +static inline void spin_lock_bh(spinlock_t *lock) +{ + raw_spin_lock_bh(&lock->rlock); +} + +static inline int spin_trylock(spinlock_t *lock) +{ + return raw_spin_trylock(&lock->rlock); +} + +#define spin_lock_nested(lock, subclass) \ +do { \ + raw_spin_lock_nested(spinlock_check(lock), subclass); \ +} while (0) + +#define spin_lock_nest_lock(lock, nest_lock) \ +do { \ + raw_spin_lock_nest_lock(spinlock_check(lock), nest_lock); \ +} while (0) + +static inline void spin_lock_irq(spinlock_t *lock) +{ + raw_spin_lock_irq(&lock->rlock); +} + +#define spin_lock_irqsave(lock, flags) \ +do { \ + raw_spin_lock_irqsave(spinlock_check(lock), flags); \ +} while (0) + +#define spin_lock_irqsave_nested(lock, flags, subclass) \ +do { \ + raw_spin_lock_irqsave_nested(spinlock_check(lock), flags, subclass); \ +} while (0) + +static inline void spin_unlock(spinlock_t *lock) +{ + raw_spin_unlock(&lock->rlock); +} + +static inline void spin_unlock_bh(spinlock_t *lock) +{ + raw_spin_unlock_bh(&lock->rlock); +} + +static inline void spin_unlock_irq(spinlock_t *lock) +{ + raw_spin_unlock_irq(&lock->rlock); +} + +static inline void spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags) +{ + raw_spin_unlock_irqrestore(&lock->rlock, flags); +} + +static inline int spin_trylock_bh(spinlock_t *lock) +{ + return raw_spin_trylock_bh(&lock->rlock); +} + +static inline int spin_trylock_irq(spinlock_t *lock) +{ + return raw_spin_trylock_irq(&lock->rlock); +} + +#define spin_trylock_irqsave(lock, flags) \ +({ \ + raw_spin_trylock_irqsave(spinlock_check(lock), flags); \ }) +static inline void spin_unlock_wait(spinlock_t *lock) +{ + raw_spin_unlock_wait(&lock->rlock); +} + +static inline int spin_is_locked(spinlock_t *lock) +{ + return raw_spin_is_locked(&lock->rlock); +} + +static inline int spin_is_contended(spinlock_t *lock) +{ + return raw_spin_is_contended(&lock->rlock); +} + +static inline int spin_can_lock(spinlock_t *lock) +{ + return raw_spin_can_lock(&lock->rlock); +} + +static inline void assert_spin_locked(spinlock_t *lock) +{ + assert_raw_spin_locked(&lock->rlock); +} + /* * Pull the atomic_t declaration: * (asm-mips/atomic.h needs above definitions) @@ -329,19 +393,4 @@ extern int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock); #define atomic_dec_and_lock(atomic, lock) \ __cond_lock(lock, _atomic_dec_and_lock(atomic, lock)) -/** - * spin_can_lock - would spin_trylock() succeed? - * @lock: the spinlock in question. 
- */ -#define spin_can_lock(lock) (!spin_is_locked(lock)) - -/* - * Pull the _spin_*()/_read_*()/_write_*() functions/declarations: - */ -#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) -# include <linux/spinlock_api_smp.h> -#else -# include <linux/spinlock_api_up.h> -#endif - #endif /* __LINUX_SPINLOCK_H */ diff --git a/include/linux/spinlock_api_smp.h b/include/linux/spinlock_api_smp.h index 8264a7f459b..e253ccd7a60 100644 --- a/include/linux/spinlock_api_smp.h +++ b/include/linux/spinlock_api_smp.h @@ -17,165 +17,76 @@ int in_lock_functions(unsigned long addr); -#define assert_spin_locked(x) BUG_ON(!spin_is_locked(x)) - -void __lockfunc _spin_lock(spinlock_t *lock) __acquires(lock); -void __lockfunc _spin_lock_nested(spinlock_t *lock, int subclass) - __acquires(lock); -void __lockfunc _spin_lock_nest_lock(spinlock_t *lock, struct lockdep_map *map) - __acquires(lock); -void __lockfunc _read_lock(rwlock_t *lock) __acquires(lock); -void __lockfunc _write_lock(rwlock_t *lock) __acquires(lock); -void __lockfunc _spin_lock_bh(spinlock_t *lock) __acquires(lock); -void __lockfunc _read_lock_bh(rwlock_t *lock) __acquires(lock); -void __lockfunc _write_lock_bh(rwlock_t *lock) __acquires(lock); -void __lockfunc _spin_lock_irq(spinlock_t *lock) __acquires(lock); -void __lockfunc _read_lock_irq(rwlock_t *lock) __acquires(lock); -void __lockfunc _write_lock_irq(rwlock_t *lock) __acquires(lock); -unsigned long __lockfunc _spin_lock_irqsave(spinlock_t *lock) - __acquires(lock); -unsigned long __lockfunc _spin_lock_irqsave_nested(spinlock_t *lock, int subclass) - __acquires(lock); -unsigned long __lockfunc _read_lock_irqsave(rwlock_t *lock) - __acquires(lock); -unsigned long __lockfunc _write_lock_irqsave(rwlock_t *lock) - __acquires(lock); -int __lockfunc _spin_trylock(spinlock_t *lock); -int __lockfunc _read_trylock(rwlock_t *lock); -int __lockfunc _write_trylock(rwlock_t *lock); -int __lockfunc _spin_trylock_bh(spinlock_t *lock); -void __lockfunc _spin_unlock(spinlock_t *lock) __releases(lock); -void __lockfunc _read_unlock(rwlock_t *lock) __releases(lock); -void __lockfunc _write_unlock(rwlock_t *lock) __releases(lock); -void __lockfunc _spin_unlock_bh(spinlock_t *lock) __releases(lock); -void __lockfunc _read_unlock_bh(rwlock_t *lock) __releases(lock); -void __lockfunc _write_unlock_bh(rwlock_t *lock) __releases(lock); -void __lockfunc _spin_unlock_irq(spinlock_t *lock) __releases(lock); -void __lockfunc _read_unlock_irq(rwlock_t *lock) __releases(lock); -void __lockfunc _write_unlock_irq(rwlock_t *lock) __releases(lock); -void __lockfunc _spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags) - __releases(lock); -void __lockfunc _read_unlock_irqrestore(rwlock_t *lock, unsigned long flags) - __releases(lock); -void __lockfunc _write_unlock_irqrestore(rwlock_t *lock, unsigned long flags) - __releases(lock); +#define assert_raw_spin_locked(x) BUG_ON(!raw_spin_is_locked(x)) + +void __lockfunc _raw_spin_lock(raw_spinlock_t *lock) __acquires(lock); +void __lockfunc _raw_spin_lock_nested(raw_spinlock_t *lock, int subclass) + __acquires(lock); +void __lockfunc +_raw_spin_lock_nest_lock(raw_spinlock_t *lock, struct lockdep_map *map) + __acquires(lock); +void __lockfunc _raw_spin_lock_bh(raw_spinlock_t *lock) __acquires(lock); +void __lockfunc _raw_spin_lock_irq(raw_spinlock_t *lock) + __acquires(lock); + +unsigned long __lockfunc _raw_spin_lock_irqsave(raw_spinlock_t *lock) + __acquires(lock); +unsigned long __lockfunc +_raw_spin_lock_irqsave_nested(raw_spinlock_t *lock, int subclass) + 
__acquires(lock); +int __lockfunc _raw_spin_trylock(raw_spinlock_t *lock); +int __lockfunc _raw_spin_trylock_bh(raw_spinlock_t *lock); +void __lockfunc _raw_spin_unlock(raw_spinlock_t *lock) __releases(lock); +void __lockfunc _raw_spin_unlock_bh(raw_spinlock_t *lock) __releases(lock); +void __lockfunc _raw_spin_unlock_irq(raw_spinlock_t *lock) __releases(lock); +void __lockfunc +_raw_spin_unlock_irqrestore(raw_spinlock_t *lock, unsigned long flags) + __releases(lock); #ifdef CONFIG_INLINE_SPIN_LOCK -#define _spin_lock(lock) __spin_lock(lock) -#endif - -#ifdef CONFIG_INLINE_READ_LOCK -#define _read_lock(lock) __read_lock(lock) -#endif - -#ifdef CONFIG_INLINE_WRITE_LOCK -#define _write_lock(lock) __write_lock(lock) +#define _raw_spin_lock(lock) __raw_spin_lock(lock) #endif #ifdef CONFIG_INLINE_SPIN_LOCK_BH -#define _spin_lock_bh(lock) __spin_lock_bh(lock) -#endif - -#ifdef CONFIG_INLINE_READ_LOCK_BH -#define _read_lock_bh(lock) __read_lock_bh(lock) -#endif - -#ifdef CONFIG_INLINE_WRITE_LOCK_BH -#define _write_lock_bh(lock) __write_lock_bh(lock) +#define _raw_spin_lock_bh(lock) __raw_spin_lock_bh(lock) #endif #ifdef CONFIG_INLINE_SPIN_LOCK_IRQ -#define _spin_lock_irq(lock) __spin_lock_irq(lock) -#endif - -#ifdef CONFIG_INLINE_READ_LOCK_IRQ -#define _read_lock_irq(lock) __read_lock_irq(lock) -#endif - -#ifdef CONFIG_INLINE_WRITE_LOCK_IRQ -#define _write_lock_irq(lock) __write_lock_irq(lock) +#define _raw_spin_lock_irq(lock) __raw_spin_lock_irq(lock) #endif #ifdef CONFIG_INLINE_SPIN_LOCK_IRQSAVE -#define _spin_lock_irqsave(lock) __spin_lock_irqsave(lock) -#endif - -#ifdef CONFIG_INLINE_READ_LOCK_IRQSAVE -#define _read_lock_irqsave(lock) __read_lock_irqsave(lock) -#endif - -#ifdef CONFIG_INLINE_WRITE_LOCK_IRQSAVE -#define _write_lock_irqsave(lock) __write_lock_irqsave(lock) +#define _raw_spin_lock_irqsave(lock) __raw_spin_lock_irqsave(lock) #endif #ifdef CONFIG_INLINE_SPIN_TRYLOCK -#define _spin_trylock(lock) __spin_trylock(lock) -#endif - -#ifdef CONFIG_INLINE_READ_TRYLOCK -#define _read_trylock(lock) __read_trylock(lock) -#endif - -#ifdef CONFIG_INLINE_WRITE_TRYLOCK -#define _write_trylock(lock) __write_trylock(lock) +#define _raw_spin_trylock(lock) __raw_spin_trylock(lock) #endif #ifdef CONFIG_INLINE_SPIN_TRYLOCK_BH -#define _spin_trylock_bh(lock) __spin_trylock_bh(lock) +#define _raw_spin_trylock_bh(lock) __raw_spin_trylock_bh(lock) #endif #ifdef CONFIG_INLINE_SPIN_UNLOCK -#define _spin_unlock(lock) __spin_unlock(lock) -#endif - -#ifdef CONFIG_INLINE_READ_UNLOCK -#define _read_unlock(lock) __read_unlock(lock) -#endif - -#ifdef CONFIG_INLINE_WRITE_UNLOCK -#define _write_unlock(lock) __write_unlock(lock) +#define _raw_spin_unlock(lock) __raw_spin_unlock(lock) #endif #ifdef CONFIG_INLINE_SPIN_UNLOCK_BH -#define _spin_unlock_bh(lock) __spin_unlock_bh(lock) -#endif - -#ifdef CONFIG_INLINE_READ_UNLOCK_BH -#define _read_unlock_bh(lock) __read_unlock_bh(lock) -#endif - -#ifdef CONFIG_INLINE_WRITE_UNLOCK_BH -#define _write_unlock_bh(lock) __write_unlock_bh(lock) +#define _raw_spin_unlock_bh(lock) __raw_spin_unlock_bh(lock) #endif #ifdef CONFIG_INLINE_SPIN_UNLOCK_IRQ -#define _spin_unlock_irq(lock) __spin_unlock_irq(lock) -#endif - -#ifdef CONFIG_INLINE_READ_UNLOCK_IRQ -#define _read_unlock_irq(lock) __read_unlock_irq(lock) -#endif - -#ifdef CONFIG_INLINE_WRITE_UNLOCK_IRQ -#define _write_unlock_irq(lock) __write_unlock_irq(lock) +#define _raw_spin_unlock_irq(lock) __raw_spin_unlock_irq(lock) #endif #ifdef CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE -#define _spin_unlock_irqrestore(lock, flags) 
__spin_unlock_irqrestore(lock, flags) -#endif - -#ifdef CONFIG_INLINE_READ_UNLOCK_IRQRESTORE -#define _read_unlock_irqrestore(lock, flags) __read_unlock_irqrestore(lock, flags) -#endif - -#ifdef CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE -#define _write_unlock_irqrestore(lock, flags) __write_unlock_irqrestore(lock, flags) +#define _raw_spin_unlock_irqrestore(lock, flags) __raw_spin_unlock_irqrestore(lock, flags) #endif -static inline int __spin_trylock(spinlock_t *lock) +static inline int __raw_spin_trylock(raw_spinlock_t *lock) { preempt_disable(); - if (_raw_spin_trylock(lock)) { + if (do_raw_spin_trylock(lock)) { spin_acquire(&lock->dep_map, 0, 1, _RET_IP_); return 1; } @@ -183,28 +94,6 @@ static inline int __spin_trylock(spinlock_t *lock) return 0; } -static inline int __read_trylock(rwlock_t *lock) -{ - preempt_disable(); - if (_raw_read_trylock(lock)) { - rwlock_acquire_read(&lock->dep_map, 0, 1, _RET_IP_); - return 1; - } - preempt_enable(); - return 0; -} - -static inline int __write_trylock(rwlock_t *lock) -{ - preempt_disable(); - if (_raw_write_trylock(lock)) { - rwlock_acquire(&lock->dep_map, 0, 1, _RET_IP_); - return 1; - } - preempt_enable(); - return 0; -} - /* * If lockdep is enabled then we use the non-preemption spin-ops * even on CONFIG_PREEMPT, because lockdep assumes that interrupts are @@ -212,14 +101,7 @@ static inline int __write_trylock(rwlock_t *lock) */ #if !defined(CONFIG_GENERIC_LOCKBREAK) || defined(CONFIG_DEBUG_LOCK_ALLOC) -static inline void __read_lock(rwlock_t *lock) -{ - preempt_disable(); - rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_); - LOCK_CONTENDED(lock, _raw_read_trylock, _raw_read_lock); -} - -static inline unsigned long __spin_lock_irqsave(spinlock_t *lock) +static inline unsigned long __raw_spin_lock_irqsave(raw_spinlock_t *lock) { unsigned long flags; @@ -228,205 +110,79 @@ static inline unsigned long __spin_lock_irqsave(spinlock_t *lock) spin_acquire(&lock->dep_map, 0, 0, _RET_IP_); /* * On lockdep we dont want the hand-coded irq-enable of - * _raw_spin_lock_flags() code, because lockdep assumes + * do_raw_spin_lock_flags() code, because lockdep assumes * that interrupts are not re-enabled during lock-acquire: */ #ifdef CONFIG_LOCKDEP - LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock); + LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock); #else - _raw_spin_lock_flags(lock, &flags); + do_raw_spin_lock_flags(lock, &flags); #endif return flags; } -static inline void __spin_lock_irq(spinlock_t *lock) +static inline void __raw_spin_lock_irq(raw_spinlock_t *lock) { local_irq_disable(); preempt_disable(); spin_acquire(&lock->dep_map, 0, 0, _RET_IP_); - LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock); + LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock); } -static inline void __spin_lock_bh(spinlock_t *lock) +static inline void __raw_spin_lock_bh(raw_spinlock_t *lock) { local_bh_disable(); preempt_disable(); spin_acquire(&lock->dep_map, 0, 0, _RET_IP_); - LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock); -} - -static inline unsigned long __read_lock_irqsave(rwlock_t *lock) -{ - unsigned long flags; - - local_irq_save(flags); - preempt_disable(); - rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_); - LOCK_CONTENDED_FLAGS(lock, _raw_read_trylock, _raw_read_lock, - _raw_read_lock_flags, &flags); - return flags; -} - -static inline void __read_lock_irq(rwlock_t *lock) -{ - local_irq_disable(); - preempt_disable(); - rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_); - LOCK_CONTENDED(lock, _raw_read_trylock, 
_raw_read_lock); -} - -static inline void __read_lock_bh(rwlock_t *lock) -{ - local_bh_disable(); - preempt_disable(); - rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_); - LOCK_CONTENDED(lock, _raw_read_trylock, _raw_read_lock); -} - -static inline unsigned long __write_lock_irqsave(rwlock_t *lock) -{ - unsigned long flags; - - local_irq_save(flags); - preempt_disable(); - rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_); - LOCK_CONTENDED_FLAGS(lock, _raw_write_trylock, _raw_write_lock, - _raw_write_lock_flags, &flags); - return flags; -} - -static inline void __write_lock_irq(rwlock_t *lock) -{ - local_irq_disable(); - preempt_disable(); - rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_); - LOCK_CONTENDED(lock, _raw_write_trylock, _raw_write_lock); + LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock); } -static inline void __write_lock_bh(rwlock_t *lock) -{ - local_bh_disable(); - preempt_disable(); - rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_); - LOCK_CONTENDED(lock, _raw_write_trylock, _raw_write_lock); -} - -static inline void __spin_lock(spinlock_t *lock) +static inline void __raw_spin_lock(raw_spinlock_t *lock) { preempt_disable(); spin_acquire(&lock->dep_map, 0, 0, _RET_IP_); - LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock); -} - -static inline void __write_lock(rwlock_t *lock) -{ - preempt_disable(); - rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_); - LOCK_CONTENDED(lock, _raw_write_trylock, _raw_write_lock); + LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock); } #endif /* CONFIG_PREEMPT */ -static inline void __spin_unlock(spinlock_t *lock) +static inline void __raw_spin_unlock(raw_spinlock_t *lock) { spin_release(&lock->dep_map, 1, _RET_IP_); - _raw_spin_unlock(lock); - preempt_enable(); -} - -static inline void __write_unlock(rwlock_t *lock) -{ - rwlock_release(&lock->dep_map, 1, _RET_IP_); - _raw_write_unlock(lock); - preempt_enable(); -} - -static inline void __read_unlock(rwlock_t *lock) -{ - rwlock_release(&lock->dep_map, 1, _RET_IP_); - _raw_read_unlock(lock); + do_raw_spin_unlock(lock); preempt_enable(); } -static inline void __spin_unlock_irqrestore(spinlock_t *lock, +static inline void __raw_spin_unlock_irqrestore(raw_spinlock_t *lock, unsigned long flags) { spin_release(&lock->dep_map, 1, _RET_IP_); - _raw_spin_unlock(lock); + do_raw_spin_unlock(lock); local_irq_restore(flags); preempt_enable(); } -static inline void __spin_unlock_irq(spinlock_t *lock) +static inline void __raw_spin_unlock_irq(raw_spinlock_t *lock) { spin_release(&lock->dep_map, 1, _RET_IP_); - _raw_spin_unlock(lock); + do_raw_spin_unlock(lock); local_irq_enable(); preempt_enable(); } -static inline void __spin_unlock_bh(spinlock_t *lock) +static inline void __raw_spin_unlock_bh(raw_spinlock_t *lock) { spin_release(&lock->dep_map, 1, _RET_IP_); - _raw_spin_unlock(lock); - preempt_enable_no_resched(); - local_bh_enable_ip((unsigned long)__builtin_return_address(0)); -} - -static inline void __read_unlock_irqrestore(rwlock_t *lock, unsigned long flags) -{ - rwlock_release(&lock->dep_map, 1, _RET_IP_); - _raw_read_unlock(lock); - local_irq_restore(flags); - preempt_enable(); -} - -static inline void __read_unlock_irq(rwlock_t *lock) -{ - rwlock_release(&lock->dep_map, 1, _RET_IP_); - _raw_read_unlock(lock); - local_irq_enable(); - preempt_enable(); -} - -static inline void __read_unlock_bh(rwlock_t *lock) -{ - rwlock_release(&lock->dep_map, 1, _RET_IP_); - _raw_read_unlock(lock); + do_raw_spin_unlock(lock); preempt_enable_no_resched(); local_bh_enable_ip((unsigned 
long)__builtin_return_address(0)); } -static inline void __write_unlock_irqrestore(rwlock_t *lock, - unsigned long flags) -{ - rwlock_release(&lock->dep_map, 1, _RET_IP_); - _raw_write_unlock(lock); - local_irq_restore(flags); - preempt_enable(); -} - -static inline void __write_unlock_irq(rwlock_t *lock) -{ - rwlock_release(&lock->dep_map, 1, _RET_IP_); - _raw_write_unlock(lock); - local_irq_enable(); - preempt_enable(); -} - -static inline void __write_unlock_bh(rwlock_t *lock) -{ - rwlock_release(&lock->dep_map, 1, _RET_IP_); - _raw_write_unlock(lock); - preempt_enable_no_resched(); - local_bh_enable_ip((unsigned long)__builtin_return_address(0)); -} - -static inline int __spin_trylock_bh(spinlock_t *lock) +static inline int __raw_spin_trylock_bh(raw_spinlock_t *lock) { local_bh_disable(); preempt_disable(); - if (_raw_spin_trylock(lock)) { + if (do_raw_spin_trylock(lock)) { spin_acquire(&lock->dep_map, 0, 1, _RET_IP_); return 1; } @@ -435,4 +191,6 @@ static inline int __spin_trylock_bh(spinlock_t *lock) return 0; } +#include <linux/rwlock_api_smp.h> + #endif /* __LINUX_SPINLOCK_API_SMP_H */ diff --git a/include/linux/spinlock_api_up.h b/include/linux/spinlock_api_up.h index 04e1d316457..af1f47229e7 100644 --- a/include/linux/spinlock_api_up.h +++ b/include/linux/spinlock_api_up.h @@ -16,7 +16,7 @@ #define in_lock_functions(ADDR) 0 -#define assert_spin_locked(lock) do { (void)(lock); } while (0) +#define assert_raw_spin_locked(lock) do { (void)(lock); } while (0) /* * In the UP-nondebug case there's no real locking going on, so the @@ -40,7 +40,8 @@ do { preempt_enable(); __release(lock); (void)(lock); } while (0) #define __UNLOCK_BH(lock) \ - do { preempt_enable_no_resched(); local_bh_enable(); __release(lock); (void)(lock); } while (0) + do { preempt_enable_no_resched(); local_bh_enable(); \ + __release(lock); (void)(lock); } while (0) #define __UNLOCK_IRQ(lock) \ do { local_irq_enable(); __UNLOCK(lock); } while (0) @@ -48,34 +49,37 @@ #define __UNLOCK_IRQRESTORE(lock, flags) \ do { local_irq_restore(flags); __UNLOCK(lock); } while (0) -#define _spin_lock(lock) __LOCK(lock) -#define _spin_lock_nested(lock, subclass) __LOCK(lock) -#define _read_lock(lock) __LOCK(lock) -#define _write_lock(lock) __LOCK(lock) -#define _spin_lock_bh(lock) __LOCK_BH(lock) -#define _read_lock_bh(lock) __LOCK_BH(lock) -#define _write_lock_bh(lock) __LOCK_BH(lock) -#define _spin_lock_irq(lock) __LOCK_IRQ(lock) -#define _read_lock_irq(lock) __LOCK_IRQ(lock) -#define _write_lock_irq(lock) __LOCK_IRQ(lock) -#define _spin_lock_irqsave(lock, flags) __LOCK_IRQSAVE(lock, flags) -#define _read_lock_irqsave(lock, flags) __LOCK_IRQSAVE(lock, flags) -#define _write_lock_irqsave(lock, flags) __LOCK_IRQSAVE(lock, flags) -#define _spin_trylock(lock) ({ __LOCK(lock); 1; }) -#define _read_trylock(lock) ({ __LOCK(lock); 1; }) -#define _write_trylock(lock) ({ __LOCK(lock); 1; }) -#define _spin_trylock_bh(lock) ({ __LOCK_BH(lock); 1; }) -#define _spin_unlock(lock) __UNLOCK(lock) -#define _read_unlock(lock) __UNLOCK(lock) -#define _write_unlock(lock) __UNLOCK(lock) -#define _spin_unlock_bh(lock) __UNLOCK_BH(lock) -#define _write_unlock_bh(lock) __UNLOCK_BH(lock) -#define _read_unlock_bh(lock) __UNLOCK_BH(lock) -#define _spin_unlock_irq(lock) __UNLOCK_IRQ(lock) -#define _read_unlock_irq(lock) __UNLOCK_IRQ(lock) -#define _write_unlock_irq(lock) __UNLOCK_IRQ(lock) -#define _spin_unlock_irqrestore(lock, flags) __UNLOCK_IRQRESTORE(lock, flags) -#define _read_unlock_irqrestore(lock, flags) __UNLOCK_IRQRESTORE(lock, flags) -#define 
_write_unlock_irqrestore(lock, flags) __UNLOCK_IRQRESTORE(lock, flags) +#define _raw_spin_lock(lock) __LOCK(lock) +#define _raw_spin_lock_nested(lock, subclass) __LOCK(lock) +#define _raw_read_lock(lock) __LOCK(lock) +#define _raw_write_lock(lock) __LOCK(lock) +#define _raw_spin_lock_bh(lock) __LOCK_BH(lock) +#define _raw_read_lock_bh(lock) __LOCK_BH(lock) +#define _raw_write_lock_bh(lock) __LOCK_BH(lock) +#define _raw_spin_lock_irq(lock) __LOCK_IRQ(lock) +#define _raw_read_lock_irq(lock) __LOCK_IRQ(lock) +#define _raw_write_lock_irq(lock) __LOCK_IRQ(lock) +#define _raw_spin_lock_irqsave(lock, flags) __LOCK_IRQSAVE(lock, flags) +#define _raw_read_lock_irqsave(lock, flags) __LOCK_IRQSAVE(lock, flags) +#define _raw_write_lock_irqsave(lock, flags) __LOCK_IRQSAVE(lock, flags) +#define _raw_spin_trylock(lock) ({ __LOCK(lock); 1; }) +#define _raw_read_trylock(lock) ({ __LOCK(lock); 1; }) +#define _raw_write_trylock(lock) ({ __LOCK(lock); 1; }) +#define _raw_spin_trylock_bh(lock) ({ __LOCK_BH(lock); 1; }) +#define _raw_spin_unlock(lock) __UNLOCK(lock) +#define _raw_read_unlock(lock) __UNLOCK(lock) +#define _raw_write_unlock(lock) __UNLOCK(lock) +#define _raw_spin_unlock_bh(lock) __UNLOCK_BH(lock) +#define _raw_write_unlock_bh(lock) __UNLOCK_BH(lock) +#define _raw_read_unlock_bh(lock) __UNLOCK_BH(lock) +#define _raw_spin_unlock_irq(lock) __UNLOCK_IRQ(lock) +#define _raw_read_unlock_irq(lock) __UNLOCK_IRQ(lock) +#define _raw_write_unlock_irq(lock) __UNLOCK_IRQ(lock) +#define _raw_spin_unlock_irqrestore(lock, flags) \ + __UNLOCK_IRQRESTORE(lock, flags) +#define _raw_read_unlock_irqrestore(lock, flags) \ + __UNLOCK_IRQRESTORE(lock, flags) +#define _raw_write_unlock_irqrestore(lock, flags) \ + __UNLOCK_IRQRESTORE(lock, flags) #endif /* __LINUX_SPINLOCK_API_UP_H */ diff --git a/include/linux/spinlock_types.h b/include/linux/spinlock_types.h index 68d88f71f1a..851b7783720 100644 --- a/include/linux/spinlock_types.h +++ b/include/linux/spinlock_types.h @@ -17,8 +17,8 @@ #include <linux/lockdep.h> -typedef struct { - raw_spinlock_t raw_lock; +typedef struct raw_spinlock { + arch_spinlock_t raw_lock; #ifdef CONFIG_GENERIC_LOCKBREAK unsigned int break_lock; #endif @@ -29,26 +29,10 @@ typedef struct { #ifdef CONFIG_DEBUG_LOCK_ALLOC struct lockdep_map dep_map; #endif -} spinlock_t; +} raw_spinlock_t; #define SPINLOCK_MAGIC 0xdead4ead -typedef struct { - raw_rwlock_t raw_lock; -#ifdef CONFIG_GENERIC_LOCKBREAK - unsigned int break_lock; -#endif -#ifdef CONFIG_DEBUG_SPINLOCK - unsigned int magic, owner_cpu; - void *owner; -#endif -#ifdef CONFIG_DEBUG_LOCK_ALLOC - struct lockdep_map dep_map; -#endif -} rwlock_t; - -#define RWLOCK_MAGIC 0xdeaf1eed - #define SPINLOCK_OWNER_INIT ((void *)-1L) #ifdef CONFIG_DEBUG_LOCK_ALLOC @@ -57,44 +41,56 @@ typedef struct { # define SPIN_DEP_MAP_INIT(lockname) #endif -#ifdef CONFIG_DEBUG_LOCK_ALLOC -# define RW_DEP_MAP_INIT(lockname) .dep_map = { .name = #lockname } +#ifdef CONFIG_DEBUG_SPINLOCK +# define SPIN_DEBUG_INIT(lockname) \ + .magic = SPINLOCK_MAGIC, \ + .owner_cpu = -1, \ + .owner = SPINLOCK_OWNER_INIT, #else -# define RW_DEP_MAP_INIT(lockname) +# define SPIN_DEBUG_INIT(lockname) #endif -#ifdef CONFIG_DEBUG_SPINLOCK -# define __SPIN_LOCK_UNLOCKED(lockname) \ - (spinlock_t) { .raw_lock = __RAW_SPIN_LOCK_UNLOCKED, \ - .magic = SPINLOCK_MAGIC, \ - .owner = SPINLOCK_OWNER_INIT, \ - .owner_cpu = -1, \ - SPIN_DEP_MAP_INIT(lockname) } -#define __RW_LOCK_UNLOCKED(lockname) \ - (rwlock_t) { .raw_lock = __RAW_RW_LOCK_UNLOCKED, \ - .magic = RWLOCK_MAGIC, \ - .owner = 
SPINLOCK_OWNER_INIT, \ - .owner_cpu = -1, \ - RW_DEP_MAP_INIT(lockname) } -#else -# define __SPIN_LOCK_UNLOCKED(lockname) \ - (spinlock_t) { .raw_lock = __RAW_SPIN_LOCK_UNLOCKED, \ - SPIN_DEP_MAP_INIT(lockname) } -#define __RW_LOCK_UNLOCKED(lockname) \ - (rwlock_t) { .raw_lock = __RAW_RW_LOCK_UNLOCKED, \ - RW_DEP_MAP_INIT(lockname) } +#define __RAW_SPIN_LOCK_INITIALIZER(lockname) \ + { \ + .raw_lock = __ARCH_SPIN_LOCK_UNLOCKED, \ + SPIN_DEBUG_INIT(lockname) \ + SPIN_DEP_MAP_INIT(lockname) } + +#define __RAW_SPIN_LOCK_UNLOCKED(lockname) \ + (raw_spinlock_t) __RAW_SPIN_LOCK_INITIALIZER(lockname) + +#define DEFINE_RAW_SPINLOCK(x) raw_spinlock_t x = __RAW_SPIN_LOCK_UNLOCKED(x) + +typedef struct spinlock { + union { + struct raw_spinlock rlock; + +#ifdef CONFIG_DEBUG_LOCK_ALLOC +# define LOCK_PADSIZE (offsetof(struct raw_spinlock, dep_map)) + struct { + u8 __padding[LOCK_PADSIZE]; + struct lockdep_map dep_map; + }; #endif + }; +} spinlock_t; + +#define __SPIN_LOCK_INITIALIZER(lockname) \ + { { .rlock = __RAW_SPIN_LOCK_INITIALIZER(lockname) } } + +#define __SPIN_LOCK_UNLOCKED(lockname) \ + (spinlock_t ) __SPIN_LOCK_INITIALIZER(lockname) /* - * SPIN_LOCK_UNLOCKED and RW_LOCK_UNLOCKED defeat lockdep state tracking and - * are hence deprecated. - * Please use DEFINE_SPINLOCK()/DEFINE_RWLOCK() or - * __SPIN_LOCK_UNLOCKED()/__RW_LOCK_UNLOCKED() as appropriate. + * SPIN_LOCK_UNLOCKED defeats lockdep state tracking and is hence + * deprecated. + * Please use DEFINE_SPINLOCK() or __SPIN_LOCK_UNLOCKED() as + * appropriate. */ #define SPIN_LOCK_UNLOCKED __SPIN_LOCK_UNLOCKED(old_style_spin_init) -#define RW_LOCK_UNLOCKED __RW_LOCK_UNLOCKED(old_style_rw_init) #define DEFINE_SPINLOCK(x) spinlock_t x = __SPIN_LOCK_UNLOCKED(x) -#define DEFINE_RWLOCK(x) rwlock_t x = __RW_LOCK_UNLOCKED(x) + +#include <linux/rwlock_types.h> #endif /* __LINUX_SPINLOCK_TYPES_H */ diff --git a/include/linux/spinlock_types_up.h b/include/linux/spinlock_types_up.h index 04135b0e198..c09b6407ae1 100644 --- a/include/linux/spinlock_types_up.h +++ b/include/linux/spinlock_types_up.h @@ -16,22 +16,22 @@ typedef struct { volatile unsigned int slock; -} raw_spinlock_t; +} arch_spinlock_t; -#define __RAW_SPIN_LOCK_UNLOCKED { 1 } +#define __ARCH_SPIN_LOCK_UNLOCKED { 1 } #else -typedef struct { } raw_spinlock_t; +typedef struct { } arch_spinlock_t; -#define __RAW_SPIN_LOCK_UNLOCKED { } +#define __ARCH_SPIN_LOCK_UNLOCKED { } #endif typedef struct { /* no debug version on UP */ -} raw_rwlock_t; +} arch_rwlock_t; -#define __RAW_RW_LOCK_UNLOCKED { } +#define __ARCH_RW_LOCK_UNLOCKED { } #endif /* __LINUX_SPINLOCK_TYPES_UP_H */ diff --git a/include/linux/spinlock_up.h b/include/linux/spinlock_up.h index d4841ed8215..b14f6a91e19 100644 --- a/include/linux/spinlock_up.h +++ b/include/linux/spinlock_up.h @@ -18,21 +18,21 @@ */ #ifdef CONFIG_DEBUG_SPINLOCK -#define __raw_spin_is_locked(x) ((x)->slock == 0) +#define arch_spin_is_locked(x) ((x)->slock == 0) -static inline void __raw_spin_lock(raw_spinlock_t *lock) +static inline void arch_spin_lock(arch_spinlock_t *lock) { lock->slock = 0; } static inline void -__raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags) +arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags) { local_irq_save(flags); lock->slock = 0; } -static inline int __raw_spin_trylock(raw_spinlock_t *lock) +static inline int arch_spin_trylock(arch_spinlock_t *lock) { char oldval = lock->slock; @@ -41,7 +41,7 @@ static inline int __raw_spin_trylock(raw_spinlock_t *lock) return oldval > 0; } -static inline void 
__raw_spin_unlock(raw_spinlock_t *lock) +static inline void arch_spin_unlock(arch_spinlock_t *lock) { lock->slock = 1; } @@ -49,28 +49,28 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lock) /* * Read-write spinlocks. No debug version. */ -#define __raw_read_lock(lock) do { (void)(lock); } while (0) -#define __raw_write_lock(lock) do { (void)(lock); } while (0) -#define __raw_read_trylock(lock) ({ (void)(lock); 1; }) -#define __raw_write_trylock(lock) ({ (void)(lock); 1; }) -#define __raw_read_unlock(lock) do { (void)(lock); } while (0) -#define __raw_write_unlock(lock) do { (void)(lock); } while (0) +#define arch_read_lock(lock) do { (void)(lock); } while (0) +#define arch_write_lock(lock) do { (void)(lock); } while (0) +#define arch_read_trylock(lock) ({ (void)(lock); 1; }) +#define arch_write_trylock(lock) ({ (void)(lock); 1; }) +#define arch_read_unlock(lock) do { (void)(lock); } while (0) +#define arch_write_unlock(lock) do { (void)(lock); } while (0) #else /* DEBUG_SPINLOCK */ -#define __raw_spin_is_locked(lock) ((void)(lock), 0) +#define arch_spin_is_locked(lock) ((void)(lock), 0) /* for sched.c and kernel_lock.c: */ -# define __raw_spin_lock(lock) do { (void)(lock); } while (0) -# define __raw_spin_lock_flags(lock, flags) do { (void)(lock); } while (0) -# define __raw_spin_unlock(lock) do { (void)(lock); } while (0) -# define __raw_spin_trylock(lock) ({ (void)(lock); 1; }) +# define arch_spin_lock(lock) do { (void)(lock); } while (0) +# define arch_spin_lock_flags(lock, flags) do { (void)(lock); } while (0) +# define arch_spin_unlock(lock) do { (void)(lock); } while (0) +# define arch_spin_trylock(lock) ({ (void)(lock); 1; }) #endif /* DEBUG_SPINLOCK */ -#define __raw_spin_is_contended(lock) (((void)(lock), 0)) +#define arch_spin_is_contended(lock) (((void)(lock), 0)) -#define __raw_read_can_lock(lock) (((void)(lock), 1)) -#define __raw_write_can_lock(lock) (((void)(lock), 1)) +#define arch_read_can_lock(lock) (((void)(lock), 1)) +#define arch_write_can_lock(lock) (((void)(lock), 1)) -#define __raw_spin_unlock_wait(lock) \ - do { cpu_relax(); } while (__raw_spin_is_locked(lock)) +#define arch_spin_unlock_wait(lock) \ + do { cpu_relax(); } while (arch_spin_is_locked(lock)) #endif /* __LINUX_SPINLOCK_UP_H */ diff --git a/include/linux/string.h b/include/linux/string.h index b8508868d5a..651839a2a75 100644 --- a/include/linux/string.h +++ b/include/linux/string.h @@ -62,7 +62,15 @@ extern char * strnchr(const char *, size_t, int); #ifndef __HAVE_ARCH_STRRCHR extern char * strrchr(const char *,int); #endif -extern char * __must_check strstrip(char *); +extern char * __must_check skip_spaces(const char *); + +extern char *strim(char *); + +static inline __must_check char *strstrip(char *str) +{ + return strim(str); +} + #ifndef __HAVE_ARCH_STRSTR extern char * strstr(const char *,const char *); #endif diff --git a/include/linux/swap.h b/include/linux/swap.h index 4ec90019c1a..a2602a8207a 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h @@ -145,38 +145,43 @@ enum { SWP_DISCARDABLE = (1 << 2), /* blkdev supports discard */ SWP_DISCARDING = (1 << 3), /* now discarding a free cluster */ SWP_SOLIDSTATE = (1 << 4), /* blkdev seeks are cheap */ + SWP_CONTINUED = (1 << 5), /* swap_map has count continuation */ /* add others here before... */ SWP_SCANNING = (1 << 8), /* refcount in scan_swap_map */ }; #define SWAP_CLUSTER_MAX 32 -#define SWAP_MAP_MAX 0x7ffe -#define SWAP_MAP_BAD 0x7fff -#define SWAP_HAS_CACHE 0x8000 /* There is a swap cache of entry. 
*/ -#define SWAP_COUNT_MASK (~SWAP_HAS_CACHE) +#define SWAP_MAP_MAX 0x3e /* Max duplication count, in first swap_map */ +#define SWAP_MAP_BAD 0x3f /* Note pageblock is bad, in first swap_map */ +#define SWAP_HAS_CACHE 0x40 /* Flag page is cached, in first swap_map */ +#define SWAP_CONT_MAX 0x7f /* Max count, in each swap_map continuation */ +#define COUNT_CONTINUED 0x80 /* See swap_map continuation for full count */ +#define SWAP_MAP_SHMEM 0xbf /* Owned by shmem/tmpfs, in first swap_map */ + /* * The in-memory structure used to track swap areas. */ struct swap_info_struct { - unsigned long flags; - int prio; /* swap priority */ - int next; /* next entry on swap list */ - struct file *swap_file; - struct block_device *bdev; - struct list_head extent_list; - struct swap_extent *curr_swap_extent; - unsigned short *swap_map; - unsigned int lowest_bit; - unsigned int highest_bit; + unsigned long flags; /* SWP_USED etc: see above */ + signed short prio; /* swap priority of this type */ + signed char type; /* strange name for an index */ + signed char next; /* next type on the swap list */ + unsigned int max; /* extent of the swap_map */ + unsigned char *swap_map; /* vmalloc'ed array of usage counts */ + unsigned int lowest_bit; /* index of first free in swap_map */ + unsigned int highest_bit; /* index of last free in swap_map */ + unsigned int pages; /* total of usable pages of swap */ + unsigned int inuse_pages; /* number of those currently in use */ + unsigned int cluster_next; /* likely index for next allocation */ + unsigned int cluster_nr; /* countdown to next cluster search */ unsigned int lowest_alloc; /* while preparing discard cluster */ unsigned int highest_alloc; /* while preparing discard cluster */ - unsigned int cluster_next; - unsigned int cluster_nr; - unsigned int pages; - unsigned int max; - unsigned int inuse_pages; - unsigned int old_block_size; + struct swap_extent *curr_swap_extent; + struct swap_extent first_swap_extent; + struct block_device *bdev; /* swap device or bdev of swap file */ + struct file *swap_file; /* seldom referenced */ + unsigned int old_block_size; /* seldom referenced */ }; struct swap_list_t { @@ -273,6 +278,7 @@ extern int scan_unevictable_register_node(struct node *node); extern void scan_unevictable_unregister_node(struct node *node); extern int kswapd_run(int nid); +extern void kswapd_stop(int nid); #ifdef CONFIG_MMU /* linux/mm/shmem.c */ @@ -309,17 +315,18 @@ extern long total_swap_pages; extern void si_swapinfo(struct sysinfo *); extern swp_entry_t get_swap_page(void); extern swp_entry_t get_swap_page_of_type(int); -extern void swap_duplicate(swp_entry_t); -extern int swapcache_prepare(swp_entry_t); extern int valid_swaphandles(swp_entry_t, unsigned long *); +extern int add_swap_count_continuation(swp_entry_t, gfp_t); +extern void swap_shmem_alloc(swp_entry_t); +extern int swap_duplicate(swp_entry_t); +extern int swapcache_prepare(swp_entry_t); extern void swap_free(swp_entry_t); extern void swapcache_free(swp_entry_t, struct page *page); extern int free_swap_and_cache(swp_entry_t); extern int swap_type_of(dev_t, sector_t, struct block_device **); extern unsigned int count_swap_pages(int, int); -extern sector_t map_swap_page(struct swap_info_struct *, pgoff_t); +extern sector_t map_swap_page(struct page *, struct block_device **); extern sector_t swapdev_block(int, pgoff_t); -extern struct swap_info_struct *get_swap_info_struct(unsigned); extern int reuse_swap_page(struct page *); extern int try_to_free_swap(struct page *); struct 
backing_dev_info; @@ -384,8 +391,18 @@ static inline void show_swap_cache_info(void) #define free_swap_and_cache(swp) is_migration_entry(swp) #define swapcache_prepare(swp) is_migration_entry(swp) -static inline void swap_duplicate(swp_entry_t swp) +static inline int add_swap_count_continuation(swp_entry_t swp, gfp_t gfp_mask) { + return 0; +} + +static inline void swap_shmem_alloc(swp_entry_t swp) +{ +} + +static inline int swap_duplicate(swp_entry_t swp) +{ + return 0; } static inline void swap_free(swp_entry_t swp) diff --git a/include/linux/tty.h b/include/linux/tty.h index 405a9035fe4..ef3a2947b10 100644 --- a/include/linux/tty.h +++ b/include/linux/tty.h @@ -350,8 +350,6 @@ extern void tty_write_flush(struct tty_struct *); extern struct ktermios tty_std_termios; -extern int kmsg_redirect; - extern void console_init(void); extern int vcs_init(void); diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h index d85889710f9..ee03bba9c5d 100644 --- a/include/linux/vmstat.h +++ b/include/linux/vmstat.h @@ -40,6 +40,8 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT, PGSCAN_ZONE_RECLAIM_FAILED, #endif PGINODESTEAL, SLABS_SCANNED, KSWAPD_STEAL, KSWAPD_INODESTEAL, + KSWAPD_LOW_WMARK_HIT_QUICKLY, KSWAPD_HIGH_WMARK_HIT_QUICKLY, + KSWAPD_SKIP_CONGESTION_WAIT, PAGEOUTRUN, ALLOCSTALL, PGROTATED, #ifdef CONFIG_HUGETLB_PAGE HTLB_BUDDY_PGALLOC, HTLB_BUDDY_PGALLOC_FAIL, diff --git a/include/linux/vt.h b/include/linux/vt.h index 7ffa11f0623..3fb9944e50a 100644 --- a/include/linux/vt.h +++ b/include/linux/vt.h @@ -84,4 +84,19 @@ struct vt_setactivate { #define VT_SETACTIVATE 0x560F /* Activate and set the mode of a console */ +#ifdef CONFIG_VT_CONSOLE + +extern int vt_kmsg_redirect(int new); + +#else + +static inline int vt_kmsg_redirect(int new) +{ + return 0; +} + +#endif + +#define vt_get_kmsg_redirect() vt_kmsg_redirect(-1) + #endif /* _LINUX_VT_H */ diff --git a/init/Kconfig b/init/Kconfig index 54c655ce9c0..a23da9f0180 100644 --- a/init/Kconfig +++ b/init/Kconfig @@ -1079,6 +1079,28 @@ config SLOB endchoice +config MMAP_ALLOW_UNINITIALIZED + bool "Allow mmapped anonymous memory to be uninitialized" + depends on EMBEDDED && !MMU + default n + help + Normally, and according to the Linux spec, anonymous memory obtained + from mmap() has it's contents cleared before it is passed to + userspace. Enabling this config option allows you to request that + mmap() skip that if it is given an MAP_UNINITIALIZED flag, thus + providing a huge performance boost. If this option is not enabled, + then the flag will be ignored. + + This is taken advantage of by uClibc's malloc(), and also by + ELF-FDPIC binfmt's brk and stack allocator. + + Because of the obvious security issues, this option should only be + enabled on embedded devices where you control what is run in + userspace. Since that isn't generally a problem on no-MMU systems, + it is normally safe to say Y here. + + See Documentation/nommu-mmap.txt for more information. 
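
To make the MMAP_ALLOW_UNINITIALIZED help text above concrete, here is a minimal userspace sketch (not part of the patch). It assumes <sys/mman.h> may or may not define MAP_UNINITIALIZED, so a zero fallback is provided for older headers; the fast_anon_alloc() name is purely illustrative. As the help text notes, kernels without the option simply ignore the flag and return cleared memory.

	#include <stddef.h>
	#include <sys/mman.h>

	#ifndef MAP_UNINITIALIZED
	#define MAP_UNINITIALIZED 0	/* older headers: flag unknown, request nothing extra */
	#endif

	/* Hypothetical helper: map anonymous memory without asking the kernel
	 * to clear it.  Only a no-MMU kernel built with
	 * CONFIG_MMAP_ALLOW_UNINITIALIZED actually skips the clearing; on all
	 * other kernels the flag is ignored and the mapping comes back zeroed. */
	static void *fast_anon_alloc(size_t len)
	{
		void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
			       MAP_PRIVATE | MAP_ANONYMOUS | MAP_UNINITIALIZED,
			       -1, 0);
		return (p == MAP_FAILED) ? NULL : p;
	}
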
+ config PROFILING bool "Profiling support (EXPERIMENTAL)" help diff --git a/init/main.c b/init/main.c index 4051d75dd2d..c3db4a98b36 100644 --- a/init/main.c +++ b/init/main.c @@ -691,10 +691,10 @@ asmlinkage void __init start_kernel(void) static void __init do_ctors(void) { #ifdef CONFIG_CONSTRUCTORS - ctor_fn_t *call = (ctor_fn_t *) __ctors_start; + ctor_fn_t *fn = (ctor_fn_t *) __ctors_start; - for (; call < (ctor_fn_t *) __ctors_end; call++) - (*call)(); + for (; fn < (ctor_fn_t *) __ctors_end; fn++) + (*fn)(); #endif } @@ -755,10 +755,10 @@ extern initcall_t __initcall_start[], __initcall_end[], __early_initcall_end[]; static void __init do_initcalls(void) { - initcall_t *call; + initcall_t *fn; - for (call = __early_initcall_end; call < __initcall_end; call++) - do_one_initcall(*call); + for (fn = __early_initcall_end; fn < __initcall_end; fn++) + do_one_initcall(*fn); /* Make sure there is no pending stuff from the initcall sequence */ flush_scheduled_work(); @@ -785,10 +785,10 @@ static void __init do_basic_setup(void) static void __init do_pre_smp_initcalls(void) { - initcall_t *call; + initcall_t *fn; - for (call = __initcall_start; call < __early_initcall_end; call++) - do_one_initcall(*call); + for (fn = __initcall_start; fn < __early_initcall_end; fn++) + do_one_initcall(*fn); } static void run_init_process(char *init_filename) diff --git a/kernel/acct.c b/kernel/acct.c index 9a4715a2f6b..a6605ca921b 100644 --- a/kernel/acct.c +++ b/kernel/acct.c @@ -536,7 +536,8 @@ static void do_acct_process(struct bsd_acct_struct *acct, do_div(elapsed, AHZ); ac.ac_btime = get_seconds() - elapsed; /* we really need to bite the bullet and change layout */ - current_uid_gid(&ac.ac_uid, &ac.ac_gid); + ac.ac_uid = orig_cred->uid; + ac.ac_gid = orig_cred->gid; #if ACCT_VERSION==2 ac.ac_ahz = AHZ; #endif diff --git a/kernel/exit.c b/kernel/exit.c index 6f50ef55a6f..5962d7ccf24 100644 --- a/kernel/exit.c +++ b/kernel/exit.c @@ -933,7 +933,7 @@ NORET_TYPE void do_exit(long code) * an exiting task cleaning up the robust pi futexes. 
*/ smp_mb(); - spin_unlock_wait(&tsk->pi_lock); + raw_spin_unlock_wait(&tsk->pi_lock); if (unlikely(in_atomic())) printk(KERN_INFO "note: %s[%d] exited with preempt_count %d\n", diff --git a/kernel/fork.c b/kernel/fork.c index 1415dc4598a..9bd91447e05 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -939,9 +939,9 @@ SYSCALL_DEFINE1(set_tid_address, int __user *, tidptr) static void rt_mutex_init_task(struct task_struct *p) { - spin_lock_init(&p->pi_lock); + raw_spin_lock_init(&p->pi_lock); #ifdef CONFIG_RT_MUTEXES - plist_head_init(&p->pi_waiters, &p->pi_lock); + plist_head_init_raw(&p->pi_waiters, &p->pi_lock); p->pi_blocked_on = NULL; #endif } diff --git a/kernel/futex.c b/kernel/futex.c index d73ef1f3e55..8e3c3ffe1b9 100644 --- a/kernel/futex.c +++ b/kernel/futex.c @@ -403,9 +403,9 @@ static void free_pi_state(struct futex_pi_state *pi_state) * and has cleaned up the pi_state already */ if (pi_state->owner) { - spin_lock_irq(&pi_state->owner->pi_lock); + raw_spin_lock_irq(&pi_state->owner->pi_lock); list_del_init(&pi_state->list); - spin_unlock_irq(&pi_state->owner->pi_lock); + raw_spin_unlock_irq(&pi_state->owner->pi_lock); rt_mutex_proxy_unlock(&pi_state->pi_mutex, pi_state->owner); } @@ -470,18 +470,18 @@ void exit_pi_state_list(struct task_struct *curr) * pi_state_list anymore, but we have to be careful * versus waiters unqueueing themselves: */ - spin_lock_irq(&curr->pi_lock); + raw_spin_lock_irq(&curr->pi_lock); while (!list_empty(head)) { next = head->next; pi_state = list_entry(next, struct futex_pi_state, list); key = pi_state->key; hb = hash_futex(&key); - spin_unlock_irq(&curr->pi_lock); + raw_spin_unlock_irq(&curr->pi_lock); spin_lock(&hb->lock); - spin_lock_irq(&curr->pi_lock); + raw_spin_lock_irq(&curr->pi_lock); /* * We dropped the pi-lock, so re-check whether this * task still owns the PI-state: @@ -495,15 +495,15 @@ void exit_pi_state_list(struct task_struct *curr) WARN_ON(list_empty(&pi_state->list)); list_del_init(&pi_state->list); pi_state->owner = NULL; - spin_unlock_irq(&curr->pi_lock); + raw_spin_unlock_irq(&curr->pi_lock); rt_mutex_unlock(&pi_state->pi_mutex); spin_unlock(&hb->lock); - spin_lock_irq(&curr->pi_lock); + raw_spin_lock_irq(&curr->pi_lock); } - spin_unlock_irq(&curr->pi_lock); + raw_spin_unlock_irq(&curr->pi_lock); } static int @@ -558,7 +558,7 @@ lookup_pi_state(u32 uval, struct futex_hash_bucket *hb, * change of the task flags, we do this protected by * p->pi_lock: */ - spin_lock_irq(&p->pi_lock); + raw_spin_lock_irq(&p->pi_lock); if (unlikely(p->flags & PF_EXITING)) { /* * The task is on the way out. When PF_EXITPIDONE is @@ -567,7 +567,7 @@ lookup_pi_state(u32 uval, struct futex_hash_bucket *hb, */ int ret = (p->flags & PF_EXITPIDONE) ? 
-ESRCH : -EAGAIN; - spin_unlock_irq(&p->pi_lock); + raw_spin_unlock_irq(&p->pi_lock); put_task_struct(p); return ret; } @@ -586,7 +586,7 @@ lookup_pi_state(u32 uval, struct futex_hash_bucket *hb, WARN_ON(!list_empty(&pi_state->list)); list_add(&pi_state->list, &p->pi_state_list); pi_state->owner = p; - spin_unlock_irq(&p->pi_lock); + raw_spin_unlock_irq(&p->pi_lock); put_task_struct(p); @@ -760,7 +760,7 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this) if (!pi_state) return -EINVAL; - spin_lock(&pi_state->pi_mutex.wait_lock); + raw_spin_lock(&pi_state->pi_mutex.wait_lock); new_owner = rt_mutex_next_owner(&pi_state->pi_mutex); /* @@ -789,23 +789,23 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this) else if (curval != uval) ret = -EINVAL; if (ret) { - spin_unlock(&pi_state->pi_mutex.wait_lock); + raw_spin_unlock(&pi_state->pi_mutex.wait_lock); return ret; } } - spin_lock_irq(&pi_state->owner->pi_lock); + raw_spin_lock_irq(&pi_state->owner->pi_lock); WARN_ON(list_empty(&pi_state->list)); list_del_init(&pi_state->list); - spin_unlock_irq(&pi_state->owner->pi_lock); + raw_spin_unlock_irq(&pi_state->owner->pi_lock); - spin_lock_irq(&new_owner->pi_lock); + raw_spin_lock_irq(&new_owner->pi_lock); WARN_ON(!list_empty(&pi_state->list)); list_add(&pi_state->list, &new_owner->pi_state_list); pi_state->owner = new_owner; - spin_unlock_irq(&new_owner->pi_lock); + raw_spin_unlock_irq(&new_owner->pi_lock); - spin_unlock(&pi_state->pi_mutex.wait_lock); + raw_spin_unlock(&pi_state->pi_mutex.wait_lock); rt_mutex_unlock(&pi_state->pi_mutex); return 0; @@ -1010,7 +1010,7 @@ void requeue_futex(struct futex_q *q, struct futex_hash_bucket *hb1, plist_add(&q->list, &hb2->chain); q->lock_ptr = &hb2->lock; #ifdef CONFIG_DEBUG_PI_LIST - q->list.plist.lock = &hb2->lock; + q->list.plist.spinlock = &hb2->lock; #endif } get_futex_key_refs(key2); @@ -1046,7 +1046,7 @@ void requeue_pi_wake_futex(struct futex_q *q, union futex_key *key, q->lock_ptr = &hb->lock; #ifdef CONFIG_DEBUG_PI_LIST - q->list.plist.lock = &hb->lock; + q->list.plist.spinlock = &hb->lock; #endif wake_up_state(q->task, TASK_NORMAL); @@ -1394,7 +1394,7 @@ static inline void queue_me(struct futex_q *q, struct futex_hash_bucket *hb) plist_node_init(&q->list, prio); #ifdef CONFIG_DEBUG_PI_LIST - q->list.plist.lock = &hb->lock; + q->list.plist.spinlock = &hb->lock; #endif plist_add(&q->list, &hb->chain); q->task = current; @@ -1529,18 +1529,18 @@ retry: * itself. 
*/ if (pi_state->owner != NULL) { - spin_lock_irq(&pi_state->owner->pi_lock); + raw_spin_lock_irq(&pi_state->owner->pi_lock); WARN_ON(list_empty(&pi_state->list)); list_del_init(&pi_state->list); - spin_unlock_irq(&pi_state->owner->pi_lock); + raw_spin_unlock_irq(&pi_state->owner->pi_lock); } pi_state->owner = newowner; - spin_lock_irq(&newowner->pi_lock); + raw_spin_lock_irq(&newowner->pi_lock); WARN_ON(!list_empty(&pi_state->list)); list_add(&pi_state->list, &newowner->pi_state_list); - spin_unlock_irq(&newowner->pi_lock); + raw_spin_unlock_irq(&newowner->pi_lock); return 0; /* diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c index d2f9239dc6b..0086628b6e9 100644 --- a/kernel/hrtimer.c +++ b/kernel/hrtimer.c @@ -127,11 +127,11 @@ struct hrtimer_clock_base *lock_hrtimer_base(const struct hrtimer *timer, for (;;) { base = timer->base; if (likely(base != NULL)) { - spin_lock_irqsave(&base->cpu_base->lock, *flags); + raw_spin_lock_irqsave(&base->cpu_base->lock, *flags); if (likely(base == timer->base)) return base; /* The timer has migrated to another CPU: */ - spin_unlock_irqrestore(&base->cpu_base->lock, *flags); + raw_spin_unlock_irqrestore(&base->cpu_base->lock, *flags); } cpu_relax(); } @@ -208,13 +208,13 @@ again: /* See the comment in lock_timer_base() */ timer->base = NULL; - spin_unlock(&base->cpu_base->lock); - spin_lock(&new_base->cpu_base->lock); + raw_spin_unlock(&base->cpu_base->lock); + raw_spin_lock(&new_base->cpu_base->lock); if (cpu != this_cpu && hrtimer_check_target(timer, new_base)) { cpu = this_cpu; - spin_unlock(&new_base->cpu_base->lock); - spin_lock(&base->cpu_base->lock); + raw_spin_unlock(&new_base->cpu_base->lock); + raw_spin_lock(&base->cpu_base->lock); timer->base = base; goto again; } @@ -230,7 +230,7 @@ lock_hrtimer_base(const struct hrtimer *timer, unsigned long *flags) { struct hrtimer_clock_base *base = timer->base; - spin_lock_irqsave(&base->cpu_base->lock, *flags); + raw_spin_lock_irqsave(&base->cpu_base->lock, *flags); return base; } @@ -628,12 +628,12 @@ static void retrigger_next_event(void *arg) base = &__get_cpu_var(hrtimer_bases); /* Adjust CLOCK_REALTIME offset */ - spin_lock(&base->lock); + raw_spin_lock(&base->lock); base->clock_base[CLOCK_REALTIME].offset = timespec_to_ktime(realtime_offset); hrtimer_force_reprogram(base, 0); - spin_unlock(&base->lock); + raw_spin_unlock(&base->lock); } /* @@ -694,9 +694,9 @@ static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer, { if (base->cpu_base->hres_active && hrtimer_reprogram(timer, base)) { if (wakeup) { - spin_unlock(&base->cpu_base->lock); + raw_spin_unlock(&base->cpu_base->lock); raise_softirq_irqoff(HRTIMER_SOFTIRQ); - spin_lock(&base->cpu_base->lock); + raw_spin_lock(&base->cpu_base->lock); } else __raise_softirq_irqoff(HRTIMER_SOFTIRQ); @@ -790,7 +790,7 @@ static inline void timer_stats_account_hrtimer(struct hrtimer *timer) static inline void unlock_hrtimer_base(const struct hrtimer *timer, unsigned long *flags) { - spin_unlock_irqrestore(&timer->base->cpu_base->lock, *flags); + raw_spin_unlock_irqrestore(&timer->base->cpu_base->lock, *flags); } /** @@ -1123,7 +1123,7 @@ ktime_t hrtimer_get_next_event(void) unsigned long flags; int i; - spin_lock_irqsave(&cpu_base->lock, flags); + raw_spin_lock_irqsave(&cpu_base->lock, flags); if (!hrtimer_hres_active()) { for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++, base++) { @@ -1140,7 +1140,7 @@ ktime_t hrtimer_get_next_event(void) } } - spin_unlock_irqrestore(&cpu_base->lock, flags); + raw_spin_unlock_irqrestore(&cpu_base->lock, flags); if 
(mindelta.tv64 < 0) mindelta.tv64 = 0; @@ -1222,11 +1222,11 @@ static void __run_hrtimer(struct hrtimer *timer, ktime_t *now) * they get migrated to another cpu, therefore its safe to unlock * the timer base. */ - spin_unlock(&cpu_base->lock); + raw_spin_unlock(&cpu_base->lock); trace_hrtimer_expire_entry(timer, now); restart = fn(timer); trace_hrtimer_expire_exit(timer); - spin_lock(&cpu_base->lock); + raw_spin_lock(&cpu_base->lock); /* * Note: We clear the CALLBACK bit after enqueue_hrtimer and @@ -1261,7 +1261,7 @@ void hrtimer_interrupt(struct clock_event_device *dev) retry: expires_next.tv64 = KTIME_MAX; - spin_lock(&cpu_base->lock); + raw_spin_lock(&cpu_base->lock); /* * We set expires_next to KTIME_MAX here with cpu_base->lock * held to prevent that a timer is enqueued in our queue via @@ -1317,7 +1317,7 @@ retry: * against it. */ cpu_base->expires_next = expires_next; - spin_unlock(&cpu_base->lock); + raw_spin_unlock(&cpu_base->lock); /* Reprogramming necessary ? */ if (expires_next.tv64 == KTIME_MAX || @@ -1457,7 +1457,7 @@ void hrtimer_run_queues(void) gettime = 0; } - spin_lock(&cpu_base->lock); + raw_spin_lock(&cpu_base->lock); while ((node = base->first)) { struct hrtimer *timer; @@ -1469,7 +1469,7 @@ void hrtimer_run_queues(void) __run_hrtimer(timer, &base->softirq_time); } - spin_unlock(&cpu_base->lock); + raw_spin_unlock(&cpu_base->lock); } } @@ -1625,7 +1625,7 @@ static void __cpuinit init_hrtimers_cpu(int cpu) struct hrtimer_cpu_base *cpu_base = &per_cpu(hrtimer_bases, cpu); int i; - spin_lock_init(&cpu_base->lock); + raw_spin_lock_init(&cpu_base->lock); for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) cpu_base->clock_base[i].cpu_base = cpu_base; @@ -1683,16 +1683,16 @@ static void migrate_hrtimers(int scpu) * The caller is globally serialized and nobody else * takes two locks at once, deadlock is not possible. */ - spin_lock(&new_base->lock); - spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING); + raw_spin_lock(&new_base->lock); + raw_spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING); for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) { migrate_hrtimer_list(&old_base->clock_base[i], &new_base->clock_base[i]); } - spin_unlock(&old_base->lock); - spin_unlock(&new_base->lock); + raw_spin_unlock(&old_base->lock); + raw_spin_unlock(&new_base->lock); /* Check, if we got expired work to do */ __hrtimer_peek_ahead_timers(); diff --git a/kernel/hw_breakpoint.c b/kernel/hw_breakpoint.c index 366eedf949c..dbcbf6a33a0 100644 --- a/kernel/hw_breakpoint.c +++ b/kernel/hw_breakpoint.c @@ -96,7 +96,7 @@ static int task_bp_pinned(struct task_struct *tsk) list = &ctx->event_list; - spin_lock_irqsave(&ctx->lock, flags); + raw_spin_lock_irqsave(&ctx->lock, flags); /* * The current breakpoint counter is not included in the list @@ -107,7 +107,7 @@ static int task_bp_pinned(struct task_struct *tsk) count++; } - spin_unlock_irqrestore(&ctx->lock, flags); + raw_spin_unlock_irqrestore(&ctx->lock, flags); return count; } diff --git a/kernel/irq/autoprobe.c b/kernel/irq/autoprobe.c index 1de9700f416..2295a31ef11 100644 --- a/kernel/irq/autoprobe.c +++ b/kernel/irq/autoprobe.c @@ -45,7 +45,7 @@ unsigned long probe_irq_on(void) * flush such a longstanding irq before considering it as spurious. 
*/ for_each_irq_desc_reverse(i, desc) { - spin_lock_irq(&desc->lock); + raw_spin_lock_irq(&desc->lock); if (!desc->action && !(desc->status & IRQ_NOPROBE)) { /* * An old-style architecture might still have @@ -61,7 +61,7 @@ unsigned long probe_irq_on(void) desc->chip->set_type(i, IRQ_TYPE_PROBE); desc->chip->startup(i); } - spin_unlock_irq(&desc->lock); + raw_spin_unlock_irq(&desc->lock); } /* Wait for longstanding interrupts to trigger. */ @@ -73,13 +73,13 @@ unsigned long probe_irq_on(void) * happened in the previous stage, it may have masked itself) */ for_each_irq_desc_reverse(i, desc) { - spin_lock_irq(&desc->lock); + raw_spin_lock_irq(&desc->lock); if (!desc->action && !(desc->status & IRQ_NOPROBE)) { desc->status |= IRQ_AUTODETECT | IRQ_WAITING; if (desc->chip->startup(i)) desc->status |= IRQ_PENDING; } - spin_unlock_irq(&desc->lock); + raw_spin_unlock_irq(&desc->lock); } /* @@ -91,7 +91,7 @@ unsigned long probe_irq_on(void) * Now filter out any obviously spurious interrupts */ for_each_irq_desc(i, desc) { - spin_lock_irq(&desc->lock); + raw_spin_lock_irq(&desc->lock); status = desc->status; if (status & IRQ_AUTODETECT) { @@ -103,7 +103,7 @@ unsigned long probe_irq_on(void) if (i < 32) mask |= 1 << i; } - spin_unlock_irq(&desc->lock); + raw_spin_unlock_irq(&desc->lock); } return mask; @@ -129,7 +129,7 @@ unsigned int probe_irq_mask(unsigned long val) int i; for_each_irq_desc(i, desc) { - spin_lock_irq(&desc->lock); + raw_spin_lock_irq(&desc->lock); status = desc->status; if (status & IRQ_AUTODETECT) { @@ -139,7 +139,7 @@ unsigned int probe_irq_mask(unsigned long val) desc->status = status & ~IRQ_AUTODETECT; desc->chip->shutdown(i); } - spin_unlock_irq(&desc->lock); + raw_spin_unlock_irq(&desc->lock); } mutex_unlock(&probing_active); @@ -171,7 +171,7 @@ int probe_irq_off(unsigned long val) unsigned int status; for_each_irq_desc(i, desc) { - spin_lock_irq(&desc->lock); + raw_spin_lock_irq(&desc->lock); status = desc->status; if (status & IRQ_AUTODETECT) { @@ -183,7 +183,7 @@ int probe_irq_off(unsigned long val) desc->status = status & ~IRQ_AUTODETECT; desc->chip->shutdown(i); } - spin_unlock_irq(&desc->lock); + raw_spin_unlock_irq(&desc->lock); } mutex_unlock(&probing_active); diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c index ba566c261ad..ecc3fa28f66 100644 --- a/kernel/irq/chip.c +++ b/kernel/irq/chip.c @@ -34,7 +34,7 @@ void dynamic_irq_init(unsigned int irq) } /* Ensure we don't have left over values from a previous use of this irq */ - spin_lock_irqsave(&desc->lock, flags); + raw_spin_lock_irqsave(&desc->lock, flags); desc->status = IRQ_DISABLED; desc->chip = &no_irq_chip; desc->handle_irq = handle_bad_irq; @@ -51,7 +51,7 @@ void dynamic_irq_init(unsigned int irq) cpumask_clear(desc->pending_mask); #endif #endif - spin_unlock_irqrestore(&desc->lock, flags); + raw_spin_unlock_irqrestore(&desc->lock, flags); } /** @@ -68,9 +68,9 @@ void dynamic_irq_cleanup(unsigned int irq) return; } - spin_lock_irqsave(&desc->lock, flags); + raw_spin_lock_irqsave(&desc->lock, flags); if (desc->action) { - spin_unlock_irqrestore(&desc->lock, flags); + raw_spin_unlock_irqrestore(&desc->lock, flags); WARN(1, KERN_ERR "Destroying IRQ%d without calling free_irq\n", irq); return; @@ -82,7 +82,7 @@ void dynamic_irq_cleanup(unsigned int irq) desc->chip = &no_irq_chip; desc->name = NULL; clear_kstat_irqs(desc); - spin_unlock_irqrestore(&desc->lock, flags); + raw_spin_unlock_irqrestore(&desc->lock, flags); } @@ -104,10 +104,10 @@ int set_irq_chip(unsigned int irq, struct irq_chip *chip) if (!chip) 
chip = &no_irq_chip; - spin_lock_irqsave(&desc->lock, flags); + raw_spin_lock_irqsave(&desc->lock, flags); irq_chip_set_defaults(chip); desc->chip = chip; - spin_unlock_irqrestore(&desc->lock, flags); + raw_spin_unlock_irqrestore(&desc->lock, flags); return 0; } @@ -133,9 +133,9 @@ int set_irq_type(unsigned int irq, unsigned int type) if (type == IRQ_TYPE_NONE) return 0; - spin_lock_irqsave(&desc->lock, flags); + raw_spin_lock_irqsave(&desc->lock, flags); ret = __irq_set_trigger(desc, irq, type); - spin_unlock_irqrestore(&desc->lock, flags); + raw_spin_unlock_irqrestore(&desc->lock, flags); return ret; } EXPORT_SYMBOL(set_irq_type); @@ -158,9 +158,9 @@ int set_irq_data(unsigned int irq, void *data) return -EINVAL; } - spin_lock_irqsave(&desc->lock, flags); + raw_spin_lock_irqsave(&desc->lock, flags); desc->handler_data = data; - spin_unlock_irqrestore(&desc->lock, flags); + raw_spin_unlock_irqrestore(&desc->lock, flags); return 0; } EXPORT_SYMBOL(set_irq_data); @@ -183,11 +183,11 @@ int set_irq_msi(unsigned int irq, struct msi_desc *entry) return -EINVAL; } - spin_lock_irqsave(&desc->lock, flags); + raw_spin_lock_irqsave(&desc->lock, flags); desc->msi_desc = entry; if (entry) entry->irq = irq; - spin_unlock_irqrestore(&desc->lock, flags); + raw_spin_unlock_irqrestore(&desc->lock, flags); return 0; } @@ -214,9 +214,9 @@ int set_irq_chip_data(unsigned int irq, void *data) return -EINVAL; } - spin_lock_irqsave(&desc->lock, flags); + raw_spin_lock_irqsave(&desc->lock, flags); desc->chip_data = data; - spin_unlock_irqrestore(&desc->lock, flags); + raw_spin_unlock_irqrestore(&desc->lock, flags); return 0; } @@ -241,12 +241,12 @@ void set_irq_nested_thread(unsigned int irq, int nest) if (!desc) return; - spin_lock_irqsave(&desc->lock, flags); + raw_spin_lock_irqsave(&desc->lock, flags); if (nest) desc->status |= IRQ_NESTED_THREAD; else desc->status &= ~IRQ_NESTED_THREAD; - spin_unlock_irqrestore(&desc->lock, flags); + raw_spin_unlock_irqrestore(&desc->lock, flags); } EXPORT_SYMBOL_GPL(set_irq_nested_thread); @@ -343,7 +343,7 @@ void handle_nested_irq(unsigned int irq) might_sleep(); - spin_lock_irq(&desc->lock); + raw_spin_lock_irq(&desc->lock); kstat_incr_irqs_this_cpu(irq, desc); @@ -352,17 +352,17 @@ void handle_nested_irq(unsigned int irq) goto out_unlock; desc->status |= IRQ_INPROGRESS; - spin_unlock_irq(&desc->lock); + raw_spin_unlock_irq(&desc->lock); action_ret = action->thread_fn(action->irq, action->dev_id); if (!noirqdebug) note_interrupt(irq, desc, action_ret); - spin_lock_irq(&desc->lock); + raw_spin_lock_irq(&desc->lock); desc->status &= ~IRQ_INPROGRESS; out_unlock: - spin_unlock_irq(&desc->lock); + raw_spin_unlock_irq(&desc->lock); } EXPORT_SYMBOL_GPL(handle_nested_irq); @@ -384,7 +384,7 @@ handle_simple_irq(unsigned int irq, struct irq_desc *desc) struct irqaction *action; irqreturn_t action_ret; - spin_lock(&desc->lock); + raw_spin_lock(&desc->lock); if (unlikely(desc->status & IRQ_INPROGRESS)) goto out_unlock; @@ -396,16 +396,16 @@ handle_simple_irq(unsigned int irq, struct irq_desc *desc) goto out_unlock; desc->status |= IRQ_INPROGRESS; - spin_unlock(&desc->lock); + raw_spin_unlock(&desc->lock); action_ret = handle_IRQ_event(irq, action); if (!noirqdebug) note_interrupt(irq, desc, action_ret); - spin_lock(&desc->lock); + raw_spin_lock(&desc->lock); desc->status &= ~IRQ_INPROGRESS; out_unlock: - spin_unlock(&desc->lock); + raw_spin_unlock(&desc->lock); } /** @@ -424,7 +424,7 @@ handle_level_irq(unsigned int irq, struct irq_desc *desc) struct irqaction *action; irqreturn_t 
action_ret; - spin_lock(&desc->lock); + raw_spin_lock(&desc->lock); mask_ack_irq(desc, irq); if (unlikely(desc->status & IRQ_INPROGRESS)) @@ -441,13 +441,13 @@ handle_level_irq(unsigned int irq, struct irq_desc *desc) goto out_unlock; desc->status |= IRQ_INPROGRESS; - spin_unlock(&desc->lock); + raw_spin_unlock(&desc->lock); action_ret = handle_IRQ_event(irq, action); if (!noirqdebug) note_interrupt(irq, desc, action_ret); - spin_lock(&desc->lock); + raw_spin_lock(&desc->lock); desc->status &= ~IRQ_INPROGRESS; if (unlikely(desc->status & IRQ_ONESHOT)) @@ -455,7 +455,7 @@ handle_level_irq(unsigned int irq, struct irq_desc *desc) else if (!(desc->status & IRQ_DISABLED) && desc->chip->unmask) desc->chip->unmask(irq); out_unlock: - spin_unlock(&desc->lock); + raw_spin_unlock(&desc->lock); } EXPORT_SYMBOL_GPL(handle_level_irq); @@ -475,7 +475,7 @@ handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc) struct irqaction *action; irqreturn_t action_ret; - spin_lock(&desc->lock); + raw_spin_lock(&desc->lock); if (unlikely(desc->status & IRQ_INPROGRESS)) goto out; @@ -497,18 +497,18 @@ handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc) desc->status |= IRQ_INPROGRESS; desc->status &= ~IRQ_PENDING; - spin_unlock(&desc->lock); + raw_spin_unlock(&desc->lock); action_ret = handle_IRQ_event(irq, action); if (!noirqdebug) note_interrupt(irq, desc, action_ret); - spin_lock(&desc->lock); + raw_spin_lock(&desc->lock); desc->status &= ~IRQ_INPROGRESS; out: desc->chip->eoi(irq); - spin_unlock(&desc->lock); + raw_spin_unlock(&desc->lock); } /** @@ -530,7 +530,7 @@ out: void handle_edge_irq(unsigned int irq, struct irq_desc *desc) { - spin_lock(&desc->lock); + raw_spin_lock(&desc->lock); desc->status &= ~(IRQ_REPLAY | IRQ_WAITING); @@ -576,17 +576,17 @@ handle_edge_irq(unsigned int irq, struct irq_desc *desc) } desc->status &= ~IRQ_PENDING; - spin_unlock(&desc->lock); + raw_spin_unlock(&desc->lock); action_ret = handle_IRQ_event(irq, action); if (!noirqdebug) note_interrupt(irq, desc, action_ret); - spin_lock(&desc->lock); + raw_spin_lock(&desc->lock); } while ((desc->status & (IRQ_PENDING | IRQ_DISABLED)) == IRQ_PENDING); desc->status &= ~IRQ_INPROGRESS; out_unlock: - spin_unlock(&desc->lock); + raw_spin_unlock(&desc->lock); } /** @@ -643,7 +643,7 @@ __set_irq_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained, } chip_bus_lock(irq, desc); - spin_lock_irqsave(&desc->lock, flags); + raw_spin_lock_irqsave(&desc->lock, flags); /* Uninstall? 
*/ if (handle == handle_bad_irq) { @@ -661,7 +661,7 @@ __set_irq_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained, desc->depth = 0; desc->chip->startup(irq); } - spin_unlock_irqrestore(&desc->lock, flags); + raw_spin_unlock_irqrestore(&desc->lock, flags); chip_bus_sync_unlock(irq, desc); } EXPORT_SYMBOL_GPL(__set_irq_handler); @@ -692,9 +692,9 @@ void __init set_irq_noprobe(unsigned int irq) return; } - spin_lock_irqsave(&desc->lock, flags); + raw_spin_lock_irqsave(&desc->lock, flags); desc->status |= IRQ_NOPROBE; - spin_unlock_irqrestore(&desc->lock, flags); + raw_spin_unlock_irqrestore(&desc->lock, flags); } void __init set_irq_probe(unsigned int irq) @@ -707,7 +707,7 @@ void __init set_irq_probe(unsigned int irq) return; } - spin_lock_irqsave(&desc->lock, flags); + raw_spin_lock_irqsave(&desc->lock, flags); desc->status &= ~IRQ_NOPROBE; - spin_unlock_irqrestore(&desc->lock, flags); + raw_spin_unlock_irqrestore(&desc->lock, flags); } diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c index 17c71bb565c..814940e7f48 100644 --- a/kernel/irq/handle.c +++ b/kernel/irq/handle.c @@ -80,7 +80,7 @@ static struct irq_desc irq_desc_init = { .chip = &no_irq_chip, .handle_irq = handle_bad_irq, .depth = 1, - .lock = __SPIN_LOCK_UNLOCKED(irq_desc_init.lock), + .lock = __RAW_SPIN_LOCK_UNLOCKED(irq_desc_init.lock), }; void __ref init_kstat_irqs(struct irq_desc *desc, int node, int nr) @@ -108,7 +108,7 @@ static void init_one_irq_desc(int irq, struct irq_desc *desc, int node) { memcpy(desc, &irq_desc_init, sizeof(struct irq_desc)); - spin_lock_init(&desc->lock); + raw_spin_lock_init(&desc->lock); desc->irq = irq; #ifdef CONFIG_SMP desc->node = node; @@ -130,7 +130,7 @@ static void init_one_irq_desc(int irq, struct irq_desc *desc, int node) /* * Protect the sparse_irqs: */ -DEFINE_SPINLOCK(sparse_irq_lock); +DEFINE_RAW_SPINLOCK(sparse_irq_lock); struct irq_desc **irq_desc_ptrs __read_mostly; @@ -141,7 +141,7 @@ static struct irq_desc irq_desc_legacy[NR_IRQS_LEGACY] __cacheline_aligned_in_sm .chip = &no_irq_chip, .handle_irq = handle_bad_irq, .depth = 1, - .lock = __SPIN_LOCK_UNLOCKED(irq_desc_init.lock), + .lock = __RAW_SPIN_LOCK_UNLOCKED(irq_desc_init.lock), } }; @@ -212,7 +212,7 @@ struct irq_desc * __ref irq_to_desc_alloc_node(unsigned int irq, int node) if (desc) return desc; - spin_lock_irqsave(&sparse_irq_lock, flags); + raw_spin_lock_irqsave(&sparse_irq_lock, flags); /* We have to check it to avoid races with another CPU */ desc = irq_desc_ptrs[irq]; @@ -234,7 +234,7 @@ struct irq_desc * __ref irq_to_desc_alloc_node(unsigned int irq, int node) irq_desc_ptrs[irq] = desc; out_unlock: - spin_unlock_irqrestore(&sparse_irq_lock, flags); + raw_spin_unlock_irqrestore(&sparse_irq_lock, flags); return desc; } @@ -247,7 +247,7 @@ struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = { .chip = &no_irq_chip, .handle_irq = handle_bad_irq, .depth = 1, - .lock = __SPIN_LOCK_UNLOCKED(irq_desc->lock), + .lock = __RAW_SPIN_LOCK_UNLOCKED(irq_desc->lock), } }; @@ -473,7 +473,7 @@ unsigned int __do_IRQ(unsigned int irq) return 1; } - spin_lock(&desc->lock); + raw_spin_lock(&desc->lock); if (desc->chip->ack) desc->chip->ack(irq); /* @@ -517,13 +517,13 @@ unsigned int __do_IRQ(unsigned int irq) for (;;) { irqreturn_t action_ret; - spin_unlock(&desc->lock); + raw_spin_unlock(&desc->lock); action_ret = handle_IRQ_event(irq, action); if (!noirqdebug) note_interrupt(irq, desc, action_ret); - spin_lock(&desc->lock); + raw_spin_lock(&desc->lock); if (likely(!(desc->status & IRQ_PENDING))) break; 
desc->status &= ~IRQ_PENDING; @@ -536,7 +536,7 @@ out: * disabled while the handler was running. */ desc->chip->end(irq); - spin_unlock(&desc->lock); + raw_spin_unlock(&desc->lock); return 1; } diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h index 1b5d742c6a7..b2821f070a3 100644 --- a/kernel/irq/internals.h +++ b/kernel/irq/internals.h @@ -18,7 +18,7 @@ extern void __enable_irq(struct irq_desc *desc, unsigned int irq, bool resume); extern struct lock_class_key irq_desc_lock_class; extern void init_kstat_irqs(struct irq_desc *desc, int node, int nr); extern void clear_kstat_irqs(struct irq_desc *desc); -extern spinlock_t sparse_irq_lock; +extern raw_spinlock_t sparse_irq_lock; #ifdef CONFIG_SPARSE_IRQ /* irq_desc_ptrs allocated at boot time */ diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c index 7305b297d1e..eb6078ca60c 100644 --- a/kernel/irq/manage.c +++ b/kernel/irq/manage.c @@ -46,9 +46,9 @@ void synchronize_irq(unsigned int irq) cpu_relax(); /* Ok, that indicated we're done: double-check carefully. */ - spin_lock_irqsave(&desc->lock, flags); + raw_spin_lock_irqsave(&desc->lock, flags); status = desc->status; - spin_unlock_irqrestore(&desc->lock, flags); + raw_spin_unlock_irqrestore(&desc->lock, flags); /* Oops, that failed? */ } while (status & IRQ_INPROGRESS); @@ -114,7 +114,7 @@ int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask) if (!desc->chip->set_affinity) return -EINVAL; - spin_lock_irqsave(&desc->lock, flags); + raw_spin_lock_irqsave(&desc->lock, flags); #ifdef CONFIG_GENERIC_PENDING_IRQ if (desc->status & IRQ_MOVE_PCNTXT) { @@ -134,7 +134,7 @@ int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask) } #endif desc->status |= IRQ_AFFINITY_SET; - spin_unlock_irqrestore(&desc->lock, flags); + raw_spin_unlock_irqrestore(&desc->lock, flags); return 0; } @@ -181,11 +181,11 @@ int irq_select_affinity_usr(unsigned int irq) unsigned long flags; int ret; - spin_lock_irqsave(&desc->lock, flags); + raw_spin_lock_irqsave(&desc->lock, flags); ret = setup_affinity(irq, desc); if (!ret) irq_set_thread_affinity(desc); - spin_unlock_irqrestore(&desc->lock, flags); + raw_spin_unlock_irqrestore(&desc->lock, flags); return ret; } @@ -231,9 +231,9 @@ void disable_irq_nosync(unsigned int irq) return; chip_bus_lock(irq, desc); - spin_lock_irqsave(&desc->lock, flags); + raw_spin_lock_irqsave(&desc->lock, flags); __disable_irq(desc, irq, false); - spin_unlock_irqrestore(&desc->lock, flags); + raw_spin_unlock_irqrestore(&desc->lock, flags); chip_bus_sync_unlock(irq, desc); } EXPORT_SYMBOL(disable_irq_nosync); @@ -308,9 +308,9 @@ void enable_irq(unsigned int irq) return; chip_bus_lock(irq, desc); - spin_lock_irqsave(&desc->lock, flags); + raw_spin_lock_irqsave(&desc->lock, flags); __enable_irq(desc, irq, false); - spin_unlock_irqrestore(&desc->lock, flags); + raw_spin_unlock_irqrestore(&desc->lock, flags); chip_bus_sync_unlock(irq, desc); } EXPORT_SYMBOL(enable_irq); @@ -347,7 +347,7 @@ int set_irq_wake(unsigned int irq, unsigned int on) /* wakeup-capable irqs can be shared between drivers that * don't need to have the same sleep mode behaviors. 
*/ - spin_lock_irqsave(&desc->lock, flags); + raw_spin_lock_irqsave(&desc->lock, flags); if (on) { if (desc->wake_depth++ == 0) { ret = set_irq_wake_real(irq, on); @@ -368,7 +368,7 @@ int set_irq_wake(unsigned int irq, unsigned int on) } } - spin_unlock_irqrestore(&desc->lock, flags); + raw_spin_unlock_irqrestore(&desc->lock, flags); return ret; } EXPORT_SYMBOL(set_irq_wake); @@ -484,12 +484,12 @@ static int irq_wait_for_interrupt(struct irqaction *action) static void irq_finalize_oneshot(unsigned int irq, struct irq_desc *desc) { chip_bus_lock(irq, desc); - spin_lock_irq(&desc->lock); + raw_spin_lock_irq(&desc->lock); if (!(desc->status & IRQ_DISABLED) && (desc->status & IRQ_MASKED)) { desc->status &= ~IRQ_MASKED; desc->chip->unmask(irq); } - spin_unlock_irq(&desc->lock); + raw_spin_unlock_irq(&desc->lock); chip_bus_sync_unlock(irq, desc); } @@ -514,9 +514,9 @@ irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) return; } - spin_lock_irq(&desc->lock); + raw_spin_lock_irq(&desc->lock); cpumask_copy(mask, desc->affinity); - spin_unlock_irq(&desc->lock); + raw_spin_unlock_irq(&desc->lock); set_cpus_allowed_ptr(current, mask); free_cpumask_var(mask); @@ -545,7 +545,7 @@ static int irq_thread(void *data) atomic_inc(&desc->threads_active); - spin_lock_irq(&desc->lock); + raw_spin_lock_irq(&desc->lock); if (unlikely(desc->status & IRQ_DISABLED)) { /* * CHECKME: We might need a dedicated @@ -555,9 +555,9 @@ static int irq_thread(void *data) * retriggers the interrupt itself --- tglx */ desc->status |= IRQ_PENDING; - spin_unlock_irq(&desc->lock); + raw_spin_unlock_irq(&desc->lock); } else { - spin_unlock_irq(&desc->lock); + raw_spin_unlock_irq(&desc->lock); action->thread_fn(action->irq, action->dev_id); @@ -679,7 +679,7 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new) /* * The following block of code has to be executed atomically */ - spin_lock_irqsave(&desc->lock, flags); + raw_spin_lock_irqsave(&desc->lock, flags); old_ptr = &desc->action; old = *old_ptr; if (old) { @@ -775,7 +775,7 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new) __enable_irq(desc, irq, false); } - spin_unlock_irqrestore(&desc->lock, flags); + raw_spin_unlock_irqrestore(&desc->lock, flags); /* * Strictly no need to wake it up, but hung_task complains @@ -802,7 +802,7 @@ mismatch: ret = -EBUSY; out_thread: - spin_unlock_irqrestore(&desc->lock, flags); + raw_spin_unlock_irqrestore(&desc->lock, flags); if (new->thread) { struct task_struct *t = new->thread; @@ -844,7 +844,7 @@ static struct irqaction *__free_irq(unsigned int irq, void *dev_id) if (!desc) return NULL; - spin_lock_irqsave(&desc->lock, flags); + raw_spin_lock_irqsave(&desc->lock, flags); /* * There can be multiple actions per IRQ descriptor, find the right @@ -856,7 +856,7 @@ static struct irqaction *__free_irq(unsigned int irq, void *dev_id) if (!action) { WARN(1, "Trying to free already-free IRQ %d\n", irq); - spin_unlock_irqrestore(&desc->lock, flags); + raw_spin_unlock_irqrestore(&desc->lock, flags); return NULL; } @@ -884,7 +884,7 @@ static struct irqaction *__free_irq(unsigned int irq, void *dev_id) desc->chip->disable(irq); } - spin_unlock_irqrestore(&desc->lock, flags); + raw_spin_unlock_irqrestore(&desc->lock, flags); unregister_handler_proc(irq, action); diff --git a/kernel/irq/migration.c b/kernel/irq/migration.c index fcb6c96f262..24196228083 100644 --- a/kernel/irq/migration.c +++ b/kernel/irq/migration.c @@ -27,7 +27,7 @@ void move_masked_irq(int irq) if 
(!desc->chip->set_affinity) return; - assert_spin_locked(&desc->lock); + assert_raw_spin_locked(&desc->lock); /* * If there was a valid mask to work with, please diff --git a/kernel/irq/numa_migrate.c b/kernel/irq/numa_migrate.c index 3fd30197da2..26bac9d8f86 100644 --- a/kernel/irq/numa_migrate.c +++ b/kernel/irq/numa_migrate.c @@ -42,7 +42,7 @@ static bool init_copy_one_irq_desc(int irq, struct irq_desc *old_desc, "for migration.\n", irq); return false; } - spin_lock_init(&desc->lock); + raw_spin_lock_init(&desc->lock); desc->node = node; lockdep_set_class(&desc->lock, &irq_desc_lock_class); init_copy_kstat_irqs(old_desc, desc, node, nr_cpu_ids); @@ -67,7 +67,7 @@ static struct irq_desc *__real_move_irq_desc(struct irq_desc *old_desc, irq = old_desc->irq; - spin_lock_irqsave(&sparse_irq_lock, flags); + raw_spin_lock_irqsave(&sparse_irq_lock, flags); /* We have to check it to avoid races with another CPU */ desc = irq_desc_ptrs[irq]; @@ -91,7 +91,7 @@ static struct irq_desc *__real_move_irq_desc(struct irq_desc *old_desc, } irq_desc_ptrs[irq] = desc; - spin_unlock_irqrestore(&sparse_irq_lock, flags); + raw_spin_unlock_irqrestore(&sparse_irq_lock, flags); /* free the old one */ free_one_irq_desc(old_desc, desc); @@ -100,7 +100,7 @@ static struct irq_desc *__real_move_irq_desc(struct irq_desc *old_desc, return desc; out_unlock: - spin_unlock_irqrestore(&sparse_irq_lock, flags); + raw_spin_unlock_irqrestore(&sparse_irq_lock, flags); return desc; } diff --git a/kernel/irq/pm.c b/kernel/irq/pm.c index a0bb09e7986..0d4005d85b0 100644 --- a/kernel/irq/pm.c +++ b/kernel/irq/pm.c @@ -28,9 +28,9 @@ void suspend_device_irqs(void) for_each_irq_desc(irq, desc) { unsigned long flags; - spin_lock_irqsave(&desc->lock, flags); + raw_spin_lock_irqsave(&desc->lock, flags); __disable_irq(desc, irq, true); - spin_unlock_irqrestore(&desc->lock, flags); + raw_spin_unlock_irqrestore(&desc->lock, flags); } for_each_irq_desc(irq, desc) @@ -56,9 +56,9 @@ void resume_device_irqs(void) if (!(desc->status & IRQ_SUSPENDED)) continue; - spin_lock_irqsave(&desc->lock, flags); + raw_spin_lock_irqsave(&desc->lock, flags); __enable_irq(desc, irq, true); - spin_unlock_irqrestore(&desc->lock, flags); + raw_spin_unlock_irqrestore(&desc->lock, flags); } } EXPORT_SYMBOL_GPL(resume_device_irqs); diff --git a/kernel/irq/proc.c b/kernel/irq/proc.c index 0832145fea9..6f50eccc79c 100644 --- a/kernel/irq/proc.c +++ b/kernel/irq/proc.c @@ -179,7 +179,7 @@ static int name_unique(unsigned int irq, struct irqaction *new_action) unsigned long flags; int ret = 1; - spin_lock_irqsave(&desc->lock, flags); + raw_spin_lock_irqsave(&desc->lock, flags); for (action = desc->action ; action; action = action->next) { if ((action != new_action) && action->name && !strcmp(new_action->name, action->name)) { @@ -187,7 +187,7 @@ static int name_unique(unsigned int irq, struct irqaction *new_action) break; } } - spin_unlock_irqrestore(&desc->lock, flags); + raw_spin_unlock_irqrestore(&desc->lock, flags); return ret; } diff --git a/kernel/irq/spurious.c b/kernel/irq/spurious.c index e49ea1c5232..89fb90ae534 100644 --- a/kernel/irq/spurious.c +++ b/kernel/irq/spurious.c @@ -28,7 +28,7 @@ static int try_one_irq(int irq, struct irq_desc *desc) struct irqaction *action; int ok = 0, work = 0; - spin_lock(&desc->lock); + raw_spin_lock(&desc->lock); /* Already running on another processor */ if (desc->status & IRQ_INPROGRESS) { /* @@ -37,13 +37,13 @@ static int try_one_irq(int irq, struct irq_desc *desc) */ if (desc->action && (desc->action->flags & IRQF_SHARED)) 
desc->status |= IRQ_PENDING; - spin_unlock(&desc->lock); + raw_spin_unlock(&desc->lock); return ok; } /* Honour the normal IRQ locking */ desc->status |= IRQ_INPROGRESS; action = desc->action; - spin_unlock(&desc->lock); + raw_spin_unlock(&desc->lock); while (action) { /* Only shared IRQ handlers are safe to call */ @@ -56,7 +56,7 @@ static int try_one_irq(int irq, struct irq_desc *desc) } local_irq_disable(); /* Now clean up the flags */ - spin_lock(&desc->lock); + raw_spin_lock(&desc->lock); action = desc->action; /* @@ -68,9 +68,9 @@ static int try_one_irq(int irq, struct irq_desc *desc) * Perform real IRQ processing for the IRQ we deferred */ work = 1; - spin_unlock(&desc->lock); + raw_spin_unlock(&desc->lock); handle_IRQ_event(irq, action); - spin_lock(&desc->lock); + raw_spin_lock(&desc->lock); desc->status &= ~IRQ_PENDING; } desc->status &= ~IRQ_INPROGRESS; @@ -80,7 +80,7 @@ static int try_one_irq(int irq, struct irq_desc *desc) */ if (work && desc->chip && desc->chip->end) desc->chip->end(irq); - spin_unlock(&desc->lock); + raw_spin_unlock(&desc->lock); return ok; } diff --git a/kernel/lockdep.c b/kernel/lockdep.c index 429540c70d3..5feaddcdbe4 100644 --- a/kernel/lockdep.c +++ b/kernel/lockdep.c @@ -73,11 +73,11 @@ module_param(lock_stat, int, 0644); * to use a raw spinlock - we really dont want the spinlock * code to recurse back into the lockdep code... */ -static raw_spinlock_t lockdep_lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED; +static arch_spinlock_t lockdep_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED; static int graph_lock(void) { - __raw_spin_lock(&lockdep_lock); + arch_spin_lock(&lockdep_lock); /* * Make sure that if another CPU detected a bug while * walking the graph we dont change it (while the other @@ -85,7 +85,7 @@ static int graph_lock(void) * dropped already) */ if (!debug_locks) { - __raw_spin_unlock(&lockdep_lock); + arch_spin_unlock(&lockdep_lock); return 0; } /* prevent any recursions within lockdep from causing deadlocks */ @@ -95,11 +95,11 @@ static int graph_lock(void) static inline int graph_unlock(void) { - if (debug_locks && !__raw_spin_is_locked(&lockdep_lock)) + if (debug_locks && !arch_spin_is_locked(&lockdep_lock)) return DEBUG_LOCKS_WARN_ON(1); current->lockdep_recursion--; - __raw_spin_unlock(&lockdep_lock); + arch_spin_unlock(&lockdep_lock); return 0; } @@ -111,7 +111,7 @@ static inline int debug_locks_off_graph_unlock(void) { int ret = debug_locks_off(); - __raw_spin_unlock(&lockdep_lock); + arch_spin_unlock(&lockdep_lock); return ret; } @@ -1170,9 +1170,9 @@ unsigned long lockdep_count_forward_deps(struct lock_class *class) this.class = class; local_irq_save(flags); - __raw_spin_lock(&lockdep_lock); + arch_spin_lock(&lockdep_lock); ret = __lockdep_count_forward_deps(&this); - __raw_spin_unlock(&lockdep_lock); + arch_spin_unlock(&lockdep_lock); local_irq_restore(flags); return ret; @@ -1197,9 +1197,9 @@ unsigned long lockdep_count_backward_deps(struct lock_class *class) this.class = class; local_irq_save(flags); - __raw_spin_lock(&lockdep_lock); + arch_spin_lock(&lockdep_lock); ret = __lockdep_count_backward_deps(&this); - __raw_spin_unlock(&lockdep_lock); + arch_spin_unlock(&lockdep_lock); local_irq_restore(flags); return ret; diff --git a/kernel/mutex-debug.h b/kernel/mutex-debug.h index 6b2d735846a..57d527a16f9 100644 --- a/kernel/mutex-debug.h +++ b/kernel/mutex-debug.h @@ -43,13 +43,13 @@ static inline void mutex_clear_owner(struct mutex *lock) \ DEBUG_LOCKS_WARN_ON(in_interrupt()); \ local_irq_save(flags); \ - 
__raw_spin_lock(&(lock)->raw_lock); \ + arch_spin_lock(&(lock)->rlock.raw_lock);\ DEBUG_LOCKS_WARN_ON(l->magic != l); \ } while (0) -#define spin_unlock_mutex(lock, flags) \ - do { \ - __raw_spin_unlock(&(lock)->raw_lock); \ - local_irq_restore(flags); \ - preempt_check_resched(); \ +#define spin_unlock_mutex(lock, flags) \ + do { \ + arch_spin_unlock(&(lock)->rlock.raw_lock); \ + local_irq_restore(flags); \ + preempt_check_resched(); \ } while (0) diff --git a/kernel/params.c b/kernel/params.c index d656c276508..cf1b6918312 100644 --- a/kernel/params.c +++ b/kernel/params.c @@ -24,6 +24,7 @@ #include <linux/err.h> #include <linux/slab.h> #include <linux/ctype.h> +#include <linux/string.h> #if 0 #define DEBUGP printk @@ -122,9 +123,7 @@ static char *next_arg(char *args, char **param, char **val) next = args + i; /* Chew up trailing spaces. */ - while (isspace(*next)) - next++; - return next; + return skip_spaces(next); } /* Args looks like "foo=bar,bar2 baz=fuz wiz". */ @@ -139,8 +138,7 @@ int parse_args(const char *name, DEBUGP("Parsing ARGS: %s\n", args); /* Chew leading spaces */ - while (isspace(*args)) - args++; + args = skip_spaces(args); while (*args) { int ret; diff --git a/kernel/perf_event.c b/kernel/perf_event.c index e73e53c7582..9052d6c8c9f 100644 --- a/kernel/perf_event.c +++ b/kernel/perf_event.c @@ -203,14 +203,14 @@ perf_lock_task_context(struct task_struct *task, unsigned long *flags) * if so. If we locked the right context, then it * can't get swapped on us any more. */ - spin_lock_irqsave(&ctx->lock, *flags); + raw_spin_lock_irqsave(&ctx->lock, *flags); if (ctx != rcu_dereference(task->perf_event_ctxp)) { - spin_unlock_irqrestore(&ctx->lock, *flags); + raw_spin_unlock_irqrestore(&ctx->lock, *flags); goto retry; } if (!atomic_inc_not_zero(&ctx->refcount)) { - spin_unlock_irqrestore(&ctx->lock, *flags); + raw_spin_unlock_irqrestore(&ctx->lock, *flags); ctx = NULL; } } @@ -231,7 +231,7 @@ static struct perf_event_context *perf_pin_task_context(struct task_struct *task ctx = perf_lock_task_context(task, &flags); if (ctx) { ++ctx->pin_count; - spin_unlock_irqrestore(&ctx->lock, flags); + raw_spin_unlock_irqrestore(&ctx->lock, flags); } return ctx; } @@ -240,9 +240,9 @@ static void perf_unpin_context(struct perf_event_context *ctx) { unsigned long flags; - spin_lock_irqsave(&ctx->lock, flags); + raw_spin_lock_irqsave(&ctx->lock, flags); --ctx->pin_count; - spin_unlock_irqrestore(&ctx->lock, flags); + raw_spin_unlock_irqrestore(&ctx->lock, flags); put_ctx(ctx); } @@ -427,7 +427,7 @@ static void __perf_event_remove_from_context(void *info) if (ctx->task && cpuctx->task_ctx != ctx) return; - spin_lock(&ctx->lock); + raw_spin_lock(&ctx->lock); /* * Protect the list operation against NMI by disabling the * events on a global level. @@ -449,7 +449,7 @@ static void __perf_event_remove_from_context(void *info) } perf_enable(); - spin_unlock(&ctx->lock); + raw_spin_unlock(&ctx->lock); } @@ -488,12 +488,12 @@ retry: task_oncpu_function_call(task, __perf_event_remove_from_context, event); - spin_lock_irq(&ctx->lock); + raw_spin_lock_irq(&ctx->lock); /* * If the context is active we need to retry the smp call. 
*/ if (ctx->nr_active && !list_empty(&event->group_entry)) { - spin_unlock_irq(&ctx->lock); + raw_spin_unlock_irq(&ctx->lock); goto retry; } @@ -504,7 +504,7 @@ retry: */ if (!list_empty(&event->group_entry)) list_del_event(event, ctx); - spin_unlock_irq(&ctx->lock); + raw_spin_unlock_irq(&ctx->lock); } /* @@ -535,7 +535,7 @@ static void __perf_event_disable(void *info) if (ctx->task && cpuctx->task_ctx != ctx) return; - spin_lock(&ctx->lock); + raw_spin_lock(&ctx->lock); /* * If the event is on, turn it off. @@ -551,7 +551,7 @@ static void __perf_event_disable(void *info) event->state = PERF_EVENT_STATE_OFF; } - spin_unlock(&ctx->lock); + raw_spin_unlock(&ctx->lock); } /* @@ -584,12 +584,12 @@ void perf_event_disable(struct perf_event *event) retry: task_oncpu_function_call(task, __perf_event_disable, event); - spin_lock_irq(&ctx->lock); + raw_spin_lock_irq(&ctx->lock); /* * If the event is still active, we need to retry the cross-call. */ if (event->state == PERF_EVENT_STATE_ACTIVE) { - spin_unlock_irq(&ctx->lock); + raw_spin_unlock_irq(&ctx->lock); goto retry; } @@ -602,7 +602,7 @@ void perf_event_disable(struct perf_event *event) event->state = PERF_EVENT_STATE_OFF; } - spin_unlock_irq(&ctx->lock); + raw_spin_unlock_irq(&ctx->lock); } static int @@ -770,7 +770,7 @@ static void __perf_install_in_context(void *info) cpuctx->task_ctx = ctx; } - spin_lock(&ctx->lock); + raw_spin_lock(&ctx->lock); ctx->is_active = 1; update_context_time(ctx); @@ -820,7 +820,7 @@ static void __perf_install_in_context(void *info) unlock: perf_enable(); - spin_unlock(&ctx->lock); + raw_spin_unlock(&ctx->lock); } /* @@ -856,12 +856,12 @@ retry: task_oncpu_function_call(task, __perf_install_in_context, event); - spin_lock_irq(&ctx->lock); + raw_spin_lock_irq(&ctx->lock); /* * we need to retry the smp call. 
*/ if (ctx->is_active && list_empty(&event->group_entry)) { - spin_unlock_irq(&ctx->lock); + raw_spin_unlock_irq(&ctx->lock); goto retry; } @@ -872,7 +872,7 @@ retry: */ if (list_empty(&event->group_entry)) add_event_to_ctx(event, ctx); - spin_unlock_irq(&ctx->lock); + raw_spin_unlock_irq(&ctx->lock); } /* @@ -917,7 +917,7 @@ static void __perf_event_enable(void *info) cpuctx->task_ctx = ctx; } - spin_lock(&ctx->lock); + raw_spin_lock(&ctx->lock); ctx->is_active = 1; update_context_time(ctx); @@ -959,7 +959,7 @@ static void __perf_event_enable(void *info) } unlock: - spin_unlock(&ctx->lock); + raw_spin_unlock(&ctx->lock); } /* @@ -985,7 +985,7 @@ void perf_event_enable(struct perf_event *event) return; } - spin_lock_irq(&ctx->lock); + raw_spin_lock_irq(&ctx->lock); if (event->state >= PERF_EVENT_STATE_INACTIVE) goto out; @@ -1000,10 +1000,10 @@ void perf_event_enable(struct perf_event *event) event->state = PERF_EVENT_STATE_OFF; retry: - spin_unlock_irq(&ctx->lock); + raw_spin_unlock_irq(&ctx->lock); task_oncpu_function_call(task, __perf_event_enable, event); - spin_lock_irq(&ctx->lock); + raw_spin_lock_irq(&ctx->lock); /* * If the context is active and the event is still off, @@ -1020,7 +1020,7 @@ void perf_event_enable(struct perf_event *event) __perf_event_mark_enabled(event, ctx); out: - spin_unlock_irq(&ctx->lock); + raw_spin_unlock_irq(&ctx->lock); } static int perf_event_refresh(struct perf_event *event, int refresh) @@ -1042,7 +1042,7 @@ void __perf_event_sched_out(struct perf_event_context *ctx, { struct perf_event *event; - spin_lock(&ctx->lock); + raw_spin_lock(&ctx->lock); ctx->is_active = 0; if (likely(!ctx->nr_events)) goto out; @@ -1055,7 +1055,7 @@ void __perf_event_sched_out(struct perf_event_context *ctx, } perf_enable(); out: - spin_unlock(&ctx->lock); + raw_spin_unlock(&ctx->lock); } /* @@ -1193,8 +1193,8 @@ void perf_event_task_sched_out(struct task_struct *task, * order we take the locks because no other cpu could * be trying to lock both of these tasks. 
*/ - spin_lock(&ctx->lock); - spin_lock_nested(&next_ctx->lock, SINGLE_DEPTH_NESTING); + raw_spin_lock(&ctx->lock); + raw_spin_lock_nested(&next_ctx->lock, SINGLE_DEPTH_NESTING); if (context_equiv(ctx, next_ctx)) { /* * XXX do we need a memory barrier of sorts @@ -1208,8 +1208,8 @@ void perf_event_task_sched_out(struct task_struct *task, perf_event_sync_stat(ctx, next_ctx); } - spin_unlock(&next_ctx->lock); - spin_unlock(&ctx->lock); + raw_spin_unlock(&next_ctx->lock); + raw_spin_unlock(&ctx->lock); } rcu_read_unlock(); @@ -1251,7 +1251,7 @@ __perf_event_sched_in(struct perf_event_context *ctx, struct perf_event *event; int can_add_hw = 1; - spin_lock(&ctx->lock); + raw_spin_lock(&ctx->lock); ctx->is_active = 1; if (likely(!ctx->nr_events)) goto out; @@ -1306,7 +1306,7 @@ __perf_event_sched_in(struct perf_event_context *ctx, } perf_enable(); out: - spin_unlock(&ctx->lock); + raw_spin_unlock(&ctx->lock); } /* @@ -1370,7 +1370,7 @@ static void perf_ctx_adjust_freq(struct perf_event_context *ctx) struct hw_perf_event *hwc; u64 interrupts, freq; - spin_lock(&ctx->lock); + raw_spin_lock(&ctx->lock); list_for_each_entry_rcu(event, &ctx->event_list, event_entry) { if (event->state != PERF_EVENT_STATE_ACTIVE) continue; @@ -1425,7 +1425,7 @@ static void perf_ctx_adjust_freq(struct perf_event_context *ctx) perf_enable(); } } - spin_unlock(&ctx->lock); + raw_spin_unlock(&ctx->lock); } /* @@ -1438,7 +1438,7 @@ static void rotate_ctx(struct perf_event_context *ctx) if (!ctx->nr_events) return; - spin_lock(&ctx->lock); + raw_spin_lock(&ctx->lock); /* * Rotate the first entry last (works just fine for group events too): */ @@ -1449,7 +1449,7 @@ static void rotate_ctx(struct perf_event_context *ctx) } perf_enable(); - spin_unlock(&ctx->lock); + raw_spin_unlock(&ctx->lock); } void perf_event_task_tick(struct task_struct *curr, int cpu) @@ -1498,7 +1498,7 @@ static void perf_event_enable_on_exec(struct task_struct *task) __perf_event_task_sched_out(ctx); - spin_lock(&ctx->lock); + raw_spin_lock(&ctx->lock); list_for_each_entry(event, &ctx->group_list, group_entry) { if (!event->attr.enable_on_exec) @@ -1516,7 +1516,7 @@ static void perf_event_enable_on_exec(struct task_struct *task) if (enabled) unclone_ctx(ctx); - spin_unlock(&ctx->lock); + raw_spin_unlock(&ctx->lock); perf_event_task_sched_in(task, smp_processor_id()); out: @@ -1542,10 +1542,10 @@ static void __perf_event_read(void *info) if (ctx->task && cpuctx->task_ctx != ctx) return; - spin_lock(&ctx->lock); + raw_spin_lock(&ctx->lock); update_context_time(ctx); update_event_times(event); - spin_unlock(&ctx->lock); + raw_spin_unlock(&ctx->lock); event->pmu->read(event); } @@ -1563,10 +1563,10 @@ static u64 perf_event_read(struct perf_event *event) struct perf_event_context *ctx = event->ctx; unsigned long flags; - spin_lock_irqsave(&ctx->lock, flags); + raw_spin_lock_irqsave(&ctx->lock, flags); update_context_time(ctx); update_event_times(event); - spin_unlock_irqrestore(&ctx->lock, flags); + raw_spin_unlock_irqrestore(&ctx->lock, flags); } return atomic64_read(&event->count); @@ -1579,7 +1579,7 @@ static void __perf_event_init_context(struct perf_event_context *ctx, struct task_struct *task) { - spin_lock_init(&ctx->lock); + raw_spin_lock_init(&ctx->lock); mutex_init(&ctx->mutex); INIT_LIST_HEAD(&ctx->group_list); INIT_LIST_HEAD(&ctx->event_list); @@ -1649,7 +1649,7 @@ static struct perf_event_context *find_get_context(pid_t pid, int cpu) ctx = perf_lock_task_context(task, &flags); if (ctx) { unclone_ctx(ctx); - spin_unlock_irqrestore(&ctx->lock, 
flags); + raw_spin_unlock_irqrestore(&ctx->lock, flags); } if (!ctx) { @@ -1987,7 +1987,7 @@ static int perf_event_period(struct perf_event *event, u64 __user *arg) if (!value) return -EINVAL; - spin_lock_irq(&ctx->lock); + raw_spin_lock_irq(&ctx->lock); if (event->attr.freq) { if (value > sysctl_perf_event_sample_rate) { ret = -EINVAL; @@ -2000,7 +2000,7 @@ static int perf_event_period(struct perf_event *event, u64 __user *arg) event->hw.sample_period = value; } unlock: - spin_unlock_irq(&ctx->lock); + raw_spin_unlock_irq(&ctx->lock); return ret; } @@ -4992,7 +4992,7 @@ void perf_event_exit_task(struct task_struct *child) * reading child->perf_event_ctxp, we wait until it has * incremented the context's refcount before we do put_ctx below. */ - spin_lock(&child_ctx->lock); + raw_spin_lock(&child_ctx->lock); child->perf_event_ctxp = NULL; /* * If this context is a clone; unclone it so it can't get * swapped to another process while we're removing all * the events from it. */ unclone_ctx(child_ctx); update_context_time(child_ctx); - spin_unlock_irqrestore(&child_ctx->lock, flags); + raw_spin_unlock_irqrestore(&child_ctx->lock, flags); /* * Report the task dead after unscheduling the events so that we @@ -5292,11 +5292,11 @@ perf_set_reserve_percpu(struct sysdev_class *class, perf_reserved_percpu = val; for_each_online_cpu(cpu) { cpuctx = &per_cpu(perf_cpu_context, cpu); - spin_lock_irq(&cpuctx->ctx.lock); + raw_spin_lock_irq(&cpuctx->ctx.lock); mpt = min(perf_max_events - cpuctx->ctx.nr_events, perf_max_events - perf_reserved_percpu); cpuctx->max_pertask = mpt; - spin_unlock_irq(&cpuctx->ctx.lock); + raw_spin_unlock_irq(&cpuctx->ctx.lock); } spin_unlock(&perf_resource_lock); diff --git a/kernel/power/console.c b/kernel/power/console.c index 5187136fe1d..218e5af9015 100644 --- a/kernel/power/console.c +++ b/kernel/power/console.c @@ -6,7 +6,7 @@ #include <linux/vt_kern.h> #include <linux/kbd_kern.h> -#include <linux/console.h> +#include <linux/vt.h> #include <linux/module.h> #include "power.h" @@ -21,8 +21,7 @@ int pm_prepare_console(void) if (orig_fgconsole < 0) return 1; - orig_kmsg = kmsg_redirect; - kmsg_redirect = SUSPEND_CONSOLE; + orig_kmsg = vt_kmsg_redirect(SUSPEND_CONSOLE); return 0; } @@ -30,7 +29,7 @@ void pm_restore_console(void) { if (orig_fgconsole >= 0) { vt_move_to_console(orig_fgconsole, 0); - kmsg_redirect = orig_kmsg; + vt_kmsg_redirect(orig_kmsg); } } #endif diff --git a/kernel/rtmutex-debug.c b/kernel/rtmutex-debug.c index 5fcb4fe645e..ddabb54bb5c 100644 --- a/kernel/rtmutex-debug.c +++ b/kernel/rtmutex-debug.c @@ -37,8 +37,8 @@ do { \ if (rt_trace_on) { \ rt_trace_on = 0; \ console_verbose(); \ - if (spin_is_locked(&current->pi_lock)) \ - spin_unlock(&current->pi_lock); \ + if (raw_spin_is_locked(&current->pi_lock)) \ + raw_spin_unlock(&current->pi_lock); \ } \ } while (0) diff --git a/kernel/rtmutex.c b/kernel/rtmutex.c index 29bd4baf9e7..a9604815786 100644 --- a/kernel/rtmutex.c +++ b/kernel/rtmutex.c @@ -138,9 +138,9 @@ static void rt_mutex_adjust_prio(struct task_struct *task) { unsigned long flags; - spin_lock_irqsave(&task->pi_lock, flags); + raw_spin_lock_irqsave(&task->pi_lock, flags); __rt_mutex_adjust_prio(task); - spin_unlock_irqrestore(&task->pi_lock, flags); + raw_spin_unlock_irqrestore(&task->pi_lock, flags); } /* @@ -195,7 +195,7 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task, /* * Task can not go away as we did a get_task() before !
*/ - spin_lock_irqsave(&task->pi_lock, flags); + raw_spin_lock_irqsave(&task->pi_lock, flags); waiter = task->pi_blocked_on; /* @@ -231,8 +231,8 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task, goto out_unlock_pi; lock = waiter->lock; - if (!spin_trylock(&lock->wait_lock)) { - spin_unlock_irqrestore(&task->pi_lock, flags); + if (!raw_spin_trylock(&lock->wait_lock)) { + raw_spin_unlock_irqrestore(&task->pi_lock, flags); cpu_relax(); goto retry; } @@ -240,7 +240,7 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task, /* Deadlock detection */ if (lock == orig_lock || rt_mutex_owner(lock) == top_task) { debug_rt_mutex_deadlock(deadlock_detect, orig_waiter, lock); - spin_unlock(&lock->wait_lock); + raw_spin_unlock(&lock->wait_lock); ret = deadlock_detect ? -EDEADLK : 0; goto out_unlock_pi; } @@ -253,13 +253,13 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task, plist_add(&waiter->list_entry, &lock->wait_list); /* Release the task */ - spin_unlock_irqrestore(&task->pi_lock, flags); + raw_spin_unlock_irqrestore(&task->pi_lock, flags); put_task_struct(task); /* Grab the next task */ task = rt_mutex_owner(lock); get_task_struct(task); - spin_lock_irqsave(&task->pi_lock, flags); + raw_spin_lock_irqsave(&task->pi_lock, flags); if (waiter == rt_mutex_top_waiter(lock)) { /* Boost the owner */ @@ -277,10 +277,10 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task, __rt_mutex_adjust_prio(task); } - spin_unlock_irqrestore(&task->pi_lock, flags); + raw_spin_unlock_irqrestore(&task->pi_lock, flags); top_waiter = rt_mutex_top_waiter(lock); - spin_unlock(&lock->wait_lock); + raw_spin_unlock(&lock->wait_lock); if (!detect_deadlock && waiter != top_waiter) goto out_put_task; @@ -288,7 +288,7 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task, goto again; out_unlock_pi: - spin_unlock_irqrestore(&task->pi_lock, flags); + raw_spin_unlock_irqrestore(&task->pi_lock, flags); out_put_task: put_task_struct(task); @@ -313,9 +313,9 @@ static inline int try_to_steal_lock(struct rt_mutex *lock, if (pendowner == task) return 1; - spin_lock_irqsave(&pendowner->pi_lock, flags); + raw_spin_lock_irqsave(&pendowner->pi_lock, flags); if (task->prio >= pendowner->prio) { - spin_unlock_irqrestore(&pendowner->pi_lock, flags); + raw_spin_unlock_irqrestore(&pendowner->pi_lock, flags); return 0; } @@ -325,7 +325,7 @@ static inline int try_to_steal_lock(struct rt_mutex *lock, * priority. 
*/ if (likely(!rt_mutex_has_waiters(lock))) { - spin_unlock_irqrestore(&pendowner->pi_lock, flags); + raw_spin_unlock_irqrestore(&pendowner->pi_lock, flags); return 1; } @@ -333,7 +333,7 @@ static inline int try_to_steal_lock(struct rt_mutex *lock, next = rt_mutex_top_waiter(lock); plist_del(&next->pi_list_entry, &pendowner->pi_waiters); __rt_mutex_adjust_prio(pendowner); - spin_unlock_irqrestore(&pendowner->pi_lock, flags); + raw_spin_unlock_irqrestore(&pendowner->pi_lock, flags); /* * We are going to steal the lock and a waiter was @@ -350,10 +350,10 @@ static inline int try_to_steal_lock(struct rt_mutex *lock, * might be task: */ if (likely(next->task != task)) { - spin_lock_irqsave(&task->pi_lock, flags); + raw_spin_lock_irqsave(&task->pi_lock, flags); plist_add(&next->pi_list_entry, &task->pi_waiters); __rt_mutex_adjust_prio(task); - spin_unlock_irqrestore(&task->pi_lock, flags); + raw_spin_unlock_irqrestore(&task->pi_lock, flags); } return 1; } @@ -420,7 +420,7 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock, unsigned long flags; int chain_walk = 0, res; - spin_lock_irqsave(&task->pi_lock, flags); + raw_spin_lock_irqsave(&task->pi_lock, flags); __rt_mutex_adjust_prio(task); waiter->task = task; waiter->lock = lock; @@ -434,17 +434,17 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock, task->pi_blocked_on = waiter; - spin_unlock_irqrestore(&task->pi_lock, flags); + raw_spin_unlock_irqrestore(&task->pi_lock, flags); if (waiter == rt_mutex_top_waiter(lock)) { - spin_lock_irqsave(&owner->pi_lock, flags); + raw_spin_lock_irqsave(&owner->pi_lock, flags); plist_del(&top_waiter->pi_list_entry, &owner->pi_waiters); plist_add(&waiter->pi_list_entry, &owner->pi_waiters); __rt_mutex_adjust_prio(owner); if (owner->pi_blocked_on) chain_walk = 1; - spin_unlock_irqrestore(&owner->pi_lock, flags); + raw_spin_unlock_irqrestore(&owner->pi_lock, flags); } else if (debug_rt_mutex_detect_deadlock(waiter, detect_deadlock)) chain_walk = 1; @@ -459,12 +459,12 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock, */ get_task_struct(owner); - spin_unlock(&lock->wait_lock); + raw_spin_unlock(&lock->wait_lock); res = rt_mutex_adjust_prio_chain(owner, detect_deadlock, lock, waiter, task); - spin_lock(&lock->wait_lock); + raw_spin_lock(&lock->wait_lock); return res; } @@ -483,7 +483,7 @@ static void wakeup_next_waiter(struct rt_mutex *lock) struct task_struct *pendowner; unsigned long flags; - spin_lock_irqsave(&current->pi_lock, flags); + raw_spin_lock_irqsave(&current->pi_lock, flags); waiter = rt_mutex_top_waiter(lock); plist_del(&waiter->list_entry, &lock->wait_list); @@ -500,7 +500,7 @@ static void wakeup_next_waiter(struct rt_mutex *lock) rt_mutex_set_owner(lock, pendowner, RT_MUTEX_OWNER_PENDING); - spin_unlock_irqrestore(&current->pi_lock, flags); + raw_spin_unlock_irqrestore(&current->pi_lock, flags); /* * Clear the pi_blocked_on variable and enqueue a possible * waiter with higher priority than pending-owner->normal_prio * is blocked on the unboosted (pending) owner.
*/ - spin_lock_irqsave(&pendowner->pi_lock, flags); + raw_spin_lock_irqsave(&pendowner->pi_lock, flags); WARN_ON(!pendowner->pi_blocked_on); WARN_ON(pendowner->pi_blocked_on != waiter); @@ -523,7 +523,7 @@ static void wakeup_next_waiter(struct rt_mutex *lock) next = rt_mutex_top_waiter(lock); plist_add(&next->pi_list_entry, &pendowner->pi_waiters); } - spin_unlock_irqrestore(&pendowner->pi_lock, flags); + raw_spin_unlock_irqrestore(&pendowner->pi_lock, flags); wake_up_process(pendowner); } @@ -541,15 +541,15 @@ static void remove_waiter(struct rt_mutex *lock, unsigned long flags; int chain_walk = 0; - spin_lock_irqsave(&current->pi_lock, flags); + raw_spin_lock_irqsave(&current->pi_lock, flags); plist_del(&waiter->list_entry, &lock->wait_list); waiter->task = NULL; current->pi_blocked_on = NULL; - spin_unlock_irqrestore(&current->pi_lock, flags); + raw_spin_unlock_irqrestore(&current->pi_lock, flags); if (first && owner != current) { - spin_lock_irqsave(&owner->pi_lock, flags); + raw_spin_lock_irqsave(&owner->pi_lock, flags); plist_del(&waiter->pi_list_entry, &owner->pi_waiters); @@ -564,7 +564,7 @@ static void remove_waiter(struct rt_mutex *lock, if (owner->pi_blocked_on) chain_walk = 1; - spin_unlock_irqrestore(&owner->pi_lock, flags); + raw_spin_unlock_irqrestore(&owner->pi_lock, flags); } WARN_ON(!plist_node_empty(&waiter->pi_list_entry)); @@ -575,11 +575,11 @@ static void remove_waiter(struct rt_mutex *lock, /* gets dropped in rt_mutex_adjust_prio_chain()! */ get_task_struct(owner); - spin_unlock(&lock->wait_lock); + raw_spin_unlock(&lock->wait_lock); rt_mutex_adjust_prio_chain(owner, 0, lock, NULL, current); - spin_lock(&lock->wait_lock); + raw_spin_lock(&lock->wait_lock); } /* @@ -592,15 +592,15 @@ void rt_mutex_adjust_pi(struct task_struct *task) struct rt_mutex_waiter *waiter; unsigned long flags; - spin_lock_irqsave(&task->pi_lock, flags); + raw_spin_lock_irqsave(&task->pi_lock, flags); waiter = task->pi_blocked_on; if (!waiter || waiter->list_entry.prio == task->prio) { - spin_unlock_irqrestore(&task->pi_lock, flags); + raw_spin_unlock_irqrestore(&task->pi_lock, flags); return; } - spin_unlock_irqrestore(&task->pi_lock, flags); + raw_spin_unlock_irqrestore(&task->pi_lock, flags); /* gets dropped in rt_mutex_adjust_prio_chain()!
*/ get_task_struct(task); @@ -672,14 +672,14 @@ __rt_mutex_slowlock(struct rt_mutex *lock, int state, break; } - spin_unlock(&lock->wait_lock); + raw_spin_unlock(&lock->wait_lock); debug_rt_mutex_print_deadlock(waiter); if (waiter->task) schedule_rt_mutex(lock); - spin_lock(&lock->wait_lock); + raw_spin_lock(&lock->wait_lock); set_current_state(state); } @@ -700,11 +700,11 @@ rt_mutex_slowlock(struct rt_mutex *lock, int state, debug_rt_mutex_init_waiter(&waiter); waiter.task = NULL; - spin_lock(&lock->wait_lock); + raw_spin_lock(&lock->wait_lock); /* Try to acquire the lock again: */ if (try_to_take_rt_mutex(lock)) { - spin_unlock(&lock->wait_lock); + raw_spin_unlock(&lock->wait_lock); return 0; } @@ -731,7 +731,7 @@ rt_mutex_slowlock(struct rt_mutex *lock, int state, */ fixup_rt_mutex_waiters(lock); - spin_unlock(&lock->wait_lock); + raw_spin_unlock(&lock->wait_lock); /* Remove pending timer: */ if (unlikely(timeout)) @@ -758,7 +758,7 @@ rt_mutex_slowtrylock(struct rt_mutex *lock) { int ret = 0; - spin_lock(&lock->wait_lock); + raw_spin_lock(&lock->wait_lock); if (likely(rt_mutex_owner(lock) != current)) { @@ -770,7 +770,7 @@ rt_mutex_slowtrylock(struct rt_mutex *lock) fixup_rt_mutex_waiters(lock); } - spin_unlock(&lock->wait_lock); + raw_spin_unlock(&lock->wait_lock); return ret; } @@ -781,7 +781,7 @@ rt_mutex_slowtrylock(struct rt_mutex *lock) static void __sched rt_mutex_slowunlock(struct rt_mutex *lock) { - spin_lock(&lock->wait_lock); + raw_spin_lock(&lock->wait_lock); debug_rt_mutex_unlock(lock); @@ -789,13 +789,13 @@ rt_mutex_slowunlock(struct rt_mutex *lock) if (!rt_mutex_has_waiters(lock)) { lock->owner = NULL; - spin_unlock(&lock->wait_lock); + raw_spin_unlock(&lock->wait_lock); return; } wakeup_next_waiter(lock); - spin_unlock(&lock->wait_lock); + raw_spin_unlock(&lock->wait_lock); /* Undo pi boosting if necessary: */ rt_mutex_adjust_prio(current); @@ -970,8 +970,8 @@ EXPORT_SYMBOL_GPL(rt_mutex_destroy); void __rt_mutex_init(struct rt_mutex *lock, const char *name) { lock->owner = NULL; - spin_lock_init(&lock->wait_lock); - plist_head_init(&lock->wait_list, &lock->wait_lock); + raw_spin_lock_init(&lock->wait_lock); + plist_head_init_raw(&lock->wait_list, &lock->wait_lock); debug_rt_mutex_init(lock, name); } @@ -1032,7 +1032,7 @@ int rt_mutex_start_proxy_lock(struct rt_mutex *lock, { int ret; - spin_lock(&lock->wait_lock); + raw_spin_lock(&lock->wait_lock); mark_rt_mutex_waiters(lock); @@ -1040,7 +1040,7 @@ int rt_mutex_start_proxy_lock(struct rt_mutex *lock, /* We got the lock for task. */ debug_rt_mutex_lock(lock); rt_mutex_set_owner(lock, task, 0); - spin_unlock(&lock->wait_lock); + raw_spin_unlock(&lock->wait_lock); rt_mutex_deadlock_account_lock(lock, task); return 1; } @@ -1056,7 +1056,7 @@ int rt_mutex_start_proxy_lock(struct rt_mutex *lock, */ ret = 0; } - spin_unlock(&lock->wait_lock); + raw_spin_unlock(&lock->wait_lock); debug_rt_mutex_print_deadlock(waiter); @@ -1106,7 +1106,7 @@ int rt_mutex_finish_proxy_lock(struct rt_mutex *lock, { int ret; - spin_lock(&lock->wait_lock); + raw_spin_lock(&lock->wait_lock); set_current_state(TASK_INTERRUPTIBLE); @@ -1124,7 +1124,7 @@ int rt_mutex_finish_proxy_lock(struct rt_mutex *lock, */ fixup_rt_mutex_waiters(lock); - spin_unlock(&lock->wait_lock); + raw_spin_unlock(&lock->wait_lock); /* * Readjust priority, when we did not get the lock. 
We might have been diff --git a/kernel/sched.c b/kernel/sched.c index fd05861b211..18cceeecce3 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -141,7 +141,7 @@ struct rt_prio_array { struct rt_bandwidth { /* nests inside the rq lock: */ - spinlock_t rt_runtime_lock; + raw_spinlock_t rt_runtime_lock; ktime_t rt_period; u64 rt_runtime; struct hrtimer rt_period_timer; @@ -178,7 +178,7 @@ void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime) rt_b->rt_period = ns_to_ktime(period); rt_b->rt_runtime = runtime; - spin_lock_init(&rt_b->rt_runtime_lock); + raw_spin_lock_init(&rt_b->rt_runtime_lock); hrtimer_init(&rt_b->rt_period_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); @@ -200,7 +200,7 @@ static void start_rt_bandwidth(struct rt_bandwidth *rt_b) if (hrtimer_active(&rt_b->rt_period_timer)) return; - spin_lock(&rt_b->rt_runtime_lock); + raw_spin_lock(&rt_b->rt_runtime_lock); for (;;) { unsigned long delta; ktime_t soft, hard; @@ -217,7 +217,7 @@ static void start_rt_bandwidth(struct rt_bandwidth *rt_b) __hrtimer_start_range_ns(&rt_b->rt_period_timer, soft, delta, HRTIMER_MODE_ABS_PINNED, 0); } - spin_unlock(&rt_b->rt_runtime_lock); + raw_spin_unlock(&rt_b->rt_runtime_lock); } #ifdef CONFIG_RT_GROUP_SCHED @@ -470,7 +470,7 @@ struct rt_rq { u64 rt_time; u64 rt_runtime; /* Nests inside the rq lock: */ - spinlock_t rt_runtime_lock; + raw_spinlock_t rt_runtime_lock; #ifdef CONFIG_RT_GROUP_SCHED unsigned long rt_nr_boosted; @@ -525,7 +525,7 @@ static struct root_domain def_root_domain; */ struct rq { /* runqueue lock: */ - spinlock_t lock; + raw_spinlock_t lock; /* * nr_running and cpu_load should be in the same cacheline because @@ -685,7 +685,7 @@ inline void update_rq_clock(struct rq *rq) */ int runqueue_is_locked(int cpu) { - return spin_is_locked(&cpu_rq(cpu)->lock); + return raw_spin_is_locked(&cpu_rq(cpu)->lock); } /* @@ -893,7 +893,7 @@ static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev) */ spin_acquire(&rq->lock.dep_map, 0, 0, _THIS_IP_); - spin_unlock_irq(&rq->lock); + raw_spin_unlock_irq(&rq->lock); } #else /* __ARCH_WANT_UNLOCKED_CTXSW */ @@ -917,9 +917,9 @@ static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next) next->oncpu = 1; #endif #ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW - spin_unlock_irq(&rq->lock); + raw_spin_unlock_irq(&rq->lock); #else - spin_unlock(&rq->lock); + raw_spin_unlock(&rq->lock); #endif } @@ -949,10 +949,10 @@ static inline struct rq *__task_rq_lock(struct task_struct *p) { for (;;) { struct rq *rq = task_rq(p); - spin_lock(&rq->lock); + raw_spin_lock(&rq->lock); if (likely(rq == task_rq(p))) return rq; - spin_unlock(&rq->lock); + raw_spin_unlock(&rq->lock); } } @@ -969,10 +969,10 @@ static struct rq *task_rq_lock(struct task_struct *p, unsigned long *flags) for (;;) { local_irq_save(*flags); rq = task_rq(p); - spin_lock(&rq->lock); + raw_spin_lock(&rq->lock); if (likely(rq == task_rq(p))) return rq; - spin_unlock_irqrestore(&rq->lock, *flags); + raw_spin_unlock_irqrestore(&rq->lock, *flags); } } @@ -981,19 +981,19 @@ void task_rq_unlock_wait(struct task_struct *p) struct rq *rq = task_rq(p); smp_mb(); /* spin-unlock-wait is not a full memory barrier */ - spin_unlock_wait(&rq->lock); + raw_spin_unlock_wait(&rq->lock); } static void __task_rq_unlock(struct rq *rq) __releases(rq->lock) { - spin_unlock(&rq->lock); + raw_spin_unlock(&rq->lock); } static inline void task_rq_unlock(struct rq *rq, unsigned long *flags) __releases(rq->lock) { - spin_unlock_irqrestore(&rq->lock, *flags); + 
raw_spin_unlock_irqrestore(&rq->lock, *flags); } /* @@ -1006,7 +1006,7 @@ static struct rq *this_rq_lock(void) local_irq_disable(); rq = this_rq(); - spin_lock(&rq->lock); + raw_spin_lock(&rq->lock); return rq; } @@ -1053,10 +1053,10 @@ static enum hrtimer_restart hrtick(struct hrtimer *timer) WARN_ON_ONCE(cpu_of(rq) != smp_processor_id()); - spin_lock(&rq->lock); + raw_spin_lock(&rq->lock); update_rq_clock(rq); rq->curr->sched_class->task_tick(rq, rq->curr, 1); - spin_unlock(&rq->lock); + raw_spin_unlock(&rq->lock); return HRTIMER_NORESTART; } @@ -1069,10 +1069,10 @@ static void __hrtick_start(void *arg) { struct rq *rq = arg; - spin_lock(&rq->lock); + raw_spin_lock(&rq->lock); hrtimer_restart(&rq->hrtick_timer); rq->hrtick_csd_pending = 0; - spin_unlock(&rq->lock); + raw_spin_unlock(&rq->lock); } /* @@ -1179,7 +1179,7 @@ static void resched_task(struct task_struct *p) { int cpu; - assert_spin_locked(&task_rq(p)->lock); + assert_raw_spin_locked(&task_rq(p)->lock); if (test_tsk_need_resched(p)) return; @@ -1201,10 +1201,10 @@ static void resched_cpu(int cpu) struct rq *rq = cpu_rq(cpu); unsigned long flags; - if (!spin_trylock_irqsave(&rq->lock, flags)) + if (!raw_spin_trylock_irqsave(&rq->lock, flags)) return; resched_task(cpu_curr(cpu)); - spin_unlock_irqrestore(&rq->lock, flags); + raw_spin_unlock_irqrestore(&rq->lock, flags); } #ifdef CONFIG_NO_HZ @@ -1273,7 +1273,7 @@ static void sched_rt_avg_update(struct rq *rq, u64 rt_delta) #else /* !CONFIG_SMP */ static void resched_task(struct task_struct *p) { - assert_spin_locked(&task_rq(p)->lock); + assert_raw_spin_locked(&task_rq(p)->lock); set_tsk_need_resched(p); } @@ -1600,11 +1600,11 @@ static void update_group_shares_cpu(struct task_group *tg, int cpu, struct rq *rq = cpu_rq(cpu); unsigned long flags; - spin_lock_irqsave(&rq->lock, flags); + raw_spin_lock_irqsave(&rq->lock, flags); tg->cfs_rq[cpu]->rq_weight = boost ? 0 : rq_weight; tg->cfs_rq[cpu]->shares = boost ? 
0 : shares; __set_se_shares(tg->se[cpu], shares); - spin_unlock_irqrestore(&rq->lock, flags); + raw_spin_unlock_irqrestore(&rq->lock, flags); } } @@ -1706,9 +1706,9 @@ static void update_shares_locked(struct rq *rq, struct sched_domain *sd) if (root_task_group_empty()) return; - spin_unlock(&rq->lock); + raw_spin_unlock(&rq->lock); update_shares(sd); - spin_lock(&rq->lock); + raw_spin_lock(&rq->lock); } static void update_h_load(long cpu) @@ -1748,7 +1748,7 @@ static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest) __acquires(busiest->lock) __acquires(this_rq->lock) { - spin_unlock(&this_rq->lock); + raw_spin_unlock(&this_rq->lock); double_rq_lock(this_rq, busiest); return 1; @@ -1769,14 +1769,16 @@ static int _double_lock_balance(struct rq *this_rq, struct rq *busiest) { int ret = 0; - if (unlikely(!spin_trylock(&busiest->lock))) { + if (unlikely(!raw_spin_trylock(&busiest->lock))) { if (busiest < this_rq) { - spin_unlock(&this_rq->lock); - spin_lock(&busiest->lock); - spin_lock_nested(&this_rq->lock, SINGLE_DEPTH_NESTING); + raw_spin_unlock(&this_rq->lock); + raw_spin_lock(&busiest->lock); + raw_spin_lock_nested(&this_rq->lock, + SINGLE_DEPTH_NESTING); ret = 1; } else - spin_lock_nested(&busiest->lock, SINGLE_DEPTH_NESTING); + raw_spin_lock_nested(&busiest->lock, + SINGLE_DEPTH_NESTING); } return ret; } @@ -1790,7 +1792,7 @@ static int double_lock_balance(struct rq *this_rq, struct rq *busiest) { if (unlikely(!irqs_disabled())) { /* printk() doesn't work good under rq->lock */ - spin_unlock(&this_rq->lock); + raw_spin_unlock(&this_rq->lock); BUG_ON(1); } @@ -1800,7 +1802,7 @@ static int double_lock_balance(struct rq *this_rq, struct rq *busiest) static inline void double_unlock_balance(struct rq *this_rq, struct rq *busiest) __releases(busiest->lock) { - spin_unlock(&busiest->lock); + raw_spin_unlock(&busiest->lock); lock_set_subclass(&this_rq->lock.dep_map, 0, _RET_IP_); } #endif @@ -2023,13 +2025,13 @@ void kthread_bind(struct task_struct *p, unsigned int cpu) return; } - spin_lock_irqsave(&rq->lock, flags); + raw_spin_lock_irqsave(&rq->lock, flags); update_rq_clock(rq); set_task_cpu(p, cpu); p->cpus_allowed = cpumask_of_cpu(cpu); p->rt.nr_cpus_allowed = 1; p->flags |= PF_THREAD_BOUND; - spin_unlock_irqrestore(&rq->lock, flags); + raw_spin_unlock_irqrestore(&rq->lock, flags); } EXPORT_SYMBOL(kthread_bind); @@ -2781,10 +2783,10 @@ static inline void post_schedule(struct rq *rq) if (rq->post_schedule) { unsigned long flags; - spin_lock_irqsave(&rq->lock, flags); + raw_spin_lock_irqsave(&rq->lock, flags); if (rq->curr->sched_class->post_schedule) rq->curr->sched_class->post_schedule(rq); - spin_unlock_irqrestore(&rq->lock, flags); + raw_spin_unlock_irqrestore(&rq->lock, flags); rq->post_schedule = 0; } @@ -3066,15 +3068,15 @@ static void double_rq_lock(struct rq *rq1, struct rq *rq2) { BUG_ON(!irqs_disabled()); if (rq1 == rq2) { - spin_lock(&rq1->lock); + raw_spin_lock(&rq1->lock); __acquire(rq2->lock); /* Fake it out ;) */ } else { if (rq1 < rq2) { - spin_lock(&rq1->lock); - spin_lock_nested(&rq2->lock, SINGLE_DEPTH_NESTING); + raw_spin_lock(&rq1->lock); + raw_spin_lock_nested(&rq2->lock, SINGLE_DEPTH_NESTING); } else { - spin_lock(&rq2->lock); - spin_lock_nested(&rq1->lock, SINGLE_DEPTH_NESTING); + raw_spin_lock(&rq2->lock); + raw_spin_lock_nested(&rq1->lock, SINGLE_DEPTH_NESTING); } } update_rq_clock(rq1); @@ -3091,9 +3093,9 @@ static void double_rq_unlock(struct rq *rq1, struct rq *rq2) __releases(rq1->lock) __releases(rq2->lock) { - spin_unlock(&rq1->lock); + 
raw_spin_unlock(&rq1->lock); if (rq1 != rq2) - spin_unlock(&rq2->lock); + raw_spin_unlock(&rq2->lock); else __release(rq2->lock); } @@ -4186,14 +4188,15 @@ redo: if (unlikely(sd->nr_balance_failed > sd->cache_nice_tries+2)) { - spin_lock_irqsave(&busiest->lock, flags); + raw_spin_lock_irqsave(&busiest->lock, flags); /* don't kick the migration_thread, if the curr * task on busiest cpu can't be moved to this_cpu */ if (!cpumask_test_cpu(this_cpu, &busiest->curr->cpus_allowed)) { - spin_unlock_irqrestore(&busiest->lock, flags); + raw_spin_unlock_irqrestore(&busiest->lock, + flags); all_pinned = 1; goto out_one_pinned; } @@ -4203,7 +4206,7 @@ redo: busiest->push_cpu = this_cpu; active_balance = 1; } - spin_unlock_irqrestore(&busiest->lock, flags); + raw_spin_unlock_irqrestore(&busiest->lock, flags); if (active_balance) wake_up_process(busiest->migration_thread); @@ -4385,10 +4388,10 @@ redo: /* * Should not call ttwu while holding a rq->lock */ - spin_unlock(&this_rq->lock); + raw_spin_unlock(&this_rq->lock); if (active_balance) wake_up_process(busiest->migration_thread); - spin_lock(&this_rq->lock); + raw_spin_lock(&this_rq->lock); } else sd->nr_balance_failed = 0; @@ -5257,11 +5260,11 @@ void scheduler_tick(void) sched_clock_tick(); - spin_lock(&rq->lock); + raw_spin_lock(&rq->lock); update_rq_clock(rq); update_cpu_load(rq); curr->sched_class->task_tick(rq, curr, 0); - spin_unlock(&rq->lock); + raw_spin_unlock(&rq->lock); perf_event_task_tick(curr, cpu); @@ -5455,7 +5458,7 @@ need_resched_nonpreemptible: if (sched_feat(HRTICK)) hrtick_clear(rq); - spin_lock_irq(&rq->lock); + raw_spin_lock_irq(&rq->lock); update_rq_clock(rq); clear_tsk_need_resched(prev); @@ -5491,7 +5494,7 @@ need_resched_nonpreemptible: cpu = smp_processor_id(); rq = cpu_rq(cpu); } else - spin_unlock_irq(&rq->lock); + raw_spin_unlock_irq(&rq->lock); post_schedule(rq); @@ -6320,7 +6323,7 @@ recheck: * make sure no PI-waiters arrive (or leave) while we are * changing the priority of the task: */ - spin_lock_irqsave(&p->pi_lock, flags); + raw_spin_lock_irqsave(&p->pi_lock, flags); /* * To be able to change p->policy safely, the apropriate * runqueue lock must be held. @@ -6330,7 +6333,7 @@ recheck: if (unlikely(oldpolicy != -1 && oldpolicy != p->policy)) { policy = oldpolicy = -1; __task_rq_unlock(rq); - spin_unlock_irqrestore(&p->pi_lock, flags); + raw_spin_unlock_irqrestore(&p->pi_lock, flags); goto recheck; } update_rq_clock(rq); @@ -6354,7 +6357,7 @@ recheck: check_class_changed(rq, p, prev_class, oldprio, running); } __task_rq_unlock(rq); - spin_unlock_irqrestore(&p->pi_lock, flags); + raw_spin_unlock_irqrestore(&p->pi_lock, flags); rt_mutex_adjust_pi(p); @@ -6684,7 +6687,7 @@ SYSCALL_DEFINE0(sched_yield) */ __release(rq->lock); spin_release(&rq->lock.dep_map, 1, _THIS_IP_); - _raw_spin_unlock(&rq->lock); + do_raw_spin_unlock(&rq->lock); preempt_enable_no_resched(); schedule(); @@ -6980,7 +6983,7 @@ void __cpuinit init_idle(struct task_struct *idle, int cpu) struct rq *rq = cpu_rq(cpu); unsigned long flags; - spin_lock_irqsave(&rq->lock, flags); + raw_spin_lock_irqsave(&rq->lock, flags); __sched_fork(idle); idle->se.exec_start = sched_clock(); @@ -6992,7 +6995,7 @@ void __cpuinit init_idle(struct task_struct *idle, int cpu) #if defined(CONFIG_SMP) && defined(__ARCH_WANT_UNLOCKED_CTXSW) idle->oncpu = 1; #endif - spin_unlock_irqrestore(&rq->lock, flags); + raw_spin_unlock_irqrestore(&rq->lock, flags); /* Set the preempt count _outside_ the spinlocks! 
*/ #if defined(CONFIG_PREEMPT) @@ -7209,10 +7212,10 @@ static int migration_thread(void *data) struct migration_req *req; struct list_head *head; - spin_lock_irq(&rq->lock); + raw_spin_lock_irq(&rq->lock); if (cpu_is_offline(cpu)) { - spin_unlock_irq(&rq->lock); + raw_spin_unlock_irq(&rq->lock); break; } @@ -7224,7 +7227,7 @@ static int migration_thread(void *data) head = &rq->migration_queue; if (list_empty(head)) { - spin_unlock_irq(&rq->lock); + raw_spin_unlock_irq(&rq->lock); schedule(); set_current_state(TASK_INTERRUPTIBLE); continue; @@ -7233,14 +7236,14 @@ static int migration_thread(void *data) list_del_init(head->next); if (req->task != NULL) { - spin_unlock(&rq->lock); + raw_spin_unlock(&rq->lock); __migrate_task(req->task, cpu, req->dest_cpu); } else if (likely(cpu == (badcpu = smp_processor_id()))) { req->dest_cpu = RCU_MIGRATION_GOT_QS; - spin_unlock(&rq->lock); + raw_spin_unlock(&rq->lock); } else { req->dest_cpu = RCU_MIGRATION_MUST_SYNC; - spin_unlock(&rq->lock); + raw_spin_unlock(&rq->lock); WARN_ONCE(1, "migration_thread() on CPU %d, expected %d\n", badcpu, cpu); } local_irq_enable(); @@ -7363,14 +7366,14 @@ void sched_idle_next(void) * Strictly not necessary since rest of the CPUs are stopped by now * and interrupts disabled on the current cpu. */ - spin_lock_irqsave(&rq->lock, flags); + raw_spin_lock_irqsave(&rq->lock, flags); __setscheduler(rq, p, SCHED_FIFO, MAX_RT_PRIO-1); update_rq_clock(rq); activate_task(rq, p, 0); - spin_unlock_irqrestore(&rq->lock, flags); + raw_spin_unlock_irqrestore(&rq->lock, flags); } /* @@ -7406,9 +7409,9 @@ static void migrate_dead(unsigned int dead_cpu, struct task_struct *p) * that's OK. No task can be added to this CPU, so iteration is * fine. */ - spin_unlock_irq(&rq->lock); + raw_spin_unlock_irq(&rq->lock); move_task_off_dead_cpu(dead_cpu, p); - spin_lock_irq(&rq->lock); + raw_spin_lock_irq(&rq->lock); put_task_struct(p); } @@ -7674,13 +7677,13 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu) /* Update our root-domain */ rq = cpu_rq(cpu); - spin_lock_irqsave(&rq->lock, flags); + raw_spin_lock_irqsave(&rq->lock, flags); if (rq->rd) { BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span)); set_rq_online(rq); } - spin_unlock_irqrestore(&rq->lock, flags); + raw_spin_unlock_irqrestore(&rq->lock, flags); break; #ifdef CONFIG_HOTPLUG_CPU @@ -7705,13 +7708,13 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu) put_task_struct(rq->migration_thread); rq->migration_thread = NULL; /* Idle task back to normal (off runqueue, low prio) */ - spin_lock_irq(&rq->lock); + raw_spin_lock_irq(&rq->lock); update_rq_clock(rq); deactivate_task(rq, rq->idle, 0); __setscheduler(rq, rq->idle, SCHED_NORMAL, 0); rq->idle->sched_class = &idle_sched_class; migrate_dead_tasks(cpu); - spin_unlock_irq(&rq->lock); + raw_spin_unlock_irq(&rq->lock); cpuset_unlock(); migrate_nr_uninterruptible(rq); BUG_ON(rq->nr_running != 0); @@ -7721,30 +7724,30 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu) * they didn't take sched_hotcpu_mutex. Just wake up * the requestors. 
*/ - spin_lock_irq(&rq->lock); + raw_spin_lock_irq(&rq->lock); while (!list_empty(&rq->migration_queue)) { struct migration_req *req; req = list_entry(rq->migration_queue.next, struct migration_req, list); list_del_init(&req->list); - spin_unlock_irq(&rq->lock); + raw_spin_unlock_irq(&rq->lock); complete(&req->done); - spin_lock_irq(&rq->lock); + raw_spin_lock_irq(&rq->lock); } - spin_unlock_irq(&rq->lock); + raw_spin_unlock_irq(&rq->lock); break; case CPU_DYING: case CPU_DYING_FROZEN: /* Update our root-domain */ rq = cpu_rq(cpu); - spin_lock_irqsave(&rq->lock, flags); + raw_spin_lock_irqsave(&rq->lock, flags); if (rq->rd) { BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span)); set_rq_offline(rq); } - spin_unlock_irqrestore(&rq->lock, flags); + raw_spin_unlock_irqrestore(&rq->lock, flags); break; #endif } @@ -7974,7 +7977,7 @@ static void rq_attach_root(struct rq *rq, struct root_domain *rd) struct root_domain *old_rd = NULL; unsigned long flags; - spin_lock_irqsave(&rq->lock, flags); + raw_spin_lock_irqsave(&rq->lock, flags); if (rq->rd) { old_rd = rq->rd; @@ -8000,7 +8003,7 @@ static void rq_attach_root(struct rq *rq, struct root_domain *rd) if (cpumask_test_cpu(rq->cpu, cpu_active_mask)) set_rq_online(rq); - spin_unlock_irqrestore(&rq->lock, flags); + raw_spin_unlock_irqrestore(&rq->lock, flags); if (old_rd) free_rootdomain(old_rd); @@ -9357,13 +9360,13 @@ static void init_rt_rq(struct rt_rq *rt_rq, struct rq *rq) #ifdef CONFIG_SMP rt_rq->rt_nr_migratory = 0; rt_rq->overloaded = 0; - plist_head_init(&rt_rq->pushable_tasks, &rq->lock); + plist_head_init_raw(&rt_rq->pushable_tasks, &rq->lock); #endif rt_rq->rt_time = 0; rt_rq->rt_throttled = 0; rt_rq->rt_runtime = 0; - spin_lock_init(&rt_rq->rt_runtime_lock); + raw_spin_lock_init(&rt_rq->rt_runtime_lock); #ifdef CONFIG_RT_GROUP_SCHED rt_rq->rt_nr_boosted = 0; @@ -9523,7 +9526,7 @@ void __init sched_init(void) struct rq *rq; rq = cpu_rq(i); - spin_lock_init(&rq->lock); + raw_spin_lock_init(&rq->lock); rq->nr_running = 0; rq->calc_load_active = 0; rq->calc_load_update = jiffies + LOAD_FREQ; @@ -9621,7 +9624,7 @@ void __init sched_init(void) #endif #ifdef CONFIG_RT_MUTEXES - plist_head_init(&init_task.pi_waiters, &init_task.pi_lock); + plist_head_init_raw(&init_task.pi_waiters, &init_task.pi_lock); #endif /* @@ -9746,13 +9749,13 @@ void normalize_rt_tasks(void) continue; } - spin_lock(&p->pi_lock); + raw_spin_lock(&p->pi_lock); rq = __task_rq_lock(p); normalize_task(rq, p); __task_rq_unlock(rq); - spin_unlock(&p->pi_lock); + raw_spin_unlock(&p->pi_lock); } while_each_thread(g, p); read_unlock_irqrestore(&tasklist_lock, flags); @@ -10115,9 +10118,9 @@ static void set_se_shares(struct sched_entity *se, unsigned long shares) struct rq *rq = cfs_rq->rq; unsigned long flags; - spin_lock_irqsave(&rq->lock, flags); + raw_spin_lock_irqsave(&rq->lock, flags); __set_se_shares(se, shares); - spin_unlock_irqrestore(&rq->lock, flags); + raw_spin_unlock_irqrestore(&rq->lock, flags); } static DEFINE_MUTEX(shares_mutex); @@ -10302,18 +10305,18 @@ static int tg_set_bandwidth(struct task_group *tg, if (err) goto unlock; - spin_lock_irq(&tg->rt_bandwidth.rt_runtime_lock); + raw_spin_lock_irq(&tg->rt_bandwidth.rt_runtime_lock); tg->rt_bandwidth.rt_period = ns_to_ktime(rt_period); tg->rt_bandwidth.rt_runtime = rt_runtime; for_each_possible_cpu(i) { struct rt_rq *rt_rq = tg->rt_rq[i]; - spin_lock(&rt_rq->rt_runtime_lock); + raw_spin_lock(&rt_rq->rt_runtime_lock); rt_rq->rt_runtime = rt_runtime; - spin_unlock(&rt_rq->rt_runtime_lock); + 
raw_spin_unlock(&rt_rq->rt_runtime_lock); } - spin_unlock_irq(&tg->rt_bandwidth.rt_runtime_lock); + raw_spin_unlock_irq(&tg->rt_bandwidth.rt_runtime_lock); unlock: read_unlock(&tasklist_lock); mutex_unlock(&rt_constraints_mutex); @@ -10418,15 +10421,15 @@ static int sched_rt_global_constraints(void) if (sysctl_sched_rt_runtime == 0) return -EBUSY; - spin_lock_irqsave(&def_rt_bandwidth.rt_runtime_lock, flags); + raw_spin_lock_irqsave(&def_rt_bandwidth.rt_runtime_lock, flags); for_each_possible_cpu(i) { struct rt_rq *rt_rq = &cpu_rq(i)->rt; - spin_lock(&rt_rq->rt_runtime_lock); + raw_spin_lock(&rt_rq->rt_runtime_lock); rt_rq->rt_runtime = global_rt_runtime(); - spin_unlock(&rt_rq->rt_runtime_lock); + raw_spin_unlock(&rt_rq->rt_runtime_lock); } - spin_unlock_irqrestore(&def_rt_bandwidth.rt_runtime_lock, flags); + raw_spin_unlock_irqrestore(&def_rt_bandwidth.rt_runtime_lock, flags); return 0; } @@ -10717,9 +10720,9 @@ static u64 cpuacct_cpuusage_read(struct cpuacct *ca, int cpu) /* * Take rq->lock to make 64-bit read safe on 32-bit platforms. */ - spin_lock_irq(&cpu_rq(cpu)->lock); + raw_spin_lock_irq(&cpu_rq(cpu)->lock); data = *cpuusage; - spin_unlock_irq(&cpu_rq(cpu)->lock); + raw_spin_unlock_irq(&cpu_rq(cpu)->lock); #else data = *cpuusage; #endif @@ -10735,9 +10738,9 @@ static void cpuacct_cpuusage_write(struct cpuacct *ca, int cpu, u64 val) /* * Take rq->lock to make 64-bit write safe on 32-bit platforms. */ - spin_lock_irq(&cpu_rq(cpu)->lock); + raw_spin_lock_irq(&cpu_rq(cpu)->lock); *cpuusage = val; - spin_unlock_irq(&cpu_rq(cpu)->lock); + raw_spin_unlock_irq(&cpu_rq(cpu)->lock); #else *cpuusage = val; #endif @@ -10971,9 +10974,9 @@ void synchronize_sched_expedited(void) init_completion(&req->done); req->task = NULL; req->dest_cpu = RCU_MIGRATION_NEED_QS; - spin_lock_irqsave(&rq->lock, flags); + raw_spin_lock_irqsave(&rq->lock, flags); list_add(&req->list, &rq->migration_queue); - spin_unlock_irqrestore(&rq->lock, flags); + raw_spin_unlock_irqrestore(&rq->lock, flags); wake_up_process(rq->migration_thread); } for_each_online_cpu(cpu) { @@ -10981,11 +10984,11 @@ void synchronize_sched_expedited(void) req = &per_cpu(rcu_migration_req, cpu); rq = cpu_rq(cpu); wait_for_completion(&req->done); - spin_lock_irqsave(&rq->lock, flags); + raw_spin_lock_irqsave(&rq->lock, flags); if (unlikely(req->dest_cpu == RCU_MIGRATION_MUST_SYNC)) need_full_sync = 1; req->dest_cpu = RCU_MIGRATION_IDLE; - spin_unlock_irqrestore(&rq->lock, flags); + raw_spin_unlock_irqrestore(&rq->lock, flags); } rcu_expedited_state = RCU_EXPEDITED_STATE_IDLE; synchronize_sched_expedited_count++; diff --git a/kernel/sched_cpupri.c b/kernel/sched_cpupri.c index 0f052fc674d..597b33099df 100644 --- a/kernel/sched_cpupri.c +++ b/kernel/sched_cpupri.c @@ -135,26 +135,26 @@ void cpupri_set(struct cpupri *cp, int cpu, int newpri) if (likely(newpri != CPUPRI_INVALID)) { struct cpupri_vec *vec = &cp->pri_to_cpu[newpri]; - spin_lock_irqsave(&vec->lock, flags); + raw_spin_lock_irqsave(&vec->lock, flags); cpumask_set_cpu(cpu, vec->mask); vec->count++; if (vec->count == 1) set_bit(newpri, cp->pri_active); - spin_unlock_irqrestore(&vec->lock, flags); + raw_spin_unlock_irqrestore(&vec->lock, flags); } if (likely(oldpri != CPUPRI_INVALID)) { struct cpupri_vec *vec = &cp->pri_to_cpu[oldpri]; - spin_lock_irqsave(&vec->lock, flags); + raw_spin_lock_irqsave(&vec->lock, flags); vec->count--; if (!vec->count) clear_bit(oldpri, cp->pri_active); cpumask_clear_cpu(cpu, vec->mask); - spin_unlock_irqrestore(&vec->lock, flags); + 
raw_spin_unlock_irqrestore(&vec->lock, flags); } *currpri = newpri; @@ -180,7 +180,7 @@ int cpupri_init(struct cpupri *cp, bool bootmem) for (i = 0; i < CPUPRI_NR_PRIORITIES; i++) { struct cpupri_vec *vec = &cp->pri_to_cpu[i]; - spin_lock_init(&vec->lock); + raw_spin_lock_init(&vec->lock); vec->count = 0; if (!zalloc_cpumask_var(&vec->mask, gfp)) goto cleanup; diff --git a/kernel/sched_cpupri.h b/kernel/sched_cpupri.h index 9a7e859b8fb..7cb5bb6b95b 100644 --- a/kernel/sched_cpupri.h +++ b/kernel/sched_cpupri.h @@ -12,7 +12,7 @@ /* values 2-101 are RT priorities 0-99 */ struct cpupri_vec { - spinlock_t lock; + raw_spinlock_t lock; int count; cpumask_var_t mask; }; diff --git a/kernel/sched_debug.c b/kernel/sched_debug.c index 5ae24fc65d7..67f95aada4b 100644 --- a/kernel/sched_debug.c +++ b/kernel/sched_debug.c @@ -184,7 +184,7 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq) SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "exec_clock", SPLIT_NS(cfs_rq->exec_clock)); - spin_lock_irqsave(&rq->lock, flags); + raw_spin_lock_irqsave(&rq->lock, flags); if (cfs_rq->rb_leftmost) MIN_vruntime = (__pick_next_entity(cfs_rq))->vruntime; last = __pick_last_entity(cfs_rq); @@ -192,7 +192,7 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq) max_vruntime = last->vruntime; min_vruntime = cfs_rq->min_vruntime; rq0_min_vruntime = cpu_rq(0)->cfs.min_vruntime; - spin_unlock_irqrestore(&rq->lock, flags); + raw_spin_unlock_irqrestore(&rq->lock, flags); SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "MIN_vruntime", SPLIT_NS(MIN_vruntime)); SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "min_vruntime", diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c index 804a411838f..5bedf6e3ebf 100644 --- a/kernel/sched_fair.c +++ b/kernel/sched_fair.c @@ -1955,7 +1955,7 @@ static void task_fork_fair(struct task_struct *p) struct rq *rq = this_rq(); unsigned long flags; - spin_lock_irqsave(&rq->lock, flags); + raw_spin_lock_irqsave(&rq->lock, flags); if (unlikely(task_cpu(p) != this_cpu)) __set_task_cpu(p, this_cpu); @@ -1975,7 +1975,7 @@ static void task_fork_fair(struct task_struct *p) resched_task(rq->curr); } - spin_unlock_irqrestore(&rq->lock, flags); + raw_spin_unlock_irqrestore(&rq->lock, flags); } /* diff --git a/kernel/sched_idletask.c b/kernel/sched_idletask.c index 33d5384a73a..5f93b570d38 100644 --- a/kernel/sched_idletask.c +++ b/kernel/sched_idletask.c @@ -34,10 +34,10 @@ static struct task_struct *pick_next_task_idle(struct rq *rq) static void dequeue_task_idle(struct rq *rq, struct task_struct *p, int sleep) { - spin_unlock_irq(&rq->lock); + raw_spin_unlock_irq(&rq->lock); printk(KERN_ERR "bad: scheduling from the idle thread!\n"); dump_stack(); - spin_lock_irq(&rq->lock); + raw_spin_lock_irq(&rq->lock); } static void put_prev_task_idle(struct rq *rq, struct task_struct *prev) diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c index aecbd9c6b20..d2ea2828164 100644 --- a/kernel/sched_rt.c +++ b/kernel/sched_rt.c @@ -327,7 +327,7 @@ static int do_balance_runtime(struct rt_rq *rt_rq) weight = cpumask_weight(rd->span); - spin_lock(&rt_b->rt_runtime_lock); + raw_spin_lock(&rt_b->rt_runtime_lock); rt_period = ktime_to_ns(rt_b->rt_period); for_each_cpu(i, rd->span) { struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i); @@ -336,7 +336,7 @@ static int do_balance_runtime(struct rt_rq *rt_rq) if (iter == rt_rq) continue; - spin_lock(&iter->rt_runtime_lock); + raw_spin_lock(&iter->rt_runtime_lock); /* * Either all rqs have inf runtime and there's nothing to steal * or __disable_runtime() below sets a 
specific rq to inf to @@ -358,14 +358,14 @@ static int do_balance_runtime(struct rt_rq *rt_rq) rt_rq->rt_runtime += diff; more = 1; if (rt_rq->rt_runtime == rt_period) { - spin_unlock(&iter->rt_runtime_lock); + raw_spin_unlock(&iter->rt_runtime_lock); break; } } next: - spin_unlock(&iter->rt_runtime_lock); + raw_spin_unlock(&iter->rt_runtime_lock); } - spin_unlock(&rt_b->rt_runtime_lock); + raw_spin_unlock(&rt_b->rt_runtime_lock); return more; } @@ -386,8 +386,8 @@ static void __disable_runtime(struct rq *rq) s64 want; int i; - spin_lock(&rt_b->rt_runtime_lock); - spin_lock(&rt_rq->rt_runtime_lock); + raw_spin_lock(&rt_b->rt_runtime_lock); + raw_spin_lock(&rt_rq->rt_runtime_lock); /* * Either we're all inf and nobody needs to borrow, or we're * already disabled and thus have nothing to do, or we have @@ -396,7 +396,7 @@ static void __disable_runtime(struct rq *rq) if (rt_rq->rt_runtime == RUNTIME_INF || rt_rq->rt_runtime == rt_b->rt_runtime) goto balanced; - spin_unlock(&rt_rq->rt_runtime_lock); + raw_spin_unlock(&rt_rq->rt_runtime_lock); /* * Calculate the difference between what we started out with @@ -418,7 +418,7 @@ static void __disable_runtime(struct rq *rq) if (iter == rt_rq || iter->rt_runtime == RUNTIME_INF) continue; - spin_lock(&iter->rt_runtime_lock); + raw_spin_lock(&iter->rt_runtime_lock); if (want > 0) { diff = min_t(s64, iter->rt_runtime, want); iter->rt_runtime -= diff; @@ -427,13 +427,13 @@ static void __disable_runtime(struct rq *rq) iter->rt_runtime -= want; want -= want; } - spin_unlock(&iter->rt_runtime_lock); + raw_spin_unlock(&iter->rt_runtime_lock); if (!want) break; } - spin_lock(&rt_rq->rt_runtime_lock); + raw_spin_lock(&rt_rq->rt_runtime_lock); /* * We cannot be left wanting - that would mean some runtime * leaked out of the system. @@ -445,8 +445,8 @@ balanced: * runtime - in which case borrowing doesn't make sense. 
*/ rt_rq->rt_runtime = RUNTIME_INF; - spin_unlock(&rt_rq->rt_runtime_lock); - spin_unlock(&rt_b->rt_runtime_lock); + raw_spin_unlock(&rt_rq->rt_runtime_lock); + raw_spin_unlock(&rt_b->rt_runtime_lock); } } @@ -454,9 +454,9 @@ static void disable_runtime(struct rq *rq) { unsigned long flags; - spin_lock_irqsave(&rq->lock, flags); + raw_spin_lock_irqsave(&rq->lock, flags); __disable_runtime(rq); - spin_unlock_irqrestore(&rq->lock, flags); + raw_spin_unlock_irqrestore(&rq->lock, flags); } static void __enable_runtime(struct rq *rq) @@ -472,13 +472,13 @@ static void __enable_runtime(struct rq *rq) for_each_leaf_rt_rq(rt_rq, rq) { struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq); - spin_lock(&rt_b->rt_runtime_lock); - spin_lock(&rt_rq->rt_runtime_lock); + raw_spin_lock(&rt_b->rt_runtime_lock); + raw_spin_lock(&rt_rq->rt_runtime_lock); rt_rq->rt_runtime = rt_b->rt_runtime; rt_rq->rt_time = 0; rt_rq->rt_throttled = 0; - spin_unlock(&rt_rq->rt_runtime_lock); - spin_unlock(&rt_b->rt_runtime_lock); + raw_spin_unlock(&rt_rq->rt_runtime_lock); + raw_spin_unlock(&rt_b->rt_runtime_lock); } } @@ -486,9 +486,9 @@ static void enable_runtime(struct rq *rq) { unsigned long flags; - spin_lock_irqsave(&rq->lock, flags); + raw_spin_lock_irqsave(&rq->lock, flags); __enable_runtime(rq); - spin_unlock_irqrestore(&rq->lock, flags); + raw_spin_unlock_irqrestore(&rq->lock, flags); } static int balance_runtime(struct rt_rq *rt_rq) @@ -496,9 +496,9 @@ static int balance_runtime(struct rt_rq *rt_rq) int more = 0; if (rt_rq->rt_time > rt_rq->rt_runtime) { - spin_unlock(&rt_rq->rt_runtime_lock); + raw_spin_unlock(&rt_rq->rt_runtime_lock); more = do_balance_runtime(rt_rq); - spin_lock(&rt_rq->rt_runtime_lock); + raw_spin_lock(&rt_rq->rt_runtime_lock); } return more; @@ -524,11 +524,11 @@ static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun) struct rt_rq *rt_rq = sched_rt_period_rt_rq(rt_b, i); struct rq *rq = rq_of_rt_rq(rt_rq); - spin_lock(&rq->lock); + raw_spin_lock(&rq->lock); if (rt_rq->rt_time) { u64 runtime; - spin_lock(&rt_rq->rt_runtime_lock); + raw_spin_lock(&rt_rq->rt_runtime_lock); if (rt_rq->rt_throttled) balance_runtime(rt_rq); runtime = rt_rq->rt_runtime; @@ -539,13 +539,13 @@ static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun) } if (rt_rq->rt_time || rt_rq->rt_nr_running) idle = 0; - spin_unlock(&rt_rq->rt_runtime_lock); + raw_spin_unlock(&rt_rq->rt_runtime_lock); } else if (rt_rq->rt_nr_running) idle = 0; if (enqueue) sched_rt_rq_enqueue(rt_rq); - spin_unlock(&rq->lock); + raw_spin_unlock(&rq->lock); } return idle; @@ -624,11 +624,11 @@ static void update_curr_rt(struct rq *rq) rt_rq = rt_rq_of_se(rt_se); if (sched_rt_runtime(rt_rq) != RUNTIME_INF) { - spin_lock(&rt_rq->rt_runtime_lock); + raw_spin_lock(&rt_rq->rt_runtime_lock); rt_rq->rt_time += delta_exec; if (sched_rt_runtime_exceeded(rt_rq)) resched_task(curr); - spin_unlock(&rt_rq->rt_runtime_lock); + raw_spin_unlock(&rt_rq->rt_runtime_lock); } } } @@ -1246,7 +1246,7 @@ static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq) task_running(rq, task) || !task->se.on_rq)) { - spin_unlock(&lowest_rq->lock); + raw_spin_unlock(&lowest_rq->lock); lowest_rq = NULL; break; } diff --git a/kernel/smp.c b/kernel/smp.c index a8c76069cf5..de735a6637d 100644 --- a/kernel/smp.c +++ b/kernel/smp.c @@ -16,11 +16,11 @@ static DEFINE_PER_CPU(struct call_single_queue, call_single_queue); static struct { struct list_head queue; - spinlock_t lock; + raw_spinlock_t lock; } call_function 
__cacheline_aligned_in_smp = { .queue = LIST_HEAD_INIT(call_function.queue), - .lock = __SPIN_LOCK_UNLOCKED(call_function.lock), + .lock = __RAW_SPIN_LOCK_UNLOCKED(call_function.lock), }; enum { @@ -35,7 +35,7 @@ struct call_function_data { struct call_single_queue { struct list_head list; - spinlock_t lock; + raw_spinlock_t lock; }; static DEFINE_PER_CPU(struct call_function_data, cfd_data); @@ -80,7 +80,7 @@ static int __cpuinit init_call_single_data(void) for_each_possible_cpu(i) { struct call_single_queue *q = &per_cpu(call_single_queue, i); - spin_lock_init(&q->lock); + raw_spin_lock_init(&q->lock); INIT_LIST_HEAD(&q->list); } @@ -141,10 +141,10 @@ void generic_exec_single(int cpu, struct call_single_data *data, int wait) unsigned long flags; int ipi; - spin_lock_irqsave(&dst->lock, flags); + raw_spin_lock_irqsave(&dst->lock, flags); ipi = list_empty(&dst->list); list_add_tail(&data->list, &dst->list); - spin_unlock_irqrestore(&dst->lock, flags); + raw_spin_unlock_irqrestore(&dst->lock, flags); /* * The list addition should be visible before sending the IPI @@ -171,7 +171,7 @@ void generic_exec_single(int cpu, struct call_single_data *data, int wait) void generic_smp_call_function_interrupt(void) { struct call_function_data *data; - int cpu = get_cpu(); + int cpu = smp_processor_id(); /* * Shouldn't receive this interrupt on a cpu that is not yet online. @@ -201,9 +201,9 @@ void generic_smp_call_function_interrupt(void) refs = atomic_dec_return(&data->refs); WARN_ON(refs < 0); if (!refs) { - spin_lock(&call_function.lock); + raw_spin_lock(&call_function.lock); list_del_rcu(&data->csd.list); - spin_unlock(&call_function.lock); + raw_spin_unlock(&call_function.lock); } if (refs) @@ -212,7 +212,6 @@ void generic_smp_call_function_interrupt(void) csd_unlock(&data->csd); } - put_cpu(); } /* @@ -230,9 +229,9 @@ void generic_smp_call_function_single_interrupt(void) */ WARN_ON_ONCE(!cpu_online(smp_processor_id())); - spin_lock(&q->lock); + raw_spin_lock(&q->lock); list_replace_init(&q->list, &list); - spin_unlock(&q->lock); + raw_spin_unlock(&q->lock); while (!list_empty(&list)) { struct call_single_data *data; @@ -449,14 +448,14 @@ void smp_call_function_many(const struct cpumask *mask, cpumask_clear_cpu(this_cpu, data->cpumask); atomic_set(&data->refs, cpumask_weight(data->cpumask)); - spin_lock_irqsave(&call_function.lock, flags); + raw_spin_lock_irqsave(&call_function.lock, flags); /* * Place entry at the _HEAD_ of the list, so that any cpu still * observing the entry in generic_smp_call_function_interrupt() * will not miss any other list entries: */ list_add_rcu(&data->csd.list, &call_function.queue); - spin_unlock_irqrestore(&call_function.lock, flags); + raw_spin_unlock_irqrestore(&call_function.lock, flags); /* * Make the list addition visible before sending the ipi. 
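The kernel/smp.c hunks above contain two independent cleanups: the call_function and call_single_queue locks move to the raw_spinlock_t API so they keep spinning even on kernels where spinlock_t may become sleepable (preempt-rt), and the get_cpu()/put_cpu() pair in the IPI handler is dropped in favour of smp_processor_id(), since a hardirq handler cannot be preempted anyway. The sketch below is illustrative only and not part of the patch; the structure and function names are hypothetical, but the conversion pattern matches the one applied throughout this series.

/* Illustrative sketch only -- hypothetical names, not from the patch. */
#include <linux/spinlock.h>
#include <linux/list.h>

struct demo_queue {
	struct list_head	list;
	raw_spinlock_t		lock;	/* was: spinlock_t lock; */
};

static struct demo_queue demo_q = {
	.list	= LIST_HEAD_INIT(demo_q.list),
	.lock	= __RAW_SPIN_LOCK_UNLOCKED(demo_q.lock),
};

/* Queue an entry; callers may run in hardirq context. */
static void demo_enqueue(struct list_head *entry)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&demo_q.lock, flags);	/* was: spin_lock_irqsave() */
	list_add_tail(entry, &demo_q.list);
	raw_spin_unlock_irqrestore(&demo_q.lock, flags);
}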
@@ -501,20 +500,20 @@ EXPORT_SYMBOL(smp_call_function); void ipi_call_lock(void) { - spin_lock(&call_function.lock); + raw_spin_lock(&call_function.lock); } void ipi_call_unlock(void) { - spin_unlock(&call_function.lock); + raw_spin_unlock(&call_function.lock); } void ipi_call_lock_irq(void) { - spin_lock_irq(&call_function.lock); + raw_spin_lock_irq(&call_function.lock); } void ipi_call_unlock_irq(void) { - spin_unlock_irq(&call_function.lock); + raw_spin_unlock_irq(&call_function.lock); } diff --git a/kernel/spinlock.c b/kernel/spinlock.c index 41e042219ff..be6517fb9c1 100644 --- a/kernel/spinlock.c +++ b/kernel/spinlock.c @@ -32,6 +32,8 @@ * include/linux/spinlock_api_smp.h */ #else +#define raw_read_can_lock(l) read_can_lock(l) +#define raw_write_can_lock(l) write_can_lock(l) /* * We build the __lock_function inlines here. They are too large for * inlining all over the place, but here is only one user per function @@ -42,49 +44,49 @@ * towards that other CPU that it should break the lock ASAP. */ #define BUILD_LOCK_OPS(op, locktype) \ -void __lockfunc __##op##_lock(locktype##_t *lock) \ +void __lockfunc __raw_##op##_lock(locktype##_t *lock) \ { \ for (;;) { \ preempt_disable(); \ - if (likely(_raw_##op##_trylock(lock))) \ + if (likely(do_raw_##op##_trylock(lock))) \ break; \ preempt_enable(); \ \ if (!(lock)->break_lock) \ (lock)->break_lock = 1; \ - while (!op##_can_lock(lock) && (lock)->break_lock) \ - _raw_##op##_relax(&lock->raw_lock); \ + while (!raw_##op##_can_lock(lock) && (lock)->break_lock)\ + arch_##op##_relax(&lock->raw_lock); \ } \ (lock)->break_lock = 0; \ } \ \ -unsigned long __lockfunc __##op##_lock_irqsave(locktype##_t *lock) \ +unsigned long __lockfunc __raw_##op##_lock_irqsave(locktype##_t *lock) \ { \ unsigned long flags; \ \ for (;;) { \ preempt_disable(); \ local_irq_save(flags); \ - if (likely(_raw_##op##_trylock(lock))) \ + if (likely(do_raw_##op##_trylock(lock))) \ break; \ local_irq_restore(flags); \ preempt_enable(); \ \ if (!(lock)->break_lock) \ (lock)->break_lock = 1; \ - while (!op##_can_lock(lock) && (lock)->break_lock) \ - _raw_##op##_relax(&lock->raw_lock); \ + while (!raw_##op##_can_lock(lock) && (lock)->break_lock)\ + arch_##op##_relax(&lock->raw_lock); \ } \ (lock)->break_lock = 0; \ return flags; \ } \ \ -void __lockfunc __##op##_lock_irq(locktype##_t *lock) \ +void __lockfunc __raw_##op##_lock_irq(locktype##_t *lock) \ { \ - _##op##_lock_irqsave(lock); \ + _raw_##op##_lock_irqsave(lock); \ } \ \ -void __lockfunc __##op##_lock_bh(locktype##_t *lock) \ +void __lockfunc __raw_##op##_lock_bh(locktype##_t *lock) \ { \ unsigned long flags; \ \ @@ -93,7 +95,7 @@ void __lockfunc __##op##_lock_bh(locktype##_t *lock) \ /* irq-disabling. 
We use the generic preemption-aware */ \ /* function: */ \ /**/ \ - flags = _##op##_lock_irqsave(lock); \ + flags = _raw_##op##_lock_irqsave(lock); \ local_bh_disable(); \ local_irq_restore(flags); \ } \ @@ -107,269 +109,269 @@ void __lockfunc __##op##_lock_bh(locktype##_t *lock) \ * __[spin|read|write]_lock_irqsave() * __[spin|read|write]_lock_bh() */ -BUILD_LOCK_OPS(spin, spinlock); +BUILD_LOCK_OPS(spin, raw_spinlock); BUILD_LOCK_OPS(read, rwlock); BUILD_LOCK_OPS(write, rwlock); #endif -#ifdef CONFIG_DEBUG_LOCK_ALLOC - -void __lockfunc _spin_lock_nested(spinlock_t *lock, int subclass) +#ifndef CONFIG_INLINE_SPIN_TRYLOCK +int __lockfunc _raw_spin_trylock(raw_spinlock_t *lock) { - preempt_disable(); - spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_); - LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock); + return __raw_spin_trylock(lock); } -EXPORT_SYMBOL(_spin_lock_nested); +EXPORT_SYMBOL(_raw_spin_trylock); +#endif -unsigned long __lockfunc _spin_lock_irqsave_nested(spinlock_t *lock, - int subclass) +#ifndef CONFIG_INLINE_SPIN_TRYLOCK_BH +int __lockfunc _raw_spin_trylock_bh(raw_spinlock_t *lock) { - unsigned long flags; - - local_irq_save(flags); - preempt_disable(); - spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_); - LOCK_CONTENDED_FLAGS(lock, _raw_spin_trylock, _raw_spin_lock, - _raw_spin_lock_flags, &flags); - return flags; + return __raw_spin_trylock_bh(lock); } -EXPORT_SYMBOL(_spin_lock_irqsave_nested); +EXPORT_SYMBOL(_raw_spin_trylock_bh); +#endif -void __lockfunc _spin_lock_nest_lock(spinlock_t *lock, - struct lockdep_map *nest_lock) +#ifndef CONFIG_INLINE_SPIN_LOCK +void __lockfunc _raw_spin_lock(raw_spinlock_t *lock) { - preempt_disable(); - spin_acquire_nest(&lock->dep_map, 0, 0, nest_lock, _RET_IP_); - LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock); + __raw_spin_lock(lock); } -EXPORT_SYMBOL(_spin_lock_nest_lock); - +EXPORT_SYMBOL(_raw_spin_lock); #endif -#ifndef CONFIG_INLINE_SPIN_TRYLOCK -int __lockfunc _spin_trylock(spinlock_t *lock) +#ifndef CONFIG_INLINE_SPIN_LOCK_IRQSAVE +unsigned long __lockfunc _raw_spin_lock_irqsave(raw_spinlock_t *lock) { - return __spin_trylock(lock); + return __raw_spin_lock_irqsave(lock); } -EXPORT_SYMBOL(_spin_trylock); +EXPORT_SYMBOL(_raw_spin_lock_irqsave); #endif -#ifndef CONFIG_INLINE_READ_TRYLOCK -int __lockfunc _read_trylock(rwlock_t *lock) +#ifndef CONFIG_INLINE_SPIN_LOCK_IRQ +void __lockfunc _raw_spin_lock_irq(raw_spinlock_t *lock) { - return __read_trylock(lock); + __raw_spin_lock_irq(lock); } -EXPORT_SYMBOL(_read_trylock); +EXPORT_SYMBOL(_raw_spin_lock_irq); #endif -#ifndef CONFIG_INLINE_WRITE_TRYLOCK -int __lockfunc _write_trylock(rwlock_t *lock) +#ifndef CONFIG_INLINE_SPIN_LOCK_BH +void __lockfunc _raw_spin_lock_bh(raw_spinlock_t *lock) { - return __write_trylock(lock); + __raw_spin_lock_bh(lock); } -EXPORT_SYMBOL(_write_trylock); +EXPORT_SYMBOL(_raw_spin_lock_bh); #endif -#ifndef CONFIG_INLINE_READ_LOCK -void __lockfunc _read_lock(rwlock_t *lock) +#ifndef CONFIG_INLINE_SPIN_UNLOCK +void __lockfunc _raw_spin_unlock(raw_spinlock_t *lock) { - __read_lock(lock); + __raw_spin_unlock(lock); } -EXPORT_SYMBOL(_read_lock); +EXPORT_SYMBOL(_raw_spin_unlock); #endif -#ifndef CONFIG_INLINE_SPIN_LOCK_IRQSAVE -unsigned long __lockfunc _spin_lock_irqsave(spinlock_t *lock) +#ifndef CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE +void __lockfunc _raw_spin_unlock_irqrestore(raw_spinlock_t *lock, unsigned long flags) { - return __spin_lock_irqsave(lock); + __raw_spin_unlock_irqrestore(lock, flags); } -EXPORT_SYMBOL(_spin_lock_irqsave); 
+EXPORT_SYMBOL(_raw_spin_unlock_irqrestore); #endif -#ifndef CONFIG_INLINE_SPIN_LOCK_IRQ -void __lockfunc _spin_lock_irq(spinlock_t *lock) +#ifndef CONFIG_INLINE_SPIN_UNLOCK_IRQ +void __lockfunc _raw_spin_unlock_irq(raw_spinlock_t *lock) { - __spin_lock_irq(lock); + __raw_spin_unlock_irq(lock); } -EXPORT_SYMBOL(_spin_lock_irq); +EXPORT_SYMBOL(_raw_spin_unlock_irq); #endif -#ifndef CONFIG_INLINE_SPIN_LOCK_BH -void __lockfunc _spin_lock_bh(spinlock_t *lock) +#ifndef CONFIG_INLINE_SPIN_UNLOCK_BH +void __lockfunc _raw_spin_unlock_bh(raw_spinlock_t *lock) { - __spin_lock_bh(lock); + __raw_spin_unlock_bh(lock); } -EXPORT_SYMBOL(_spin_lock_bh); +EXPORT_SYMBOL(_raw_spin_unlock_bh); #endif -#ifndef CONFIG_INLINE_READ_LOCK_IRQSAVE -unsigned long __lockfunc _read_lock_irqsave(rwlock_t *lock) +#ifndef CONFIG_INLINE_READ_TRYLOCK +int __lockfunc _raw_read_trylock(rwlock_t *lock) { - return __read_lock_irqsave(lock); + return __raw_read_trylock(lock); } -EXPORT_SYMBOL(_read_lock_irqsave); +EXPORT_SYMBOL(_raw_read_trylock); #endif -#ifndef CONFIG_INLINE_READ_LOCK_IRQ -void __lockfunc _read_lock_irq(rwlock_t *lock) +#ifndef CONFIG_INLINE_READ_LOCK +void __lockfunc _raw_read_lock(rwlock_t *lock) { - __read_lock_irq(lock); + __raw_read_lock(lock); } -EXPORT_SYMBOL(_read_lock_irq); +EXPORT_SYMBOL(_raw_read_lock); #endif -#ifndef CONFIG_INLINE_READ_LOCK_BH -void __lockfunc _read_lock_bh(rwlock_t *lock) +#ifndef CONFIG_INLINE_READ_LOCK_IRQSAVE +unsigned long __lockfunc _raw_read_lock_irqsave(rwlock_t *lock) { - __read_lock_bh(lock); + return __raw_read_lock_irqsave(lock); } -EXPORT_SYMBOL(_read_lock_bh); +EXPORT_SYMBOL(_raw_read_lock_irqsave); #endif -#ifndef CONFIG_INLINE_WRITE_LOCK_IRQSAVE -unsigned long __lockfunc _write_lock_irqsave(rwlock_t *lock) +#ifndef CONFIG_INLINE_READ_LOCK_IRQ +void __lockfunc _raw_read_lock_irq(rwlock_t *lock) { - return __write_lock_irqsave(lock); + __raw_read_lock_irq(lock); } -EXPORT_SYMBOL(_write_lock_irqsave); +EXPORT_SYMBOL(_raw_read_lock_irq); #endif -#ifndef CONFIG_INLINE_WRITE_LOCK_IRQ -void __lockfunc _write_lock_irq(rwlock_t *lock) +#ifndef CONFIG_INLINE_READ_LOCK_BH +void __lockfunc _raw_read_lock_bh(rwlock_t *lock) { - __write_lock_irq(lock); + __raw_read_lock_bh(lock); } -EXPORT_SYMBOL(_write_lock_irq); +EXPORT_SYMBOL(_raw_read_lock_bh); #endif -#ifndef CONFIG_INLINE_WRITE_LOCK_BH -void __lockfunc _write_lock_bh(rwlock_t *lock) +#ifndef CONFIG_INLINE_READ_UNLOCK +void __lockfunc _raw_read_unlock(rwlock_t *lock) { - __write_lock_bh(lock); + __raw_read_unlock(lock); } -EXPORT_SYMBOL(_write_lock_bh); +EXPORT_SYMBOL(_raw_read_unlock); #endif -#ifndef CONFIG_INLINE_SPIN_LOCK -void __lockfunc _spin_lock(spinlock_t *lock) +#ifndef CONFIG_INLINE_READ_UNLOCK_IRQRESTORE +void __lockfunc _raw_read_unlock_irqrestore(rwlock_t *lock, unsigned long flags) { - __spin_lock(lock); + __raw_read_unlock_irqrestore(lock, flags); } -EXPORT_SYMBOL(_spin_lock); +EXPORT_SYMBOL(_raw_read_unlock_irqrestore); #endif -#ifndef CONFIG_INLINE_WRITE_LOCK -void __lockfunc _write_lock(rwlock_t *lock) +#ifndef CONFIG_INLINE_READ_UNLOCK_IRQ +void __lockfunc _raw_read_unlock_irq(rwlock_t *lock) { - __write_lock(lock); + __raw_read_unlock_irq(lock); } -EXPORT_SYMBOL(_write_lock); +EXPORT_SYMBOL(_raw_read_unlock_irq); #endif -#ifndef CONFIG_INLINE_SPIN_UNLOCK -void __lockfunc _spin_unlock(spinlock_t *lock) +#ifndef CONFIG_INLINE_READ_UNLOCK_BH +void __lockfunc _raw_read_unlock_bh(rwlock_t *lock) { - __spin_unlock(lock); + __raw_read_unlock_bh(lock); } -EXPORT_SYMBOL(_spin_unlock); 
+EXPORT_SYMBOL(_raw_read_unlock_bh); #endif -#ifndef CONFIG_INLINE_WRITE_UNLOCK -void __lockfunc _write_unlock(rwlock_t *lock) +#ifndef CONFIG_INLINE_WRITE_TRYLOCK +int __lockfunc _raw_write_trylock(rwlock_t *lock) { - __write_unlock(lock); + return __raw_write_trylock(lock); } -EXPORT_SYMBOL(_write_unlock); +EXPORT_SYMBOL(_raw_write_trylock); #endif -#ifndef CONFIG_INLINE_READ_UNLOCK -void __lockfunc _read_unlock(rwlock_t *lock) +#ifndef CONFIG_INLINE_WRITE_LOCK +void __lockfunc _raw_write_lock(rwlock_t *lock) { - __read_unlock(lock); + __raw_write_lock(lock); } -EXPORT_SYMBOL(_read_unlock); +EXPORT_SYMBOL(_raw_write_lock); #endif -#ifndef CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE -void __lockfunc _spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags) +#ifndef CONFIG_INLINE_WRITE_LOCK_IRQSAVE +unsigned long __lockfunc _raw_write_lock_irqsave(rwlock_t *lock) { - __spin_unlock_irqrestore(lock, flags); + return __raw_write_lock_irqsave(lock); } -EXPORT_SYMBOL(_spin_unlock_irqrestore); +EXPORT_SYMBOL(_raw_write_lock_irqsave); #endif -#ifndef CONFIG_INLINE_SPIN_UNLOCK_IRQ -void __lockfunc _spin_unlock_irq(spinlock_t *lock) +#ifndef CONFIG_INLINE_WRITE_LOCK_IRQ +void __lockfunc _raw_write_lock_irq(rwlock_t *lock) { - __spin_unlock_irq(lock); + __raw_write_lock_irq(lock); } -EXPORT_SYMBOL(_spin_unlock_irq); +EXPORT_SYMBOL(_raw_write_lock_irq); #endif -#ifndef CONFIG_INLINE_SPIN_UNLOCK_BH -void __lockfunc _spin_unlock_bh(spinlock_t *lock) +#ifndef CONFIG_INLINE_WRITE_LOCK_BH +void __lockfunc _raw_write_lock_bh(rwlock_t *lock) { - __spin_unlock_bh(lock); + __raw_write_lock_bh(lock); } -EXPORT_SYMBOL(_spin_unlock_bh); +EXPORT_SYMBOL(_raw_write_lock_bh); #endif -#ifndef CONFIG_INLINE_READ_UNLOCK_IRQRESTORE -void __lockfunc _read_unlock_irqrestore(rwlock_t *lock, unsigned long flags) +#ifndef CONFIG_INLINE_WRITE_UNLOCK +void __lockfunc _raw_write_unlock(rwlock_t *lock) { - __read_unlock_irqrestore(lock, flags); + __raw_write_unlock(lock); } -EXPORT_SYMBOL(_read_unlock_irqrestore); +EXPORT_SYMBOL(_raw_write_unlock); #endif -#ifndef CONFIG_INLINE_READ_UNLOCK_IRQ -void __lockfunc _read_unlock_irq(rwlock_t *lock) +#ifndef CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE +void __lockfunc _raw_write_unlock_irqrestore(rwlock_t *lock, unsigned long flags) { - __read_unlock_irq(lock); + __raw_write_unlock_irqrestore(lock, flags); } -EXPORT_SYMBOL(_read_unlock_irq); +EXPORT_SYMBOL(_raw_write_unlock_irqrestore); #endif -#ifndef CONFIG_INLINE_READ_UNLOCK_BH -void __lockfunc _read_unlock_bh(rwlock_t *lock) +#ifndef CONFIG_INLINE_WRITE_UNLOCK_IRQ +void __lockfunc _raw_write_unlock_irq(rwlock_t *lock) { - __read_unlock_bh(lock); + __raw_write_unlock_irq(lock); } -EXPORT_SYMBOL(_read_unlock_bh); +EXPORT_SYMBOL(_raw_write_unlock_irq); #endif -#ifndef CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE -void __lockfunc _write_unlock_irqrestore(rwlock_t *lock, unsigned long flags) +#ifndef CONFIG_INLINE_WRITE_UNLOCK_BH +void __lockfunc _raw_write_unlock_bh(rwlock_t *lock) { - __write_unlock_irqrestore(lock, flags); + __raw_write_unlock_bh(lock); } -EXPORT_SYMBOL(_write_unlock_irqrestore); +EXPORT_SYMBOL(_raw_write_unlock_bh); #endif -#ifndef CONFIG_INLINE_WRITE_UNLOCK_IRQ -void __lockfunc _write_unlock_irq(rwlock_t *lock) +#ifdef CONFIG_DEBUG_LOCK_ALLOC + +void __lockfunc _raw_spin_lock_nested(raw_spinlock_t *lock, int subclass) { - __write_unlock_irq(lock); + preempt_disable(); + spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_); + LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock); } -EXPORT_SYMBOL(_write_unlock_irq); -#endif 
+EXPORT_SYMBOL(_raw_spin_lock_nested); -#ifndef CONFIG_INLINE_WRITE_UNLOCK_BH -void __lockfunc _write_unlock_bh(rwlock_t *lock) +unsigned long __lockfunc _raw_spin_lock_irqsave_nested(raw_spinlock_t *lock, + int subclass) { - __write_unlock_bh(lock); + unsigned long flags; + + local_irq_save(flags); + preempt_disable(); + spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_); + LOCK_CONTENDED_FLAGS(lock, do_raw_spin_trylock, do_raw_spin_lock, + do_raw_spin_lock_flags, &flags); + return flags; } -EXPORT_SYMBOL(_write_unlock_bh); -#endif +EXPORT_SYMBOL(_raw_spin_lock_irqsave_nested); -#ifndef CONFIG_INLINE_SPIN_TRYLOCK_BH -int __lockfunc _spin_trylock_bh(spinlock_t *lock) +void __lockfunc _raw_spin_lock_nest_lock(raw_spinlock_t *lock, + struct lockdep_map *nest_lock) { - return __spin_trylock_bh(lock); + preempt_disable(); + spin_acquire_nest(&lock->dep_map, 0, 0, nest_lock, _RET_IP_); + LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock); } -EXPORT_SYMBOL(_spin_trylock_bh); +EXPORT_SYMBOL(_raw_spin_lock_nest_lock); + #endif notrace int in_lock_functions(unsigned long addr) diff --git a/kernel/sys.c b/kernel/sys.c index 585d6cd1004..20ccfb5da6a 100644 --- a/kernel/sys.c +++ b/kernel/sys.c @@ -189,10 +189,10 @@ SYSCALL_DEFINE3(setpriority, int, which, int, who, int, niceval) !(user = find_user(who))) goto out_unlock; /* No processes for this user */ - do_each_thread(g, p) + do_each_thread(g, p) { if (__task_cred(p)->uid == who) error = set_one_prio(p, niceval, error); - while_each_thread(g, p); + } while_each_thread(g, p); if (who != cred->uid) free_uid(user); /* For find_user() */ break; @@ -252,13 +252,13 @@ SYSCALL_DEFINE2(getpriority, int, which, int, who) !(user = find_user(who))) goto out_unlock; /* No processes for this user */ - do_each_thread(g, p) + do_each_thread(g, p) { if (__task_cred(p)->uid == who) { niceval = 20 - task_nice(p); if (niceval > retval) retval = niceval; } - while_each_thread(g, p); + } while_each_thread(g, p); if (who != cred->uid) free_uid(user); /* for find_user() */ break; diff --git a/kernel/sysctl.c b/kernel/sysctl.c index 554ac4894f0..45e4bef0012 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c @@ -1051,7 +1051,7 @@ static struct ctl_table vm_table[] = { .extra2 = &one_hundred, }, #ifdef CONFIG_HUGETLB_PAGE - { + { .procname = "nr_hugepages", .data = NULL, .maxlen = sizeof(unsigned long), @@ -1059,7 +1059,18 @@ static struct ctl_table vm_table[] = { .proc_handler = hugetlb_sysctl_handler, .extra1 = (void *)&hugetlb_zero, .extra2 = (void *)&hugetlb_infinity, - }, + }, +#ifdef CONFIG_NUMA + { + .procname = "nr_hugepages_mempolicy", + .data = NULL, + .maxlen = sizeof(unsigned long), + .mode = 0644, + .proc_handler = &hugetlb_mempolicy_sysctl_handler, + .extra1 = (void *)&hugetlb_zero, + .extra2 = (void *)&hugetlb_infinity, + }, +#endif { .procname = "hugetlb_shm_group", .data = &sysctl_hugetlb_shm_group, @@ -1120,7 +1131,8 @@ static struct ctl_table vm_table[] = { .data = &sysctl_max_map_count, .maxlen = sizeof(sysctl_max_map_count), .mode = 0644, - .proc_handler = proc_dointvec + .proc_handler = proc_dointvec, + .extra1 = &zero, }, #else { diff --git a/kernel/time/clockevents.c b/kernel/time/clockevents.c index 20a8920029e..3d5fc0fd1cc 100644 --- a/kernel/time/clockevents.c +++ b/kernel/time/clockevents.c @@ -30,7 +30,7 @@ static LIST_HEAD(clockevents_released); static RAW_NOTIFIER_HEAD(clockevents_chain); /* Protection for the above */ -static DEFINE_SPINLOCK(clockevents_lock); +static DEFINE_RAW_SPINLOCK(clockevents_lock); /** * clockevents_delta2ns 
- Convert a latch value (device ticks) to nanoseconds @@ -141,9 +141,9 @@ int clockevents_register_notifier(struct notifier_block *nb) unsigned long flags; int ret; - spin_lock_irqsave(&clockevents_lock, flags); + raw_spin_lock_irqsave(&clockevents_lock, flags); ret = raw_notifier_chain_register(&clockevents_chain, nb); - spin_unlock_irqrestore(&clockevents_lock, flags); + raw_spin_unlock_irqrestore(&clockevents_lock, flags); return ret; } @@ -185,13 +185,13 @@ void clockevents_register_device(struct clock_event_device *dev) BUG_ON(dev->mode != CLOCK_EVT_MODE_UNUSED); BUG_ON(!dev->cpumask); - spin_lock_irqsave(&clockevents_lock, flags); + raw_spin_lock_irqsave(&clockevents_lock, flags); list_add(&dev->list, &clockevent_devices); clockevents_do_notify(CLOCK_EVT_NOTIFY_ADD, dev); clockevents_notify_released(); - spin_unlock_irqrestore(&clockevents_lock, flags); + raw_spin_unlock_irqrestore(&clockevents_lock, flags); } EXPORT_SYMBOL_GPL(clockevents_register_device); @@ -241,7 +241,7 @@ void clockevents_notify(unsigned long reason, void *arg) struct list_head *node, *tmp; unsigned long flags; - spin_lock_irqsave(&clockevents_lock, flags); + raw_spin_lock_irqsave(&clockevents_lock, flags); clockevents_do_notify(reason, arg); switch (reason) { @@ -256,7 +256,7 @@ void clockevents_notify(unsigned long reason, void *arg) default: break; } - spin_unlock_irqrestore(&clockevents_lock, flags); + raw_spin_unlock_irqrestore(&clockevents_lock, flags); } EXPORT_SYMBOL_GPL(clockevents_notify); #endif diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c index c2ec25087a3..b3bafd5fc66 100644 --- a/kernel/time/tick-broadcast.c +++ b/kernel/time/tick-broadcast.c @@ -31,7 +31,7 @@ static struct tick_device tick_broadcast_device; /* FIXME: Use cpumask_var_t. 
*/ static DECLARE_BITMAP(tick_broadcast_mask, NR_CPUS); static DECLARE_BITMAP(tmpmask, NR_CPUS); -static DEFINE_SPINLOCK(tick_broadcast_lock); +static DEFINE_RAW_SPINLOCK(tick_broadcast_lock); static int tick_broadcast_force; #ifdef CONFIG_TICK_ONESHOT @@ -96,7 +96,7 @@ int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu) unsigned long flags; int ret = 0; - spin_lock_irqsave(&tick_broadcast_lock, flags); + raw_spin_lock_irqsave(&tick_broadcast_lock, flags); /* * Devices might be registered with both periodic and oneshot @@ -122,7 +122,7 @@ int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu) tick_broadcast_clear_oneshot(cpu); } } - spin_unlock_irqrestore(&tick_broadcast_lock, flags); + raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags); return ret; } @@ -161,13 +161,13 @@ static void tick_do_broadcast(struct cpumask *mask) */ static void tick_do_periodic_broadcast(void) { - spin_lock(&tick_broadcast_lock); + raw_spin_lock(&tick_broadcast_lock); cpumask_and(to_cpumask(tmpmask), cpu_online_mask, tick_get_broadcast_mask()); tick_do_broadcast(to_cpumask(tmpmask)); - spin_unlock(&tick_broadcast_lock); + raw_spin_unlock(&tick_broadcast_lock); } /* @@ -212,7 +212,7 @@ static void tick_do_broadcast_on_off(unsigned long *reason) unsigned long flags; int cpu, bc_stopped; - spin_lock_irqsave(&tick_broadcast_lock, flags); + raw_spin_lock_irqsave(&tick_broadcast_lock, flags); cpu = smp_processor_id(); td = &per_cpu(tick_cpu_device, cpu); @@ -263,7 +263,7 @@ static void tick_do_broadcast_on_off(unsigned long *reason) tick_broadcast_setup_oneshot(bc); } out: - spin_unlock_irqrestore(&tick_broadcast_lock, flags); + raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags); } /* @@ -299,7 +299,7 @@ void tick_shutdown_broadcast(unsigned int *cpup) unsigned long flags; unsigned int cpu = *cpup; - spin_lock_irqsave(&tick_broadcast_lock, flags); + raw_spin_lock_irqsave(&tick_broadcast_lock, flags); bc = tick_broadcast_device.evtdev; cpumask_clear_cpu(cpu, tick_get_broadcast_mask()); @@ -309,7 +309,7 @@ void tick_shutdown_broadcast(unsigned int *cpup) clockevents_shutdown(bc); } - spin_unlock_irqrestore(&tick_broadcast_lock, flags); + raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags); } void tick_suspend_broadcast(void) @@ -317,13 +317,13 @@ void tick_suspend_broadcast(void) struct clock_event_device *bc; unsigned long flags; - spin_lock_irqsave(&tick_broadcast_lock, flags); + raw_spin_lock_irqsave(&tick_broadcast_lock, flags); bc = tick_broadcast_device.evtdev; if (bc) clockevents_shutdown(bc); - spin_unlock_irqrestore(&tick_broadcast_lock, flags); + raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags); } int tick_resume_broadcast(void) @@ -332,7 +332,7 @@ int tick_resume_broadcast(void) unsigned long flags; int broadcast = 0; - spin_lock_irqsave(&tick_broadcast_lock, flags); + raw_spin_lock_irqsave(&tick_broadcast_lock, flags); bc = tick_broadcast_device.evtdev; @@ -351,7 +351,7 @@ int tick_resume_broadcast(void) break; } } - spin_unlock_irqrestore(&tick_broadcast_lock, flags); + raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags); return broadcast; } @@ -405,7 +405,7 @@ static void tick_handle_oneshot_broadcast(struct clock_event_device *dev) ktime_t now, next_event; int cpu; - spin_lock(&tick_broadcast_lock); + raw_spin_lock(&tick_broadcast_lock); again: dev->next_event.tv64 = KTIME_MAX; next_event.tv64 = KTIME_MAX; @@ -443,7 +443,7 @@ again: if (tick_broadcast_set_event(next_event, 0)) goto again; } - spin_unlock(&tick_broadcast_lock); + 
raw_spin_unlock(&tick_broadcast_lock); } /* @@ -457,7 +457,7 @@ void tick_broadcast_oneshot_control(unsigned long reason) unsigned long flags; int cpu; - spin_lock_irqsave(&tick_broadcast_lock, flags); + raw_spin_lock_irqsave(&tick_broadcast_lock, flags); /* * Periodic mode does not care about the enter/exit of power @@ -492,7 +492,7 @@ void tick_broadcast_oneshot_control(unsigned long reason) } out: - spin_unlock_irqrestore(&tick_broadcast_lock, flags); + raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags); } /* @@ -563,13 +563,13 @@ void tick_broadcast_switch_to_oneshot(void) struct clock_event_device *bc; unsigned long flags; - spin_lock_irqsave(&tick_broadcast_lock, flags); + raw_spin_lock_irqsave(&tick_broadcast_lock, flags); tick_broadcast_device.mode = TICKDEV_MODE_ONESHOT; bc = tick_broadcast_device.evtdev; if (bc) tick_broadcast_setup_oneshot(bc); - spin_unlock_irqrestore(&tick_broadcast_lock, flags); + raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags); } @@ -581,7 +581,7 @@ void tick_shutdown_broadcast_oneshot(unsigned int *cpup) unsigned long flags; unsigned int cpu = *cpup; - spin_lock_irqsave(&tick_broadcast_lock, flags); + raw_spin_lock_irqsave(&tick_broadcast_lock, flags); /* * Clear the broadcast mask flag for the dead cpu, but do not @@ -589,7 +589,7 @@ void tick_shutdown_broadcast_oneshot(unsigned int *cpup) */ cpumask_clear_cpu(cpu, tick_get_broadcast_oneshot_mask()); - spin_unlock_irqrestore(&tick_broadcast_lock, flags); + raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags); } /* diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c index 83c4417b6a3..b6b898d2eee 100644 --- a/kernel/time/tick-common.c +++ b/kernel/time/tick-common.c @@ -34,7 +34,7 @@ DEFINE_PER_CPU(struct tick_device, tick_cpu_device); ktime_t tick_next_period; ktime_t tick_period; int tick_do_timer_cpu __read_mostly = TICK_DO_TIMER_BOOT; -DEFINE_SPINLOCK(tick_device_lock); +static DEFINE_RAW_SPINLOCK(tick_device_lock); /* * Debugging: see timer_list.c @@ -209,7 +209,7 @@ static int tick_check_new_device(struct clock_event_device *newdev) int cpu, ret = NOTIFY_OK; unsigned long flags; - spin_lock_irqsave(&tick_device_lock, flags); + raw_spin_lock_irqsave(&tick_device_lock, flags); cpu = smp_processor_id(); if (!cpumask_test_cpu(cpu, newdev->cpumask)) @@ -268,7 +268,7 @@ static int tick_check_new_device(struct clock_event_device *newdev) if (newdev->features & CLOCK_EVT_FEAT_ONESHOT) tick_oneshot_notify(); - spin_unlock_irqrestore(&tick_device_lock, flags); + raw_spin_unlock_irqrestore(&tick_device_lock, flags); return NOTIFY_STOP; out_bc: @@ -278,7 +278,7 @@ out_bc: if (tick_check_broadcast_device(newdev)) ret = NOTIFY_STOP; - spin_unlock_irqrestore(&tick_device_lock, flags); + raw_spin_unlock_irqrestore(&tick_device_lock, flags); return ret; } @@ -311,7 +311,7 @@ static void tick_shutdown(unsigned int *cpup) struct clock_event_device *dev = td->evtdev; unsigned long flags; - spin_lock_irqsave(&tick_device_lock, flags); + raw_spin_lock_irqsave(&tick_device_lock, flags); td->mode = TICKDEV_MODE_PERIODIC; if (dev) { /* @@ -322,7 +322,7 @@ static void tick_shutdown(unsigned int *cpup) clockevents_exchange_device(dev, NULL); td->evtdev = NULL; } - spin_unlock_irqrestore(&tick_device_lock, flags); + raw_spin_unlock_irqrestore(&tick_device_lock, flags); } static void tick_suspend(void) @@ -330,9 +330,9 @@ static void tick_suspend(void) struct tick_device *td = &__get_cpu_var(tick_cpu_device); unsigned long flags; - spin_lock_irqsave(&tick_device_lock, flags); + 
raw_spin_lock_irqsave(&tick_device_lock, flags); clockevents_shutdown(td->evtdev); - spin_unlock_irqrestore(&tick_device_lock, flags); + raw_spin_unlock_irqrestore(&tick_device_lock, flags); } static void tick_resume(void) @@ -341,7 +341,7 @@ static void tick_resume(void) unsigned long flags; int broadcast = tick_resume_broadcast(); - spin_lock_irqsave(&tick_device_lock, flags); + raw_spin_lock_irqsave(&tick_device_lock, flags); clockevents_set_mode(td->evtdev, CLOCK_EVT_MODE_RESUME); if (!broadcast) { @@ -350,7 +350,7 @@ static void tick_resume(void) else tick_resume_oneshot(); } - spin_unlock_irqrestore(&tick_device_lock, flags); + raw_spin_unlock_irqrestore(&tick_device_lock, flags); } /* diff --git a/kernel/time/tick-internal.h b/kernel/time/tick-internal.h index b1c05bf75ee..290eefbc1f6 100644 --- a/kernel/time/tick-internal.h +++ b/kernel/time/tick-internal.h @@ -6,7 +6,6 @@ #define TICK_DO_TIMER_BOOT -2 DECLARE_PER_CPU(struct tick_device, tick_cpu_device); -extern spinlock_t tick_device_lock; extern ktime_t tick_next_period; extern ktime_t tick_period; extern int tick_do_timer_cpu __read_mostly; diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c index 9d80db4747d..28265636b6c 100644 --- a/kernel/time/timer_list.c +++ b/kernel/time/timer_list.c @@ -84,7 +84,7 @@ print_active_timers(struct seq_file *m, struct hrtimer_clock_base *base, next_one: i = 0; - spin_lock_irqsave(&base->cpu_base->lock, flags); + raw_spin_lock_irqsave(&base->cpu_base->lock, flags); curr = base->first; /* @@ -100,13 +100,13 @@ next_one: timer = rb_entry(curr, struct hrtimer, node); tmp = *timer; - spin_unlock_irqrestore(&base->cpu_base->lock, flags); + raw_spin_unlock_irqrestore(&base->cpu_base->lock, flags); print_timer(m, timer, &tmp, i, now); next++; goto next_one; } - spin_unlock_irqrestore(&base->cpu_base->lock, flags); + raw_spin_unlock_irqrestore(&base->cpu_base->lock, flags); } static void diff --git a/kernel/time/timer_stats.c b/kernel/time/timer_stats.c index 63b117e9eba..2f3b585b8d7 100644 --- a/kernel/time/timer_stats.c +++ b/kernel/time/timer_stats.c @@ -86,7 +86,7 @@ static DEFINE_SPINLOCK(table_lock); /* * Per-CPU lookup locks for fast hash lookup: */ -static DEFINE_PER_CPU(spinlock_t, tstats_lookup_lock); +static DEFINE_PER_CPU(raw_spinlock_t, tstats_lookup_lock); /* * Mutex to serialize state changes with show-stats activities: @@ -238,7 +238,7 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf, /* * It doesnt matter which lock we take: */ - spinlock_t *lock; + raw_spinlock_t *lock; struct entry *entry, input; unsigned long flags; @@ -253,7 +253,7 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf, input.pid = pid; input.timer_flag = timer_flag; - spin_lock_irqsave(lock, flags); + raw_spin_lock_irqsave(lock, flags); if (!timer_stats_active) goto out_unlock; @@ -264,7 +264,7 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf, atomic_inc(&overflow_count); out_unlock: - spin_unlock_irqrestore(lock, flags); + raw_spin_unlock_irqrestore(lock, flags); } static void print_name_offset(struct seq_file *m, unsigned long addr) @@ -348,10 +348,11 @@ static void sync_access(void) int cpu; for_each_online_cpu(cpu) { - spinlock_t *lock = &per_cpu(tstats_lookup_lock, cpu); - spin_lock_irqsave(lock, flags); + raw_spinlock_t *lock = &per_cpu(tstats_lookup_lock, cpu); + + raw_spin_lock_irqsave(lock, flags); /* nothing */ - spin_unlock_irqrestore(lock, flags); + raw_spin_unlock_irqrestore(lock, flags); } } @@ -409,7 +410,7 @@ void __init 
init_timer_stats(void) int cpu; for_each_possible_cpu(cpu) - spin_lock_init(&per_cpu(tstats_lookup_lock, cpu)); + raw_spin_lock_init(&per_cpu(tstats_lookup_lock, cpu)); } static int __init init_tstats_procfs(void) diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c index a1ca4956ab5..f58c9ad1583 100644 --- a/kernel/trace/ring_buffer.c +++ b/kernel/trace/ring_buffer.c @@ -423,7 +423,7 @@ struct ring_buffer_per_cpu { int cpu; struct ring_buffer *buffer; spinlock_t reader_lock; /* serialize readers */ - raw_spinlock_t lock; + arch_spinlock_t lock; struct lock_class_key lock_key; struct list_head *pages; struct buffer_page *head_page; /* read from head */ @@ -998,7 +998,7 @@ rb_allocate_cpu_buffer(struct ring_buffer *buffer, int cpu) cpu_buffer->buffer = buffer; spin_lock_init(&cpu_buffer->reader_lock); lockdep_set_class(&cpu_buffer->reader_lock, buffer->reader_lock_key); - cpu_buffer->lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED; + cpu_buffer->lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED; bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()), GFP_KERNEL, cpu_to_node(cpu)); @@ -2834,7 +2834,7 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer) int ret; local_irq_save(flags); - __raw_spin_lock(&cpu_buffer->lock); + arch_spin_lock(&cpu_buffer->lock); again: /* @@ -2923,7 +2923,7 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer) goto again; out: - __raw_spin_unlock(&cpu_buffer->lock); + arch_spin_unlock(&cpu_buffer->lock); local_irq_restore(flags); return reader; @@ -3286,9 +3286,9 @@ ring_buffer_read_start(struct ring_buffer *buffer, int cpu) synchronize_sched(); spin_lock_irqsave(&cpu_buffer->reader_lock, flags); - __raw_spin_lock(&cpu_buffer->lock); + arch_spin_lock(&cpu_buffer->lock); rb_iter_reset(iter); - __raw_spin_unlock(&cpu_buffer->lock); + arch_spin_unlock(&cpu_buffer->lock); spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); return iter; @@ -3408,11 +3408,11 @@ void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu) if (RB_WARN_ON(cpu_buffer, local_read(&cpu_buffer->committing))) goto out; - __raw_spin_lock(&cpu_buffer->lock); + arch_spin_lock(&cpu_buffer->lock); rb_reset_cpu(cpu_buffer); - __raw_spin_unlock(&cpu_buffer->lock); + arch_spin_unlock(&cpu_buffer->lock); out: spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index c82dfd92fdf..bb6b5e7fa2a 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -493,15 +493,15 @@ static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt) * protected by per_cpu spinlocks. But the action of the swap * needs its own lock. * - * This is defined as a raw_spinlock_t in order to help + * This is defined as a arch_spinlock_t in order to help * with performance when lockdep debugging is enabled. * * It is also used in other places outside the update_max_tr * so it needs to be defined outside of the * CONFIG_TRACER_MAX_TRACE. 
*/ -static raw_spinlock_t ftrace_max_lock = - (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED; +static arch_spinlock_t ftrace_max_lock = + (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED; #ifdef CONFIG_TRACER_MAX_TRACE unsigned long __read_mostly tracing_max_latency; @@ -555,13 +555,13 @@ update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu) return; WARN_ON_ONCE(!irqs_disabled()); - __raw_spin_lock(&ftrace_max_lock); + arch_spin_lock(&ftrace_max_lock); tr->buffer = max_tr.buffer; max_tr.buffer = buf; __update_max_tr(tr, tsk, cpu); - __raw_spin_unlock(&ftrace_max_lock); + arch_spin_unlock(&ftrace_max_lock); } /** @@ -581,7 +581,7 @@ update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu) return; WARN_ON_ONCE(!irqs_disabled()); - __raw_spin_lock(&ftrace_max_lock); + arch_spin_lock(&ftrace_max_lock); ftrace_disable_cpu(); @@ -603,7 +603,7 @@ update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu) WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY); __update_max_tr(tr, tsk, cpu); - __raw_spin_unlock(&ftrace_max_lock); + arch_spin_unlock(&ftrace_max_lock); } #endif /* CONFIG_TRACER_MAX_TRACE */ @@ -802,7 +802,7 @@ static unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1]; static unsigned map_cmdline_to_pid[SAVED_CMDLINES]; static char saved_cmdlines[SAVED_CMDLINES][TASK_COMM_LEN]; static int cmdline_idx; -static raw_spinlock_t trace_cmdline_lock = __RAW_SPIN_LOCK_UNLOCKED; +static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED; /* temporary disable recording */ static atomic_t trace_record_cmdline_disabled __read_mostly; @@ -915,7 +915,7 @@ static void trace_save_cmdline(struct task_struct *tsk) * nor do we want to disable interrupts, * so if we miss here, then better luck next time. */ - if (!__raw_spin_trylock(&trace_cmdline_lock)) + if (!arch_spin_trylock(&trace_cmdline_lock)) return; idx = map_pid_to_cmdline[tsk->pid]; @@ -940,7 +940,7 @@ static void trace_save_cmdline(struct task_struct *tsk) memcpy(&saved_cmdlines[idx], tsk->comm, TASK_COMM_LEN); - __raw_spin_unlock(&trace_cmdline_lock); + arch_spin_unlock(&trace_cmdline_lock); } void trace_find_cmdline(int pid, char comm[]) @@ -958,14 +958,14 @@ void trace_find_cmdline(int pid, char comm[]) } preempt_disable(); - __raw_spin_lock(&trace_cmdline_lock); + arch_spin_lock(&trace_cmdline_lock); map = map_pid_to_cmdline[pid]; if (map != NO_CMDLINE_MAP) strcpy(comm, saved_cmdlines[map]); else strcpy(comm, "<...>"); - __raw_spin_unlock(&trace_cmdline_lock); + arch_spin_unlock(&trace_cmdline_lock); preempt_enable(); } @@ -1251,8 +1251,8 @@ ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3) */ int trace_vbprintk(unsigned long ip, const char *fmt, va_list args) { - static raw_spinlock_t trace_buf_lock = - (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED; + static arch_spinlock_t trace_buf_lock = + (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED; static u32 trace_buf[TRACE_BUF_SIZE]; struct ftrace_event_call *call = &event_bprint; @@ -1283,7 +1283,7 @@ int trace_vbprintk(unsigned long ip, const char *fmt, va_list args) /* Lockdep uses trace_printk for lock tracing */ local_irq_save(flags); - __raw_spin_lock(&trace_buf_lock); + arch_spin_lock(&trace_buf_lock); len = vbin_printf(trace_buf, TRACE_BUF_SIZE, fmt, args); if (len > TRACE_BUF_SIZE || len < 0) @@ -1304,7 +1304,7 @@ int trace_vbprintk(unsigned long ip, const char *fmt, va_list args) ring_buffer_unlock_commit(buffer, event); out_unlock: - __raw_spin_unlock(&trace_buf_lock); + arch_spin_unlock(&trace_buf_lock); 
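In the tracing code the rename runs the other way: what this level used to call raw_spinlock_t is now arch_spinlock_t, the bare architecture lock that performs no preemption, irq, or lockdep handling at all, so callers disable interrupts or preemption themselves (for example the local_irq_save() around trace_buf_lock above). A minimal illustrative sketch of that pattern, using hypothetical names and not taken from the patch, follows.

/* Illustrative sketch only -- hypothetical names, not from the patch. */
#include <linux/spinlock.h>
#include <linux/irqflags.h>

static arch_spinlock_t demo_arch_lock = __ARCH_SPIN_LOCK_UNLOCKED;

static void demo_critical_section(void)
{
	unsigned long flags;

	/* arch_spin_lock() does not disable irqs or preemption itself. */
	local_irq_save(flags);
	arch_spin_lock(&demo_arch_lock);
	/* ... access data shared with tracing hot paths ... */
	arch_spin_unlock(&demo_arch_lock);
	local_irq_restore(flags);
}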
local_irq_restore(flags); out: @@ -1334,7 +1334,7 @@ int trace_array_printk(struct trace_array *tr, int trace_array_vprintk(struct trace_array *tr, unsigned long ip, const char *fmt, va_list args) { - static raw_spinlock_t trace_buf_lock = __RAW_SPIN_LOCK_UNLOCKED; + static arch_spinlock_t trace_buf_lock = __ARCH_SPIN_LOCK_UNLOCKED; static char trace_buf[TRACE_BUF_SIZE]; struct ftrace_event_call *call = &event_print; @@ -1360,7 +1360,7 @@ int trace_array_vprintk(struct trace_array *tr, pause_graph_tracing(); raw_local_irq_save(irq_flags); - __raw_spin_lock(&trace_buf_lock); + arch_spin_lock(&trace_buf_lock); len = vsnprintf(trace_buf, TRACE_BUF_SIZE, fmt, args); size = sizeof(*entry) + len + 1; @@ -1378,7 +1378,7 @@ int trace_array_vprintk(struct trace_array *tr, ring_buffer_unlock_commit(buffer, event); out_unlock: - __raw_spin_unlock(&trace_buf_lock); + arch_spin_unlock(&trace_buf_lock); raw_local_irq_restore(irq_flags); unpause_graph_tracing(); out: @@ -2279,7 +2279,7 @@ tracing_cpumask_write(struct file *filp, const char __user *ubuf, mutex_lock(&tracing_cpumask_update_lock); local_irq_disable(); - __raw_spin_lock(&ftrace_max_lock); + arch_spin_lock(&ftrace_max_lock); for_each_tracing_cpu(cpu) { /* * Increase/decrease the disabled counter if we are @@ -2294,7 +2294,7 @@ tracing_cpumask_write(struct file *filp, const char __user *ubuf, atomic_dec(&global_trace.data[cpu]->disabled); } } - __raw_spin_unlock(&ftrace_max_lock); + arch_spin_unlock(&ftrace_max_lock); local_irq_enable(); cpumask_copy(tracing_cpumask, tracing_cpumask_new); @@ -4307,8 +4307,8 @@ trace_printk_seq(struct trace_seq *s) static void __ftrace_dump(bool disable_tracing) { - static raw_spinlock_t ftrace_dump_lock = - (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED; + static arch_spinlock_t ftrace_dump_lock = + (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED; /* use static because iter can be a bit big for the stack */ static struct trace_iterator iter; unsigned int old_userobj; @@ -4318,7 +4318,7 @@ static void __ftrace_dump(bool disable_tracing) /* only one dump */ local_irq_save(flags); - __raw_spin_lock(&ftrace_dump_lock); + arch_spin_lock(&ftrace_dump_lock); if (dump_ran) goto out; @@ -4393,7 +4393,7 @@ static void __ftrace_dump(bool disable_tracing) } out: - __raw_spin_unlock(&ftrace_dump_lock); + arch_spin_unlock(&ftrace_dump_lock); local_irq_restore(flags); } diff --git a/kernel/trace/trace_clock.c b/kernel/trace/trace_clock.c index 878c03f386b..84a3a7ba072 100644 --- a/kernel/trace/trace_clock.c +++ b/kernel/trace/trace_clock.c @@ -71,10 +71,10 @@ u64 notrace trace_clock(void) /* keep prev_time and lock in the same cacheline. 
*/ static struct { u64 prev_time; - raw_spinlock_t lock; + arch_spinlock_t lock; } trace_clock_struct ____cacheline_aligned_in_smp = { - .lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED, + .lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED, }; u64 notrace trace_clock_global(void) @@ -94,7 +94,7 @@ u64 notrace trace_clock_global(void) if (unlikely(in_nmi())) goto out; - __raw_spin_lock(&trace_clock_struct.lock); + arch_spin_lock(&trace_clock_struct.lock); /* * TODO: if this happens often then maybe we should reset @@ -106,7 +106,7 @@ u64 notrace trace_clock_global(void) trace_clock_struct.prev_time = now; - __raw_spin_unlock(&trace_clock_struct.lock); + arch_spin_unlock(&trace_clock_struct.lock); out: raw_local_irq_restore(flags); diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c index 26185d72767..0271742abb8 100644 --- a/kernel/trace/trace_sched_wakeup.c +++ b/kernel/trace/trace_sched_wakeup.c @@ -28,8 +28,8 @@ static int wakeup_current_cpu; static unsigned wakeup_prio = -1; static int wakeup_rt; -static raw_spinlock_t wakeup_lock = - (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED; +static arch_spinlock_t wakeup_lock = + (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED; static void __wakeup_reset(struct trace_array *tr); @@ -143,7 +143,7 @@ probe_wakeup_sched_switch(struct rq *rq, struct task_struct *prev, goto out; local_irq_save(flags); - __raw_spin_lock(&wakeup_lock); + arch_spin_lock(&wakeup_lock); /* We could race with grabbing wakeup_lock */ if (unlikely(!tracer_enabled || next != wakeup_task)) @@ -169,7 +169,7 @@ probe_wakeup_sched_switch(struct rq *rq, struct task_struct *prev, out_unlock: __wakeup_reset(wakeup_trace); - __raw_spin_unlock(&wakeup_lock); + arch_spin_unlock(&wakeup_lock); local_irq_restore(flags); out: atomic_dec(&wakeup_trace->data[cpu]->disabled); @@ -193,9 +193,9 @@ static void wakeup_reset(struct trace_array *tr) tracing_reset_online_cpus(tr); local_irq_save(flags); - __raw_spin_lock(&wakeup_lock); + arch_spin_lock(&wakeup_lock); __wakeup_reset(tr); - __raw_spin_unlock(&wakeup_lock); + arch_spin_unlock(&wakeup_lock); local_irq_restore(flags); } @@ -225,7 +225,7 @@ probe_wakeup(struct rq *rq, struct task_struct *p, int success) goto out; /* interrupts should be off from try_to_wake_up */ - __raw_spin_lock(&wakeup_lock); + arch_spin_lock(&wakeup_lock); /* check for races. 
*/ if (!tracer_enabled || p->prio >= wakeup_prio) @@ -255,7 +255,7 @@ probe_wakeup(struct rq *rq, struct task_struct *p, int success) trace_function(wakeup_trace, CALLER_ADDR1, CALLER_ADDR2, flags, pc); out_locked: - __raw_spin_unlock(&wakeup_lock); + arch_spin_unlock(&wakeup_lock); out: atomic_dec(&wakeup_trace->data[cpu]->disabled); } diff --git a/kernel/trace/trace_selftest.c b/kernel/trace/trace_selftest.c index dc98309e839..280fea470d6 100644 --- a/kernel/trace/trace_selftest.c +++ b/kernel/trace/trace_selftest.c @@ -67,7 +67,7 @@ static int trace_test_buffer(struct trace_array *tr, unsigned long *count) /* Don't allow flipping of max traces now */ local_irq_save(flags); - __raw_spin_lock(&ftrace_max_lock); + arch_spin_lock(&ftrace_max_lock); cnt = ring_buffer_entries(tr->buffer); @@ -85,7 +85,7 @@ static int trace_test_buffer(struct trace_array *tr, unsigned long *count) break; } tracing_on(); - __raw_spin_unlock(&ftrace_max_lock); + arch_spin_unlock(&ftrace_max_lock); local_irq_restore(flags); if (count) diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c index 8504ac71e4e..678a5120ee3 100644 --- a/kernel/trace/trace_stack.c +++ b/kernel/trace/trace_stack.c @@ -27,8 +27,8 @@ static struct stack_trace max_stack_trace = { }; static unsigned long max_stack_size; -static raw_spinlock_t max_stack_lock = - (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED; +static arch_spinlock_t max_stack_lock = + (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED; static int stack_trace_disabled __read_mostly; static DEFINE_PER_CPU(int, trace_active); @@ -54,7 +54,7 @@ static inline void check_stack(void) return; local_irq_save(flags); - __raw_spin_lock(&max_stack_lock); + arch_spin_lock(&max_stack_lock); /* a race could have already updated it */ if (this_size <= max_stack_size) @@ -103,7 +103,7 @@ static inline void check_stack(void) } out: - __raw_spin_unlock(&max_stack_lock); + arch_spin_unlock(&max_stack_lock); local_irq_restore(flags); } @@ -171,9 +171,9 @@ stack_max_size_write(struct file *filp, const char __user *ubuf, return ret; local_irq_save(flags); - __raw_spin_lock(&max_stack_lock); + arch_spin_lock(&max_stack_lock); *ptr = val; - __raw_spin_unlock(&max_stack_lock); + arch_spin_unlock(&max_stack_lock); local_irq_restore(flags); return count; @@ -207,7 +207,7 @@ t_next(struct seq_file *m, void *v, loff_t *pos) static void *t_start(struct seq_file *m, loff_t *pos) { local_irq_disable(); - __raw_spin_lock(&max_stack_lock); + arch_spin_lock(&max_stack_lock); if (*pos == 0) return SEQ_START_TOKEN; @@ -217,7 +217,7 @@ static void *t_start(struct seq_file *m, loff_t *pos) static void t_stop(struct seq_file *m, void *p) { - __raw_spin_unlock(&max_stack_lock); + arch_spin_unlock(&max_stack_lock); local_irq_enable(); } diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index 2f22cf4576d..8cf9938dd14 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -575,7 +575,7 @@ config DEBUG_BUGVERBOSE depends on BUG depends on ARM || AVR32 || M32R || M68K || SPARC32 || SPARC64 || \ FRV || SUPERH || GENERIC_BUG || BLACKFIN || MN10300 - default !EMBEDDED + default y help Say Y here to make BUG() panics output the file name and line number of the BUG call as well as the EIP and oops trace. 
This aids diff --git a/lib/argv_split.c b/lib/argv_split.c index 5205a8dae5b..4b1b083f219 100644 --- a/lib/argv_split.c +++ b/lib/argv_split.c @@ -4,17 +4,10 @@ #include <linux/kernel.h> #include <linux/ctype.h> +#include <linux/string.h> #include <linux/slab.h> #include <linux/module.h> -static const char *skip_sep(const char *cp) -{ - while (*cp && isspace(*cp)) - cp++; - - return cp; -} - static const char *skip_arg(const char *cp) { while (*cp && !isspace(*cp)) @@ -28,7 +21,7 @@ static int count_argc(const char *str) int count = 0; while (*str) { - str = skip_sep(str); + str = skip_spaces(str); if (*str) { count++; str = skip_arg(str); @@ -82,7 +75,7 @@ char **argv_split(gfp_t gfp, const char *str, int *argcp) argvp = argv; while (*str) { - str = skip_sep(str); + str = skip_spaces(str); if (*str) { const char *p = str; diff --git a/lib/crc32.c b/lib/crc32.c index 49d1c9e3ce3..02e3b31b3a7 100644 --- a/lib/crc32.c +++ b/lib/crc32.c @@ -42,6 +42,48 @@ MODULE_AUTHOR("Matt Domsch <Matt_Domsch@dell.com>"); MODULE_DESCRIPTION("Ethernet CRC32 calculations"); MODULE_LICENSE("GPL"); +#if CRC_LE_BITS == 8 || CRC_BE_BITS == 8 + +static inline u32 +crc32_body(u32 crc, unsigned char const *buf, size_t len, const u32 *tab) +{ +# ifdef __LITTLE_ENDIAN +# define DO_CRC(x) crc = tab[(crc ^ (x)) & 255 ] ^ (crc >> 8) +# else +# define DO_CRC(x) crc = tab[((crc >> 24) ^ (x)) & 255] ^ (crc << 8) +# endif + const u32 *b = (const u32 *)buf; + size_t rem_len; + + /* Align it */ + if (unlikely((long)b & 3 && len)) { + u8 *p = (u8 *)b; + do { + DO_CRC(*p++); + } while ((--len) && ((long)p)&3); + b = (u32 *)p; + } + rem_len = len & 3; + /* load data 32 bits wide, xor data 32 bits wide. */ + len = len >> 2; + for (--b; len; --len) { + crc ^= *++b; /* use pre increment for speed */ + DO_CRC(0); + DO_CRC(0); + DO_CRC(0); + DO_CRC(0); + } + len = rem_len; + /* And the last few bytes */ + if (len) { + u8 *p = (u8 *)(b + 1) - 1; + do { + DO_CRC(*++p); /* use pre increment for speed */ + } while (--len); + } + return crc; +} +#endif /** * crc32_le() - Calculate bitwise little-endian Ethernet AUTODIN II CRC32 * @crc: seed value for computation. ~0 for Ethernet, sometimes 0 for @@ -72,48 +114,10 @@ u32 __pure crc32_le(u32 crc, unsigned char const *p, size_t len) u32 __pure crc32_le(u32 crc, unsigned char const *p, size_t len) { # if CRC_LE_BITS == 8 - const u32 *b =(u32 *)p; const u32 *tab = crc32table_le; -# ifdef __LITTLE_ENDIAN -# define DO_CRC(x) crc = tab[ (crc ^ (x)) & 255 ] ^ (crc>>8) -# else -# define DO_CRC(x) crc = tab[ ((crc >> 24) ^ (x)) & 255] ^ (crc<<8) -# endif - crc = __cpu_to_le32(crc); - /* Align it */ - if(unlikely(((long)b)&3 && len)){ - do { - u8 *p = (u8 *)b; - DO_CRC(*p++); - b = (void *)p; - } while ((--len) && ((long)b)&3 ); - } - if(likely(len >= 4)){ - /* load data 32 bits wide, xor data 32 bits wide. 
*/ - size_t save_len = len & 3; - len = len >> 2; - --b; /* use pre increment below(*++b) for speed */ - do { - crc ^= *++b; - DO_CRC(0); - DO_CRC(0); - DO_CRC(0); - DO_CRC(0); - } while (--len); - b++; /* point to next byte(s) */ - len = save_len; - } - /* And the last few bytes */ - if(len){ - do { - u8 *p = (u8 *)b; - DO_CRC(*p++); - b = (void *)p; - } while (--len); - } - + crc = crc32_body(crc, p, len, tab); return __le32_to_cpu(crc); #undef ENDIAN_SHIFT #undef DO_CRC @@ -170,47 +174,10 @@ u32 __pure crc32_be(u32 crc, unsigned char const *p, size_t len) u32 __pure crc32_be(u32 crc, unsigned char const *p, size_t len) { # if CRC_BE_BITS == 8 - const u32 *b =(u32 *)p; const u32 *tab = crc32table_be; -# ifdef __LITTLE_ENDIAN -# define DO_CRC(x) crc = tab[ (crc ^ (x)) & 255 ] ^ (crc>>8) -# else -# define DO_CRC(x) crc = tab[ ((crc >> 24) ^ (x)) & 255] ^ (crc<<8) -# endif - crc = __cpu_to_be32(crc); - /* Align it */ - if(unlikely(((long)b)&3 && len)){ - do { - u8 *p = (u8 *)b; - DO_CRC(*p++); - b = (u32 *)p; - } while ((--len) && ((long)b)&3 ); - } - if(likely(len >= 4)){ - /* load data 32 bits wide, xor data 32 bits wide. */ - size_t save_len = len & 3; - len = len >> 2; - --b; /* use pre increment below(*++b) for speed */ - do { - crc ^= *++b; - DO_CRC(0); - DO_CRC(0); - DO_CRC(0); - DO_CRC(0); - } while (--len); - b++; /* point to next byte(s) */ - len = save_len; - } - /* And the last few bytes */ - if(len){ - do { - u8 *p = (u8 *)b; - DO_CRC(*p++); - b = (void *)p; - } while (--len); - } + crc = crc32_body(crc, p, len, tab); return __be32_to_cpu(crc); #undef ENDIAN_SHIFT #undef DO_CRC diff --git a/lib/ctype.c b/lib/ctype.c index d02ace14a32..26baa620e95 100644 --- a/lib/ctype.c +++ b/lib/ctype.c @@ -7,30 +7,30 @@ #include <linux/ctype.h> #include <linux/module.h> -unsigned char _ctype[] = { -_C,_C,_C,_C,_C,_C,_C,_C, /* 0-7 */ -_C,_C|_S,_C|_S,_C|_S,_C|_S,_C|_S,_C,_C, /* 8-15 */ -_C,_C,_C,_C,_C,_C,_C,_C, /* 16-23 */ -_C,_C,_C,_C,_C,_C,_C,_C, /* 24-31 */ -_S|_SP,_P,_P,_P,_P,_P,_P,_P, /* 32-39 */ -_P,_P,_P,_P,_P,_P,_P,_P, /* 40-47 */ -_D,_D,_D,_D,_D,_D,_D,_D, /* 48-55 */ -_D,_D,_P,_P,_P,_P,_P,_P, /* 56-63 */ -_P,_U|_X,_U|_X,_U|_X,_U|_X,_U|_X,_U|_X,_U, /* 64-71 */ -_U,_U,_U,_U,_U,_U,_U,_U, /* 72-79 */ -_U,_U,_U,_U,_U,_U,_U,_U, /* 80-87 */ -_U,_U,_U,_P,_P,_P,_P,_P, /* 88-95 */ -_P,_L|_X,_L|_X,_L|_X,_L|_X,_L|_X,_L|_X,_L, /* 96-103 */ -_L,_L,_L,_L,_L,_L,_L,_L, /* 104-111 */ -_L,_L,_L,_L,_L,_L,_L,_L, /* 112-119 */ -_L,_L,_L,_P,_P,_P,_P,_C, /* 120-127 */ -0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 128-143 */ -0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 144-159 */ -_S|_SP,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P, /* 160-175 */ -_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P, /* 176-191 */ -_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U, /* 192-207 */ -_U,_U,_U,_U,_U,_U,_U,_P,_U,_U,_U,_U,_U,_U,_U,_L, /* 208-223 */ -_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L, /* 224-239 */ -_L,_L,_L,_L,_L,_L,_L,_P,_L,_L,_L,_L,_L,_L,_L,_L}; /* 240-255 */ +const unsigned char _ctype[] = { +_C,_C,_C,_C,_C,_C,_C,_C, /* 0-7 */ +_C,_C|_S,_C|_S,_C|_S,_C|_S,_C|_S,_C,_C, /* 8-15 */ +_C,_C,_C,_C,_C,_C,_C,_C, /* 16-23 */ +_C,_C,_C,_C,_C,_C,_C,_C, /* 24-31 */ +_S|_SP,_P,_P,_P,_P,_P,_P,_P, /* 32-39 */ +_P,_P,_P,_P,_P,_P,_P,_P, /* 40-47 */ +_D,_D,_D,_D,_D,_D,_D,_D, /* 48-55 */ +_D,_D,_P,_P,_P,_P,_P,_P, /* 56-63 */ +_P,_U|_X,_U|_X,_U|_X,_U|_X,_U|_X,_U|_X,_U, /* 64-71 */ +_U,_U,_U,_U,_U,_U,_U,_U, /* 72-79 */ +_U,_U,_U,_U,_U,_U,_U,_U, /* 80-87 */ +_U,_U,_U,_P,_P,_P,_P,_P, /* 88-95 */ +_P,_L|_X,_L|_X,_L|_X,_L|_X,_L|_X,_L|_X,_L, /* 
96-103 */ +_L,_L,_L,_L,_L,_L,_L,_L, /* 104-111 */ +_L,_L,_L,_L,_L,_L,_L,_L, /* 112-119 */ +_L,_L,_L,_P,_P,_P,_P,_C, /* 120-127 */ +0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 128-143 */ +0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 144-159 */ +_S|_SP,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P, /* 160-175 */ +_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P, /* 176-191 */ +_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U, /* 192-207 */ +_U,_U,_U,_U,_U,_U,_U,_P,_U,_U,_U,_U,_U,_U,_U,_L, /* 208-223 */ +_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L, /* 224-239 */ +_L,_L,_L,_L,_L,_L,_L,_P,_L,_L,_L,_L,_L,_L,_L,_L}; /* 240-255 */ EXPORT_SYMBOL(_ctype); diff --git a/lib/debugobjects.c b/lib/debugobjects.c index eae56fddfa3..a9a8996d286 100644 --- a/lib/debugobjects.c +++ b/lib/debugobjects.c @@ -26,14 +26,14 @@ struct debug_bucket { struct hlist_head list; - spinlock_t lock; + raw_spinlock_t lock; }; static struct debug_bucket obj_hash[ODEBUG_HASH_SIZE]; static struct debug_obj obj_static_pool[ODEBUG_POOL_SIZE] __initdata; -static DEFINE_SPINLOCK(pool_lock); +static DEFINE_RAW_SPINLOCK(pool_lock); static HLIST_HEAD(obj_pool); @@ -96,10 +96,10 @@ static int fill_pool(void) if (!new) return obj_pool_free; - spin_lock_irqsave(&pool_lock, flags); + raw_spin_lock_irqsave(&pool_lock, flags); hlist_add_head(&new->node, &obj_pool); obj_pool_free++; - spin_unlock_irqrestore(&pool_lock, flags); + raw_spin_unlock_irqrestore(&pool_lock, flags); } return obj_pool_free; } @@ -133,7 +133,7 @@ alloc_object(void *addr, struct debug_bucket *b, struct debug_obj_descr *descr) { struct debug_obj *obj = NULL; - spin_lock(&pool_lock); + raw_spin_lock(&pool_lock); if (obj_pool.first) { obj = hlist_entry(obj_pool.first, typeof(*obj), node); @@ -152,7 +152,7 @@ alloc_object(void *addr, struct debug_bucket *b, struct debug_obj_descr *descr) if (obj_pool_free < obj_pool_min_free) obj_pool_min_free = obj_pool_free; } - spin_unlock(&pool_lock); + raw_spin_unlock(&pool_lock); return obj; } @@ -165,7 +165,7 @@ static void free_obj_work(struct work_struct *work) struct debug_obj *obj; unsigned long flags; - spin_lock_irqsave(&pool_lock, flags); + raw_spin_lock_irqsave(&pool_lock, flags); while (obj_pool_free > ODEBUG_POOL_SIZE) { obj = hlist_entry(obj_pool.first, typeof(*obj), node); hlist_del(&obj->node); @@ -174,11 +174,11 @@ static void free_obj_work(struct work_struct *work) * We release pool_lock across kmem_cache_free() to * avoid contention on pool_lock. */ - spin_unlock_irqrestore(&pool_lock, flags); + raw_spin_unlock_irqrestore(&pool_lock, flags); kmem_cache_free(obj_cache, obj); - spin_lock_irqsave(&pool_lock, flags); + raw_spin_lock_irqsave(&pool_lock, flags); } - spin_unlock_irqrestore(&pool_lock, flags); + raw_spin_unlock_irqrestore(&pool_lock, flags); } /* @@ -190,7 +190,7 @@ static void free_object(struct debug_obj *obj) unsigned long flags; int sched = 0; - spin_lock_irqsave(&pool_lock, flags); + raw_spin_lock_irqsave(&pool_lock, flags); /* * schedule work when the pool is filled and the cache is * initialized: @@ -200,7 +200,7 @@ static void free_object(struct debug_obj *obj) hlist_add_head(&obj->node, &obj_pool); obj_pool_free++; obj_pool_used--; - spin_unlock_irqrestore(&pool_lock, flags); + raw_spin_unlock_irqrestore(&pool_lock, flags); if (sched) schedule_work(&debug_obj_work); } @@ -221,9 +221,9 @@ static void debug_objects_oom(void) printk(KERN_WARNING "ODEBUG: Out of memory. 
ODEBUG disabled\n"); for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) { - spin_lock_irqsave(&db->lock, flags); + raw_spin_lock_irqsave(&db->lock, flags); hlist_move_list(&db->list, &freelist); - spin_unlock_irqrestore(&db->lock, flags); + raw_spin_unlock_irqrestore(&db->lock, flags); /* Now free them */ hlist_for_each_entry_safe(obj, node, tmp, &freelist, node) { @@ -303,14 +303,14 @@ __debug_object_init(void *addr, struct debug_obj_descr *descr, int onstack) db = get_bucket((unsigned long) addr); - spin_lock_irqsave(&db->lock, flags); + raw_spin_lock_irqsave(&db->lock, flags); obj = lookup_object(addr, db); if (!obj) { obj = alloc_object(addr, db, descr); if (!obj) { debug_objects_enabled = 0; - spin_unlock_irqrestore(&db->lock, flags); + raw_spin_unlock_irqrestore(&db->lock, flags); debug_objects_oom(); return; } @@ -327,7 +327,7 @@ __debug_object_init(void *addr, struct debug_obj_descr *descr, int onstack) case ODEBUG_STATE_ACTIVE: debug_print_object(obj, "init"); state = obj->state; - spin_unlock_irqrestore(&db->lock, flags); + raw_spin_unlock_irqrestore(&db->lock, flags); debug_object_fixup(descr->fixup_init, addr, state); return; @@ -338,7 +338,7 @@ __debug_object_init(void *addr, struct debug_obj_descr *descr, int onstack) break; } - spin_unlock_irqrestore(&db->lock, flags); + raw_spin_unlock_irqrestore(&db->lock, flags); } /** @@ -385,7 +385,7 @@ void debug_object_activate(void *addr, struct debug_obj_descr *descr) db = get_bucket((unsigned long) addr); - spin_lock_irqsave(&db->lock, flags); + raw_spin_lock_irqsave(&db->lock, flags); obj = lookup_object(addr, db); if (obj) { @@ -398,7 +398,7 @@ void debug_object_activate(void *addr, struct debug_obj_descr *descr) case ODEBUG_STATE_ACTIVE: debug_print_object(obj, "activate"); state = obj->state; - spin_unlock_irqrestore(&db->lock, flags); + raw_spin_unlock_irqrestore(&db->lock, flags); debug_object_fixup(descr->fixup_activate, addr, state); return; @@ -408,11 +408,11 @@ void debug_object_activate(void *addr, struct debug_obj_descr *descr) default: break; } - spin_unlock_irqrestore(&db->lock, flags); + raw_spin_unlock_irqrestore(&db->lock, flags); return; } - spin_unlock_irqrestore(&db->lock, flags); + raw_spin_unlock_irqrestore(&db->lock, flags); /* * This happens when a static object is activated. 
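The debugobjects changes above convert both the per-bucket lock and pool_lock from spinlock_t to raw_spinlock_t and switch every acquire/release to the raw_spin_* variants. A minimal, hypothetical sketch of the resulting pattern follows; the structure and function names are illustrative, only the lock type and the APIs come from the hunks above.

    #include <linux/spinlock.h>
    #include <linux/list.h>

    /* Illustrative bucket mirroring the debugobjects conversion: the
     * hash-bucket lock is a raw_spinlock_t, taken with irqs disabled. */
    struct demo_bucket {
        struct hlist_head list;
        raw_spinlock_t lock;
    };

    static void demo_drain_bucket(struct demo_bucket *b, struct hlist_head *dst)
    {
        unsigned long flags;

        raw_spin_lock_irqsave(&b->lock, flags);
        hlist_move_list(&b->list, dst);
        raw_spin_unlock_irqrestore(&b->lock, flags);
    }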
We * let the type specific code decide whether this is @@ -438,7 +438,7 @@ void debug_object_deactivate(void *addr, struct debug_obj_descr *descr) db = get_bucket((unsigned long) addr); - spin_lock_irqsave(&db->lock, flags); + raw_spin_lock_irqsave(&db->lock, flags); obj = lookup_object(addr, db); if (obj) { @@ -463,7 +463,7 @@ void debug_object_deactivate(void *addr, struct debug_obj_descr *descr) debug_print_object(&o, "deactivate"); } - spin_unlock_irqrestore(&db->lock, flags); + raw_spin_unlock_irqrestore(&db->lock, flags); } /** @@ -483,7 +483,7 @@ void debug_object_destroy(void *addr, struct debug_obj_descr *descr) db = get_bucket((unsigned long) addr); - spin_lock_irqsave(&db->lock, flags); + raw_spin_lock_irqsave(&db->lock, flags); obj = lookup_object(addr, db); if (!obj) @@ -498,7 +498,7 @@ void debug_object_destroy(void *addr, struct debug_obj_descr *descr) case ODEBUG_STATE_ACTIVE: debug_print_object(obj, "destroy"); state = obj->state; - spin_unlock_irqrestore(&db->lock, flags); + raw_spin_unlock_irqrestore(&db->lock, flags); debug_object_fixup(descr->fixup_destroy, addr, state); return; @@ -509,7 +509,7 @@ void debug_object_destroy(void *addr, struct debug_obj_descr *descr) break; } out_unlock: - spin_unlock_irqrestore(&db->lock, flags); + raw_spin_unlock_irqrestore(&db->lock, flags); } /** @@ -529,7 +529,7 @@ void debug_object_free(void *addr, struct debug_obj_descr *descr) db = get_bucket((unsigned long) addr); - spin_lock_irqsave(&db->lock, flags); + raw_spin_lock_irqsave(&db->lock, flags); obj = lookup_object(addr, db); if (!obj) @@ -539,17 +539,17 @@ void debug_object_free(void *addr, struct debug_obj_descr *descr) case ODEBUG_STATE_ACTIVE: debug_print_object(obj, "free"); state = obj->state; - spin_unlock_irqrestore(&db->lock, flags); + raw_spin_unlock_irqrestore(&db->lock, flags); debug_object_fixup(descr->fixup_free, addr, state); return; default: hlist_del(&obj->node); - spin_unlock_irqrestore(&db->lock, flags); + raw_spin_unlock_irqrestore(&db->lock, flags); free_object(obj); return; } out_unlock: - spin_unlock_irqrestore(&db->lock, flags); + raw_spin_unlock_irqrestore(&db->lock, flags); } #ifdef CONFIG_DEBUG_OBJECTS_FREE @@ -575,7 +575,7 @@ static void __debug_check_no_obj_freed(const void *address, unsigned long size) repeat: cnt = 0; - spin_lock_irqsave(&db->lock, flags); + raw_spin_lock_irqsave(&db->lock, flags); hlist_for_each_entry_safe(obj, node, tmp, &db->list, node) { cnt++; oaddr = (unsigned long) obj->object; @@ -587,7 +587,7 @@ repeat: debug_print_object(obj, "free"); descr = obj->descr; state = obj->state; - spin_unlock_irqrestore(&db->lock, flags); + raw_spin_unlock_irqrestore(&db->lock, flags); debug_object_fixup(descr->fixup_free, (void *) oaddr, state); goto repeat; @@ -597,7 +597,7 @@ repeat: break; } } - spin_unlock_irqrestore(&db->lock, flags); + raw_spin_unlock_irqrestore(&db->lock, flags); /* Now free them */ hlist_for_each_entry_safe(obj, node, tmp, &freelist, node) { @@ -783,7 +783,7 @@ check_results(void *addr, enum debug_obj_state state, int fixups, int warnings) db = get_bucket((unsigned long) addr); - spin_lock_irqsave(&db->lock, flags); + raw_spin_lock_irqsave(&db->lock, flags); obj = lookup_object(addr, db); if (!obj && state != ODEBUG_STATE_NONE) { @@ -807,7 +807,7 @@ check_results(void *addr, enum debug_obj_state state, int fixups, int warnings) } res = 0; out: - spin_unlock_irqrestore(&db->lock, flags); + raw_spin_unlock_irqrestore(&db->lock, flags); if (res) debug_objects_enabled = 0; return res; @@ -907,7 +907,7 @@ void __init 
debug_objects_early_init(void) int i; for (i = 0; i < ODEBUG_HASH_SIZE; i++) - spin_lock_init(&obj_hash[i].lock); + raw_spin_lock_init(&obj_hash[i].lock); for (i = 0; i < ODEBUG_POOL_SIZE; i++) hlist_add_head(&obj_static_pool[i].node, &obj_pool); diff --git a/lib/dynamic_debug.c b/lib/dynamic_debug.c index e22c148e4b7..f9350291598 100644 --- a/lib/dynamic_debug.c +++ b/lib/dynamic_debug.c @@ -21,6 +21,7 @@ #include <linux/list.h> #include <linux/sysctl.h> #include <linux/ctype.h> +#include <linux/string.h> #include <linux/uaccess.h> #include <linux/dynamic_debug.h> #include <linux/debugfs.h> @@ -209,8 +210,7 @@ static int ddebug_tokenize(char *buf, char *words[], int maxwords) char *end; /* Skip leading whitespace */ - while (*buf && isspace(*buf)) - buf++; + buf = skip_spaces(buf); if (!*buf) break; /* oh, it was trailing whitespace */ diff --git a/lib/kernel_lock.c b/lib/kernel_lock.c index 5526b46aba9..b135d04aa48 100644 --- a/lib/kernel_lock.c +++ b/lib/kernel_lock.c @@ -23,7 +23,7 @@ * * Don't use in new code. */ -static __cacheline_aligned_in_smp DEFINE_SPINLOCK(kernel_flag); +static __cacheline_aligned_in_smp DEFINE_RAW_SPINLOCK(kernel_flag); /* @@ -36,12 +36,12 @@ static __cacheline_aligned_in_smp DEFINE_SPINLOCK(kernel_flag); * If it successfully gets the lock, it should increment * the preemption count like any spinlock does. * - * (This works on UP too - _raw_spin_trylock will never + * (This works on UP too - do_raw_spin_trylock will never * return false in that case) */ int __lockfunc __reacquire_kernel_lock(void) { - while (!_raw_spin_trylock(&kernel_flag)) { + while (!do_raw_spin_trylock(&kernel_flag)) { if (need_resched()) return -EAGAIN; cpu_relax(); @@ -52,27 +52,27 @@ int __lockfunc __reacquire_kernel_lock(void) void __lockfunc __release_kernel_lock(void) { - _raw_spin_unlock(&kernel_flag); + do_raw_spin_unlock(&kernel_flag); preempt_enable_no_resched(); } /* * These are the BKL spinlocks - we try to be polite about preemption. * If SMP is not on (ie UP preemption), this all goes away because the - * _raw_spin_trylock() will always succeed. + * do_raw_spin_trylock() will always succeed. */ #ifdef CONFIG_PREEMPT static inline void __lock_kernel(void) { preempt_disable(); - if (unlikely(!_raw_spin_trylock(&kernel_flag))) { + if (unlikely(!do_raw_spin_trylock(&kernel_flag))) { /* * If preemption was disabled even before this * was called, there's nothing we can be polite * about - just spin. 
*/ if (preempt_count() > 1) { - _raw_spin_lock(&kernel_flag); + do_raw_spin_lock(&kernel_flag); return; } @@ -82,10 +82,10 @@ static inline void __lock_kernel(void) */ do { preempt_enable(); - while (spin_is_locked(&kernel_flag)) + while (raw_spin_is_locked(&kernel_flag)) cpu_relax(); preempt_disable(); - } while (!_raw_spin_trylock(&kernel_flag)); + } while (!do_raw_spin_trylock(&kernel_flag)); } } @@ -96,7 +96,7 @@ static inline void __lock_kernel(void) */ static inline void __lock_kernel(void) { - _raw_spin_lock(&kernel_flag); + do_raw_spin_lock(&kernel_flag); } #endif @@ -106,7 +106,7 @@ static inline void __unlock_kernel(void) * the BKL is not covered by lockdep, so we open-code the * unlocking sequence (and thus avoid the dep-chain ops): */ - _raw_spin_unlock(&kernel_flag); + do_raw_spin_unlock(&kernel_flag); preempt_enable(); } diff --git a/lib/parser.c b/lib/parser.c index b00d02059a5..fb34977246b 100644 --- a/lib/parser.c +++ b/lib/parser.c @@ -56,13 +56,16 @@ static int match_one(char *s, const char *p, substring_t args[]) args[argc].from = s; switch (*p++) { - case 's': - if (strlen(s) == 0) + case 's': { + size_t str_len = strlen(s); + + if (str_len == 0) return 0; - else if (len == -1 || len > strlen(s)) - len = strlen(s); + if (len == -1 || len > str_len) + len = str_len; args[argc].to = s + len; break; + } case 'd': simple_strtol(s, &args[argc].to, 0); goto num; diff --git a/lib/plist.c b/lib/plist.c index d6c64a824e1..1471988d919 100644 --- a/lib/plist.c +++ b/lib/plist.c @@ -54,9 +54,11 @@ static void plist_check_list(struct list_head *top) static void plist_check_head(struct plist_head *head) { - WARN_ON(!head->lock); - if (head->lock) - WARN_ON_SMP(!spin_is_locked(head->lock)); + WARN_ON(!head->rawlock && !head->spinlock); + if (head->rawlock) + WARN_ON_SMP(!raw_spin_is_locked(head->rawlock)); + if (head->spinlock) + WARN_ON_SMP(!spin_is_locked(head->spinlock)); plist_check_list(&head->prio_list); plist_check_list(&head->node_list); } diff --git a/lib/rwsem-spinlock.c b/lib/rwsem-spinlock.c index 9df3ca56db1..ccf95bff798 100644 --- a/lib/rwsem-spinlock.c +++ b/lib/rwsem-spinlock.c @@ -17,6 +17,19 @@ struct rwsem_waiter { #define RWSEM_WAITING_FOR_WRITE 0x00000002 }; +int rwsem_is_locked(struct rw_semaphore *sem) +{ + int ret = 1; + unsigned long flags; + + if (spin_trylock_irqsave(&sem->wait_lock, flags)) { + ret = (sem->activity != 0); + spin_unlock_irqrestore(&sem->wait_lock, flags); + } + return ret; +} +EXPORT_SYMBOL(rwsem_is_locked); + /* * initialise the semaphore */ @@ -34,6 +47,7 @@ void __init_rwsem(struct rw_semaphore *sem, const char *name, spin_lock_init(&sem->wait_lock); INIT_LIST_HEAD(&sem->wait_list); } +EXPORT_SYMBOL(__init_rwsem); /* * handle the lock release when processes blocked on it that can now run @@ -305,12 +319,3 @@ void __downgrade_write(struct rw_semaphore *sem) spin_unlock_irqrestore(&sem->wait_lock, flags); } -EXPORT_SYMBOL(__init_rwsem); -EXPORT_SYMBOL(__down_read); -EXPORT_SYMBOL(__down_read_trylock); -EXPORT_SYMBOL(__down_write_nested); -EXPORT_SYMBOL(__down_write); -EXPORT_SYMBOL(__down_write_trylock); -EXPORT_SYMBOL(__up_read); -EXPORT_SYMBOL(__up_write); -EXPORT_SYMBOL(__downgrade_write); diff --git a/lib/spinlock_debug.c b/lib/spinlock_debug.c index 9c4b0256490..4755b98b6df 100644 --- a/lib/spinlock_debug.c +++ b/lib/spinlock_debug.c @@ -13,8 +13,8 @@ #include <linux/delay.h> #include <linux/module.h> -void __spin_lock_init(spinlock_t *lock, const char *name, - struct lock_class_key *key) +void __raw_spin_lock_init(raw_spinlock_t 
*lock, const char *name, + struct lock_class_key *key) { #ifdef CONFIG_DEBUG_LOCK_ALLOC /* @@ -23,13 +23,13 @@ void __spin_lock_init(spinlock_t *lock, const char *name, debug_check_no_locks_freed((void *)lock, sizeof(*lock)); lockdep_init_map(&lock->dep_map, name, key, 0); #endif - lock->raw_lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED; + lock->raw_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED; lock->magic = SPINLOCK_MAGIC; lock->owner = SPINLOCK_OWNER_INIT; lock->owner_cpu = -1; } -EXPORT_SYMBOL(__spin_lock_init); +EXPORT_SYMBOL(__raw_spin_lock_init); void __rwlock_init(rwlock_t *lock, const char *name, struct lock_class_key *key) @@ -41,7 +41,7 @@ void __rwlock_init(rwlock_t *lock, const char *name, debug_check_no_locks_freed((void *)lock, sizeof(*lock)); lockdep_init_map(&lock->dep_map, name, key, 0); #endif - lock->raw_lock = (raw_rwlock_t) __RAW_RW_LOCK_UNLOCKED; + lock->raw_lock = (arch_rwlock_t) __ARCH_RW_LOCK_UNLOCKED; lock->magic = RWLOCK_MAGIC; lock->owner = SPINLOCK_OWNER_INIT; lock->owner_cpu = -1; @@ -49,7 +49,7 @@ void __rwlock_init(rwlock_t *lock, const char *name, EXPORT_SYMBOL(__rwlock_init); -static void spin_bug(spinlock_t *lock, const char *msg) +static void spin_bug(raw_spinlock_t *lock, const char *msg) { struct task_struct *owner = NULL; @@ -73,7 +73,7 @@ static void spin_bug(spinlock_t *lock, const char *msg) #define SPIN_BUG_ON(cond, lock, msg) if (unlikely(cond)) spin_bug(lock, msg) static inline void -debug_spin_lock_before(spinlock_t *lock) +debug_spin_lock_before(raw_spinlock_t *lock) { SPIN_BUG_ON(lock->magic != SPINLOCK_MAGIC, lock, "bad magic"); SPIN_BUG_ON(lock->owner == current, lock, "recursion"); @@ -81,16 +81,16 @@ debug_spin_lock_before(spinlock_t *lock) lock, "cpu recursion"); } -static inline void debug_spin_lock_after(spinlock_t *lock) +static inline void debug_spin_lock_after(raw_spinlock_t *lock) { lock->owner_cpu = raw_smp_processor_id(); lock->owner = current; } -static inline void debug_spin_unlock(spinlock_t *lock) +static inline void debug_spin_unlock(raw_spinlock_t *lock) { SPIN_BUG_ON(lock->magic != SPINLOCK_MAGIC, lock, "bad magic"); - SPIN_BUG_ON(!spin_is_locked(lock), lock, "already unlocked"); + SPIN_BUG_ON(!raw_spin_is_locked(lock), lock, "already unlocked"); SPIN_BUG_ON(lock->owner != current, lock, "wrong owner"); SPIN_BUG_ON(lock->owner_cpu != raw_smp_processor_id(), lock, "wrong CPU"); @@ -98,7 +98,7 @@ static inline void debug_spin_unlock(spinlock_t *lock) lock->owner_cpu = -1; } -static void __spin_lock_debug(spinlock_t *lock) +static void __spin_lock_debug(raw_spinlock_t *lock) { u64 i; u64 loops = loops_per_jiffy * HZ; @@ -106,7 +106,7 @@ static void __spin_lock_debug(spinlock_t *lock) for (;;) { for (i = 0; i < loops; i++) { - if (__raw_spin_trylock(&lock->raw_lock)) + if (arch_spin_trylock(&lock->raw_lock)) return; __delay(1); } @@ -125,17 +125,17 @@ static void __spin_lock_debug(spinlock_t *lock) } } -void _raw_spin_lock(spinlock_t *lock) +void do_raw_spin_lock(raw_spinlock_t *lock) { debug_spin_lock_before(lock); - if (unlikely(!__raw_spin_trylock(&lock->raw_lock))) + if (unlikely(!arch_spin_trylock(&lock->raw_lock))) __spin_lock_debug(lock); debug_spin_lock_after(lock); } -int _raw_spin_trylock(spinlock_t *lock) +int do_raw_spin_trylock(raw_spinlock_t *lock) { - int ret = __raw_spin_trylock(&lock->raw_lock); + int ret = arch_spin_trylock(&lock->raw_lock); if (ret) debug_spin_lock_after(lock); @@ -148,10 +148,10 @@ int _raw_spin_trylock(spinlock_t *lock) return ret; } -void _raw_spin_unlock(spinlock_t *lock) +void 
do_raw_spin_unlock(raw_spinlock_t *lock) { debug_spin_unlock(lock); - __raw_spin_unlock(&lock->raw_lock); + arch_spin_unlock(&lock->raw_lock); } static void rwlock_bug(rwlock_t *lock, const char *msg) @@ -176,7 +176,7 @@ static void __read_lock_debug(rwlock_t *lock) for (;;) { for (i = 0; i < loops; i++) { - if (__raw_read_trylock(&lock->raw_lock)) + if (arch_read_trylock(&lock->raw_lock)) return; __delay(1); } @@ -193,15 +193,15 @@ static void __read_lock_debug(rwlock_t *lock) } #endif -void _raw_read_lock(rwlock_t *lock) +void do_raw_read_lock(rwlock_t *lock) { RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic"); - __raw_read_lock(&lock->raw_lock); + arch_read_lock(&lock->raw_lock); } -int _raw_read_trylock(rwlock_t *lock) +int do_raw_read_trylock(rwlock_t *lock) { - int ret = __raw_read_trylock(&lock->raw_lock); + int ret = arch_read_trylock(&lock->raw_lock); #ifndef CONFIG_SMP /* @@ -212,10 +212,10 @@ int _raw_read_trylock(rwlock_t *lock) return ret; } -void _raw_read_unlock(rwlock_t *lock) +void do_raw_read_unlock(rwlock_t *lock) { RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic"); - __raw_read_unlock(&lock->raw_lock); + arch_read_unlock(&lock->raw_lock); } static inline void debug_write_lock_before(rwlock_t *lock) @@ -251,7 +251,7 @@ static void __write_lock_debug(rwlock_t *lock) for (;;) { for (i = 0; i < loops; i++) { - if (__raw_write_trylock(&lock->raw_lock)) + if (arch_write_trylock(&lock->raw_lock)) return; __delay(1); } @@ -268,16 +268,16 @@ static void __write_lock_debug(rwlock_t *lock) } #endif -void _raw_write_lock(rwlock_t *lock) +void do_raw_write_lock(rwlock_t *lock) { debug_write_lock_before(lock); - __raw_write_lock(&lock->raw_lock); + arch_write_lock(&lock->raw_lock); debug_write_lock_after(lock); } -int _raw_write_trylock(rwlock_t *lock) +int do_raw_write_trylock(rwlock_t *lock) { - int ret = __raw_write_trylock(&lock->raw_lock); + int ret = arch_write_trylock(&lock->raw_lock); if (ret) debug_write_lock_after(lock); @@ -290,8 +290,8 @@ int _raw_write_trylock(rwlock_t *lock) return ret; } -void _raw_write_unlock(rwlock_t *lock) +void do_raw_write_unlock(rwlock_t *lock) { debug_write_unlock(lock); - __raw_write_unlock(&lock->raw_lock); + arch_write_unlock(&lock->raw_lock); } diff --git a/lib/string.c b/lib/string.c index e96421ab9a9..afce96af3af 100644 --- a/lib/string.c +++ b/lib/string.c @@ -338,20 +338,34 @@ EXPORT_SYMBOL(strnchr); #endif /** - * strstrip - Removes leading and trailing whitespace from @s. + * skip_spaces - Removes leading whitespace from @s. + * @s: The string to be stripped. + * + * Returns a pointer to the first non-whitespace character in @s. + */ +char *skip_spaces(const char *str) +{ + while (isspace(*str)) + ++str; + return (char *)str; +} +EXPORT_SYMBOL(skip_spaces); + +/** + * strim - Removes leading and trailing whitespace from @s. * @s: The string to be stripped. * * Note that the first trailing whitespace is replaced with a %NUL-terminator * in the given string @s. Returns a pointer to the first non-whitespace * character in @s. 
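skip_spaces() is a new exported helper and strstrip() is renamed to strim(), with the leading-whitespace skip delegated to skip_spaces(). A standalone userspace mirror of the two helpers, with a small main() added purely for illustration, shows the contract described in the kernel-doc above.

    #include <ctype.h>
    #include <stdio.h>
    #include <string.h>

    /* Mirror of the new lib/string.c helper: return a pointer to the
     * first non-whitespace character in str. */
    static char *skip_spaces(const char *str)
    {
        while (isspace((unsigned char)*str))
            ++str;
        return (char *)str;
    }

    /* Mirror of strim() (formerly strstrip()): skip leading whitespace,
     * then cut trailing whitespace by writing the NUL one past the last
     * non-whitespace character. */
    static char *strim(char *s)
    {
        size_t size;
        char *end;

        s = skip_spaces(s);
        size = strlen(s);
        if (!size)
            return s;

        end = s + size - 1;
        while (end >= s && isspace((unsigned char)*end))
            end--;
        *(end + 1) = '\0';

        return s;
    }

    int main(void)
    {
        char buf[] = "   hello world  \n";

        printf("[%s]\n", strim(buf));   /* prints [hello world] */
        return 0;
    }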
*/ -char *strstrip(char *s) +char *strim(char *s) { size_t size; char *end; + s = skip_spaces(s); size = strlen(s); - if (!size) return s; @@ -360,12 +374,9 @@ char *strstrip(char *s) end--; *(end + 1) = '\0'; - while (*s && isspace(*s)) - s++; - return s; } -EXPORT_SYMBOL(strstrip); +EXPORT_SYMBOL(strim); #ifndef __HAVE_ARCH_STRLEN /** diff --git a/lib/vsprintf.c b/lib/vsprintf.c index 6438cd5599e..735343fc857 100644 --- a/lib/vsprintf.c +++ b/lib/vsprintf.c @@ -9,7 +9,7 @@ * Wirzenius wrote this portably, Torvalds fucked it up :-) */ -/* +/* * Fri Jul 13 2001 Crutcher Dunnavant <crutcher+kernel@datastacks.com> * - changed to provide snprintf and vsnprintf functions * So Feb 1 16:51:32 CET 2004 Juergen Quade <quade@hsnr.de> @@ -47,14 +47,14 @@ static unsigned int simple_guess_base(const char *cp) } /** - * simple_strtoul - convert a string to an unsigned long + * simple_strtoull - convert a string to an unsigned long long * @cp: The start of the string * @endp: A pointer to the end of the parsed string will be placed here * @base: The number base to use */ -unsigned long simple_strtoul(const char *cp, char **endp, unsigned int base) +unsigned long long simple_strtoull(const char *cp, char **endp, unsigned int base) { - unsigned long result = 0; + unsigned long long result = 0; if (!base) base = simple_guess_base(cp); @@ -71,58 +71,39 @@ unsigned long simple_strtoul(const char *cp, char **endp, unsigned int base) result = result * base + value; cp++; } - if (endp) *endp = (char *)cp; + return result; } -EXPORT_SYMBOL(simple_strtoul); +EXPORT_SYMBOL(simple_strtoull); /** - * simple_strtol - convert a string to a signed long + * simple_strtoul - convert a string to an unsigned long * @cp: The start of the string * @endp: A pointer to the end of the parsed string will be placed here * @base: The number base to use */ -long simple_strtol(const char *cp, char **endp, unsigned int base) +unsigned long simple_strtoul(const char *cp, char **endp, unsigned int base) { - if(*cp == '-') - return -simple_strtoul(cp + 1, endp, base); - return simple_strtoul(cp, endp, base); + return simple_strtoull(cp, endp, base); } -EXPORT_SYMBOL(simple_strtol); +EXPORT_SYMBOL(simple_strtoul); /** - * simple_strtoull - convert a string to an unsigned long long + * simple_strtol - convert a string to a signed long * @cp: The start of the string * @endp: A pointer to the end of the parsed string will be placed here * @base: The number base to use */ -unsigned long long simple_strtoull(const char *cp, char **endp, unsigned int base) +long simple_strtol(const char *cp, char **endp, unsigned int base) { - unsigned long long result = 0; - - if (!base) - base = simple_guess_base(cp); - - if (base == 16 && cp[0] == '0' && TOLOWER(cp[1]) == 'x') - cp += 2; - - while (isxdigit(*cp)) { - unsigned int value; - - value = isdigit(*cp) ? 
*cp - '0' : TOLOWER(*cp) - 'a' + 10; - if (value >= base) - break; - result = result * base + value; - cp++; - } + if (*cp == '-') + return -simple_strtoul(cp + 1, endp, base); - if (endp) - *endp = (char *)cp; - return result; + return simple_strtoul(cp, endp, base); } -EXPORT_SYMBOL(simple_strtoull); +EXPORT_SYMBOL(simple_strtol); /** * simple_strtoll - convert a string to a signed long long @@ -132,8 +113,9 @@ EXPORT_SYMBOL(simple_strtoull); */ long long simple_strtoll(const char *cp, char **endp, unsigned int base) { - if(*cp=='-') + if (*cp == '-') return -simple_strtoull(cp + 1, endp, base); + return simple_strtoull(cp, endp, base); } @@ -173,6 +155,7 @@ int strict_strtoul(const char *cp, unsigned int base, unsigned long *res) val = simple_strtoul(cp, &tail, base); if (tail == cp) return -EINVAL; + if ((*tail == '\0') || ((len == (size_t)(tail - cp) + 1) && (*tail == '\n'))) { *res = val; @@ -285,10 +268,11 @@ EXPORT_SYMBOL(strict_strtoll); static int skip_atoi(const char **s) { - int i=0; + int i = 0; while (isdigit(**s)) i = i*10 + *((*s)++) - '0'; + return i; } @@ -302,7 +286,7 @@ static int skip_atoi(const char **s) /* Formats correctly any integer in [0,99999]. * Outputs from one to five digits depending on input. * On i386 gcc 4.1.2 -O2: ~250 bytes of code. */ -static char* put_dec_trunc(char *buf, unsigned q) +static char *put_dec_trunc(char *buf, unsigned q) { unsigned d3, d2, d1, d0; d1 = (q>>4) & 0xf; @@ -331,14 +315,15 @@ static char* put_dec_trunc(char *buf, unsigned q) d3 = d3 - 10*q; *buf++ = d3 + '0'; /* next digit */ if (q != 0) - *buf++ = q + '0'; /* most sign. digit */ + *buf++ = q + '0'; /* most sign. digit */ } } } + return buf; } /* Same with if's removed. Always emits five digits */ -static char* put_dec_full(char *buf, unsigned q) +static char *put_dec_full(char *buf, unsigned q) { /* BTW, if q is in [0,9999], 8-bit ints will be enough, */ /* but anyway, gcc produces better code with full-sized ints */ @@ -347,14 +332,15 @@ static char* put_dec_full(char *buf, unsigned q) d2 = (q>>8) & 0xf; d3 = (q>>12); - /* Possible ways to approx. divide by 10 */ - /* gcc -O2 replaces multiply with shifts and adds */ - // (x * 0xcd) >> 11: 11001101 - shorter code than * 0x67 (on i386) - // (x * 0x67) >> 10: 1100111 - // (x * 0x34) >> 9: 110100 - same - // (x * 0x1a) >> 8: 11010 - same - // (x * 0x0d) >> 7: 1101 - same, shortest code (on i386) - + /* + * Possible ways to approx. 
divide by 10 + * gcc -O2 replaces multiply with shifts and adds + * (x * 0xcd) >> 11: 11001101 - shorter code than * 0x67 (on i386) + * (x * 0x67) >> 10: 1100111 + * (x * 0x34) >> 9: 110100 - same + * (x * 0x1a) >> 8: 11010 - same + * (x * 0x0d) >> 7: 1101 - same, shortest code (on i386) + */ d0 = 6*(d3 + d2 + d1) + (q & 0xf); q = (d0 * 0xcd) >> 11; d0 = d0 - 10*q; @@ -375,10 +361,11 @@ static char* put_dec_full(char *buf, unsigned q) d3 = d3 - 10*q; *buf++ = d3 + '0'; *buf++ = q + '0'; + return buf; } /* No inlining helps gcc to use registers better */ -static noinline char* put_dec(char *buf, unsigned long long num) +static noinline char *put_dec(char *buf, unsigned long long num) { while (1) { unsigned rem; @@ -448,9 +435,9 @@ static char *number(char *buf, char *end, unsigned long long num, spec.flags &= ~ZEROPAD; sign = 0; if (spec.flags & SIGN) { - if ((signed long long) num < 0) { + if ((signed long long)num < 0) { sign = '-'; - num = - (signed long long) num; + num = -(signed long long)num; spec.field_width--; } else if (spec.flags & PLUS) { sign = '+'; @@ -478,7 +465,9 @@ static char *number(char *buf, char *end, unsigned long long num, else if (spec.base != 10) { /* 8 or 16 */ int mask = spec.base - 1; int shift = 3; - if (spec.base == 16) shift = 4; + + if (spec.base == 16) + shift = 4; do { tmp[i++] = (digits[((unsigned char)num) & mask] | locase); num >>= shift; @@ -493,7 +482,7 @@ static char *number(char *buf, char *end, unsigned long long num, /* leading space padding */ spec.field_width -= spec.precision; if (!(spec.flags & (ZEROPAD+LEFT))) { - while(--spec.field_width >= 0) { + while (--spec.field_width >= 0) { if (buf < end) *buf = ' '; ++buf; @@ -543,15 +532,16 @@ static char *number(char *buf, char *end, unsigned long long num, *buf = ' '; ++buf; } + return buf; } -static char *string(char *buf, char *end, char *s, struct printf_spec spec) +static char *string(char *buf, char *end, const char *s, struct printf_spec spec) { int len, i; if ((unsigned long)s < PAGE_SIZE) - s = "<NULL>"; + s = "(null)"; len = strnlen(s, spec.precision); @@ -572,6 +562,7 @@ static char *string(char *buf, char *end, char *s, struct printf_spec spec) *buf = ' '; ++buf; } + return buf; } @@ -585,11 +576,13 @@ static char *symbol_string(char *buf, char *end, void *ptr, sprint_symbol(sym, value); else kallsyms_lookup(value, NULL, NULL, NULL, sym); + return string(buf, end, sym, spec); #else - spec.field_width = 2*sizeof(void *); + spec.field_width = 2 * sizeof(void *); spec.flags |= SPECIAL | SMALL | ZEROPAD; spec.base = 16; + return number(buf, end, value, spec); #endif } @@ -718,22 +711,19 @@ static char *ip4_string(char *p, const u8 *addr, bool leading_zeros) if (i < 3) *p++ = '.'; } - *p = '\0'; + return p; } static char *ip6_compressed_string(char *p, const char *addr) { - int i; - int j; - int range; + int i, j, range; unsigned char zerolength[8]; int longest = 1; int colonpos = -1; u16 word; - u8 hi; - u8 lo; + u8 hi, lo; bool needcolon = false; bool useIPv4; struct in6_addr in6; @@ -787,8 +777,9 @@ static char *ip6_compressed_string(char *p, const char *addr) p = pack_hex_byte(p, hi); else *p++ = hex_asc_lo(hi); + p = pack_hex_byte(p, lo); } - if (hi || lo > 0x0f) + else if (lo > 0x0f) p = pack_hex_byte(p, lo); else *p++ = hex_asc_lo(lo); @@ -800,22 +791,23 @@ static char *ip6_compressed_string(char *p, const char *addr) *p++ = ':'; p = ip4_string(p, &in6.s6_addr[12], false); } - *p = '\0'; + return p; } static char *ip6_string(char *p, const char *addr, const char *fmt) { int i; + for 
(i = 0; i < 8; i++) { p = pack_hex_byte(p, *addr++); p = pack_hex_byte(p, *addr++); if (fmt[0] == 'I' && i != 7) *p++ = ':'; } - *p = '\0'; + return p; } @@ -842,6 +834,52 @@ static char *ip4_addr_string(char *buf, char *end, const u8 *addr, return string(buf, end, ip4_addr, spec); } +static char *uuid_string(char *buf, char *end, const u8 *addr, + struct printf_spec spec, const char *fmt) +{ + char uuid[sizeof("xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx")]; + char *p = uuid; + int i; + static const u8 be[16] = {0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15}; + static const u8 le[16] = {3,2,1,0,5,4,7,6,8,9,10,11,12,13,14,15}; + const u8 *index = be; + bool uc = false; + + switch (*(++fmt)) { + case 'L': + uc = true; /* fall-through */ + case 'l': + index = le; + break; + case 'B': + uc = true; + break; + } + + for (i = 0; i < 16; i++) { + p = pack_hex_byte(p, addr[index[i]]); + switch (i) { + case 3: + case 5: + case 7: + case 9: + *p++ = '-'; + break; + } + } + + *p = 0; + + if (uc) { + p = uuid; + do { + *p = toupper(*p); + } while (*(++p)); + } + + return string(buf, end, uuid, spec); +} + /* * Show a '%p' thing. A kernel extension is that the '%p' is followed * by an extra set of alphanumeric characters that are extended format @@ -866,6 +904,18 @@ static char *ip4_addr_string(char *buf, char *end, const u8 *addr, * IPv4 uses dot-separated decimal with leading 0's (010.123.045.006) * - 'I6c' for IPv6 addresses printed as specified by * http://www.ietf.org/id/draft-kawamura-ipv6-text-representation-03.txt + * - 'U' For a 16 byte UUID/GUID, it prints the UUID/GUID in the form + * "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" + * Options for %pU are: + * b big endian lower case hex (default) + * B big endian UPPER case hex + * l little endian lower case hex + * L little endian UPPER case hex + * big endian output byte order is: + * [0][1][2][3]-[4][5]-[6][7]-[8][9]-[10][11][12][13][14][15] + * little endian output byte order is: + * [3][2][1][0]-[5][4]-[7][6]-[8][9]-[10][11][12][13][14][15] + * * Note: The difference between 'S' and 'F' is that on ia64 and ppc64 * function pointers are really function descriptors, which contain a * pointer to the real address. 
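The uuid_string() helper and the %pU documentation above define the output byte order through the be/le index tables. The userspace sketch below reproduces that formatting so the two byte orders can be compared outside the kernel (lower case only; the 'B'/'L' variants additionally upper-case the result). The sample UUID bytes and the format_uuid() name are arbitrary.

    #include <stdio.h>

    typedef unsigned char u8;

    /* Same index tables as uuid_string(): 'be' dumps bytes in memory
     * order, 'le' swaps the first three fields. */
    static const u8 be_index[16] = { 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15 };
    static const u8 le_index[16] = { 3,2,1,0,5,4,7,6,8,9,10,11,12,13,14,15 };

    static void format_uuid(char *out, const u8 *addr, const u8 *idx)
    {
        static const char hex[] = "0123456789abcdef";
        int i;

        for (i = 0; i < 16; i++) {
            u8 byte = addr[idx[i]];

            *out++ = hex[byte >> 4];
            *out++ = hex[byte & 0x0f];
            if (i == 3 || i == 5 || i == 7 || i == 9)
                *out++ = '-';
        }
        *out = '\0';
    }

    int main(void)
    {
        const u8 uuid[16] = { 0x12,0x34,0x56,0x78, 0x9a,0xbc, 0xde,0xf0,
                              0x11,0x22, 0x33,0x44,0x55,0x66,0x77,0x88 };
        char buf[37];   /* 32 hex digits + 4 dashes + NUL */

        format_uuid(buf, uuid, be_index);   /* what %pU / %pUb prints */
        printf("%s\n", buf);                /* 12345678-9abc-def0-1122-334455667788 */
        format_uuid(buf, uuid, le_index);   /* what %pUl prints */
        printf("%s\n", buf);                /* 78563412-bc9a-f0de-1122-334455667788 */
        return 0;
    }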
@@ -880,9 +930,9 @@ static char *pointer(const char *fmt, char *buf, char *end, void *ptr, case 'F': case 'f': ptr = dereference_function_descriptor(ptr); - case 's': /* Fallthrough */ case 'S': + case 's': return symbol_string(buf, end, ptr, spec, *fmt); case 'R': case 'r': @@ -906,6 +956,8 @@ static char *pointer(const char *fmt, char *buf, char *end, void *ptr, return ip4_addr_string(buf, end, ptr, spec, fmt); } break; + case 'U': + return uuid_string(buf, end, ptr, spec, fmt); } spec.flags |= SMALL; if (spec.field_width == -1) { @@ -1023,8 +1075,8 @@ precision: qualifier: /* get the conversion qualifier */ spec->qualifier = -1; - if (*fmt == 'h' || *fmt == 'l' || *fmt == 'L' || - *fmt == 'Z' || *fmt == 'z' || *fmt == 't') { + if (*fmt == 'h' || TOLOWER(*fmt) == 'l' || + TOLOWER(*fmt) == 'z' || *fmt == 't') { spec->qualifier = *fmt++; if (unlikely(spec->qualifier == *fmt)) { if (spec->qualifier == 'l') { @@ -1091,7 +1143,7 @@ qualifier: spec->type = FORMAT_TYPE_LONG; else spec->type = FORMAT_TYPE_ULONG; - } else if (spec->qualifier == 'Z' || spec->qualifier == 'z') { + } else if (TOLOWER(spec->qualifier) == 'z') { spec->type = FORMAT_TYPE_SIZE_T; } else if (spec->qualifier == 't') { spec->type = FORMAT_TYPE_PTRDIFF; @@ -1144,8 +1196,7 @@ qualifier: int vsnprintf(char *buf, size_t size, const char *fmt, va_list args) { unsigned long long num; - char *str, *end, c; - int read; + char *str, *end; struct printf_spec spec = {0}; /* Reject out-of-range values early. Large positive sizes are @@ -1164,8 +1215,7 @@ int vsnprintf(char *buf, size_t size, const char *fmt, va_list args) while (*fmt) { const char *old_fmt = fmt; - - read = format_decode(fmt, &spec); + int read = format_decode(fmt, &spec); fmt += read; @@ -1189,7 +1239,9 @@ int vsnprintf(char *buf, size_t size, const char *fmt, va_list args) spec.precision = va_arg(args, int); break; - case FORMAT_TYPE_CHAR: + case FORMAT_TYPE_CHAR: { + char c; + if (!(spec.flags & LEFT)) { while (--spec.field_width > 0) { if (str < end) @@ -1208,6 +1260,7 @@ int vsnprintf(char *buf, size_t size, const char *fmt, va_list args) ++str; } break; + } case FORMAT_TYPE_STR: str = string(str, end, va_arg(args, char *), spec); @@ -1238,8 +1291,7 @@ int vsnprintf(char *buf, size_t size, const char *fmt, va_list args) if (qualifier == 'l') { long *ip = va_arg(args, long *); *ip = (str - buf); - } else if (qualifier == 'Z' || - qualifier == 'z') { + } else if (TOLOWER(qualifier) == 'z') { size_t *ip = va_arg(args, size_t *); *ip = (str - buf); } else { @@ -1322,7 +1374,8 @@ int vscnprintf(char *buf, size_t size, const char *fmt, va_list args) { int i; - i=vsnprintf(buf,size,fmt,args); + i = vsnprintf(buf, size, fmt, args); + return (i >= size) ? (size - 1) : i; } EXPORT_SYMBOL(vscnprintf); @@ -1341,14 +1394,15 @@ EXPORT_SYMBOL(vscnprintf); * * See the vsnprintf() documentation for format string extensions over C99. */ -int snprintf(char * buf, size_t size, const char *fmt, ...) +int snprintf(char *buf, size_t size, const char *fmt, ...) { va_list args; int i; va_start(args, fmt); - i=vsnprintf(buf,size,fmt,args); + i = vsnprintf(buf, size, fmt, args); va_end(args); + return i; } EXPORT_SYMBOL(snprintf); @@ -1364,7 +1418,7 @@ EXPORT_SYMBOL(snprintf); * the trailing '\0'. If @size is <= 0 the function returns 0. */ -int scnprintf(char * buf, size_t size, const char *fmt, ...) +int scnprintf(char *buf, size_t size, const char *fmt, ...) { va_list args; int i; @@ -1372,6 +1426,7 @@ int scnprintf(char * buf, size_t size, const char *fmt, ...) 
va_start(args, fmt); i = vsnprintf(buf, size, fmt, args); va_end(args); + return (i >= size) ? (size - 1) : i; } EXPORT_SYMBOL(scnprintf); @@ -1409,14 +1464,15 @@ EXPORT_SYMBOL(vsprintf); * * See the vsnprintf() documentation for format string extensions over C99. */ -int sprintf(char * buf, const char *fmt, ...) +int sprintf(char *buf, const char *fmt, ...) { va_list args; int i; va_start(args, fmt); - i=vsnprintf(buf, INT_MAX, fmt, args); + i = vsnprintf(buf, INT_MAX, fmt, args); va_end(args); + return i; } EXPORT_SYMBOL(sprintf); @@ -1449,7 +1505,6 @@ int vbin_printf(u32 *bin_buf, size_t size, const char *fmt, va_list args) { struct printf_spec spec = {0}; char *str, *end; - int read; str = (char *)bin_buf; end = (char *)(bin_buf + size); @@ -1474,14 +1529,15 @@ do { \ str += sizeof(type); \ } while (0) - while (*fmt) { - read = format_decode(fmt, &spec); + int read = format_decode(fmt, &spec); fmt += read; switch (spec.type) { case FORMAT_TYPE_NONE: + case FORMAT_TYPE_INVALID: + case FORMAT_TYPE_PERCENT_CHAR: break; case FORMAT_TYPE_WIDTH: @@ -1496,13 +1552,14 @@ do { \ case FORMAT_TYPE_STR: { const char *save_str = va_arg(args, char *); size_t len; + if ((unsigned long)save_str > (unsigned long)-PAGE_SIZE || (unsigned long)save_str < PAGE_SIZE) - save_str = "<NULL>"; - len = strlen(save_str); - if (str + len + 1 < end) - memcpy(str, save_str, len + 1); - str += len + 1; + save_str = "(null)"; + len = strlen(save_str) + 1; + if (str + len < end) + memcpy(str, save_str, len); + str += len; break; } @@ -1513,19 +1570,13 @@ do { \ fmt++; break; - case FORMAT_TYPE_PERCENT_CHAR: - break; - - case FORMAT_TYPE_INVALID: - break; - case FORMAT_TYPE_NRCHARS: { /* skip %n 's argument */ int qualifier = spec.qualifier; void *skip_arg; if (qualifier == 'l') skip_arg = va_arg(args, long *); - else if (qualifier == 'Z' || qualifier == 'z') + else if (TOLOWER(qualifier) == 'z') skip_arg = va_arg(args, size_t *); else skip_arg = va_arg(args, int *); @@ -1561,8 +1612,8 @@ do { \ } } } - return (u32 *)(PTR_ALIGN(str, sizeof(u32))) - bin_buf; + return (u32 *)(PTR_ALIGN(str, sizeof(u32))) - bin_buf; #undef save_arg } EXPORT_SYMBOL_GPL(vbin_printf); @@ -1591,11 +1642,9 @@ EXPORT_SYMBOL_GPL(vbin_printf); */ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf) { - unsigned long long num; - char *str, *end, c; - const char *args = (const char *)bin_buf; - struct printf_spec spec = {0}; + char *str, *end; + const char *args = (const char *)bin_buf; if (WARN_ON_ONCE((int) size < 0)) return 0; @@ -1625,10 +1674,8 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf) } while (*fmt) { - int read; const char *old_fmt = fmt; - - read = format_decode(fmt, &spec); + int read = format_decode(fmt, &spec); fmt += read; @@ -1652,7 +1699,9 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf) spec.precision = get_arg(int); break; - case FORMAT_TYPE_CHAR: + case FORMAT_TYPE_CHAR: { + char c; + if (!(spec.flags & LEFT)) { while (--spec.field_width > 0) { if (str < end) @@ -1670,11 +1719,11 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf) ++str; } break; + } case FORMAT_TYPE_STR: { const char *str_arg = args; - size_t len = strlen(str_arg); - args += len + 1; + args += strlen(str_arg) + 1; str = string(str, end, (char *)str_arg, spec); break; } @@ -1686,11 +1735,6 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf) break; case FORMAT_TYPE_PERCENT_CHAR: - if (str < end) - *str = '%'; - ++str; - 
break; - case FORMAT_TYPE_INVALID: if (str < end) *str = '%'; @@ -1701,15 +1745,15 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf) /* skip */ break; - default: + default: { + unsigned long long num; + switch (spec.type) { case FORMAT_TYPE_LONG_LONG: num = get_arg(long long); break; case FORMAT_TYPE_ULONG: - num = get_arg(unsigned long); - break; case FORMAT_TYPE_LONG: num = get_arg(unsigned long); break; @@ -1739,8 +1783,9 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf) } str = number(str, end, num, spec); - } - } + } /* default: */ + } /* switch(spec.type) */ + } /* while(*fmt) */ if (size > 0) { if (str < end) @@ -1774,6 +1819,7 @@ int bprintf(u32 *bin_buf, size_t size, const char *fmt, ...) va_start(args, fmt); ret = vbin_printf(bin_buf, size, fmt, args); va_end(args); + return ret; } EXPORT_SYMBOL_GPL(bprintf); @@ -1786,27 +1832,23 @@ EXPORT_SYMBOL_GPL(bprintf); * @fmt: format of buffer * @args: arguments */ -int vsscanf(const char * buf, const char * fmt, va_list args) +int vsscanf(const char *buf, const char *fmt, va_list args) { const char *str = buf; char *next; char digit; int num = 0; - int qualifier; - int base; - int field_width; - int is_sign = 0; + int qualifier, base, field_width; + bool is_sign; - while(*fmt && *str) { + while (*fmt && *str) { /* skip any white space in format */ /* white space in format matchs any amount of * white space, including none, in the input. */ if (isspace(*fmt)) { - while (isspace(*fmt)) - ++fmt; - while (isspace(*str)) - ++str; + fmt = skip_spaces(++fmt); + str = skip_spaces(str); } /* anything that is not a conversion must match exactly */ @@ -1819,7 +1861,7 @@ int vsscanf(const char * buf, const char * fmt, va_list args) if (!*fmt) break; ++fmt; - + /* skip this conversion. 
* advance both strings to next white space */ @@ -1838,8 +1880,8 @@ int vsscanf(const char * buf, const char * fmt, va_list args) /* get conversion qualifier */ qualifier = -1; - if (*fmt == 'h' || *fmt == 'l' || *fmt == 'L' || - *fmt == 'Z' || *fmt == 'z') { + if (*fmt == 'h' || TOLOWER(*fmt) == 'l' || + TOLOWER(*fmt) == 'z') { qualifier = *fmt++; if (unlikely(qualifier == *fmt)) { if (qualifier == 'h') { @@ -1851,16 +1893,17 @@ int vsscanf(const char * buf, const char * fmt, va_list args) } } } - base = 10; - is_sign = 0; if (!*fmt || !*str) break; - switch(*fmt++) { + base = 10; + is_sign = 0; + + switch (*fmt++) { case 'c': { - char *s = (char *) va_arg(args,char*); + char *s = (char *)va_arg(args, char*); if (field_width == -1) field_width = 1; do { @@ -1871,17 +1914,15 @@ int vsscanf(const char * buf, const char * fmt, va_list args) continue; case 's': { - char *s = (char *) va_arg(args, char *); - if(field_width == -1) + char *s = (char *)va_arg(args, char *); + if (field_width == -1) field_width = INT_MAX; /* first, skip leading white space in buffer */ - while (isspace(*str)) - str++; + str = skip_spaces(str); /* now copy until next white space */ - while (*str && !isspace(*str) && field_width--) { + while (*str && !isspace(*str) && field_width--) *s++ = *str++; - } *s = '\0'; num++; } @@ -1889,7 +1930,7 @@ int vsscanf(const char * buf, const char * fmt, va_list args) case 'n': /* return number of characters read so far */ { - int *i = (int *)va_arg(args,int*); + int *i = (int *)va_arg(args, int*); *i = str - buf; } continue; @@ -1901,14 +1942,14 @@ int vsscanf(const char * buf, const char * fmt, va_list args) base = 16; break; case 'i': - base = 0; + base = 0; case 'd': is_sign = 1; case 'u': break; case '%': /* looking for '%' in str */ - if (*str++ != '%') + if (*str++ != '%') return num; continue; default: @@ -1919,71 +1960,70 @@ int vsscanf(const char * buf, const char * fmt, va_list args) /* have some sort of integer conversion. * first, skip white space in buffer. 
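The comments above spell out two scanning rules: whitespace in the format matches any run of whitespace (including none) in the input, and the integer conversions route through simple_strtoul()/simple_strtol(), which accept an optional 0x prefix when the base is 16. For the input used here, the C library's sscanf() behaves the same way, so the rules can be illustrated with a plain userspace program.

    #include <stdio.h>

    int main(void)
    {
        int a;
        unsigned int x;
        char word[16];

        /* "%d" skips leading whitespace and reads a decimal, "%x"
         * accepts the 0x prefix, a space in the format matches any
         * amount of input whitespace, and "%s" copies up to the next
         * whitespace. */
        int n = sscanf("  42,0x1f  hello", "%d,%x %15s", &a, &x, word);

        printf("n=%d a=%d x=%u word=%s\n", n, a, x, word);
        /* prints: n=3 a=42 x=31 word=hello */
        return 0;
    }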
*/ - while (isspace(*str)) - str++; + str = skip_spaces(str); digit = *str; if (is_sign && digit == '-') digit = *(str + 1); if (!digit - || (base == 16 && !isxdigit(digit)) - || (base == 10 && !isdigit(digit)) - || (base == 8 && (!isdigit(digit) || digit > '7')) - || (base == 0 && !isdigit(digit))) - break; + || (base == 16 && !isxdigit(digit)) + || (base == 10 && !isdigit(digit)) + || (base == 8 && (!isdigit(digit) || digit > '7')) + || (base == 0 && !isdigit(digit))) + break; - switch(qualifier) { + switch (qualifier) { case 'H': /* that's 'hh' in format */ if (is_sign) { - signed char *s = (signed char *) va_arg(args,signed char *); - *s = (signed char) simple_strtol(str,&next,base); + signed char *s = (signed char *)va_arg(args, signed char *); + *s = (signed char)simple_strtol(str, &next, base); } else { - unsigned char *s = (unsigned char *) va_arg(args, unsigned char *); - *s = (unsigned char) simple_strtoul(str, &next, base); + unsigned char *s = (unsigned char *)va_arg(args, unsigned char *); + *s = (unsigned char)simple_strtoul(str, &next, base); } break; case 'h': if (is_sign) { - short *s = (short *) va_arg(args,short *); - *s = (short) simple_strtol(str,&next,base); + short *s = (short *)va_arg(args, short *); + *s = (short)simple_strtol(str, &next, base); } else { - unsigned short *s = (unsigned short *) va_arg(args, unsigned short *); - *s = (unsigned short) simple_strtoul(str, &next, base); + unsigned short *s = (unsigned short *)va_arg(args, unsigned short *); + *s = (unsigned short)simple_strtoul(str, &next, base); } break; case 'l': if (is_sign) { - long *l = (long *) va_arg(args,long *); - *l = simple_strtol(str,&next,base); + long *l = (long *)va_arg(args, long *); + *l = simple_strtol(str, &next, base); } else { - unsigned long *l = (unsigned long*) va_arg(args,unsigned long*); - *l = simple_strtoul(str,&next,base); + unsigned long *l = (unsigned long *)va_arg(args, unsigned long *); + *l = simple_strtoul(str, &next, base); } break; case 'L': if (is_sign) { - long long *l = (long long*) va_arg(args,long long *); - *l = simple_strtoll(str,&next,base); + long long *l = (long long *)va_arg(args, long long *); + *l = simple_strtoll(str, &next, base); } else { - unsigned long long *l = (unsigned long long*) va_arg(args,unsigned long long*); - *l = simple_strtoull(str,&next,base); + unsigned long long *l = (unsigned long long *)va_arg(args, unsigned long long *); + *l = simple_strtoull(str, &next, base); } break; case 'Z': case 'z': { - size_t *s = (size_t*) va_arg(args,size_t*); - *s = (size_t) simple_strtoul(str,&next,base); + size_t *s = (size_t *)va_arg(args, size_t *); + *s = (size_t)simple_strtoul(str, &next, base); } break; default: if (is_sign) { - int *i = (int *) va_arg(args, int*); - *i = (int) simple_strtol(str,&next,base); + int *i = (int *)va_arg(args, int *); + *i = (int)simple_strtol(str, &next, base); } else { - unsigned int *i = (unsigned int*) va_arg(args, unsigned int*); - *i = (unsigned int) simple_strtoul(str,&next,base); + unsigned int *i = (unsigned int *)va_arg(args, unsigned int*); + *i = (unsigned int)simple_strtoul(str, &next, base); } break; } @@ -2014,14 +2054,15 @@ EXPORT_SYMBOL(vsscanf); * @fmt: formatting of buffer * @...: resulting arguments */ -int sscanf(const char * buf, const char * fmt, ...) +int sscanf(const char *buf, const char *fmt, ...) 
{ va_list args; int i; - va_start(args,fmt); - i = vsscanf(buf,fmt,args); + va_start(args, fmt); + i = vsscanf(buf, fmt, args); va_end(args); + return i; } EXPORT_SYMBOL(sscanf); diff --git a/mm/Kconfig b/mm/Kconfig index 44cf6f0a3a6..2310984591e 100644 --- a/mm/Kconfig +++ b/mm/Kconfig @@ -158,11 +158,13 @@ config PAGEFLAGS_EXTENDED # Default to 4 for wider testing, though 8 might be more appropriate. # ARM's adjust_pte (unused if VIPT) depends on mm-wide page_table_lock. # PA-RISC 7xxx's spinlock_t would enlarge struct page from 32 to 44 bytes. +# DEBUG_SPINLOCK and DEBUG_LOCK_ALLOC spinlock_t also enlarge struct page. # config SPLIT_PTLOCK_CPUS int - default "4096" if ARM && !CPU_CACHE_VIPT - default "4096" if PARISC && !PA20 + default "999999" if ARM && !CPU_CACHE_VIPT + default "999999" if PARISC && !PA20 + default "999999" if DEBUG_SPINLOCK || DEBUG_LOCK_ALLOC default "4" # @@ -200,14 +202,6 @@ config VIRT_TO_BUS def_bool y depends on !ARCH_NO_VIRT_TO_BUS -config HAVE_MLOCK - bool - default y if MMU=y - -config HAVE_MLOCKED_PAGE_BIT - bool - default y if HAVE_MLOCK=y - config MMU_NOTIFIER bool @@ -218,7 +212,7 @@ config KSM Enable Kernel Samepage Merging: KSM periodically scans those areas of an application's address space that an app has advised may be mergeable. When it finds pages of identical content, it replaces - the many instances by a single resident page with that content, so + the many instances by a single page with that content, so saving memory until one or another app needs to modify the content. Recommended for use with KVM, or with other duplicative applications. See Documentation/vm/ksm.txt for more information: KSM is inactive diff --git a/mm/bootmem.c b/mm/bootmem.c index d1dc23cc7f1..7d1486875e1 100644 --- a/mm/bootmem.c +++ b/mm/bootmem.c @@ -432,8 +432,8 @@ int __init reserve_bootmem(unsigned long addr, unsigned long size, return mark_bootmem(start, end, 1, flags); } -static unsigned long align_idx(struct bootmem_data *bdata, unsigned long idx, - unsigned long step) +static unsigned long __init align_idx(struct bootmem_data *bdata, + unsigned long idx, unsigned long step) { unsigned long base = bdata->node_min_pfn; @@ -445,8 +445,8 @@ static unsigned long align_idx(struct bootmem_data *bdata, unsigned long idx, return ALIGN(base + idx, step) - base; } -static unsigned long align_off(struct bootmem_data *bdata, unsigned long off, - unsigned long align) +static unsigned long __init align_off(struct bootmem_data *bdata, + unsigned long off, unsigned long align) { unsigned long base = PFN_PHYS(bdata->node_min_pfn); diff --git a/mm/hugetlb.c b/mm/hugetlb.c index 5d7601b0287..65f38c21820 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -24,6 +24,7 @@ #include <asm/io.h> #include <linux/hugetlb.h> +#include <linux/node.h> #include "internal.h" const unsigned long hugetlb_zero = 0, hugetlb_infinity = ~0UL; @@ -622,42 +623,66 @@ static struct page *alloc_fresh_huge_page_node(struct hstate *h, int nid) } /* - * Use a helper variable to find the next node and then - * copy it back to next_nid_to_alloc afterwards: - * otherwise there's a window in which a racer might - * pass invalid nid MAX_NUMNODES to alloc_pages_exact_node. - * But we don't need to use a spin_lock here: it really - * doesn't matter if occasionally a racer chooses the - * same nid as we do. Move nid forward in the mask even - * if we just successfully allocated a hugepage so that - * the next caller gets hugepages on the next node. + * common helper functions for hstate_next_node_to_{alloc|free}. 
+ * We may have allocated or freed a huge page based on a different + * nodes_allowed previously, so h->next_node_to_{alloc|free} might + * be outside of *nodes_allowed. Ensure that we use an allowed + * node for alloc or free. */ -static int hstate_next_node_to_alloc(struct hstate *h) +static int next_node_allowed(int nid, nodemask_t *nodes_allowed) { - int next_nid; - next_nid = next_node(h->next_nid_to_alloc, node_online_map); - if (next_nid == MAX_NUMNODES) - next_nid = first_node(node_online_map); - h->next_nid_to_alloc = next_nid; - return next_nid; + nid = next_node(nid, *nodes_allowed); + if (nid == MAX_NUMNODES) + nid = first_node(*nodes_allowed); + VM_BUG_ON(nid >= MAX_NUMNODES); + + return nid; +} + +static int get_valid_node_allowed(int nid, nodemask_t *nodes_allowed) +{ + if (!node_isset(nid, *nodes_allowed)) + nid = next_node_allowed(nid, nodes_allowed); + return nid; +} + +/* + * returns the previously saved node ["this node"] from which to + * allocate a persistent huge page for the pool and advance the + * next node from which to allocate, handling wrap at end of node + * mask. + */ +static int hstate_next_node_to_alloc(struct hstate *h, + nodemask_t *nodes_allowed) +{ + int nid; + + VM_BUG_ON(!nodes_allowed); + + nid = get_valid_node_allowed(h->next_nid_to_alloc, nodes_allowed); + h->next_nid_to_alloc = next_node_allowed(nid, nodes_allowed); + + return nid; } -static int alloc_fresh_huge_page(struct hstate *h) +static int alloc_fresh_huge_page(struct hstate *h, nodemask_t *nodes_allowed) { struct page *page; int start_nid; int next_nid; int ret = 0; - start_nid = h->next_nid_to_alloc; + start_nid = hstate_next_node_to_alloc(h, nodes_allowed); next_nid = start_nid; do { page = alloc_fresh_huge_page_node(h, next_nid); - if (page) + if (page) { ret = 1; - next_nid = hstate_next_node_to_alloc(h); - } while (!page && next_nid != start_nid); + break; + } + next_nid = hstate_next_node_to_alloc(h, nodes_allowed); + } while (next_nid != start_nid); if (ret) count_vm_event(HTLB_BUDDY_PGALLOC); @@ -668,17 +693,21 @@ static int alloc_fresh_huge_page(struct hstate *h) } /* - * helper for free_pool_huge_page() - find next node - * from which to free a huge page + * helper for free_pool_huge_page() - return the previously saved + * node ["this node"] from which to free a huge page. Advance the + * next node id whether or not we find a free huge page to free so + * that the next attempt to free addresses the next node. */ -static int hstate_next_node_to_free(struct hstate *h) +static int hstate_next_node_to_free(struct hstate *h, nodemask_t *nodes_allowed) { - int next_nid; - next_nid = next_node(h->next_nid_to_free, node_online_map); - if (next_nid == MAX_NUMNODES) - next_nid = first_node(node_online_map); - h->next_nid_to_free = next_nid; - return next_nid; + int nid; + + VM_BUG_ON(!nodes_allowed); + + nid = get_valid_node_allowed(h->next_nid_to_free, nodes_allowed); + h->next_nid_to_free = next_node_allowed(nid, nodes_allowed); + + return nid; } /* @@ -687,13 +716,14 @@ static int hstate_next_node_to_free(struct hstate *h) * balanced over allowed nodes. * Called with hugetlb_lock locked. 
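The helpers above keep h->next_nid_to_alloc and h->next_nid_to_free walking round-robin over whatever nodes_allowed mask the caller passes, wrapping at the end of the mask. Below is a userspace sketch of that wrap-around selection, with the nodemask reduced to a plain bitmask and the next_node_allowed() name reused only for illustration.

    #include <stdio.h>

    #define MAX_NODES 8

    /* Return the next node set in the mask strictly after nid, wrapping
     * around to the first set node, as next_node_allowed() does above. */
    static int next_node_allowed(int nid, unsigned int nodes_allowed)
    {
        int i;

        for (i = 1; i <= MAX_NODES; i++) {
            int candidate = (nid + i) % MAX_NODES;

            if (nodes_allowed & (1u << candidate))
                return candidate;
        }
        return -1;      /* empty mask; the kernel never passes one */
    }

    int main(void)
    {
        unsigned int allowed = (1u << 0) | (1u << 2) | (1u << 5);
        int nid = 0;    /* assume node 0 is in the mask */
        int i;

        /* Successive "allocations" rotate over nodes 0, 2, 5, 0, ... */
        for (i = 0; i < 6; i++) {
            printf("allocate on node %d\n", nid);
            nid = next_node_allowed(nid, allowed);
        }
        return 0;
    }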
*/ -static int free_pool_huge_page(struct hstate *h, bool acct_surplus) +static int free_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed, + bool acct_surplus) { int start_nid; int next_nid; int ret = 0; - start_nid = h->next_nid_to_free; + start_nid = hstate_next_node_to_free(h, nodes_allowed); next_nid = start_nid; do { @@ -715,9 +745,10 @@ static int free_pool_huge_page(struct hstate *h, bool acct_surplus) } update_and_free_page(h, page); ret = 1; + break; } - next_nid = hstate_next_node_to_free(h); - } while (!ret && next_nid != start_nid); + next_nid = hstate_next_node_to_free(h, nodes_allowed); + } while (next_nid != start_nid); return ret; } @@ -911,14 +942,14 @@ static void return_unused_surplus_pages(struct hstate *h, /* * We want to release as many surplus pages as possible, spread - * evenly across all nodes. Iterate across all nodes until we - * can no longer free unreserved surplus pages. This occurs when - * the nodes with surplus pages have no free pages. - * free_pool_huge_page() will balance the the frees across the - * on-line nodes for us and will handle the hstate accounting. + * evenly across all nodes with memory. Iterate across these nodes + * until we can no longer free unreserved surplus pages. This occurs + * when the nodes with surplus pages have no free pages. + * free_pool_huge_page() will balance the the freed pages across the + * on-line nodes with memory and will handle the hstate accounting. */ while (nr_pages--) { - if (!free_pool_huge_page(h, 1)) + if (!free_pool_huge_page(h, &node_states[N_HIGH_MEMORY], 1)) break; } } @@ -1022,16 +1053,16 @@ static struct page *alloc_huge_page(struct vm_area_struct *vma, int __weak alloc_bootmem_huge_page(struct hstate *h) { struct huge_bootmem_page *m; - int nr_nodes = nodes_weight(node_online_map); + int nr_nodes = nodes_weight(node_states[N_HIGH_MEMORY]); while (nr_nodes) { void *addr; addr = __alloc_bootmem_node_nopanic( - NODE_DATA(h->next_nid_to_alloc), + NODE_DATA(hstate_next_node_to_alloc(h, + &node_states[N_HIGH_MEMORY])), huge_page_size(h), huge_page_size(h), 0); - hstate_next_node_to_alloc(h); if (addr) { /* * Use the beginning of the huge page to store the @@ -1084,7 +1115,8 @@ static void __init hugetlb_hstate_alloc_pages(struct hstate *h) if (h->order >= MAX_ORDER) { if (!alloc_bootmem_huge_page(h)) break; - } else if (!alloc_fresh_huge_page(h)) + } else if (!alloc_fresh_huge_page(h, + &node_states[N_HIGH_MEMORY])) break; } h->max_huge_pages = i; @@ -1126,14 +1158,15 @@ static void __init report_hugepages(void) } #ifdef CONFIG_HIGHMEM -static void try_to_free_low(struct hstate *h, unsigned long count) +static void try_to_free_low(struct hstate *h, unsigned long count, + nodemask_t *nodes_allowed) { int i; if (h->order >= MAX_ORDER) return; - for (i = 0; i < MAX_NUMNODES; ++i) { + for_each_node_mask(i, *nodes_allowed) { struct page *page, *next; struct list_head *freel = &h->hugepage_freelists[i]; list_for_each_entry_safe(page, next, freel, lru) { @@ -1149,7 +1182,8 @@ static void try_to_free_low(struct hstate *h, unsigned long count) } } #else -static inline void try_to_free_low(struct hstate *h, unsigned long count) +static inline void try_to_free_low(struct hstate *h, unsigned long count, + nodemask_t *nodes_allowed) { } #endif @@ -1159,7 +1193,8 @@ static inline void try_to_free_low(struct hstate *h, unsigned long count) * balanced by operating on them in a round-robin fashion. * Returns 1 if an adjustment was made. 
*/ -static int adjust_pool_surplus(struct hstate *h, int delta) +static int adjust_pool_surplus(struct hstate *h, nodemask_t *nodes_allowed, + int delta) { int start_nid, next_nid; int ret = 0; @@ -1167,29 +1202,33 @@ static int adjust_pool_surplus(struct hstate *h, int delta) VM_BUG_ON(delta != -1 && delta != 1); if (delta < 0) - start_nid = h->next_nid_to_alloc; + start_nid = hstate_next_node_to_alloc(h, nodes_allowed); else - start_nid = h->next_nid_to_free; + start_nid = hstate_next_node_to_free(h, nodes_allowed); next_nid = start_nid; do { int nid = next_nid; if (delta < 0) { - next_nid = hstate_next_node_to_alloc(h); /* * To shrink on this node, there must be a surplus page */ - if (!h->surplus_huge_pages_node[nid]) + if (!h->surplus_huge_pages_node[nid]) { + next_nid = hstate_next_node_to_alloc(h, + nodes_allowed); continue; + } } if (delta > 0) { - next_nid = hstate_next_node_to_free(h); /* * Surplus cannot exceed the total number of pages */ if (h->surplus_huge_pages_node[nid] >= - h->nr_huge_pages_node[nid]) + h->nr_huge_pages_node[nid]) { + next_nid = hstate_next_node_to_free(h, + nodes_allowed); continue; + } } h->surplus_huge_pages += delta; @@ -1202,7 +1241,8 @@ static int adjust_pool_surplus(struct hstate *h, int delta) } #define persistent_huge_pages(h) (h->nr_huge_pages - h->surplus_huge_pages) -static unsigned long set_max_huge_pages(struct hstate *h, unsigned long count) +static unsigned long set_max_huge_pages(struct hstate *h, unsigned long count, + nodemask_t *nodes_allowed) { unsigned long min_count, ret; @@ -1222,7 +1262,7 @@ static unsigned long set_max_huge_pages(struct hstate *h, unsigned long count) */ spin_lock(&hugetlb_lock); while (h->surplus_huge_pages && count > persistent_huge_pages(h)) { - if (!adjust_pool_surplus(h, -1)) + if (!adjust_pool_surplus(h, nodes_allowed, -1)) break; } @@ -1233,11 +1273,14 @@ static unsigned long set_max_huge_pages(struct hstate *h, unsigned long count) * and reducing the surplus. */ spin_unlock(&hugetlb_lock); - ret = alloc_fresh_huge_page(h); + ret = alloc_fresh_huge_page(h, nodes_allowed); spin_lock(&hugetlb_lock); if (!ret) goto out; + /* Bail for signals. 
Probably ctrl-c from user */ + if (signal_pending(current)) + goto out; } /* @@ -1257,13 +1300,13 @@ static unsigned long set_max_huge_pages(struct hstate *h, unsigned long count) */ min_count = h->resv_huge_pages + h->nr_huge_pages - h->free_huge_pages; min_count = max(count, min_count); - try_to_free_low(h, min_count); + try_to_free_low(h, min_count, nodes_allowed); while (min_count < persistent_huge_pages(h)) { - if (!free_pool_huge_page(h, 0)) + if (!free_pool_huge_page(h, nodes_allowed, 0)) break; } while (count < persistent_huge_pages(h)) { - if (!adjust_pool_surplus(h, 1)) + if (!adjust_pool_surplus(h, nodes_allowed, 1)) break; } out: @@ -1282,43 +1325,117 @@ out: static struct kobject *hugepages_kobj; static struct kobject *hstate_kobjs[HUGE_MAX_HSTATE]; -static struct hstate *kobj_to_hstate(struct kobject *kobj) +static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp); + +static struct hstate *kobj_to_hstate(struct kobject *kobj, int *nidp) { int i; + for (i = 0; i < HUGE_MAX_HSTATE; i++) - if (hstate_kobjs[i] == kobj) + if (hstate_kobjs[i] == kobj) { + if (nidp) + *nidp = NUMA_NO_NODE; return &hstates[i]; - BUG(); - return NULL; + } + + return kobj_to_node_hstate(kobj, nidp); } -static ssize_t nr_hugepages_show(struct kobject *kobj, +static ssize_t nr_hugepages_show_common(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { - struct hstate *h = kobj_to_hstate(kobj); - return sprintf(buf, "%lu\n", h->nr_huge_pages); + struct hstate *h; + unsigned long nr_huge_pages; + int nid; + + h = kobj_to_hstate(kobj, &nid); + if (nid == NUMA_NO_NODE) + nr_huge_pages = h->nr_huge_pages; + else + nr_huge_pages = h->nr_huge_pages_node[nid]; + + return sprintf(buf, "%lu\n", nr_huge_pages); } -static ssize_t nr_hugepages_store(struct kobject *kobj, - struct kobj_attribute *attr, const char *buf, size_t count) +static ssize_t nr_hugepages_store_common(bool obey_mempolicy, + struct kobject *kobj, struct kobj_attribute *attr, + const char *buf, size_t len) { int err; - unsigned long input; - struct hstate *h = kobj_to_hstate(kobj); + int nid; + unsigned long count; + struct hstate *h; + NODEMASK_ALLOC(nodemask_t, nodes_allowed, GFP_KERNEL | __GFP_NORETRY); - err = strict_strtoul(buf, 10, &input); + err = strict_strtoul(buf, 10, &count); if (err) return 0; - h->max_huge_pages = set_max_huge_pages(h, input); + h = kobj_to_hstate(kobj, &nid); + if (nid == NUMA_NO_NODE) { + /* + * global hstate attribute + */ + if (!(obey_mempolicy && + init_nodemask_of_mempolicy(nodes_allowed))) { + NODEMASK_FREE(nodes_allowed); + nodes_allowed = &node_states[N_HIGH_MEMORY]; + } + } else if (nodes_allowed) { + /* + * per node hstate attribute: adjust count to global, + * but restrict alloc/free to the specified node. 
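[Editor's note: the per-node nr_hugepages path described in the comment above converts a per-node target into a global target before calling set_max_huge_pages(), while the nodemask restricts where pages may be allocated or freed. A tiny worked example of that arithmetic, with made-up numbers:]

#include <stdio.h>

int main(void)
{
	unsigned long nr_huge_pages = 10;	/* h->nr_huge_pages (global)  */
	unsigned long nr_huge_pages_node1 = 2;	/* h->nr_huge_pages_node[1]   */
	unsigned long count = 6;		/* value written to node 1    */

	/* count += h->nr_huge_pages - h->nr_huge_pages_node[nid]; */
	count += nr_huge_pages - nr_huge_pages_node1;

	printf("global target passed to set_max_huge_pages(): %lu\n", count);
	/* -> 14: other nodes keep their 8 pages, node 1 grows from 2 to 6 */
	return 0;
}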
+ */ + count += h->nr_huge_pages - h->nr_huge_pages_node[nid]; + init_nodemask_of_node(nodes_allowed, nid); + } else + nodes_allowed = &node_states[N_HIGH_MEMORY]; + + h->max_huge_pages = set_max_huge_pages(h, count, nodes_allowed); - return count; + if (nodes_allowed != &node_states[N_HIGH_MEMORY]) + NODEMASK_FREE(nodes_allowed); + + return len; +} + +static ssize_t nr_hugepages_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buf) +{ + return nr_hugepages_show_common(kobj, attr, buf); +} + +static ssize_t nr_hugepages_store(struct kobject *kobj, + struct kobj_attribute *attr, const char *buf, size_t len) +{ + return nr_hugepages_store_common(false, kobj, attr, buf, len); } HSTATE_ATTR(nr_hugepages); +#ifdef CONFIG_NUMA + +/* + * hstate attribute for optionally mempolicy-based constraint on persistent + * huge page alloc/free. + */ +static ssize_t nr_hugepages_mempolicy_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buf) +{ + return nr_hugepages_show_common(kobj, attr, buf); +} + +static ssize_t nr_hugepages_mempolicy_store(struct kobject *kobj, + struct kobj_attribute *attr, const char *buf, size_t len) +{ + return nr_hugepages_store_common(true, kobj, attr, buf, len); +} +HSTATE_ATTR(nr_hugepages_mempolicy); +#endif + + static ssize_t nr_overcommit_hugepages_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { - struct hstate *h = kobj_to_hstate(kobj); + struct hstate *h = kobj_to_hstate(kobj, NULL); return sprintf(buf, "%lu\n", h->nr_overcommit_huge_pages); } static ssize_t nr_overcommit_hugepages_store(struct kobject *kobj, @@ -1326,7 +1443,7 @@ static ssize_t nr_overcommit_hugepages_store(struct kobject *kobj, { int err; unsigned long input; - struct hstate *h = kobj_to_hstate(kobj); + struct hstate *h = kobj_to_hstate(kobj, NULL); err = strict_strtoul(buf, 10, &input); if (err) @@ -1343,15 +1460,24 @@ HSTATE_ATTR(nr_overcommit_hugepages); static ssize_t free_hugepages_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { - struct hstate *h = kobj_to_hstate(kobj); - return sprintf(buf, "%lu\n", h->free_huge_pages); + struct hstate *h; + unsigned long free_huge_pages; + int nid; + + h = kobj_to_hstate(kobj, &nid); + if (nid == NUMA_NO_NODE) + free_huge_pages = h->free_huge_pages; + else + free_huge_pages = h->free_huge_pages_node[nid]; + + return sprintf(buf, "%lu\n", free_huge_pages); } HSTATE_ATTR_RO(free_hugepages); static ssize_t resv_hugepages_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { - struct hstate *h = kobj_to_hstate(kobj); + struct hstate *h = kobj_to_hstate(kobj, NULL); return sprintf(buf, "%lu\n", h->resv_huge_pages); } HSTATE_ATTR_RO(resv_hugepages); @@ -1359,8 +1485,17 @@ HSTATE_ATTR_RO(resv_hugepages); static ssize_t surplus_hugepages_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { - struct hstate *h = kobj_to_hstate(kobj); - return sprintf(buf, "%lu\n", h->surplus_huge_pages); + struct hstate *h; + unsigned long surplus_huge_pages; + int nid; + + h = kobj_to_hstate(kobj, &nid); + if (nid == NUMA_NO_NODE) + surplus_huge_pages = h->surplus_huge_pages; + else + surplus_huge_pages = h->surplus_huge_pages_node[nid]; + + return sprintf(buf, "%lu\n", surplus_huge_pages); } HSTATE_ATTR_RO(surplus_hugepages); @@ -1370,6 +1505,9 @@ static struct attribute *hstate_attrs[] = { &free_hugepages_attr.attr, &resv_hugepages_attr.attr, &surplus_hugepages_attr.attr, +#ifdef CONFIG_NUMA + &nr_hugepages_mempolicy_attr.attr, +#endif NULL, }; @@ -1377,19 +1515,21 @@ static struct 
attribute_group hstate_attr_group = { .attrs = hstate_attrs, }; -static int __init hugetlb_sysfs_add_hstate(struct hstate *h) +static int __init hugetlb_sysfs_add_hstate(struct hstate *h, + struct kobject *parent, + struct kobject **hstate_kobjs, + struct attribute_group *hstate_attr_group) { int retval; + int hi = h - hstates; - hstate_kobjs[h - hstates] = kobject_create_and_add(h->name, - hugepages_kobj); - if (!hstate_kobjs[h - hstates]) + hstate_kobjs[hi] = kobject_create_and_add(h->name, parent); + if (!hstate_kobjs[hi]) return -ENOMEM; - retval = sysfs_create_group(hstate_kobjs[h - hstates], - &hstate_attr_group); + retval = sysfs_create_group(hstate_kobjs[hi], hstate_attr_group); if (retval) - kobject_put(hstate_kobjs[h - hstates]); + kobject_put(hstate_kobjs[hi]); return retval; } @@ -1404,17 +1544,184 @@ static void __init hugetlb_sysfs_init(void) return; for_each_hstate(h) { - err = hugetlb_sysfs_add_hstate(h); + err = hugetlb_sysfs_add_hstate(h, hugepages_kobj, + hstate_kobjs, &hstate_attr_group); if (err) printk(KERN_ERR "Hugetlb: Unable to add hstate %s", h->name); } } +#ifdef CONFIG_NUMA + +/* + * node_hstate/s - associate per node hstate attributes, via their kobjects, + * with node sysdevs in node_devices[] using a parallel array. The array + * index of a node sysdev or _hstate == node id. + * This is here to avoid any static dependency of the node sysdev driver, in + * the base kernel, on the hugetlb module. + */ +struct node_hstate { + struct kobject *hugepages_kobj; + struct kobject *hstate_kobjs[HUGE_MAX_HSTATE]; +}; +struct node_hstate node_hstates[MAX_NUMNODES]; + +/* + * A subset of global hstate attributes for node sysdevs + */ +static struct attribute *per_node_hstate_attrs[] = { + &nr_hugepages_attr.attr, + &free_hugepages_attr.attr, + &surplus_hugepages_attr.attr, + NULL, +}; + +static struct attribute_group per_node_hstate_attr_group = { + .attrs = per_node_hstate_attrs, +}; + +/* + * kobj_to_node_hstate - lookup global hstate for node sysdev hstate attr kobj. + * Returns node id via non-NULL nidp. + */ +static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp) +{ + int nid; + + for (nid = 0; nid < nr_node_ids; nid++) { + struct node_hstate *nhs = &node_hstates[nid]; + int i; + for (i = 0; i < HUGE_MAX_HSTATE; i++) + if (nhs->hstate_kobjs[i] == kobj) { + if (nidp) + *nidp = nid; + return &hstates[i]; + } + } + + BUG(); + return NULL; +} + +/* + * Unregister hstate attributes from a single node sysdev. + * No-op if no hstate attributes attached. + */ +void hugetlb_unregister_node(struct node *node) +{ + struct hstate *h; + struct node_hstate *nhs = &node_hstates[node->sysdev.id]; + + if (!nhs->hugepages_kobj) + return; /* no hstate attributes */ + + for_each_hstate(h) + if (nhs->hstate_kobjs[h - hstates]) { + kobject_put(nhs->hstate_kobjs[h - hstates]); + nhs->hstate_kobjs[h - hstates] = NULL; + } + + kobject_put(nhs->hugepages_kobj); + nhs->hugepages_kobj = NULL; +} + +/* + * hugetlb module exit: unregister hstate attributes from node sysdevs + * that have them. + */ +static void hugetlb_unregister_all_nodes(void) +{ + int nid; + + /* + * disable node sysdev registrations. + */ + register_hugetlbfs_with_node(NULL, NULL); + + /* + * remove hstate attributes from any nodes that have them. + */ + for (nid = 0; nid < nr_node_ids; nid++) + hugetlb_unregister_node(&node_devices[nid]); +} + +/* + * Register hstate attributes for a single node sysdev. + * No-op if attributes already registered. 
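[Editor's note: the node_hstates[] code above keeps per-node hstate kobjects in a parallel array indexed by node id, so kobj_to_node_hstate() maps a kobject pointer back to its (hstate, nid) pair with a linear scan. A simplified userspace model of that reverse lookup; the struct kobject here is a dummy stand-in and the array sizes are arbitrary.]

#include <stdio.h>

#define MAX_NUMNODES	4
#define HUGE_MAX_HSTATE	2

struct kobject { int dummy; };

static struct kobject kobjs[MAX_NUMNODES][HUGE_MAX_HSTATE];

struct node_hstate {
	struct kobject *hstate_kobjs[HUGE_MAX_HSTATE];
};
static struct node_hstate node_hstates[MAX_NUMNODES];

static int lookup(struct kobject *kobj, int *nidp)
{
	for (int nid = 0; nid < MAX_NUMNODES; nid++)
		for (int i = 0; i < HUGE_MAX_HSTATE; i++)
			if (node_hstates[nid].hstate_kobjs[i] == kobj) {
				*nidp = nid;
				return i;	/* index into hstates[] */
			}
	return -1;
}

int main(void)
{
	for (int nid = 0; nid < MAX_NUMNODES; nid++)
		for (int i = 0; i < HUGE_MAX_HSTATE; i++)
			node_hstates[nid].hstate_kobjs[i] = &kobjs[nid][i];

	int nid, hi = lookup(&kobjs[2][1], &nid);
	printf("kobject belongs to hstate %d on node %d\n", hi, nid);
	return 0;
}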
+ */ +void hugetlb_register_node(struct node *node) +{ + struct hstate *h; + struct node_hstate *nhs = &node_hstates[node->sysdev.id]; + int err; + + if (nhs->hugepages_kobj) + return; /* already allocated */ + + nhs->hugepages_kobj = kobject_create_and_add("hugepages", + &node->sysdev.kobj); + if (!nhs->hugepages_kobj) + return; + + for_each_hstate(h) { + err = hugetlb_sysfs_add_hstate(h, nhs->hugepages_kobj, + nhs->hstate_kobjs, + &per_node_hstate_attr_group); + if (err) { + printk(KERN_ERR "Hugetlb: Unable to add hstate %s" + " for node %d\n", + h->name, node->sysdev.id); + hugetlb_unregister_node(node); + break; + } + } +} + +/* + * hugetlb init time: register hstate attributes for all registered node + * sysdevs of nodes that have memory. All on-line nodes should have + * registered their associated sysdev by this time. + */ +static void hugetlb_register_all_nodes(void) +{ + int nid; + + for_each_node_state(nid, N_HIGH_MEMORY) { + struct node *node = &node_devices[nid]; + if (node->sysdev.id == nid) + hugetlb_register_node(node); + } + + /* + * Let the node sysdev driver know we're here so it can + * [un]register hstate attributes on node hotplug. + */ + register_hugetlbfs_with_node(hugetlb_register_node, + hugetlb_unregister_node); +} +#else /* !CONFIG_NUMA */ + +static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp) +{ + BUG(); + if (nidp) + *nidp = -1; + return NULL; +} + +static void hugetlb_unregister_all_nodes(void) { } + +static void hugetlb_register_all_nodes(void) { } + +#endif + static void __exit hugetlb_exit(void) { struct hstate *h; + hugetlb_unregister_all_nodes(); + for_each_hstate(h) { kobject_put(hstate_kobjs[h - hstates]); } @@ -1449,6 +1756,8 @@ static int __init hugetlb_init(void) hugetlb_sysfs_init(); + hugetlb_register_all_nodes(); + return 0; } module_init(hugetlb_init); @@ -1472,8 +1781,8 @@ void __init hugetlb_add_hstate(unsigned order) h->free_huge_pages = 0; for (i = 0; i < MAX_NUMNODES; ++i) INIT_LIST_HEAD(&h->hugepage_freelists[i]); - h->next_nid_to_alloc = first_node(node_online_map); - h->next_nid_to_free = first_node(node_online_map); + h->next_nid_to_alloc = first_node(node_states[N_HIGH_MEMORY]); + h->next_nid_to_free = first_node(node_states[N_HIGH_MEMORY]); snprintf(h->name, HSTATE_NAME_LEN, "hugepages-%lukB", huge_page_size(h)/1024); @@ -1536,9 +1845,9 @@ static unsigned int cpuset_mems_nr(unsigned int *array) } #ifdef CONFIG_SYSCTL -int hugetlb_sysctl_handler(struct ctl_table *table, int write, - void __user *buffer, - size_t *length, loff_t *ppos) +static int hugetlb_sysctl_handler_common(bool obey_mempolicy, + struct ctl_table *table, int write, + void __user *buffer, size_t *length, loff_t *ppos) { struct hstate *h = &default_hstate; unsigned long tmp; @@ -1550,12 +1859,40 @@ int hugetlb_sysctl_handler(struct ctl_table *table, int write, table->maxlen = sizeof(unsigned long); proc_doulongvec_minmax(table, write, buffer, length, ppos); - if (write) - h->max_huge_pages = set_max_huge_pages(h, tmp); + if (write) { + NODEMASK_ALLOC(nodemask_t, nodes_allowed, + GFP_KERNEL | __GFP_NORETRY); + if (!(obey_mempolicy && + init_nodemask_of_mempolicy(nodes_allowed))) { + NODEMASK_FREE(nodes_allowed); + nodes_allowed = &node_states[N_HIGH_MEMORY]; + } + h->max_huge_pages = set_max_huge_pages(h, tmp, nodes_allowed); + + if (nodes_allowed != &node_states[N_HIGH_MEMORY]) + NODEMASK_FREE(nodes_allowed); + } return 0; } +int hugetlb_sysctl_handler(struct ctl_table *table, int write, + void __user *buffer, size_t *length, loff_t *ppos) +{ + + 
return hugetlb_sysctl_handler_common(false, table, write, + buffer, length, ppos); +} + +#ifdef CONFIG_NUMA +int hugetlb_mempolicy_sysctl_handler(struct ctl_table *table, int write, + void __user *buffer, size_t *length, loff_t *ppos) +{ + return hugetlb_sysctl_handler_common(true, table, write, + buffer, length, ppos); +} +#endif /* CONFIG_NUMA */ + int hugetlb_treat_movable_handler(struct ctl_table *table, int write, void __user *buffer, size_t *length, loff_t *ppos) @@ -1903,6 +2240,12 @@ static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma, + (vma->vm_pgoff >> PAGE_SHIFT); mapping = (struct address_space *)page_private(page); + /* + * Take the mapping lock for the duration of the table walk. As + * this mapping should be shared between all the VMAs, + * __unmap_hugepage_range() is called as the lock is already held + */ + spin_lock(&mapping->i_mmap_lock); vma_prio_tree_foreach(iter_vma, &iter, &mapping->i_mmap, pgoff, pgoff) { /* Do not unmap the current VMA */ if (iter_vma == vma) @@ -1916,10 +2259,11 @@ static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma, * from the time of fork. This would look like data corruption */ if (!is_vma_resv_set(iter_vma, HPAGE_RESV_OWNER)) - unmap_hugepage_range(iter_vma, + __unmap_hugepage_range(iter_vma, address, address + huge_page_size(h), page); } + spin_unlock(&mapping->i_mmap_lock); return 1; } @@ -1959,6 +2303,9 @@ retry_avoidcopy: outside_reserve = 1; page_cache_get(old_page); + + /* Drop page_table_lock as buddy allocator may be called */ + spin_unlock(&mm->page_table_lock); new_page = alloc_huge_page(vma, address, outside_reserve); if (IS_ERR(new_page)) { @@ -1976,19 +2323,25 @@ retry_avoidcopy: if (unmap_ref_private(mm, vma, old_page, address)) { BUG_ON(page_count(old_page) != 1); BUG_ON(huge_pte_none(pte)); + spin_lock(&mm->page_table_lock); goto retry_avoidcopy; } WARN_ON_ONCE(1); } + /* Caller expects lock to be held */ + spin_lock(&mm->page_table_lock); return -PTR_ERR(new_page); } - spin_unlock(&mm->page_table_lock); copy_huge_page(new_page, old_page, address, vma); __SetPageUptodate(new_page); - spin_lock(&mm->page_table_lock); + /* + * Retake the page_table_lock to check for racing updates + * before the page tables are altered + */ + spin_lock(&mm->page_table_lock); ptep = huge_pte_offset(mm, address & huge_page_mask(h)); if (likely(pte_same(huge_ptep_get(ptep), pte))) { /* Break COW */ diff --git a/mm/internal.h b/mm/internal.h index 22ec8d2b0fb..4fe67a162cb 100644 --- a/mm/internal.h +++ b/mm/internal.h @@ -63,7 +63,7 @@ static inline unsigned long page_order(struct page *page) return page_private(page); } -#ifdef CONFIG_HAVE_MLOCK +#ifdef CONFIG_MMU extern long mlock_vma_pages_range(struct vm_area_struct *vma, unsigned long start, unsigned long end); extern void munlock_vma_pages_range(struct vm_area_struct *vma, @@ -72,21 +72,7 @@ static inline void munlock_vma_pages_all(struct vm_area_struct *vma) { munlock_vma_pages_range(vma, vma->vm_start, vma->vm_end); } -#endif - -/* - * unevictable_migrate_page() called only from migrate_page_copy() to - * migrate unevictable flag to new page. - * Note that the old page has been isolated from the LRU lists at this - * point so we don't need to worry about LRU statistics. 
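[Editor's note: the hugetlb COW hunk above drops page_table_lock before calling into the (possibly sleeping) huge page allocator and retakes it afterwards, re-checking the pte for racing updates before altering the page tables. The sketch below models that drop-retake-revalidate pattern with a pthread mutex; all names and values are illustrative stand-ins, not kernel code.]

#include <pthread.h>
#include <stdlib.h>
#include <stdio.h>

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
static int pte_value = 42;		/* state protected by table_lock */

static int *blocking_alloc(void)	/* may sleep, must not hold the lock */
{
	return malloc(sizeof(int));
}

static void cow_fault(void)
{
	pthread_mutex_lock(&table_lock);
	int snapshot = pte_value;	/* value observed under the lock */

	pthread_mutex_unlock(&table_lock);	/* drop lock: allocator may block */
	int *new_page = blocking_alloc();
	pthread_mutex_lock(&table_lock);	/* caller expects lock to be held */

	if (pte_value != snapshot) {
		/* raced with another updater: back out instead of installing */
		free(new_page);
		printf("lost the race, retry\n");
	} else {
		pte_value = 7;		/* "break COW": install the new page */
		free(new_page);
		printf("installed new page\n");
	}
	pthread_mutex_unlock(&table_lock);
}

int main(void)
{
	cow_fault();
	return 0;
}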
- */ -static inline void unevictable_migrate_page(struct page *new, struct page *old) -{ - if (TestClearPageUnevictable(old)) - SetPageUnevictable(new); -} -#ifdef CONFIG_HAVE_MLOCKED_PAGE_BIT /* * Called only in fault path via page_evictable() for a new page * to determine if it's being mapped into a LOCKED vma. @@ -107,9 +93,10 @@ static inline int is_mlocked_vma(struct vm_area_struct *vma, struct page *page) } /* - * must be called with vma's mmap_sem held for read, and page locked. + * must be called with vma's mmap_sem held for read or write, and page locked. */ extern void mlock_vma_page(struct page *page); +extern void munlock_vma_page(struct page *page); /* * Clear the page's PageMlocked(). This can be useful in a situation where @@ -144,7 +131,7 @@ static inline void mlock_migrate_page(struct page *newpage, struct page *page) } } -#else /* CONFIG_HAVE_MLOCKED_PAGE_BIT */ +#else /* !CONFIG_MMU */ static inline int is_mlocked_vma(struct vm_area_struct *v, struct page *p) { return 0; @@ -153,7 +140,7 @@ static inline void clear_page_mlock(struct page *page) { } static inline void mlock_vma_page(struct page *page) { } static inline void mlock_migrate_page(struct page *new, struct page *old) { } -#endif /* CONFIG_HAVE_MLOCKED_PAGE_BIT */ +#endif /* !CONFIG_MMU */ /* * Return the mem_map entry representing the 'offset' subpage within @@ -29,11 +29,13 @@ #include <linux/wait.h> #include <linux/slab.h> #include <linux/rbtree.h> +#include <linux/memory.h> #include <linux/mmu_notifier.h> #include <linux/swap.h> #include <linux/ksm.h> #include <asm/tlbflush.h> +#include "internal.h" /* * A few notes about the KSM scanning process, @@ -79,13 +81,13 @@ * struct mm_slot - ksm information per mm that is being scanned * @link: link to the mm_slots hash list * @mm_list: link into the mm_slots list, rooted in ksm_mm_head - * @rmap_list: head for this mm_slot's list of rmap_items + * @rmap_list: head for this mm_slot's singly-linked list of rmap_items * @mm: the mm that this information is valid for */ struct mm_slot { struct hlist_node link; struct list_head mm_list; - struct list_head rmap_list; + struct rmap_item *rmap_list; struct mm_struct *mm; }; @@ -93,7 +95,7 @@ struct mm_slot { * struct ksm_scan - cursor for scanning * @mm_slot: the current mm_slot we are scanning * @address: the next address inside that to be scanned - * @rmap_item: the current rmap that we are scanning inside the rmap_list + * @rmap_list: link to the next rmap to be scanned in the rmap_list * @seqnr: count of completed full scans (needed when removing unstable node) * * There is only the one ksm_scan instance of this cursor structure. 
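[Editor's note: the KSM rework above replaces the doubly-linked rmap_list with a singly-linked list and a "pointer to the next link" cursor (struct rmap_item **), which lets the scanner insert before, or unlink, the next item without a prev pointer. A self-contained userspace sketch of that cursor idiom, loosely modelled on get_next_rmap_item()/remove_trailing_rmap_items(); types and addresses are made up.]

#include <stdio.h>
#include <stdlib.h>

struct item {
	struct item *next;
	unsigned long addr;
};

/* unlink and free everything from *cursor to the end of the list */
static void remove_trailing(struct item **cursor)
{
	while (*cursor) {
		struct item *it = *cursor;
		*cursor = it->next;
		free(it);
	}
}

/* return the item at *cursor if it matches addr, else insert one there */
static struct item *get_or_insert(struct item **cursor, unsigned long addr)
{
	while (*cursor) {
		struct item *it = *cursor;
		if (it->addr == addr)
			return it;
		if (it->addr > addr)
			break;
		/* stale entry below addr: unlink it in place via the cursor */
		*cursor = it->next;
		free(it);
	}
	struct item *it = calloc(1, sizeof(*it));
	it->addr = addr;
	it->next = *cursor;
	*cursor = it;
	return it;
}

int main(void)
{
	struct item *list = NULL;
	struct item **cursor = &list;

	/* scan two addresses in order, advancing the cursor like ksm_scan does */
	struct item *it = get_or_insert(cursor, 0x1000);
	cursor = &it->next;
	it = get_or_insert(cursor, 0x2000);
	cursor = &it->next;

	for (it = list; it; it = it->next)
		printf("item at %#lx\n", it->addr);

	remove_trailing(cursor);	/* nothing past the last scanned address */
	remove_trailing(&list);		/* free the whole list */
	return 0;
}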
@@ -101,37 +103,51 @@ struct mm_slot { struct ksm_scan { struct mm_slot *mm_slot; unsigned long address; - struct rmap_item *rmap_item; + struct rmap_item **rmap_list; unsigned long seqnr; }; /** + * struct stable_node - node of the stable rbtree + * @node: rb node of this ksm page in the stable tree + * @hlist: hlist head of rmap_items using this ksm page + * @kpfn: page frame number of this ksm page + */ +struct stable_node { + struct rb_node node; + struct hlist_head hlist; + unsigned long kpfn; +}; + +/** * struct rmap_item - reverse mapping item for virtual addresses - * @link: link into mm_slot's rmap_list (rmap_list is per mm) + * @rmap_list: next rmap_item in mm_slot's singly-linked rmap_list + * @anon_vma: pointer to anon_vma for this mm,address, when in stable tree * @mm: the memory structure this rmap_item is pointing into * @address: the virtual address this rmap_item tracks (+ flags in low bits) * @oldchecksum: previous checksum of the page at that virtual address - * @node: rb_node of this rmap_item in either unstable or stable tree - * @next: next rmap_item hanging off the same node of the stable tree - * @prev: previous rmap_item hanging off the same node of the stable tree + * @node: rb node of this rmap_item in the unstable tree + * @head: pointer to stable_node heading this list in the stable tree + * @hlist: link into hlist of rmap_items hanging off that stable_node */ struct rmap_item { - struct list_head link; + struct rmap_item *rmap_list; + struct anon_vma *anon_vma; /* when stable */ struct mm_struct *mm; unsigned long address; /* + low bits used for flags below */ + unsigned int oldchecksum; /* when unstable */ union { - unsigned int oldchecksum; /* when unstable */ - struct rmap_item *next; /* when stable */ - }; - union { - struct rb_node node; /* when tree node */ - struct rmap_item *prev; /* in stable list */ + struct rb_node node; /* when node of unstable tree */ + struct { /* when listed from stable tree */ + struct stable_node *head; + struct hlist_node hlist; + }; }; }; #define SEQNR_MASK 0x0ff /* low bits of unstable tree seqnr */ -#define NODE_FLAG 0x100 /* is a node of unstable or stable tree */ -#define STABLE_FLAG 0x200 /* is a node or list item of stable tree */ +#define UNSTABLE_FLAG 0x100 /* is a node of the unstable tree */ +#define STABLE_FLAG 0x200 /* is listed from the stable tree */ /* The stable and unstable tree heads */ static struct rb_root root_stable_tree = RB_ROOT; @@ -148,6 +164,7 @@ static struct ksm_scan ksm_scan = { }; static struct kmem_cache *rmap_item_cache; +static struct kmem_cache *stable_node_cache; static struct kmem_cache *mm_slot_cache; /* The number of nodes in the stable tree */ @@ -162,9 +179,6 @@ static unsigned long ksm_pages_unshared; /* The number of rmap_items in use: to calculate pages_volatile */ static unsigned long ksm_rmap_items; -/* Limit on the number of unswappable pages used */ -static unsigned long ksm_max_kernel_pages; - /* Number of pages ksmd should scan in one batch */ static unsigned int ksm_thread_pages_to_scan = 100; @@ -190,13 +204,19 @@ static int __init ksm_slab_init(void) if (!rmap_item_cache) goto out; + stable_node_cache = KSM_KMEM_CACHE(stable_node, 0); + if (!stable_node_cache) + goto out_free1; + mm_slot_cache = KSM_KMEM_CACHE(mm_slot, 0); if (!mm_slot_cache) - goto out_free; + goto out_free2; return 0; -out_free: +out_free2: + kmem_cache_destroy(stable_node_cache); +out_free1: kmem_cache_destroy(rmap_item_cache); out: return -ENOMEM; @@ -205,6 +225,7 @@ out: static void __init 
ksm_slab_free(void) { kmem_cache_destroy(mm_slot_cache); + kmem_cache_destroy(stable_node_cache); kmem_cache_destroy(rmap_item_cache); mm_slot_cache = NULL; } @@ -226,6 +247,16 @@ static inline void free_rmap_item(struct rmap_item *rmap_item) kmem_cache_free(rmap_item_cache, rmap_item); } +static inline struct stable_node *alloc_stable_node(void) +{ + return kmem_cache_alloc(stable_node_cache, GFP_KERNEL); +} + +static inline void free_stable_node(struct stable_node *stable_node) +{ + kmem_cache_free(stable_node_cache, stable_node); +} + static inline struct mm_slot *alloc_mm_slot(void) { if (!mm_slot_cache) /* initialization failed */ @@ -275,7 +306,6 @@ static void insert_to_mm_slots_hash(struct mm_struct *mm, bucket = &mm_slots_hash[((unsigned long)mm / sizeof(struct mm_struct)) % MM_SLOTS_HASH_HEADS]; mm_slot->mm = mm; - INIT_LIST_HEAD(&mm_slot->rmap_list); hlist_add_head(&mm_slot->link, bucket); } @@ -284,6 +314,25 @@ static inline int in_stable_tree(struct rmap_item *rmap_item) return rmap_item->address & STABLE_FLAG; } +static void hold_anon_vma(struct rmap_item *rmap_item, + struct anon_vma *anon_vma) +{ + rmap_item->anon_vma = anon_vma; + atomic_inc(&anon_vma->ksm_refcount); +} + +static void drop_anon_vma(struct rmap_item *rmap_item) +{ + struct anon_vma *anon_vma = rmap_item->anon_vma; + + if (atomic_dec_and_lock(&anon_vma->ksm_refcount, &anon_vma->lock)) { + int empty = list_empty(&anon_vma->head); + spin_unlock(&anon_vma->lock); + if (empty) + anon_vma_free(anon_vma); + } +} + /* * ksmd, and unmerge_and_remove_all_rmap_items(), must not touch an mm's * page tables after it has passed through ksm_exit() - which, if necessary, @@ -356,10 +405,18 @@ static int break_ksm(struct vm_area_struct *vma, unsigned long addr) return (ret & VM_FAULT_OOM) ? -ENOMEM : 0; } -static void break_cow(struct mm_struct *mm, unsigned long addr) +static void break_cow(struct rmap_item *rmap_item) { + struct mm_struct *mm = rmap_item->mm; + unsigned long addr = rmap_item->address; struct vm_area_struct *vma; + /* + * It is not an accident that whenever we want to break COW + * to undo, we also need to drop a reference to the anon_vma. + */ + drop_anon_vma(rmap_item); + down_read(&mm->mmap_sem); if (ksm_test_exit(mm)) goto out; @@ -403,21 +460,77 @@ out: page = NULL; return page; } +static void remove_node_from_stable_tree(struct stable_node *stable_node) +{ + struct rmap_item *rmap_item; + struct hlist_node *hlist; + + hlist_for_each_entry(rmap_item, hlist, &stable_node->hlist, hlist) { + if (rmap_item->hlist.next) + ksm_pages_sharing--; + else + ksm_pages_shared--; + drop_anon_vma(rmap_item); + rmap_item->address &= PAGE_MASK; + cond_resched(); + } + + rb_erase(&stable_node->node, &root_stable_tree); + free_stable_node(stable_node); +} + /* - * get_ksm_page: checks if the page at the virtual address in rmap_item - * is still PageKsm, in which case we can trust the content of the page, - * and it returns the gotten page; but NULL if the page has been zapped. + * get_ksm_page: checks if the page indicated by the stable node + * is still its ksm page, despite having held no reference to it. + * In which case we can trust the content of the page, and it + * returns the gotten page; but if the page has now been zapped, + * remove the stale node from the stable tree and return NULL. + * + * You would expect the stable_node to hold a reference to the ksm page. 
+ * But if it increments the page's count, swapping out has to wait for + * ksmd to come around again before it can free the page, which may take + * seconds or even minutes: much too unresponsive. So instead we use a + * "keyhole reference": access to the ksm page from the stable node peeps + * out through its keyhole to see if that page still holds the right key, + * pointing back to this stable node. This relies on freeing a PageAnon + * page to reset its page->mapping to NULL, and relies on no other use of + * a page to put something that might look like our key in page->mapping. + * + * include/linux/pagemap.h page_cache_get_speculative() is a good reference, + * but this is different - made simpler by ksm_thread_mutex being held, but + * interesting for assuming that no other use of the struct page could ever + * put our expected_mapping into page->mapping (or a field of the union which + * coincides with page->mapping). The RCU calls are not for KSM at all, but + * to keep the page_count protocol described with page_cache_get_speculative. + * + * Note: it is possible that get_ksm_page() will return NULL one moment, + * then page the next, if the page is in between page_freeze_refs() and + * page_unfreeze_refs(): this shouldn't be a problem anywhere, the page + * is on its way to being freed; but it is an anomaly to bear in mind. */ -static struct page *get_ksm_page(struct rmap_item *rmap_item) +static struct page *get_ksm_page(struct stable_node *stable_node) { struct page *page; - - page = get_mergeable_page(rmap_item); - if (page && !PageKsm(page)) { + void *expected_mapping; + + page = pfn_to_page(stable_node->kpfn); + expected_mapping = (void *)stable_node + + (PAGE_MAPPING_ANON | PAGE_MAPPING_KSM); + rcu_read_lock(); + if (page->mapping != expected_mapping) + goto stale; + if (!get_page_unless_zero(page)) + goto stale; + if (page->mapping != expected_mapping) { put_page(page); - page = NULL; + goto stale; } + rcu_read_unlock(); return page; +stale: + rcu_read_unlock(); + remove_node_from_stable_tree(stable_node); + return NULL; } /* @@ -426,35 +539,29 @@ static struct page *get_ksm_page(struct rmap_item *rmap_item) */ static void remove_rmap_item_from_tree(struct rmap_item *rmap_item) { - if (in_stable_tree(rmap_item)) { - struct rmap_item *next_item = rmap_item->next; - - if (rmap_item->address & NODE_FLAG) { - if (next_item) { - rb_replace_node(&rmap_item->node, - &next_item->node, - &root_stable_tree); - next_item->address |= NODE_FLAG; - ksm_pages_sharing--; - } else { - rb_erase(&rmap_item->node, &root_stable_tree); - ksm_pages_shared--; - } - } else { - struct rmap_item *prev_item = rmap_item->prev; + if (rmap_item->address & STABLE_FLAG) { + struct stable_node *stable_node; + struct page *page; - BUG_ON(prev_item->next != rmap_item); - prev_item->next = next_item; - if (next_item) { - BUG_ON(next_item->prev != rmap_item); - next_item->prev = rmap_item->prev; - } + stable_node = rmap_item->head; + page = get_ksm_page(stable_node); + if (!page) + goto out; + + lock_page(page); + hlist_del(&rmap_item->hlist); + unlock_page(page); + put_page(page); + + if (stable_node->hlist.first) ksm_pages_sharing--; - } + else + ksm_pages_shared--; - rmap_item->next = NULL; + drop_anon_vma(rmap_item); + rmap_item->address &= PAGE_MASK; - } else if (rmap_item->address & NODE_FLAG) { + } else if (rmap_item->address & UNSTABLE_FLAG) { unsigned char age; /* * Usually ksmd can and must skip the rb_erase, because @@ -467,24 +574,21 @@ static void remove_rmap_item_from_tree(struct rmap_item 
*rmap_item) BUG_ON(age > 1); if (!age) rb_erase(&rmap_item->node, &root_unstable_tree); + ksm_pages_unshared--; + rmap_item->address &= PAGE_MASK; } - - rmap_item->address &= PAGE_MASK; - +out: cond_resched(); /* we're called from many long loops */ } static void remove_trailing_rmap_items(struct mm_slot *mm_slot, - struct list_head *cur) + struct rmap_item **rmap_list) { - struct rmap_item *rmap_item; - - while (cur != &mm_slot->rmap_list) { - rmap_item = list_entry(cur, struct rmap_item, link); - cur = cur->next; + while (*rmap_list) { + struct rmap_item *rmap_item = *rmap_list; + *rmap_list = rmap_item->rmap_list; remove_rmap_item_from_tree(rmap_item); - list_del(&rmap_item->link); free_rmap_item(rmap_item); } } @@ -550,7 +654,7 @@ static int unmerge_and_remove_all_rmap_items(void) goto error; } - remove_trailing_rmap_items(mm_slot, mm_slot->rmap_list.next); + remove_trailing_rmap_items(mm_slot, &mm_slot->rmap_list); spin_lock(&ksm_mmlist_lock); ksm_scan.mm_slot = list_entry(mm_slot->mm_list.next, @@ -646,7 +750,7 @@ static int write_protect_page(struct vm_area_struct *vma, struct page *page, * Check that no O_DIRECT or similar I/O is in progress on the * page */ - if ((page_mapcount(page) + 2 + swapped) != page_count(page)) { + if (page_mapcount(page) + 1 + swapped != page_count(page)) { set_pte_at_notify(mm, addr, ptep, entry); goto out_unlock; } @@ -664,15 +768,15 @@ out: /** * replace_page - replace page in vma by new ksm page - * @vma: vma that holds the pte pointing to oldpage - * @oldpage: the page we are replacing by newpage - * @newpage: the ksm page we replace oldpage by + * @vma: vma that holds the pte pointing to page + * @page: the page we are replacing by kpage + * @kpage: the ksm page we replace page by * @orig_pte: the original value of the pte * * Returns 0 on success, -EFAULT on failure. */ -static int replace_page(struct vm_area_struct *vma, struct page *oldpage, - struct page *newpage, pte_t orig_pte) +static int replace_page(struct vm_area_struct *vma, struct page *page, + struct page *kpage, pte_t orig_pte) { struct mm_struct *mm = vma->vm_mm; pgd_t *pgd; @@ -681,12 +785,9 @@ static int replace_page(struct vm_area_struct *vma, struct page *oldpage, pte_t *ptep; spinlock_t *ptl; unsigned long addr; - pgprot_t prot; int err = -EFAULT; - prot = vm_get_page_prot(vma->vm_flags & ~VM_WRITE); - - addr = page_address_in_vma(oldpage, vma); + addr = page_address_in_vma(page, vma); if (addr == -EFAULT) goto out; @@ -708,15 +809,15 @@ static int replace_page(struct vm_area_struct *vma, struct page *oldpage, goto out; } - get_page(newpage); - page_add_ksm_rmap(newpage); + get_page(kpage); + page_add_anon_rmap(kpage, vma, addr); flush_cache_page(vma, addr, pte_pfn(*ptep)); ptep_clear_flush(vma, addr, ptep); - set_pte_at_notify(mm, addr, ptep, mk_pte(newpage, prot)); + set_pte_at_notify(mm, addr, ptep, mk_pte(kpage, vma->vm_page_prot)); - page_remove_rmap(oldpage); - put_page(oldpage); + page_remove_rmap(page); + put_page(page); pte_unmap_unlock(ptep, ptl); err = 0; @@ -726,32 +827,27 @@ out: /* * try_to_merge_one_page - take two pages and merge them into one - * @vma: the vma that hold the pte pointing into oldpage - * @oldpage: the page that we want to replace with newpage - * @newpage: the page that we want to map instead of oldpage - * - * Note: - * oldpage should be a PageAnon page, while newpage should be a PageKsm page, - * or a newly allocated kernel page which page_add_ksm_rmap will make PageKsm. 
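[Editor's note: the get_ksm_page() comment above describes a "keyhole reference": take a reference only if the count is non-zero, then re-check that the object still points back at the expected key, and back off if either test fails. The sketch below models that with C11 atomics in userspace; struct page, the key and the helpers are simplified stand-ins for the kernel's page/refcount machinery.]

#include <stdatomic.h>
#include <stdio.h>

struct page {
	atomic_int count;
	void *mapping;		/* key: points back at the owning stable node */
};

static int get_page_unless_zero(struct page *page)
{
	int old = atomic_load(&page->count);
	while (old != 0)
		if (atomic_compare_exchange_weak(&page->count, &old, old + 1))
			return 1;
	return 0;
}

static struct page *get_ksm_page(struct page *page, void *expected_mapping)
{
	if (page->mapping != expected_mapping)
		return NULL;				/* stale: page was reused */
	if (!get_page_unless_zero(page))
		return NULL;				/* already on its way to free */
	if (page->mapping != expected_mapping) {
		atomic_fetch_sub(&page->count, 1);	/* raced: drop the reference */
		return NULL;
	}
	return page;					/* caller now holds a reference */
}

int main(void)
{
	int stable_node;				/* stand-in key */
	struct page page = { 1, &stable_node };

	if (get_ksm_page(&page, &stable_node))
		printf("got keyhole reference, count=%d\n",
		       atomic_load(&page.count));
	return 0;
}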
+ * @vma: the vma that holds the pte pointing to page + * @page: the PageAnon page that we want to replace with kpage + * @kpage: the PageKsm page that we want to map instead of page, + * or NULL the first time when we want to use page as kpage. * * This function returns 0 if the pages were merged, -EFAULT otherwise. */ static int try_to_merge_one_page(struct vm_area_struct *vma, - struct page *oldpage, - struct page *newpage) + struct page *page, struct page *kpage) { pte_t orig_pte = __pte(0); int err = -EFAULT; + if (page == kpage) /* ksm page forked */ + return 0; + if (!(vma->vm_flags & VM_MERGEABLE)) goto out; - - if (!PageAnon(oldpage)) + if (!PageAnon(page)) goto out; - get_page(newpage); - get_page(oldpage); - /* * We need the page lock to read a stable PageSwapCache in * write_protect_page(). We use trylock_page() instead of @@ -759,26 +855,39 @@ static int try_to_merge_one_page(struct vm_area_struct *vma, * prefer to continue scanning and merging different pages, * then come back to this page when it is unlocked. */ - if (!trylock_page(oldpage)) - goto out_putpage; + if (!trylock_page(page)) + goto out; /* * If this anonymous page is mapped only here, its pte may need * to be write-protected. If it's mapped elsewhere, all of its * ptes are necessarily already write-protected. But in either * case, we need to lock and check page_count is not raised. */ - if (write_protect_page(vma, oldpage, &orig_pte)) { - unlock_page(oldpage); - goto out_putpage; + if (write_protect_page(vma, page, &orig_pte) == 0) { + if (!kpage) { + /* + * While we hold page lock, upgrade page from + * PageAnon+anon_vma to PageKsm+NULL stable_node: + * stable_tree_insert() will update stable_node. + */ + set_page_stable_node(page, NULL); + mark_page_accessed(page); + err = 0; + } else if (pages_identical(page, kpage)) + err = replace_page(vma, page, kpage, orig_pte); } - unlock_page(oldpage); - if (pages_identical(oldpage, newpage)) - err = replace_page(vma, oldpage, newpage, orig_pte); + if ((vma->vm_flags & VM_LOCKED) && kpage && !err) { + munlock_vma_page(page); + if (!PageMlocked(kpage)) { + unlock_page(page); + lock_page(kpage); + mlock_vma_page(kpage); + page = kpage; /* for final unlock */ + } + } -out_putpage: - put_page(oldpage); - put_page(newpage); + unlock_page(page); out: return err; } @@ -786,26 +895,31 @@ out: /* * try_to_merge_with_ksm_page - like try_to_merge_two_pages, * but no new kernel page is allocated: kpage must already be a ksm page. + * + * This function returns 0 if the pages were merged, -EFAULT otherwise. 
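[Editor's note: at its core, the merge step above compares two pages and, only if their contents are identical, points both mappings at one shared page and frees the duplicate. The toy below shows just that compare-then-share idea with malloc'd buffers; real KSM does this at the pte level with write protection and locking, which this does not attempt to model.]

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define PAGE_SIZE 4096

struct mapping { char *page; };		/* stand-in for a pte */

static int try_to_merge(struct mapping *a, struct mapping *b)
{
	if (memcmp(a->page, b->page, PAGE_SIZE) != 0)
		return -1;		/* contents differ: no merge */
	free(b->page);
	b->page = a->page;		/* both now share one page */
	return 0;
}

int main(void)
{
	struct mapping a = { calloc(1, PAGE_SIZE) };
	struct mapping b = { calloc(1, PAGE_SIZE) };
	memset(a.page, 0xaa, PAGE_SIZE);
	memset(b.page, 0xaa, PAGE_SIZE);

	if (try_to_merge(&a, &b) == 0)
		printf("merged: mappings now share one page\n");

	free(a.page);
	return 0;
}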
*/ -static int try_to_merge_with_ksm_page(struct mm_struct *mm1, - unsigned long addr1, - struct page *page1, - struct page *kpage) +static int try_to_merge_with_ksm_page(struct rmap_item *rmap_item, + struct page *page, struct page *kpage) { + struct mm_struct *mm = rmap_item->mm; struct vm_area_struct *vma; int err = -EFAULT; - down_read(&mm1->mmap_sem); - if (ksm_test_exit(mm1)) + down_read(&mm->mmap_sem); + if (ksm_test_exit(mm)) + goto out; + vma = find_vma(mm, rmap_item->address); + if (!vma || vma->vm_start > rmap_item->address) goto out; - vma = find_vma(mm1, addr1); - if (!vma || vma->vm_start > addr1) + err = try_to_merge_one_page(vma, page, kpage); + if (err) goto out; - err = try_to_merge_one_page(vma, page1, kpage); + /* Must get reference to anon_vma while still holding mmap_sem */ + hold_anon_vma(rmap_item, vma->anon_vma); out: - up_read(&mm1->mmap_sem); + up_read(&mm->mmap_sem); return err; } @@ -813,109 +927,73 @@ out: * try_to_merge_two_pages - take two identical pages and prepare them * to be merged into one page. * - * This function returns 0 if we successfully mapped two identical pages - * into one page, -EFAULT otherwise. + * This function returns the kpage if we successfully merged two identical + * pages into one ksm page, NULL otherwise. * - * Note that this function allocates a new kernel page: if one of the pages + * Note that this function upgrades page to ksm page: if one of the pages * is already a ksm page, try_to_merge_with_ksm_page should be used. */ -static int try_to_merge_two_pages(struct mm_struct *mm1, unsigned long addr1, - struct page *page1, struct mm_struct *mm2, - unsigned long addr2, struct page *page2) +static struct page *try_to_merge_two_pages(struct rmap_item *rmap_item, + struct page *page, + struct rmap_item *tree_rmap_item, + struct page *tree_page) { - struct vm_area_struct *vma; - struct page *kpage; - int err = -EFAULT; - - /* - * The number of nodes in the stable tree - * is the number of kernel pages that we hold. - */ - if (ksm_max_kernel_pages && - ksm_max_kernel_pages <= ksm_pages_shared) - return err; - - kpage = alloc_page(GFP_HIGHUSER); - if (!kpage) - return err; - - down_read(&mm1->mmap_sem); - if (ksm_test_exit(mm1)) { - up_read(&mm1->mmap_sem); - goto out; - } - vma = find_vma(mm1, addr1); - if (!vma || vma->vm_start > addr1) { - up_read(&mm1->mmap_sem); - goto out; - } - - copy_user_highpage(kpage, page1, addr1, vma); - err = try_to_merge_one_page(vma, page1, kpage); - up_read(&mm1->mmap_sem); + int err; + err = try_to_merge_with_ksm_page(rmap_item, page, NULL); if (!err) { - err = try_to_merge_with_ksm_page(mm2, addr2, page2, kpage); + err = try_to_merge_with_ksm_page(tree_rmap_item, + tree_page, page); /* * If that fails, we have a ksm page with only one pte * pointing to it: so break it. */ if (err) - break_cow(mm1, addr1); + break_cow(rmap_item); } -out: - put_page(kpage); - return err; + return err ? NULL : page; } /* - * stable_tree_search - search page inside the stable tree - * @page: the page that we are searching identical pages to. - * @page2: pointer into identical page that we are holding inside the stable - * tree that we have found. - * @rmap_item: the reverse mapping item + * stable_tree_search - search for page inside the stable tree * * This function checks if there is a page inside the stable tree * with identical content to the page that we are scanning right now. 
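[Editor's note: stable_tree_search() above walks a tree ordered by memcmp() of page contents, descending left or right on each comparison. The sketch below shows that content-keyed lookup with a plain binary tree and tiny "pages"; the rbtree, page references and locking of the real code are omitted.]

#include <stdio.h>
#include <string.h>

#define PAGE_SIZE 8			/* tiny "pages" for the demo */

struct node {
	char content[PAGE_SIZE];
	struct node *left, *right;
};

static struct node *tree_search(struct node *root, const char *page)
{
	while (root) {
		int ret = memcmp(page, root->content, PAGE_SIZE);
		if (ret < 0)
			root = root->left;
		else if (ret > 0)
			root = root->right;
		else
			return root;	/* identical content found */
	}
	return NULL;
}

int main(void)
{
	struct node a = { "aaaaaaa", NULL, NULL };
	struct node c = { "ccccccc", NULL, NULL };
	struct node b = { "bbbbbbb", &a, &c };	/* root */

	printf("found: %s\n", tree_search(&b, "ccccccc") ? "yes" : "no");
	return 0;
}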
* - * This function return rmap_item pointer to the identical item if found, + * This function returns the stable tree node of identical content if found, * NULL otherwise. */ -static struct rmap_item *stable_tree_search(struct page *page, - struct page **page2, - struct rmap_item *rmap_item) +static struct page *stable_tree_search(struct page *page) { struct rb_node *node = root_stable_tree.rb_node; + struct stable_node *stable_node; + + stable_node = page_stable_node(page); + if (stable_node) { /* ksm page forked */ + get_page(page); + return page; + } while (node) { - struct rmap_item *tree_rmap_item, *next_rmap_item; + struct page *tree_page; int ret; - tree_rmap_item = rb_entry(node, struct rmap_item, node); - while (tree_rmap_item) { - BUG_ON(!in_stable_tree(tree_rmap_item)); - cond_resched(); - page2[0] = get_ksm_page(tree_rmap_item); - if (page2[0]) - break; - next_rmap_item = tree_rmap_item->next; - remove_rmap_item_from_tree(tree_rmap_item); - tree_rmap_item = next_rmap_item; - } - if (!tree_rmap_item) + cond_resched(); + stable_node = rb_entry(node, struct stable_node, node); + tree_page = get_ksm_page(stable_node); + if (!tree_page) return NULL; - ret = memcmp_pages(page, page2[0]); + ret = memcmp_pages(page, tree_page); if (ret < 0) { - put_page(page2[0]); + put_page(tree_page); node = node->rb_left; } else if (ret > 0) { - put_page(page2[0]); + put_page(tree_page); node = node->rb_right; - } else { - return tree_rmap_item; - } + } else + return tree_page; } return NULL; @@ -925,38 +1003,26 @@ static struct rmap_item *stable_tree_search(struct page *page, * stable_tree_insert - insert rmap_item pointing to new ksm page * into the stable tree. * - * @page: the page that we are searching identical page to inside the stable - * tree. - * @rmap_item: pointer to the reverse mapping item. - * - * This function returns rmap_item if success, NULL otherwise. + * This function returns the stable tree node just allocated on success, + * NULL otherwise. 
*/ -static struct rmap_item *stable_tree_insert(struct page *page, - struct rmap_item *rmap_item) +static struct stable_node *stable_tree_insert(struct page *kpage) { struct rb_node **new = &root_stable_tree.rb_node; struct rb_node *parent = NULL; + struct stable_node *stable_node; while (*new) { - struct rmap_item *tree_rmap_item, *next_rmap_item; struct page *tree_page; int ret; - tree_rmap_item = rb_entry(*new, struct rmap_item, node); - while (tree_rmap_item) { - BUG_ON(!in_stable_tree(tree_rmap_item)); - cond_resched(); - tree_page = get_ksm_page(tree_rmap_item); - if (tree_page) - break; - next_rmap_item = tree_rmap_item->next; - remove_rmap_item_from_tree(tree_rmap_item); - tree_rmap_item = next_rmap_item; - } - if (!tree_rmap_item) + cond_resched(); + stable_node = rb_entry(*new, struct stable_node, node); + tree_page = get_ksm_page(stable_node); + if (!tree_page) return NULL; - ret = memcmp_pages(page, tree_page); + ret = memcmp_pages(kpage, tree_page); put_page(tree_page); parent = *new; @@ -974,22 +1040,24 @@ static struct rmap_item *stable_tree_insert(struct page *page, } } - rmap_item->address |= NODE_FLAG | STABLE_FLAG; - rmap_item->next = NULL; - rb_link_node(&rmap_item->node, parent, new); - rb_insert_color(&rmap_item->node, &root_stable_tree); + stable_node = alloc_stable_node(); + if (!stable_node) + return NULL; - ksm_pages_shared++; - return rmap_item; + rb_link_node(&stable_node->node, parent, new); + rb_insert_color(&stable_node->node, &root_stable_tree); + + INIT_HLIST_HEAD(&stable_node->hlist); + + stable_node->kpfn = page_to_pfn(kpage); + set_page_stable_node(kpage, stable_node); + + return stable_node; } /* - * unstable_tree_search_insert - search and insert items into the unstable tree. - * - * @page: the page that we are going to search for identical page or to insert - * into the unstable tree - * @page2: pointer into identical page that was found inside the unstable tree - * @rmap_item: the reverse mapping item of page + * unstable_tree_search_insert - search for identical page, + * else insert rmap_item into the unstable tree. * * This function searches for a page in the unstable tree identical to the * page currently being scanned; and if no identical page is found in the @@ -1001,47 +1069,50 @@ static struct rmap_item *stable_tree_insert(struct page *page, * This function does both searching and inserting, because they share * the same walking algorithm in an rbtree. */ -static struct rmap_item *unstable_tree_search_insert(struct page *page, - struct page **page2, - struct rmap_item *rmap_item) +static +struct rmap_item *unstable_tree_search_insert(struct rmap_item *rmap_item, + struct page *page, + struct page **tree_pagep) + { struct rb_node **new = &root_unstable_tree.rb_node; struct rb_node *parent = NULL; while (*new) { struct rmap_item *tree_rmap_item; + struct page *tree_page; int ret; cond_resched(); tree_rmap_item = rb_entry(*new, struct rmap_item, node); - page2[0] = get_mergeable_page(tree_rmap_item); - if (!page2[0]) + tree_page = get_mergeable_page(tree_rmap_item); + if (!tree_page) return NULL; /* - * Don't substitute an unswappable ksm page - * just for one good swappable forked page. + * Don't substitute a ksm page for a forked page. 
*/ - if (page == page2[0]) { - put_page(page2[0]); + if (page == tree_page) { + put_page(tree_page); return NULL; } - ret = memcmp_pages(page, page2[0]); + ret = memcmp_pages(page, tree_page); parent = *new; if (ret < 0) { - put_page(page2[0]); + put_page(tree_page); new = &parent->rb_left; } else if (ret > 0) { - put_page(page2[0]); + put_page(tree_page); new = &parent->rb_right; } else { + *tree_pagep = tree_page; return tree_rmap_item; } } - rmap_item->address |= NODE_FLAG; + rmap_item->address |= UNSTABLE_FLAG; rmap_item->address |= (ksm_scan.seqnr & SEQNR_MASK); rb_link_node(&rmap_item->node, parent, new); rb_insert_color(&rmap_item->node, &root_unstable_tree); @@ -1056,18 +1127,16 @@ static struct rmap_item *unstable_tree_search_insert(struct page *page, * the same ksm page. */ static void stable_tree_append(struct rmap_item *rmap_item, - struct rmap_item *tree_rmap_item) + struct stable_node *stable_node) { - rmap_item->next = tree_rmap_item->next; - rmap_item->prev = tree_rmap_item; - - if (tree_rmap_item->next) - tree_rmap_item->next->prev = rmap_item; - - tree_rmap_item->next = rmap_item; + rmap_item->head = stable_node; rmap_item->address |= STABLE_FLAG; + hlist_add_head(&rmap_item->hlist, &stable_node->hlist); - ksm_pages_sharing++; + if (rmap_item->hlist.next) + ksm_pages_sharing++; + else + ksm_pages_shared++; } /* @@ -1081,49 +1150,37 @@ static void stable_tree_append(struct rmap_item *rmap_item, */ static void cmp_and_merge_page(struct page *page, struct rmap_item *rmap_item) { - struct page *page2[1]; struct rmap_item *tree_rmap_item; + struct page *tree_page = NULL; + struct stable_node *stable_node; + struct page *kpage; unsigned int checksum; int err; - if (in_stable_tree(rmap_item)) - remove_rmap_item_from_tree(rmap_item); + remove_rmap_item_from_tree(rmap_item); /* We first start with searching the page inside the stable tree */ - tree_rmap_item = stable_tree_search(page, page2, rmap_item); - if (tree_rmap_item) { - if (page == page2[0]) /* forked */ - err = 0; - else - err = try_to_merge_with_ksm_page(rmap_item->mm, - rmap_item->address, - page, page2[0]); - put_page(page2[0]); - + kpage = stable_tree_search(page); + if (kpage) { + err = try_to_merge_with_ksm_page(rmap_item, page, kpage); if (!err) { /* * The page was successfully merged: * add its rmap_item to the stable tree. */ - stable_tree_append(rmap_item, tree_rmap_item); + lock_page(kpage); + stable_tree_append(rmap_item, page_stable_node(kpage)); + unlock_page(kpage); } + put_page(kpage); return; } /* - * A ksm page might have got here by fork, but its other - * references have already been removed from the stable tree. - * Or it might be left over from a break_ksm which failed - * when the mem_cgroup had reached its limit: try again now. - */ - if (PageKsm(page)) - break_cow(rmap_item->mm, rmap_item->address); - - /* - * In case the hash value of the page was changed from the last time we - * have calculated it, this page to be changed frequely, therefore we - * don't want to insert it to the unstable tree, and we don't want to - * waste our time to search if there is something identical to it there. + * If the hash value of the page has changed from the last time + * we calculated it, this page is changing frequently: therefore we + * don't want to insert it in the unstable tree, and we don't want + * to waste our time searching for something identical to it there. 
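[Editor's note: the checksum test above only admits a page to the unstable tree once its checksum has been seen unchanged across two scans, so frequently written pages are skipped cheaply. A small userspace model of that gate; the hash is a trivial stand-in for the kernel's calc_checksum().]

#include <stdio.h>

#define PAGE_SIZE 16

static unsigned int calc_checksum(const unsigned char *page)
{
	unsigned int sum = 0;
	for (int i = 0; i < PAGE_SIZE; i++)
		sum = sum * 31 + page[i];
	return sum;
}

struct rmap_item { unsigned int oldchecksum; };

/* returns 1 if the page is stable enough to try merging this scan */
static int page_worth_merging(struct rmap_item *item, const unsigned char *page)
{
	unsigned int checksum = calc_checksum(page);
	if (item->oldchecksum != checksum) {
		item->oldchecksum = checksum;	/* remember it, retry next scan */
		return 0;
	}
	return 1;
}

int main(void)
{
	unsigned char page[PAGE_SIZE] = "hello";
	struct rmap_item item = { 0 };

	printf("scan 1: %s\n", page_worth_merging(&item, page) ? "merge" : "skip");
	printf("scan 2: %s\n", page_worth_merging(&item, page) ? "merge" : "skip");
	return 0;
}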
*/ checksum = calc_checksum(page); if (rmap_item->oldchecksum != checksum) { @@ -1131,21 +1188,27 @@ static void cmp_and_merge_page(struct page *page, struct rmap_item *rmap_item) return; } - tree_rmap_item = unstable_tree_search_insert(page, page2, rmap_item); + tree_rmap_item = + unstable_tree_search_insert(rmap_item, page, &tree_page); if (tree_rmap_item) { - err = try_to_merge_two_pages(rmap_item->mm, - rmap_item->address, page, - tree_rmap_item->mm, - tree_rmap_item->address, page2[0]); + kpage = try_to_merge_two_pages(rmap_item, page, + tree_rmap_item, tree_page); + put_page(tree_page); /* * As soon as we merge this page, we want to remove the * rmap_item of the page we have merged with from the unstable * tree, and insert it instead as new node in the stable tree. */ - if (!err) { - rb_erase(&tree_rmap_item->node, &root_unstable_tree); - tree_rmap_item->address &= ~NODE_FLAG; - ksm_pages_unshared--; + if (kpage) { + remove_rmap_item_from_tree(tree_rmap_item); + + lock_page(kpage); + stable_node = stable_tree_insert(kpage); + if (stable_node) { + stable_tree_append(tree_rmap_item, stable_node); + stable_tree_append(rmap_item, stable_node); + } + unlock_page(kpage); /* * If we fail to insert the page into the stable tree, @@ -1153,37 +1216,28 @@ static void cmp_and_merge_page(struct page *page, struct rmap_item *rmap_item) * to a ksm page left outside the stable tree, * in which case we need to break_cow on both. */ - if (stable_tree_insert(page2[0], tree_rmap_item)) - stable_tree_append(rmap_item, tree_rmap_item); - else { - break_cow(tree_rmap_item->mm, - tree_rmap_item->address); - break_cow(rmap_item->mm, rmap_item->address); + if (!stable_node) { + break_cow(tree_rmap_item); + break_cow(rmap_item); } } - - put_page(page2[0]); } } static struct rmap_item *get_next_rmap_item(struct mm_slot *mm_slot, - struct list_head *cur, + struct rmap_item **rmap_list, unsigned long addr) { struct rmap_item *rmap_item; - while (cur != &mm_slot->rmap_list) { - rmap_item = list_entry(cur, struct rmap_item, link); - if ((rmap_item->address & PAGE_MASK) == addr) { - if (!in_stable_tree(rmap_item)) - remove_rmap_item_from_tree(rmap_item); + while (*rmap_list) { + rmap_item = *rmap_list; + if ((rmap_item->address & PAGE_MASK) == addr) return rmap_item; - } if (rmap_item->address > addr) break; - cur = cur->next; + *rmap_list = rmap_item->rmap_list; remove_rmap_item_from_tree(rmap_item); - list_del(&rmap_item->link); free_rmap_item(rmap_item); } @@ -1192,7 +1246,8 @@ static struct rmap_item *get_next_rmap_item(struct mm_slot *mm_slot, /* It has already been zeroed */ rmap_item->mm = mm_slot->mm; rmap_item->address = addr; - list_add_tail(&rmap_item->link, cur); + rmap_item->rmap_list = *rmap_list; + *rmap_list = rmap_item; } return rmap_item; } @@ -1217,8 +1272,7 @@ static struct rmap_item *scan_get_next_rmap_item(struct page **page) spin_unlock(&ksm_mmlist_lock); next_mm: ksm_scan.address = 0; - ksm_scan.rmap_item = list_entry(&slot->rmap_list, - struct rmap_item, link); + ksm_scan.rmap_list = &slot->rmap_list; } mm = slot->mm; @@ -1244,10 +1298,10 @@ next_mm: flush_anon_page(vma, *page, ksm_scan.address); flush_dcache_page(*page); rmap_item = get_next_rmap_item(slot, - ksm_scan.rmap_item->link.next, - ksm_scan.address); + ksm_scan.rmap_list, ksm_scan.address); if (rmap_item) { - ksm_scan.rmap_item = rmap_item; + ksm_scan.rmap_list = + &rmap_item->rmap_list; ksm_scan.address += PAGE_SIZE; } else put_page(*page); @@ -1263,14 +1317,13 @@ next_mm: if (ksm_test_exit(mm)) { ksm_scan.address = 0; - 
ksm_scan.rmap_item = list_entry(&slot->rmap_list, - struct rmap_item, link); + ksm_scan.rmap_list = &slot->rmap_list; } /* * Nuke all the rmap_items that are above this current rmap: * because there were no VM_MERGEABLE vmas with such addresses. */ - remove_trailing_rmap_items(slot, ksm_scan.rmap_item->link.next); + remove_trailing_rmap_items(slot, ksm_scan.rmap_list); spin_lock(&ksm_mmlist_lock); ksm_scan.mm_slot = list_entry(slot->mm_list.next, @@ -1323,14 +1376,6 @@ static void ksm_do_scan(unsigned int scan_npages) return; if (!PageKsm(page) || !in_stable_tree(rmap_item)) cmp_and_merge_page(page, rmap_item); - else if (page_mapcount(page) == 1) { - /* - * Replace now-unshared ksm page by ordinary page. - */ - break_cow(rmap_item->mm, rmap_item->address); - remove_rmap_item_from_tree(rmap_item); - rmap_item->oldchecksum = calc_checksum(page); - } put_page(page); } } @@ -1375,7 +1420,7 @@ int ksm_madvise(struct vm_area_struct *vma, unsigned long start, if (*vm_flags & (VM_MERGEABLE | VM_SHARED | VM_MAYSHARE | VM_PFNMAP | VM_IO | VM_DONTEXPAND | VM_RESERVED | VM_HUGETLB | VM_INSERTPAGE | - VM_MIXEDMAP | VM_SAO)) + VM_NONLINEAR | VM_MIXEDMAP | VM_SAO)) return 0; /* just ignore the advice */ if (!test_bit(MMF_VM_MERGEABLE, &mm->flags)) { @@ -1452,7 +1497,7 @@ void __ksm_exit(struct mm_struct *mm) spin_lock(&ksm_mmlist_lock); mm_slot = get_mm_slot(mm); if (mm_slot && ksm_scan.mm_slot != mm_slot) { - if (list_empty(&mm_slot->rmap_list)) { + if (!mm_slot->rmap_list) { hlist_del(&mm_slot->link); list_del(&mm_slot->mm_list); easy_to_free = 1; @@ -1473,6 +1518,249 @@ void __ksm_exit(struct mm_struct *mm) } } +struct page *ksm_does_need_to_copy(struct page *page, + struct vm_area_struct *vma, unsigned long address) +{ + struct page *new_page; + + unlock_page(page); /* any racers will COW it, not modify it */ + + new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address); + if (new_page) { + copy_user_highpage(new_page, page, address, vma); + + SetPageDirty(new_page); + __SetPageUptodate(new_page); + SetPageSwapBacked(new_page); + __set_page_locked(new_page); + + if (page_evictable(new_page, vma)) + lru_cache_add_lru(new_page, LRU_ACTIVE_ANON); + else + add_page_to_unevictable_list(new_page); + } + + page_cache_release(page); + return new_page; +} + +int page_referenced_ksm(struct page *page, struct mem_cgroup *memcg, + unsigned long *vm_flags) +{ + struct stable_node *stable_node; + struct rmap_item *rmap_item; + struct hlist_node *hlist; + unsigned int mapcount = page_mapcount(page); + int referenced = 0; + int search_new_forks = 0; + + VM_BUG_ON(!PageKsm(page)); + VM_BUG_ON(!PageLocked(page)); + + stable_node = page_stable_node(page); + if (!stable_node) + return 0; +again: + hlist_for_each_entry(rmap_item, hlist, &stable_node->hlist, hlist) { + struct anon_vma *anon_vma = rmap_item->anon_vma; + struct vm_area_struct *vma; + + spin_lock(&anon_vma->lock); + list_for_each_entry(vma, &anon_vma->head, anon_vma_node) { + if (rmap_item->address < vma->vm_start || + rmap_item->address >= vma->vm_end) + continue; + /* + * Initially we examine only the vma which covers this + * rmap_item; but later, if there is still work to do, + * we examine covering vmas in other mms: in case they + * were forked from the original since ksmd passed. 
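[Editor's note: page_referenced_ksm() above (and try_to_unmap_ksm()/rmap_walk_ksm() below) walk the stable node's list twice: pass 0 handles the vma each rmap_item was created in, pass 1 revisits the list for vmas in other mms, i.e. forks made since ksmd last passed. The sketch below isolates that "!search_new_forks++ goto again" idiom; the item array is a made-up stand-in for the hlist/anon_vma walk.]

#include <stdio.h>

#define NITEMS 3

struct item { int own_mm; };	/* 1 = vma belongs to the item's own mm */

int main(void)
{
	struct item items[NITEMS] = { {1}, {0}, {1} };
	int search_new_forks = 0;

again:
	for (int i = 0; i < NITEMS; i++) {
		/*
		 * Pass 0 handles items whose vma is in their own mm,
		 * pass 1 handles everything that was skipped the first time.
		 */
		if (items[i].own_mm == search_new_forks)
			continue;
		printf("pass %d: processing item %d\n", search_new_forks, i);
	}
	if (!search_new_forks++)
		goto again;

	return 0;
}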
+ */ + if ((rmap_item->mm == vma->vm_mm) == search_new_forks) + continue; + + if (memcg && !mm_match_cgroup(vma->vm_mm, memcg)) + continue; + + referenced += page_referenced_one(page, vma, + rmap_item->address, &mapcount, vm_flags); + if (!search_new_forks || !mapcount) + break; + } + spin_unlock(&anon_vma->lock); + if (!mapcount) + goto out; + } + if (!search_new_forks++) + goto again; +out: + return referenced; +} + +int try_to_unmap_ksm(struct page *page, enum ttu_flags flags) +{ + struct stable_node *stable_node; + struct hlist_node *hlist; + struct rmap_item *rmap_item; + int ret = SWAP_AGAIN; + int search_new_forks = 0; + + VM_BUG_ON(!PageKsm(page)); + VM_BUG_ON(!PageLocked(page)); + + stable_node = page_stable_node(page); + if (!stable_node) + return SWAP_FAIL; +again: + hlist_for_each_entry(rmap_item, hlist, &stable_node->hlist, hlist) { + struct anon_vma *anon_vma = rmap_item->anon_vma; + struct vm_area_struct *vma; + + spin_lock(&anon_vma->lock); + list_for_each_entry(vma, &anon_vma->head, anon_vma_node) { + if (rmap_item->address < vma->vm_start || + rmap_item->address >= vma->vm_end) + continue; + /* + * Initially we examine only the vma which covers this + * rmap_item; but later, if there is still work to do, + * we examine covering vmas in other mms: in case they + * were forked from the original since ksmd passed. + */ + if ((rmap_item->mm == vma->vm_mm) == search_new_forks) + continue; + + ret = try_to_unmap_one(page, vma, + rmap_item->address, flags); + if (ret != SWAP_AGAIN || !page_mapped(page)) { + spin_unlock(&anon_vma->lock); + goto out; + } + } + spin_unlock(&anon_vma->lock); + } + if (!search_new_forks++) + goto again; +out: + return ret; +} + +#ifdef CONFIG_MIGRATION +int rmap_walk_ksm(struct page *page, int (*rmap_one)(struct page *, + struct vm_area_struct *, unsigned long, void *), void *arg) +{ + struct stable_node *stable_node; + struct hlist_node *hlist; + struct rmap_item *rmap_item; + int ret = SWAP_AGAIN; + int search_new_forks = 0; + + VM_BUG_ON(!PageKsm(page)); + VM_BUG_ON(!PageLocked(page)); + + stable_node = page_stable_node(page); + if (!stable_node) + return ret; +again: + hlist_for_each_entry(rmap_item, hlist, &stable_node->hlist, hlist) { + struct anon_vma *anon_vma = rmap_item->anon_vma; + struct vm_area_struct *vma; + + spin_lock(&anon_vma->lock); + list_for_each_entry(vma, &anon_vma->head, anon_vma_node) { + if (rmap_item->address < vma->vm_start || + rmap_item->address >= vma->vm_end) + continue; + /* + * Initially we examine only the vma which covers this + * rmap_item; but later, if there is still work to do, + * we examine covering vmas in other mms: in case they + * were forked from the original since ksmd passed. 
+ */ + if ((rmap_item->mm == vma->vm_mm) == search_new_forks) + continue; + + ret = rmap_one(page, vma, rmap_item->address, arg); + if (ret != SWAP_AGAIN) { + spin_unlock(&anon_vma->lock); + goto out; + } + } + spin_unlock(&anon_vma->lock); + } + if (!search_new_forks++) + goto again; +out: + return ret; +} + +void ksm_migrate_page(struct page *newpage, struct page *oldpage) +{ + struct stable_node *stable_node; + + VM_BUG_ON(!PageLocked(oldpage)); + VM_BUG_ON(!PageLocked(newpage)); + VM_BUG_ON(newpage->mapping != oldpage->mapping); + + stable_node = page_stable_node(newpage); + if (stable_node) { + VM_BUG_ON(stable_node->kpfn != page_to_pfn(oldpage)); + stable_node->kpfn = page_to_pfn(newpage); + } +} +#endif /* CONFIG_MIGRATION */ + +#ifdef CONFIG_MEMORY_HOTREMOVE +static struct stable_node *ksm_check_stable_tree(unsigned long start_pfn, + unsigned long end_pfn) +{ + struct rb_node *node; + + for (node = rb_first(&root_stable_tree); node; node = rb_next(node)) { + struct stable_node *stable_node; + + stable_node = rb_entry(node, struct stable_node, node); + if (stable_node->kpfn >= start_pfn && + stable_node->kpfn < end_pfn) + return stable_node; + } + return NULL; +} + +static int ksm_memory_callback(struct notifier_block *self, + unsigned long action, void *arg) +{ + struct memory_notify *mn = arg; + struct stable_node *stable_node; + + switch (action) { + case MEM_GOING_OFFLINE: + /* + * Keep it very simple for now: just lock out ksmd and + * MADV_UNMERGEABLE while any memory is going offline. + */ + mutex_lock(&ksm_thread_mutex); + break; + + case MEM_OFFLINE: + /* + * Most of the work is done by page migration; but there might + * be a few stable_nodes left over, still pointing to struct + * pages which have been offlined: prune those from the tree. + */ + while ((stable_node = ksm_check_stable_tree(mn->start_pfn, + mn->start_pfn + mn->nr_pages)) != NULL) + remove_node_from_stable_tree(stable_node); + /* fallthrough */ + + case MEM_CANCEL_OFFLINE: + mutex_unlock(&ksm_thread_mutex); + break; + } + return NOTIFY_OK; +} +#endif /* CONFIG_MEMORY_HOTREMOVE */ + #ifdef CONFIG_SYSFS /* * This all compiles without CONFIG_SYSFS, but is a waste of space. @@ -1551,8 +1839,8 @@ static ssize_t run_store(struct kobject *kobj, struct kobj_attribute *attr, /* * KSM_RUN_MERGE sets ksmd running, and 0 stops it running. * KSM_RUN_UNMERGE stops it running and unmerges all rmap_items, - * breaking COW to free the unswappable pages_shared (but leaves - * mm_slots on the list for when ksmd may be set running again). + * breaking COW to free the pages_shared (but leaves mm_slots + * on the list for when ksmd may be set running again). 
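The memory hot-remove hook added above keeps things coarse: MEM_GOING_OFFLINE only takes ksm_thread_mutex, and MEM_OFFLINE repeatedly searches the stable tree for any node whose kpfn falls inside the offlined range and removes it, until none remain, before the mutex is dropped again. A rough userspace sketch of that prune loop, using a flat array where the kernel walks an rbtree (names and types here are invented):

/* Userspace model of pruning stable-tree entries in an offlined pfn range. */
#include <stdio.h>

struct node { unsigned long kpfn; int live; };

/* Return the first live node whose kpfn falls in [start_pfn, end_pfn). */
static struct node *check_range(struct node *nodes, int n,
                                unsigned long start_pfn, unsigned long end_pfn)
{
    for (int i = 0; i < n; i++)
        if (nodes[i].live &&
            nodes[i].kpfn >= start_pfn && nodes[i].kpfn < end_pfn)
            return &nodes[i];
    return NULL;
}

int main(void)
{
    struct node nodes[] = { {100, 1}, {250, 1}, {300, 1} };
    struct node *victim;

    /* MEM_OFFLINE: keep pruning until nothing points into the range. */
    while ((victim = check_range(nodes, 3, 200, 280)) != NULL) {
        printf("pruning node with kpfn %lu\n", victim->kpfn);
        victim->live = 0;   /* stands in for remove_node_from_stable_tree() */
    }
    return 0;
}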
*/ mutex_lock(&ksm_thread_mutex); @@ -1577,29 +1865,6 @@ static ssize_t run_store(struct kobject *kobj, struct kobj_attribute *attr, } KSM_ATTR(run); -static ssize_t max_kernel_pages_store(struct kobject *kobj, - struct kobj_attribute *attr, - const char *buf, size_t count) -{ - int err; - unsigned long nr_pages; - - err = strict_strtoul(buf, 10, &nr_pages); - if (err) - return -EINVAL; - - ksm_max_kernel_pages = nr_pages; - - return count; -} - -static ssize_t max_kernel_pages_show(struct kobject *kobj, - struct kobj_attribute *attr, char *buf) -{ - return sprintf(buf, "%lu\n", ksm_max_kernel_pages); -} -KSM_ATTR(max_kernel_pages); - static ssize_t pages_shared_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { @@ -1649,7 +1914,6 @@ static struct attribute *ksm_attrs[] = { &sleep_millisecs_attr.attr, &pages_to_scan_attr.attr, &run_attr.attr, - &max_kernel_pages_attr.attr, &pages_shared_attr.attr, &pages_sharing_attr.attr, &pages_unshared_attr.attr, @@ -1669,8 +1933,6 @@ static int __init ksm_init(void) struct task_struct *ksm_thread; int err; - ksm_max_kernel_pages = totalram_pages / 4; - err = ksm_slab_init(); if (err) goto out; @@ -1698,6 +1960,13 @@ static int __init ksm_init(void) #endif /* CONFIG_SYSFS */ +#ifdef CONFIG_MEMORY_HOTREMOVE + /* + * Choose a high priority since the callback takes ksm_thread_mutex: + * later callbacks could only be taking locks which nest within that. + */ + hotplug_memory_notifier(ksm_memory_callback, 100); +#endif return 0; out_free2: diff --git a/mm/memcontrol.c b/mm/memcontrol.c index c31a310aa14..e0c2066495e 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -1737,11 +1737,12 @@ int mem_cgroup_try_charge_swapin(struct mm_struct *mm, goto charge_cur_mm; /* * A racing thread's fault, or swapoff, may have already updated - * the pte, and even removed page from swap cache: return success - * to go on to do_swap_page()'s pte_same() test, which should fail. + * the pte, and even removed page from swap cache: in those cases + * do_swap_page()'s pte_same() test will fail; but there's also a + * KSM case which does need to charge the page. */ if (!PageSwapCache(page)) - return 0; + goto charge_cur_mm; mem = try_get_mem_cgroup_from_swapcache(page); if (!mem) goto charge_cur_mm; diff --git a/mm/memory-failure.c b/mm/memory-failure.c index 1ac49fef95a..50d4f8d7024 100644 --- a/mm/memory-failure.c +++ b/mm/memory-failure.c @@ -582,10 +582,8 @@ static struct page_state { { unevict|dirty, unevict|dirty, "unevictable LRU", me_pagecache_dirty}, { unevict, unevict, "unevictable LRU", me_pagecache_clean}, -#ifdef CONFIG_HAVE_MLOCKED_PAGE_BIT { mlock|dirty, mlock|dirty, "mlocked LRU", me_pagecache_dirty }, { mlock, mlock, "mlocked LRU", me_pagecache_clean }, -#endif { lru|dirty, lru|dirty, "LRU", me_pagecache_dirty }, { lru|dirty, lru, "clean LRU", me_pagecache_clean }, diff --git a/mm/memory.c b/mm/memory.c index 6ab19dd4a19..a54b2c49844 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -572,7 +572,7 @@ out: * covered by this vma. */ -static inline void +static inline unsigned long copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm, pte_t *dst_pte, pte_t *src_pte, struct vm_area_struct *vma, unsigned long addr, int *rss) @@ -586,7 +586,9 @@ copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm, if (!pte_file(pte)) { swp_entry_t entry = pte_to_swp_entry(pte); - swap_duplicate(entry); + if (swap_duplicate(entry) < 0) + return entry.val; + /* make sure dst_mm is on swapoff's mmlist. 
*/ if (unlikely(list_empty(&dst_mm->mmlist))) { spin_lock(&mmlist_lock); @@ -635,6 +637,7 @@ copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm, out_set_pte: set_pte_at(dst_mm, addr, dst_pte, pte); + return 0; } static int copy_pte_range(struct mm_struct *dst_mm, struct mm_struct *src_mm, @@ -646,6 +649,7 @@ static int copy_pte_range(struct mm_struct *dst_mm, struct mm_struct *src_mm, spinlock_t *src_ptl, *dst_ptl; int progress = 0; int rss[2]; + swp_entry_t entry = (swp_entry_t){0}; again: rss[1] = rss[0] = 0; @@ -674,7 +678,10 @@ again: progress++; continue; } - copy_one_pte(dst_mm, src_mm, dst_pte, src_pte, vma, addr, rss); + entry.val = copy_one_pte(dst_mm, src_mm, dst_pte, src_pte, + vma, addr, rss); + if (entry.val) + break; progress += 8; } while (dst_pte++, src_pte++, addr += PAGE_SIZE, addr != end); @@ -684,6 +691,12 @@ again: add_mm_rss(dst_mm, rss[0], rss[1]); pte_unmap_unlock(orig_dst_pte, dst_ptl); cond_resched(); + + if (entry.val) { + if (add_swap_count_continuation(entry, GFP_KERNEL) < 0) + return -ENOMEM; + progress = 0; + } if (addr != end) goto again; return 0; @@ -2514,7 +2527,7 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma, ret = VM_FAULT_HWPOISON; } else { print_bad_pte(vma, address, orig_pte, NULL); - ret = VM_FAULT_OOM; + ret = VM_FAULT_SIGBUS; } goto out; } @@ -2548,6 +2561,12 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma, lock_page(page); delayacct_clear_flag(DELAYACCT_PF_SWAPIN); + page = ksm_might_need_to_copy(page, vma, address); + if (!page) { + ret = VM_FAULT_OOM; + goto out; + } + if (mem_cgroup_try_charge_swapin(mm, page, GFP_KERNEL, &ptr)) { ret = VM_FAULT_OOM; goto out_page; @@ -2910,7 +2929,7 @@ static int do_nonlinear_fault(struct mm_struct *mm, struct vm_area_struct *vma, * Page table corrupted: show pte and kill process. */ print_bad_pte(vma, address, orig_pte, NULL); - return VM_FAULT_OOM; + return VM_FAULT_SIGBUS; } pgoff = pte_to_pgoff(orig_pte); diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c index 2047465cd27..030ce8a5bb0 100644 --- a/mm/memory_hotplug.c +++ b/mm/memory_hotplug.c @@ -27,6 +27,7 @@ #include <linux/page-isolation.h> #include <linux/pfn.h> #include <linux/suspend.h> +#include <linux/mm_inline.h> #include <asm/tlbflush.h> @@ -71,7 +72,9 @@ static void get_page_bootmem(unsigned long info, struct page *page, int type) atomic_inc(&page->_count); } -void put_page_bootmem(struct page *page) +/* reference to __meminit __free_pages_bootmem is valid + * so use __ref to tell modpost not to generate a warning */ +void __ref put_page_bootmem(struct page *page) { int type; @@ -672,6 +675,9 @@ do_migrate_range(unsigned long start_pfn, unsigned long end_pfn) if (!ret) { /* Success */ list_add_tail(&page->lru, &source); move_pages--; + inc_zone_page_state(page, NR_ISOLATED_ANON + + page_is_file_cache(page)); + } else { /* Becasue we don't have big zone->lock. we should check this again here. 
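The fork-time flow above follows a common mm pattern: copy_one_pte() cannot allocate while the page-table locks are held, so when swap_duplicate() fails because the reference cannot be taken in place it hands the offending swap entry back, and copy_pte_range() breaks out, drops the locks, calls add_swap_count_continuation() with GFP_KERNEL, and retries from where it stopped. The sketch below models only that "fail under the lock, allocate, retry" control flow in userspace; the real swap_map encoding and continuation pages are more involved, and every name here is made up:

/* Loose model of retrying a reference after allocating overflow room. */
#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

#define COUNT_MAX 0x3f          /* pretend the in-place counter is this wide */

struct slot { uint8_t count; unsigned int spill; bool has_spill; };

/* Returns true on success; false means the caller must allocate spill room. */
static bool dup_locked(struct slot *s)
{
    if (s->count < COUNT_MAX) {
        s->count++;
        return true;
    }
    if (s->has_spill) {         /* overflow already has somewhere to go */
        s->spill++;
        return true;
    }
    return false;
}

/* Done outside the "lock": may block or allocate memory. */
static void add_continuation(struct slot *s)
{
    s->has_spill = true;
}

int main(void)
{
    struct slot s = { .count = COUNT_MAX };
    int copied = 0;

    while (copied < 3) {
        /* "lock" ... */
        bool ok = dup_locked(&s);
        /* ... "unlock" */
        if (!ok) {
            add_continuation(&s);   /* GFP_KERNEL territory */
            continue;               /* retry the same reference */
        }
        copied++;
    }
    printf("count=%u spill=%u\n", (unsigned)s.count, s.spill);
    return 0;
}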
*/ @@ -694,7 +700,7 @@ do_migrate_range(unsigned long start_pfn, unsigned long end_pfn) if (list_empty(&source)) goto out; /* this function returns # of failed pages */ - ret = migrate_pages(&source, hotremove_migrate_alloc, 0); + ret = migrate_pages(&source, hotremove_migrate_alloc, 0, 1); out: return ret; @@ -747,7 +753,7 @@ check_pages_isolated(unsigned long start_pfn, unsigned long end_pfn) return offlined; } -int offline_pages(unsigned long start_pfn, +static int offline_pages(unsigned long start_pfn, unsigned long end_pfn, unsigned long timeout) { unsigned long pfn, nr_pages, expire; @@ -849,6 +855,10 @@ repeat: setup_per_zone_wmarks(); calculate_zone_inactive_ratio(zone); + if (!node_present_pages(node)) { + node_clear_state(node, N_HIGH_MEMORY); + kswapd_stop(node); + } vm_total_pages = nr_free_pagecache_pages(); writeback_set_ratelimit(); diff --git a/mm/mempolicy.c b/mm/mempolicy.c index 4545d594424..290fb5bf044 100644 --- a/mm/mempolicy.c +++ b/mm/mempolicy.c @@ -85,10 +85,12 @@ #include <linux/seq_file.h> #include <linux/proc_fs.h> #include <linux/migrate.h> +#include <linux/ksm.h> #include <linux/rmap.h> #include <linux/security.h> #include <linux/syscalls.h> #include <linux/ctype.h> +#include <linux/mm_inline.h> #include <asm/tlbflush.h> #include <asm/uaccess.h> @@ -412,17 +414,11 @@ static int check_pte_range(struct vm_area_struct *vma, pmd_t *pmd, if (!page) continue; /* - * The check for PageReserved here is important to avoid - * handling zero pages and other pages that may have been - * marked special by the system. - * - * If the PageReserved would not be checked here then f.e. - * the location of the zero page could have an influence - * on MPOL_MF_STRICT, zero pages would be counted for - * the per node stats, and there would be useless attempts - * to put zero pages on the migration list. + * vm_normal_page() filters out zero pages, but there might + * still be PageReserved pages to skip, perhaps in a VDSO. + * And we cannot move PageKsm pages sensibly or safely yet. */ - if (PageReserved(page)) + if (PageReserved(page) || PageKsm(page)) continue; nid = page_to_nid(page); if (node_isset(nid, *nodes) == !!(flags & MPOL_MF_INVERT)) @@ -809,6 +805,8 @@ static void migrate_page_add(struct page *page, struct list_head *pagelist, if ((flags & MPOL_MF_MOVE_ALL) || page_mapcount(page) == 1) { if (!isolate_lru_page(page)) { list_add_tail(&page->lru, pagelist); + inc_zone_page_state(page, NR_ISOLATED_ANON + + page_is_file_cache(page)); } } } @@ -836,7 +834,7 @@ static int migrate_to_node(struct mm_struct *mm, int source, int dest, flags | MPOL_MF_DISCONTIG_OK, &pagelist); if (!list_empty(&pagelist)) - err = migrate_pages(&pagelist, new_node_page, dest); + err = migrate_pages(&pagelist, new_node_page, dest, 0); return err; } @@ -1053,7 +1051,7 @@ static long do_mbind(unsigned long start, unsigned long len, if (!list_empty(&pagelist)) nr_failed = migrate_pages(&pagelist, new_vma_page, - (unsigned long)vma); + (unsigned long)vma, 0); if (!err && nr_failed && (flags & MPOL_MF_STRICT)) err = -EIO; @@ -1565,6 +1563,53 @@ struct zonelist *huge_zonelist(struct vm_area_struct *vma, unsigned long addr, } return zl; } + +/* + * init_nodemask_of_mempolicy + * + * If the current task's mempolicy is "default" [NULL], return 'false' + * to indicate default policy. 
Otherwise, extract the policy nodemask + * for 'bind' or 'interleave' policy into the argument nodemask, or + * initialize the argument nodemask to contain the single node for + * 'preferred' or 'local' policy and return 'true' to indicate presence + * of non-default mempolicy. + * + * We don't bother with reference counting the mempolicy [mpol_get/put] + * because the current task is examining it's own mempolicy and a task's + * mempolicy is only ever changed by the task itself. + * + * N.B., it is the caller's responsibility to free a returned nodemask. + */ +bool init_nodemask_of_mempolicy(nodemask_t *mask) +{ + struct mempolicy *mempolicy; + int nid; + + if (!(mask && current->mempolicy)) + return false; + + mempolicy = current->mempolicy; + switch (mempolicy->mode) { + case MPOL_PREFERRED: + if (mempolicy->flags & MPOL_F_LOCAL) + nid = numa_node_id(); + else + nid = mempolicy->v.preferred_node; + init_nodemask_of_node(mask, nid); + break; + + case MPOL_BIND: + /* Fall through */ + case MPOL_INTERLEAVE: + *mask = mempolicy->v.nodes; + break; + + default: + BUG(); + } + + return true; +} #endif /* Allocate a page in interleaved policy. diff --git a/mm/migrate.c b/mm/migrate.c index 0bc640fd68f..efddbf0926b 100644 --- a/mm/migrate.c +++ b/mm/migrate.c @@ -21,6 +21,7 @@ #include <linux/mm_inline.h> #include <linux/nsproxy.h> #include <linux/pagevec.h> +#include <linux/ksm.h> #include <linux/rmap.h> #include <linux/topology.h> #include <linux/cpu.h> @@ -78,8 +79,8 @@ int putback_lru_pages(struct list_head *l) /* * Restore a potential migration pte to a working pte entry */ -static void remove_migration_pte(struct vm_area_struct *vma, - struct page *old, struct page *new) +static int remove_migration_pte(struct page *new, struct vm_area_struct *vma, + unsigned long addr, void *old) { struct mm_struct *mm = vma->vm_mm; swp_entry_t entry; @@ -88,40 +89,37 @@ static void remove_migration_pte(struct vm_area_struct *vma, pmd_t *pmd; pte_t *ptep, pte; spinlock_t *ptl; - unsigned long addr = page_address_in_vma(new, vma); - - if (addr == -EFAULT) - return; pgd = pgd_offset(mm, addr); if (!pgd_present(*pgd)) - return; + goto out; pud = pud_offset(pgd, addr); if (!pud_present(*pud)) - return; + goto out; pmd = pmd_offset(pud, addr); if (!pmd_present(*pmd)) - return; + goto out; ptep = pte_offset_map(pmd, addr); if (!is_swap_pte(*ptep)) { pte_unmap(ptep); - return; + goto out; } ptl = pte_lockptr(mm, pmd); spin_lock(ptl); pte = *ptep; if (!is_swap_pte(pte)) - goto out; + goto unlock; entry = pte_to_swp_entry(pte); - if (!is_migration_entry(entry) || migration_entry_to_page(entry) != old) - goto out; + if (!is_migration_entry(entry) || + migration_entry_to_page(entry) != old) + goto unlock; get_page(new); pte = pte_mkold(mk_pte(new, vma->vm_page_prot)); @@ -137,58 +135,10 @@ static void remove_migration_pte(struct vm_area_struct *vma, /* No need to invalidate - it was non-present before */ update_mmu_cache(vma, addr, pte); - -out: +unlock: pte_unmap_unlock(ptep, ptl); -} - -/* - * Note that remove_file_migration_ptes will only work on regular mappings, - * Nonlinear mappings do not use migration entries. 
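init_nodemask_of_mempolicy() above boils down to a switch on the policy mode: 'preferred'/'local' produce a single-node mask, 'bind' and 'interleave' copy the policy's node set, and a NULL policy returns false to indicate the default policy. A simplified userspace analogue, with an invented unsigned-long bitmask standing in for nodemask_t:

/* Standalone sketch of turning a task mempolicy into a node mask. */
#include <stdio.h>
#include <stdbool.h>

enum mode { MODE_DEFAULT, MODE_PREFERRED, MODE_BIND, MODE_INTERLEAVE };

struct policy {
    enum mode mode;
    int preferred_node;         /* for MODE_PREFERRED */
    unsigned long nodes;        /* bitmask, for BIND/INTERLEAVE */
};

static bool policy_nodemask(const struct policy *pol, unsigned long *mask)
{
    if (!pol || pol->mode == MODE_DEFAULT)
        return false;           /* "default" policy */

    switch (pol->mode) {
    case MODE_PREFERRED:
        *mask = 1UL << pol->preferred_node;
        break;
    case MODE_BIND:
        /* fall through */
    case MODE_INTERLEAVE:
        *mask = pol->nodes;
        break;
    default:
        return false;
    }
    return true;
}

int main(void)
{
    struct policy pol = { .mode = MODE_INTERLEAVE, .nodes = 0x5 /* nodes 0,2 */ };
    unsigned long mask;

    if (policy_nodemask(&pol, &mask))
        printf("policy nodes: %#lx\n", mask);
    else
        printf("default policy\n");
    return 0;
}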
- */ -static void remove_file_migration_ptes(struct page *old, struct page *new) -{ - struct vm_area_struct *vma; - struct address_space *mapping = new->mapping; - struct prio_tree_iter iter; - pgoff_t pgoff = new->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT); - - if (!mapping) - return; - - spin_lock(&mapping->i_mmap_lock); - - vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) - remove_migration_pte(vma, old, new); - - spin_unlock(&mapping->i_mmap_lock); -} - -/* - * Must hold mmap_sem lock on at least one of the vmas containing - * the page so that the anon_vma cannot vanish. - */ -static void remove_anon_migration_ptes(struct page *old, struct page *new) -{ - struct anon_vma *anon_vma; - struct vm_area_struct *vma; - unsigned long mapping; - - mapping = (unsigned long)new->mapping; - - if (!mapping || (mapping & PAGE_MAPPING_ANON) == 0) - return; - - /* - * We hold the mmap_sem lock. So no need to call page_lock_anon_vma. - */ - anon_vma = (struct anon_vma *) (mapping - PAGE_MAPPING_ANON); - spin_lock(&anon_vma->lock); - - list_for_each_entry(vma, &anon_vma->head, anon_vma_node) - remove_migration_pte(vma, old, new); - - spin_unlock(&anon_vma->lock); +out: + return SWAP_AGAIN; } /* @@ -197,10 +147,7 @@ static void remove_anon_migration_ptes(struct page *old, struct page *new) */ static void remove_migration_ptes(struct page *old, struct page *new) { - if (PageAnon(new)) - remove_anon_migration_ptes(old, new); - else - remove_file_migration_ptes(old, new); + rmap_walk(new, remove_migration_pte, old); } /* @@ -341,8 +288,8 @@ static void migrate_page_copy(struct page *newpage, struct page *page) if (TestClearPageActive(page)) { VM_BUG_ON(PageUnevictable(page)); SetPageActive(newpage); - } else - unevictable_migrate_page(newpage, page); + } else if (TestClearPageUnevictable(page)) + SetPageUnevictable(newpage); if (PageChecked(page)) SetPageChecked(newpage); if (PageMappedToDisk(page)) @@ -361,6 +308,7 @@ static void migrate_page_copy(struct page *newpage, struct page *page) } mlock_migrate_page(newpage, page); + ksm_migrate_page(newpage, page); ClearPageSwapCache(page); ClearPagePrivate(page); @@ -580,9 +528,9 @@ static int move_to_new_page(struct page *newpage, struct page *page) else rc = fallback_migrate_page(mapping, newpage, page); - if (!rc) { + if (!rc) remove_migration_ptes(page, newpage); - } else + else newpage->mapping = NULL; unlock_page(newpage); @@ -595,7 +543,7 @@ static int move_to_new_page(struct page *newpage, struct page *page) * to the newly allocated page in newpage. */ static int unmap_and_move(new_page_t get_new_page, unsigned long private, - struct page *page, int force) + struct page *page, int force, int offlining) { int rc = 0; int *result = NULL; @@ -621,6 +569,20 @@ static int unmap_and_move(new_page_t get_new_page, unsigned long private, lock_page(page); } + /* + * Only memory hotplug's offline_pages() caller has locked out KSM, + * and can safely migrate a KSM page. The other cases have skipped + * PageKsm along with PageReserved - but it is only now when we have + * the page lock that we can be certain it will not go KSM beneath us + * (KSM will not upgrade a page from PageAnon to PageKsm when it sees + * its pagecount raised, but only here do we take the page lock which + * serializes that). 
+ */ + if (PageKsm(page) && !offlining) { + rc = -EBUSY; + goto unlock; + } + /* charge against new page */ charge = mem_cgroup_prepare_migration(page, &mem); if (charge == -ENOMEM) { @@ -737,7 +699,7 @@ move_newpage: * Return: Number of pages not migrated or error code. */ int migrate_pages(struct list_head *from, - new_page_t get_new_page, unsigned long private) + new_page_t get_new_page, unsigned long private, int offlining) { int retry = 1; int nr_failed = 0; @@ -746,13 +708,6 @@ int migrate_pages(struct list_head *from, struct page *page2; int swapwrite = current->flags & PF_SWAPWRITE; int rc; - unsigned long flags; - - local_irq_save(flags); - list_for_each_entry(page, from, lru) - __inc_zone_page_state(page, NR_ISOLATED_ANON + - page_is_file_cache(page)); - local_irq_restore(flags); if (!swapwrite) current->flags |= PF_SWAPWRITE; @@ -764,7 +719,7 @@ int migrate_pages(struct list_head *from, cond_resched(); rc = unmap_and_move(get_new_page, private, - page, pass > 2); + page, pass > 2, offlining); switch(rc) { case -ENOMEM: @@ -860,7 +815,8 @@ static int do_move_page_to_node_array(struct mm_struct *mm, if (!page) goto set_status; - if (PageReserved(page)) /* Check for zero page */ + /* Use PageReserved to check for zero page */ + if (PageReserved(page) || PageKsm(page)) goto put_and_set; pp->page = page; @@ -878,8 +834,11 @@ static int do_move_page_to_node_array(struct mm_struct *mm, goto put_and_set; err = isolate_lru_page(page); - if (!err) + if (!err) { list_add_tail(&page->lru, &pagelist); + inc_zone_page_state(page, NR_ISOLATED_ANON + + page_is_file_cache(page)); + } put_and_set: /* * Either remove the duplicate refcount from @@ -894,7 +853,7 @@ set_status: err = 0; if (!list_empty(&pagelist)) err = migrate_pages(&pagelist, new_page_node, - (unsigned long)pm); + (unsigned long)pm, 0); up_read(&mm->mmap_sem); return err; @@ -1015,7 +974,7 @@ static void do_pages_stat_array(struct mm_struct *mm, unsigned long nr_pages, err = -ENOENT; /* Use PageReserved to check for zero page */ - if (!page || PageReserved(page)) + if (!page || PageReserved(page) || PageKsm(page)) goto set_status; err = page_to_nid(page); diff --git a/mm/mincore.c b/mm/mincore.c index 8cb508f84ea..7a3436ef39e 100644 --- a/mm/mincore.c +++ b/mm/mincore.c @@ -14,6 +14,7 @@ #include <linux/syscalls.h> #include <linux/swap.h> #include <linux/swapops.h> +#include <linux/hugetlb.h> #include <asm/uaccess.h> #include <asm/pgtable.h> @@ -72,6 +73,42 @@ static long do_mincore(unsigned long addr, unsigned char *vec, unsigned long pag if (!vma || addr < vma->vm_start) return -ENOMEM; +#ifdef CONFIG_HUGETLB_PAGE + if (is_vm_hugetlb_page(vma)) { + struct hstate *h; + unsigned long nr_huge; + unsigned char present; + + i = 0; + nr = min(pages, (vma->vm_end - addr) >> PAGE_SHIFT); + h = hstate_vma(vma); + nr_huge = ((addr + pages * PAGE_SIZE - 1) >> huge_page_shift(h)) + - (addr >> huge_page_shift(h)) + 1; + nr_huge = min(nr_huge, + (vma->vm_end - addr) >> huge_page_shift(h)); + while (1) { + /* hugepage always in RAM for now, + * but generally it needs to be check */ + ptep = huge_pte_offset(current->mm, + addr & huge_page_mask(h)); + present = !!(ptep && + !huge_pte_none(huge_ptep_get(ptep))); + while (1) { + vec[i++] = present; + addr += PAGE_SIZE; + /* reach buffer limit */ + if (i == nr) + return nr; + /* check hugepage border */ + if (!((addr & ~huge_page_mask(h)) + >> PAGE_SHIFT)) + break; + } + } + return nr; + } +#endif + /* * Calculate how many pages there are left in the last level of the * PTE array for our address. 
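The hugetlb branch added to mincore() above does one page-table lookup per huge page and then fans that single presence bit out into one vector byte per base page, stopping either at the user buffer limit or when the advanced address lands on a huge-page boundary. A standalone sketch of that fill loop, assuming 4K base pages, 2MB huge pages and a fake presence function:

/* Userspace sketch of filling a mincore-style vector over huge pages. */
#include <stdio.h>
#include <stdbool.h>

#define PAGE_SHIFT  12
#define PAGE_SIZE   (1UL << PAGE_SHIFT)
#define HPAGE_SHIFT 21                      /* pretend 2 MB huge pages */
#define HPAGE_MASK  (~((1UL << HPAGE_SHIFT) - 1))

/* Stand-in for the huge_pte_offset()/huge_pte_none() test. */
static bool huge_page_present(unsigned long addr)
{
    return ((addr & HPAGE_MASK) >> HPAGE_SHIFT) % 2 == 0;  /* fake pattern */
}

static unsigned long fill_vec(unsigned long addr, unsigned char *vec,
                              unsigned long nr)
{
    unsigned long i = 0;

    while (1) {
        unsigned char present = huge_page_present(addr);

        while (1) {
            vec[i++] = present;
            addr += PAGE_SIZE;
            if (i == nr)                    /* buffer limit reached */
                return nr;
            if (!((addr & ~HPAGE_MASK) >> PAGE_SHIFT))
                break;                      /* crossed a huge-page border */
        }
    }
}

int main(void)
{
    unsigned char vec[1024];
    unsigned long n = fill_vec(0x1ff000, vec, sizeof(vec));

    printf("filled %lu entries, vec[0]=%u vec[1]=%u\n",
           n, (unsigned)vec[0], (unsigned)vec[1]);
    return 0;
}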
diff --git a/mm/mlock.c b/mm/mlock.c index bd6f0e466f6..2b8335a8940 100644 --- a/mm/mlock.c +++ b/mm/mlock.c @@ -88,25 +88,22 @@ void mlock_vma_page(struct page *page) } } -/* - * called from munlock()/munmap() path with page supposedly on the LRU. +/** + * munlock_vma_page - munlock a vma page + * @page - page to be unlocked * - * Note: unlike mlock_vma_page(), we can't just clear the PageMlocked - * [in try_to_munlock()] and then attempt to isolate the page. We must - * isolate the page to keep others from messing with its unevictable - * and mlocked state while trying to munlock. However, we pre-clear the - * mlocked state anyway as we might lose the isolation race and we might - * not get another chance to clear PageMlocked. If we successfully - * isolate the page and try_to_munlock() detects other VM_LOCKED vmas - * mapping the page, it will restore the PageMlocked state, unless the page - * is mapped in a non-linear vma. So, we go ahead and SetPageMlocked(), - * perhaps redundantly. - * If we lose the isolation race, and the page is mapped by other VM_LOCKED - * vmas, we'll detect this in vmscan--via try_to_munlock() or try_to_unmap() - * either of which will restore the PageMlocked state by calling - * mlock_vma_page() above, if it can grab the vma's mmap sem. + * called from munlock()/munmap() path with page supposedly on the LRU. + * When we munlock a page, because the vma where we found the page is being + * munlock()ed or munmap()ed, we want to check whether other vmas hold the + * page locked so that we can leave it on the unevictable lru list and not + * bother vmscan with it. However, to walk the page's rmap list in + * try_to_munlock() we must isolate the page from the LRU. If some other + * task has removed the page from the LRU, we won't be able to do that. + * So we clear the PageMlocked as we might not get another chance. If we + * can't isolate the page, we leave it for putback_lru_page() and vmscan + * [page_referenced()/try_to_unmap()] to deal with. */ -static void munlock_vma_page(struct page *page) +void munlock_vma_page(struct page *page) { BUG_ON(!PageLocked(page)); @@ -117,18 +114,18 @@ static void munlock_vma_page(struct page *page) /* * did try_to_unlock() succeed or punt? */ - if (ret == SWAP_SUCCESS || ret == SWAP_AGAIN) + if (ret != SWAP_MLOCK) count_vm_event(UNEVICTABLE_PGMUNLOCKED); putback_lru_page(page); } else { /* - * We lost the race. let try_to_unmap() deal - * with it. At least we get the page state and - * mlock stats right. However, page is still on - * the noreclaim list. We'll fix that up when - * the page is eventually freed or we scan the - * noreclaim list. + * Some other task has removed the page from the LRU. + * putback_lru_page() will take care of removing the + * page from the unevictable list, if necessary. + * vmscan [page_referenced()] will move the page back + * to the unevictable list if some other vma has it + * mlocked. */ if (PageUnevictable(page)) count_vm_event(UNEVICTABLE_PGSTRANDED); diff --git a/mm/mmap.c b/mm/mmap.c index ed70a68e882..d9c77b2dbe9 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -1198,8 +1198,20 @@ munmap_back: goto free_vma; } - if (vma_wants_writenotify(vma)) + if (vma_wants_writenotify(vma)) { + pgprot_t pprot = vma->vm_page_prot; + + /* Can vma->vm_page_prot have changed?? + * + * Answer: Yes, drivers may have changed it in their + * f_op->mmap method. + * + * Ensures that vmas marked as uncached stay that way. 
+ */ vma->vm_page_prot = vm_get_page_prot(vm_flags & ~VM_SHARED); + if (pgprot_val(pprot) == pgprot_val(pgprot_noncached(pprot))) + vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); + } vma_link(mm, vma, prev, rb_link, rb_parent); file = vma->vm_file; @@ -1811,10 +1823,10 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma, } /* - * Split a vma into two pieces at address 'addr', a new vma is allocated - * either for the first part or the tail. + * __split_vma() bypasses sysctl_max_map_count checking. We use this on the + * munmap path where it doesn't make sense to fail. */ -int split_vma(struct mm_struct * mm, struct vm_area_struct * vma, +static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma, unsigned long addr, int new_below) { struct mempolicy *pol; @@ -1824,9 +1836,6 @@ int split_vma(struct mm_struct * mm, struct vm_area_struct * vma, ~(huge_page_mask(hstate_vma(vma))))) return -EINVAL; - if (mm->map_count >= sysctl_max_map_count) - return -ENOMEM; - new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL); if (!new) return -ENOMEM; @@ -1866,6 +1875,19 @@ int split_vma(struct mm_struct * mm, struct vm_area_struct * vma, return 0; } +/* + * Split a vma into two pieces at address 'addr', a new vma is allocated + * either for the first part or the tail. + */ +int split_vma(struct mm_struct *mm, struct vm_area_struct *vma, + unsigned long addr, int new_below) +{ + if (mm->map_count >= sysctl_max_map_count) + return -ENOMEM; + + return __split_vma(mm, vma, addr, new_below); +} + /* Munmap is split into 2 main parts -- this part which finds * what needs doing, and the areas themselves, which do the * work. This now handles partial unmappings. @@ -1901,7 +1923,17 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len) * places tmp vma above, and higher split_vma places tmp vma below. */ if (start > vma->vm_start) { - int error = split_vma(mm, vma, start, 0); + int error; + + /* + * Make sure that map_count on return from munmap() will + * not exceed its limit; but let map_count go just above + * its limit temporarily, to help free resources as expected. + */ + if (end < vma->vm_end && mm->map_count >= sysctl_max_map_count) + return -ENOMEM; + + error = __split_vma(mm, vma, start, 0); if (error) return error; prev = vma; @@ -1910,7 +1942,7 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len) /* Does it split the last one? */ last = find_vma(mm, end); if (last && end > last->vm_start) { - int error = split_vma(mm, last, end, 1); + int error = __split_vma(mm, last, end, 1); if (error) return error; } diff --git a/mm/nommu.c b/mm/nommu.c index 9876fa0c3ad..8687973462b 100644 --- a/mm/nommu.c +++ b/mm/nommu.c @@ -1143,9 +1143,6 @@ static int do_mmap_private(struct vm_area_struct *vma, if (ret < rlen) memset(base + ret, 0, rlen - ret); - } else { - /* if it's an anonymous mapping, then just clear it */ - memset(base, 0, rlen); } return 0; @@ -1343,6 +1340,11 @@ unsigned long do_mmap_pgoff(struct file *file, goto error_just_free; add_nommu_region(region); + /* clear anonymous mappings that don't ask for uninitialized data */ + if (!vma->vm_file && !(flags & MAP_UNINITIALIZED)) + memset((void *)region->vm_start, 0, + region->vm_end - region->vm_start); + /* okay... 
we have a mapping; now we have to register it */ result = vma->vm_start; diff --git a/mm/oom_kill.c b/mm/oom_kill.c index ea2147dabba..492c98624fc 100644 --- a/mm/oom_kill.c +++ b/mm/oom_kill.c @@ -337,6 +337,21 @@ static void dump_tasks(const struct mem_cgroup *mem) } while_each_thread(g, p); } +static void dump_header(gfp_t gfp_mask, int order, struct mem_cgroup *mem) +{ + pr_warning("%s invoked oom-killer: gfp_mask=0x%x, order=%d, " + "oom_adj=%d\n", + current->comm, gfp_mask, order, current->signal->oom_adj); + task_lock(current); + cpuset_print_task_mems_allowed(current); + task_unlock(current); + dump_stack(); + mem_cgroup_print_oom_info(mem, current); + show_mem(); + if (sysctl_oom_dump_tasks) + dump_tasks(mem); +} + /* * Send SIGKILL to the selected process irrespective of CAP_SYS_RAW_IO * flag though it's unlikely that we select a process with CAP_SYS_RAW_IO @@ -395,20 +410,8 @@ static int oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order, { struct task_struct *c; - if (printk_ratelimit()) { - printk(KERN_WARNING "%s invoked oom-killer: " - "gfp_mask=0x%x, order=%d, oom_adj=%d\n", - current->comm, gfp_mask, order, - current->signal->oom_adj); - task_lock(current); - cpuset_print_task_mems_allowed(current); - task_unlock(current); - dump_stack(); - mem_cgroup_print_oom_info(mem, current); - show_mem(); - if (sysctl_oom_dump_tasks) - dump_tasks(mem); - } + if (printk_ratelimit()) + dump_header(gfp_mask, order, mem); /* * If the task is already exiting, don't alarm the sysadmin or kill @@ -544,6 +547,7 @@ retry: /* Found nothing?!?! Either we hang forever, or we panic. */ if (!p) { read_unlock(&tasklist_lock); + dump_header(gfp_mask, order, NULL); panic("Out of memory and no killable processes...\n"); } @@ -609,8 +613,10 @@ void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask, int order) /* Got some memory back in the last second. */ return; - if (sysctl_panic_on_oom == 2) + if (sysctl_panic_on_oom == 2) { + dump_header(gfp_mask, order, NULL); panic("out of memory. Compulsory panic_on_oom is selected.\n"); + } /* * Check if there were limitations on the allocation (only relevant for @@ -626,8 +632,10 @@ void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask, int order) break; case CONSTRAINT_NONE: - if (sysctl_panic_on_oom) + if (sysctl_panic_on_oom) { + dump_header(gfp_mask, order, NULL); panic("out of memory. panic_on_oom is selected\n"); + } /* Fall-through */ case CONSTRAINT_CPUSET: __out_of_memory(gfp_mask, order); diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 2bc2ac63f41..59d2e88fb47 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -486,7 +486,6 @@ static inline void __free_one_page(struct page *page, zone->free_area[order].nr_free++; } -#ifdef CONFIG_HAVE_MLOCKED_PAGE_BIT /* * free_page_mlock() -- clean up attempts to free and mlocked() page. * Page should not be on lru, so no need to fix that up. 
@@ -497,9 +496,6 @@ static inline void free_page_mlock(struct page *page) __dec_zone_page_state(page, NR_MLOCK); __count_vm_event(UNEVICTABLE_MLOCKFREED); } -#else -static void free_page_mlock(struct page *page) { } -#endif static inline int free_pages_check(struct page *page) { diff --git a/mm/page_io.c b/mm/page_io.c index c6f3e5071de..a19af956ee1 100644 --- a/mm/page_io.c +++ b/mm/page_io.c @@ -19,20 +19,15 @@ #include <linux/writeback.h> #include <asm/pgtable.h> -static struct bio *get_swap_bio(gfp_t gfp_flags, pgoff_t index, +static struct bio *get_swap_bio(gfp_t gfp_flags, struct page *page, bio_end_io_t end_io) { struct bio *bio; bio = bio_alloc(gfp_flags, 1); if (bio) { - struct swap_info_struct *sis; - swp_entry_t entry = { .val = index, }; - - sis = get_swap_info_struct(swp_type(entry)); - bio->bi_sector = map_swap_page(sis, swp_offset(entry)) * - (PAGE_SIZE >> 9); - bio->bi_bdev = sis->bdev; + bio->bi_sector = map_swap_page(page, &bio->bi_bdev); + bio->bi_sector <<= PAGE_SHIFT - 9; bio->bi_io_vec[0].bv_page = page; bio->bi_io_vec[0].bv_len = PAGE_SIZE; bio->bi_io_vec[0].bv_offset = 0; @@ -102,8 +97,7 @@ int swap_writepage(struct page *page, struct writeback_control *wbc) unlock_page(page); goto out; } - bio = get_swap_bio(GFP_NOIO, page_private(page), page, - end_swap_bio_write); + bio = get_swap_bio(GFP_NOIO, page, end_swap_bio_write); if (bio == NULL) { set_page_dirty(page); unlock_page(page); @@ -127,8 +121,7 @@ int swap_readpage(struct page *page) VM_BUG_ON(!PageLocked(page)); VM_BUG_ON(PageUptodate(page)); - bio = get_swap_bio(GFP_KERNEL, page_private(page), page, - end_swap_bio_read); + bio = get_swap_bio(GFP_KERNEL, page, end_swap_bio_read); if (bio == NULL) { unlock_page(page); ret = -ENOMEM; diff --git a/mm/pagewalk.c b/mm/pagewalk.c index d5878bed784..7b47a57b664 100644 --- a/mm/pagewalk.c +++ b/mm/pagewalk.c @@ -1,6 +1,7 @@ #include <linux/mm.h> #include <linux/highmem.h> #include <linux/sched.h> +#include <linux/hugetlb.h> static int walk_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end, struct mm_walk *walk) @@ -107,6 +108,7 @@ int walk_page_range(unsigned long addr, unsigned long end, pgd_t *pgd; unsigned long next; int err = 0; + struct vm_area_struct *vma; if (addr >= end) return err; @@ -117,11 +119,38 @@ int walk_page_range(unsigned long addr, unsigned long end, pgd = pgd_offset(walk->mm, addr); do { next = pgd_addr_end(addr, end); + + /* + * handle hugetlb vma individually because pagetable walk for + * the hugetlb page is dependent on the architecture and + * we can't handled it in the same manner as non-huge pages. 
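With the get_swap_bio() change above, the caller no longer decodes page_private(page) itself: map_swap_page() returns the page-sized block of the swap slot on its device, and the bio's starting sector is obtained by shifting that block left by PAGE_SHIFT - 9 (eight 512-byte sectors per 4K page). A minimal arithmetic sketch, with an invented map_swap_block() standing in for map_swap_page():

/* Sketch of the block-to-sector conversion done when building a swap bio. */
#include <stdio.h>

#define PAGE_SHIFT   12
#define SECTOR_SHIFT 9              /* 512-byte sectors */

/* Stand-in for map_swap_page(): page-sized block of the slot on its device. */
static unsigned long long map_swap_block(unsigned long swap_offset)
{
    return 2 + swap_offset;         /* fake extent starting at block 2 */
}

int main(void)
{
    unsigned long swap_offset = 7;
    unsigned long long sector;

    sector = map_swap_block(swap_offset);
    sector <<= PAGE_SHIFT - SECTOR_SHIFT;   /* one 4K page = 8 sectors */

    printf("swap offset %lu -> device sector %llu\n", swap_offset, sector);
    return 0;
}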
+ */ + vma = find_vma(walk->mm, addr); +#ifdef CONFIG_HUGETLB_PAGE + if (vma && is_vm_hugetlb_page(vma)) { + pte_t *pte; + struct hstate *hs; + + if (vma->vm_end < next) + next = vma->vm_end; + hs = hstate_vma(vma); + pte = huge_pte_offset(walk->mm, + addr & huge_page_mask(hs)); + if (pte && !huge_pte_none(huge_ptep_get(pte)) + && walk->hugetlb_entry) + err = walk->hugetlb_entry(pte, addr, + next, walk); + if (err) + break; + continue; + } +#endif if (pgd_none_or_clear_bad(pgd)) { if (walk->pte_hole) err = walk->pte_hole(addr, next, walk); if (err) break; + pgd++; continue; } if (walk->pgd_entry) @@ -131,7 +160,8 @@ int walk_page_range(unsigned long addr, unsigned long end, err = walk_pud_range(pgd, addr, next, walk); if (err) break; - } while (pgd++, addr = next, addr != end); + pgd++; + } while (addr = next, addr != end); return err; } diff --git a/mm/rmap.c b/mm/rmap.c index dd43373a483..98135dbd25b 100644 --- a/mm/rmap.c +++ b/mm/rmap.c @@ -49,6 +49,7 @@ #include <linux/swapops.h> #include <linux/slab.h> #include <linux/init.h> +#include <linux/ksm.h> #include <linux/rmap.h> #include <linux/rcupdate.h> #include <linux/module.h> @@ -67,7 +68,7 @@ static inline struct anon_vma *anon_vma_alloc(void) return kmem_cache_alloc(anon_vma_cachep, GFP_KERNEL); } -static inline void anon_vma_free(struct anon_vma *anon_vma) +void anon_vma_free(struct anon_vma *anon_vma) { kmem_cache_free(anon_vma_cachep, anon_vma); } @@ -171,7 +172,7 @@ void anon_vma_unlink(struct vm_area_struct *vma) list_del(&vma->anon_vma_node); /* We must garbage collect the anon_vma if it's empty */ - empty = list_empty(&anon_vma->head); + empty = list_empty(&anon_vma->head) && !ksm_refcount(anon_vma); spin_unlock(&anon_vma->lock); if (empty) @@ -183,6 +184,7 @@ static void anon_vma_ctor(void *data) struct anon_vma *anon_vma = data; spin_lock_init(&anon_vma->lock); + ksm_refcount_init(anon_vma); INIT_LIST_HEAD(&anon_vma->head); } @@ -202,8 +204,8 @@ struct anon_vma *page_lock_anon_vma(struct page *page) unsigned long anon_mapping; rcu_read_lock(); - anon_mapping = (unsigned long) page->mapping; - if (!(anon_mapping & PAGE_MAPPING_ANON)) + anon_mapping = (unsigned long) ACCESS_ONCE(page->mapping); + if ((anon_mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON) goto out; if (!page_mapped(page)) goto out; @@ -248,8 +250,7 @@ vma_address(struct page *page, struct vm_area_struct *vma) unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma) { if (PageAnon(page)) { - if ((void *)vma->anon_vma != - (void *)page->mapping - PAGE_MAPPING_ANON) + if (vma->anon_vma != page_anon_vma(page)) return -EFAULT; } else if (page->mapping && !(vma->vm_flags & VM_NONLINEAR)) { if (!vma->vm_file || @@ -337,21 +338,15 @@ int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma) * Subfunctions of page_referenced: page_referenced_one called * repeatedly from either page_referenced_anon or page_referenced_file. 
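The page_lock_anon_vma() change above relies on the low bits of page->mapping: anonymous pages tag the anon_vma pointer with PAGE_MAPPING_ANON, while KSM pages set an additional flag bit alongside it, so masking with PAGE_MAPPING_FLAGS and comparing against PAGE_MAPPING_ANON alone rejects KSM pages yet still recovers the anon_vma for ordinary anonymous pages. A userspace sketch of that pointer tagging, using illustrative macro names rather than the kernel's:

/* Userspace sketch of low-bit tagging in a mapping pointer. */
#include <stdio.h>
#include <stdbool.h>

#define MAPPING_ANON  0x1UL
#define MAPPING_KSM   0x2UL     /* only ever set together with ANON */
#define MAPPING_FLAGS (MAPPING_ANON | MAPPING_KSM)

struct anon_vma { int dummy; };

static bool is_plain_anon(unsigned long mapping)
{
    /* KSM pages carry both bits, so this test rejects them. */
    return (mapping & MAPPING_FLAGS) == MAPPING_ANON;
}

static struct anon_vma *mapping_to_anon_vma(unsigned long mapping)
{
    return (struct anon_vma *)(mapping - MAPPING_ANON);
}

int main(void)
{
    static struct anon_vma av;  /* suitably aligned, low bits are free */
    unsigned long anon = (unsigned long)&av + MAPPING_ANON;
    unsigned long ksm  = (unsigned long)&av + MAPPING_ANON + MAPPING_KSM;

    printf("anon page: plain_anon=%d\n", is_plain_anon(anon));
    printf("ksm page:  plain_anon=%d\n", is_plain_anon(ksm));
    if (is_plain_anon(anon))
        printf("anon_vma back at %p\n", (void *)mapping_to_anon_vma(anon));
    return 0;
}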
*/ -static int page_referenced_one(struct page *page, - struct vm_area_struct *vma, - unsigned int *mapcount, - unsigned long *vm_flags) +int page_referenced_one(struct page *page, struct vm_area_struct *vma, + unsigned long address, unsigned int *mapcount, + unsigned long *vm_flags) { struct mm_struct *mm = vma->vm_mm; - unsigned long address; pte_t *pte; spinlock_t *ptl; int referenced = 0; - address = vma_address(page, vma); - if (address == -EFAULT) - goto out; - pte = page_check_address(page, mm, address, &ptl, 0); if (!pte) goto out; @@ -388,9 +383,10 @@ static int page_referenced_one(struct page *page, out_unmap: (*mapcount)--; pte_unmap_unlock(pte, ptl); -out: + if (referenced) *vm_flags |= vma->vm_flags; +out: return referenced; } @@ -409,6 +405,9 @@ static int page_referenced_anon(struct page *page, mapcount = page_mapcount(page); list_for_each_entry(vma, &anon_vma->head, anon_vma_node) { + unsigned long address = vma_address(page, vma); + if (address == -EFAULT) + continue; /* * If we are reclaiming on behalf of a cgroup, skip * counting on behalf of references from different @@ -416,7 +415,7 @@ static int page_referenced_anon(struct page *page, */ if (mem_cont && !mm_match_cgroup(vma->vm_mm, mem_cont)) continue; - referenced += page_referenced_one(page, vma, + referenced += page_referenced_one(page, vma, address, &mapcount, vm_flags); if (!mapcount) break; @@ -474,6 +473,9 @@ static int page_referenced_file(struct page *page, mapcount = page_mapcount(page); vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) { + unsigned long address = vma_address(page, vma); + if (address == -EFAULT) + continue; /* * If we are reclaiming on behalf of a cgroup, skip * counting on behalf of references from different @@ -481,7 +483,7 @@ static int page_referenced_file(struct page *page, */ if (mem_cont && !mm_match_cgroup(vma->vm_mm, mem_cont)) continue; - referenced += page_referenced_one(page, vma, + referenced += page_referenced_one(page, vma, address, &mapcount, vm_flags); if (!mapcount) break; @@ -507,46 +509,47 @@ int page_referenced(struct page *page, unsigned long *vm_flags) { int referenced = 0; + int we_locked = 0; if (TestClearPageReferenced(page)) referenced++; *vm_flags = 0; - if (page_mapped(page) && page->mapping) { - if (PageAnon(page)) + if (page_mapped(page) && page_rmapping(page)) { + if (!is_locked && (!PageAnon(page) || PageKsm(page))) { + we_locked = trylock_page(page); + if (!we_locked) { + referenced++; + goto out; + } + } + if (unlikely(PageKsm(page))) + referenced += page_referenced_ksm(page, mem_cont, + vm_flags); + else if (PageAnon(page)) referenced += page_referenced_anon(page, mem_cont, vm_flags); - else if (is_locked) + else if (page->mapping) referenced += page_referenced_file(page, mem_cont, vm_flags); - else if (!trylock_page(page)) - referenced++; - else { - if (page->mapping) - referenced += page_referenced_file(page, - mem_cont, vm_flags); + if (we_locked) unlock_page(page); - } } - +out: if (page_test_and_clear_young(page)) referenced++; return referenced; } -static int page_mkclean_one(struct page *page, struct vm_area_struct *vma) +static int page_mkclean_one(struct page *page, struct vm_area_struct *vma, + unsigned long address) { struct mm_struct *mm = vma->vm_mm; - unsigned long address; pte_t *pte; spinlock_t *ptl; int ret = 0; - address = vma_address(page, vma); - if (address == -EFAULT) - goto out; - pte = page_check_address(page, mm, address, &ptl, 1); if (!pte) goto out; @@ -578,8 +581,12 @@ static int page_mkclean_file(struct 
address_space *mapping, struct page *page) spin_lock(&mapping->i_mmap_lock); vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) { - if (vma->vm_flags & VM_SHARED) - ret += page_mkclean_one(page, vma); + if (vma->vm_flags & VM_SHARED) { + unsigned long address = vma_address(page, vma); + if (address == -EFAULT) + continue; + ret += page_mkclean_one(page, vma, address); + } } spin_unlock(&mapping->i_mmap_lock); return ret; @@ -620,14 +627,7 @@ static void __page_set_anon_rmap(struct page *page, BUG_ON(!anon_vma); anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON; page->mapping = (struct address_space *) anon_vma; - page->index = linear_page_index(vma, address); - - /* - * nr_mapped state can be updated without turning off - * interrupts because it is not modified via interrupt. - */ - __inc_zone_page_state(page, NR_ANON_PAGES); } /** @@ -665,14 +665,23 @@ static void __page_check_anon_rmap(struct page *page, * @vma: the vm area in which the mapping is added * @address: the user virtual address mapped * - * The caller needs to hold the pte lock and the page must be locked. + * The caller needs to hold the pte lock, and the page must be locked in + * the anon_vma case: to serialize mapping,index checking after setting, + * and to ensure that PageAnon is not being upgraded racily to PageKsm + * (but PageKsm is never downgraded to PageAnon). */ void page_add_anon_rmap(struct page *page, struct vm_area_struct *vma, unsigned long address) { + int first = atomic_inc_and_test(&page->_mapcount); + if (first) + __inc_zone_page_state(page, NR_ANON_PAGES); + if (unlikely(PageKsm(page))) + return; + VM_BUG_ON(!PageLocked(page)); VM_BUG_ON(address < vma->vm_start || address >= vma->vm_end); - if (atomic_inc_and_test(&page->_mapcount)) + if (first) __page_set_anon_rmap(page, vma, address); else __page_check_anon_rmap(page, vma, address); @@ -694,6 +703,7 @@ void page_add_new_anon_rmap(struct page *page, VM_BUG_ON(address < vma->vm_start || address >= vma->vm_end); SetPageSwapBacked(page); atomic_set(&page->_mapcount, 0); /* increment count (starts at -1) */ + __inc_zone_page_state(page, NR_ANON_PAGES); __page_set_anon_rmap(page, vma, address); if (page_evictable(page, vma)) lru_cache_add_lru(page, LRU_ACTIVE_ANON); @@ -760,20 +770,15 @@ void page_remove_rmap(struct page *page) * Subfunctions of try_to_unmap: try_to_unmap_one called * repeatedly from either try_to_unmap_anon or try_to_unmap_file. */ -static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma, - enum ttu_flags flags) +int try_to_unmap_one(struct page *page, struct vm_area_struct *vma, + unsigned long address, enum ttu_flags flags) { struct mm_struct *mm = vma->vm_mm; - unsigned long address; pte_t *pte; pte_t pteval; spinlock_t *ptl; int ret = SWAP_AGAIN; - address = vma_address(page, vma); - if (address == -EFAULT) - goto out; - pte = page_check_address(page, mm, address, &ptl, 0); if (!pte) goto out; @@ -784,10 +789,11 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma, * skipped over this mm) then we should reactivate it. */ if (!(flags & TTU_IGNORE_MLOCK)) { - if (vma->vm_flags & VM_LOCKED) { - ret = SWAP_MLOCK; + if (vma->vm_flags & VM_LOCKED) + goto out_mlock; + + if (TTU_ACTION(flags) == TTU_MUNLOCK) goto out_unmap; - } } if (!(flags & TTU_IGNORE_ACCESS)) { if (ptep_clear_flush_young_notify(vma, address, pte)) { @@ -822,7 +828,11 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma, * Store the swap location in the pte. * See handle_pte_fault() ... 
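page_add_anon_rmap() above now keys everything off the atomic increment: _mapcount starts at -1, so atomic_inc_and_test() is true exactly once, for the first mapping, and that is where NR_ANON_PAGES is bumped (KSM pages return early before the anon_vma bookkeeping, but are still counted). A small C11 sketch of the start-at-minus-one convention, with a plain counter standing in for the zone statistic:

/* Sketch of "mapcount starts at -1" first-mapping detection. */
#include <stdio.h>
#include <stdatomic.h>
#include <stdbool.h>

struct page { atomic_int mapcount; };  /* starts at -1, like _mapcount */

static long nr_anon_pages;             /* stands in for the zone counter */

/* Returns true only for the very first mapping of the page. */
static bool add_anon_rmap(struct page *page)
{
    bool first = (atomic_fetch_add(&page->mapcount, 1) + 1 == 0);

    if (first)
        nr_anon_pages++;    /* account once per page, not per mapping */
    return first;
}

int main(void)
{
    struct page page;

    atomic_init(&page.mapcount, -1);
    printf("first map:  %d\n", add_anon_rmap(&page));   /* 1 */
    printf("second map: %d\n", add_anon_rmap(&page));   /* 0 */
    printf("nr_anon_pages = %ld\n", nr_anon_pages);     /* 1 */
    return 0;
}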
*/ - swap_duplicate(entry); + if (swap_duplicate(entry) < 0) { + set_pte_at(mm, address, pte, pteval); + ret = SWAP_FAIL; + goto out_unmap; + } if (list_empty(&mm->mmlist)) { spin_lock(&mmlist_lock); if (list_empty(&mm->mmlist)) @@ -849,7 +859,6 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma, } else dec_mm_counter(mm, file_rss); - page_remove_rmap(page); page_cache_release(page); @@ -857,6 +866,27 @@ out_unmap: pte_unmap_unlock(pte, ptl); out: return ret; + +out_mlock: + pte_unmap_unlock(pte, ptl); + + + /* + * We need mmap_sem locking, Otherwise VM_LOCKED check makes + * unstable result and race. Plus, We can't wait here because + * we now hold anon_vma->lock or mapping->i_mmap_lock. + * if trylock failed, the page remain in evictable lru and later + * vmscan could retry to move the page to unevictable lru if the + * page is actually mlocked. + */ + if (down_read_trylock(&vma->vm_mm->mmap_sem)) { + if (vma->vm_flags & VM_LOCKED) { + mlock_vma_page(page); + ret = SWAP_MLOCK; + } + up_read(&vma->vm_mm->mmap_sem); + } + return ret; } /* @@ -922,11 +952,10 @@ static int try_to_unmap_cluster(unsigned long cursor, unsigned int *mapcount, return ret; /* - * MLOCK_PAGES => feature is configured. - * if we can acquire the mmap_sem for read, and vma is VM_LOCKED, + * If we can acquire the mmap_sem for read, and vma is VM_LOCKED, * keep the sem while scanning the cluster for mlocking pages. */ - if (MLOCK_PAGES && down_read_trylock(&vma->vm_mm->mmap_sem)) { + if (down_read_trylock(&vma->vm_mm->mmap_sem)) { locked_vma = (vma->vm_flags & VM_LOCKED); if (!locked_vma) up_read(&vma->vm_mm->mmap_sem); /* don't need it */ @@ -976,29 +1005,11 @@ static int try_to_unmap_cluster(unsigned long cursor, unsigned int *mapcount, return ret; } -/* - * common handling for pages mapped in VM_LOCKED vmas - */ -static int try_to_mlock_page(struct page *page, struct vm_area_struct *vma) -{ - int mlocked = 0; - - if (down_read_trylock(&vma->vm_mm->mmap_sem)) { - if (vma->vm_flags & VM_LOCKED) { - mlock_vma_page(page); - mlocked++; /* really mlocked the page */ - } - up_read(&vma->vm_mm->mmap_sem); - } - return mlocked; -} - /** * try_to_unmap_anon - unmap or unlock anonymous page using the object-based * rmap method * @page: the page to unmap/unlock - * @unlock: request for unlock rather than unmap [unlikely] - * @migration: unmapping for migration - ignored if @unlock + * @flags: action and flags * * Find all the mappings of a page using the mapping pointer and the vma chains * contained in the anon_vma struct it points to. 
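The new out_mlock path above cannot sleep on mmap_sem because anon_vma->lock or i_mmap_lock is still held, so it only trylocks, re-checks VM_LOCKED under the semaphore, and either mlocks the page (SWAP_MLOCK) or gives up with SWAP_AGAIN and lets vmscan retry later. A userspace sketch of that trylock-then-recheck shape, with a pthread mutex standing in for mmap_sem and illustrative return-code values:

/* Sketch of "trylock, then re-check the flag" while another lock is held. */
#include <stdio.h>
#include <stdbool.h>
#include <pthread.h>

#define SWAP_AGAIN 1
#define SWAP_MLOCK 3

struct mm { pthread_mutex_t mmap_sem; bool vm_locked; };

static int handle_locked_vma(struct mm *mm)
{
    int ret = SWAP_AGAIN;   /* default: give up quietly, retry later */

    /* We may not sleep here, so only a trylock is allowed. */
    if (pthread_mutex_trylock(&mm->mmap_sem) == 0) {
        if (mm->vm_locked) {
            /* mlock_vma_page() would go here */
            ret = SWAP_MLOCK;
        }
        pthread_mutex_unlock(&mm->mmap_sem);
    }
    return ret;
}

int main(void)
{
    struct mm mm = { .vm_locked = true };

    pthread_mutex_init(&mm.mmap_sem, NULL);

    printf("uncontended: %s\n",
           handle_locked_vma(&mm) == SWAP_MLOCK ? "SWAP_MLOCK" : "SWAP_AGAIN");

    pthread_mutex_lock(&mm.mmap_sem);   /* simulate contention */
    printf("contended:   %s\n",
           handle_locked_vma(&mm) == SWAP_MLOCK ? "SWAP_MLOCK" : "SWAP_AGAIN");
    pthread_mutex_unlock(&mm.mmap_sem);

    pthread_mutex_destroy(&mm.mmap_sem);
    return 0;
}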
@@ -1014,42 +1025,22 @@ static int try_to_unmap_anon(struct page *page, enum ttu_flags flags) { struct anon_vma *anon_vma; struct vm_area_struct *vma; - unsigned int mlocked = 0; int ret = SWAP_AGAIN; - int unlock = TTU_ACTION(flags) == TTU_MUNLOCK; - - if (MLOCK_PAGES && unlikely(unlock)) - ret = SWAP_SUCCESS; /* default for try_to_munlock() */ anon_vma = page_lock_anon_vma(page); if (!anon_vma) return ret; list_for_each_entry(vma, &anon_vma->head, anon_vma_node) { - if (MLOCK_PAGES && unlikely(unlock)) { - if (!((vma->vm_flags & VM_LOCKED) && - page_mapped_in_vma(page, vma))) - continue; /* must visit all unlocked vmas */ - ret = SWAP_MLOCK; /* saw at least one mlocked vma */ - } else { - ret = try_to_unmap_one(page, vma, flags); - if (ret == SWAP_FAIL || !page_mapped(page)) - break; - } - if (ret == SWAP_MLOCK) { - mlocked = try_to_mlock_page(page, vma); - if (mlocked) - break; /* stop if actually mlocked page */ - } + unsigned long address = vma_address(page, vma); + if (address == -EFAULT) + continue; + ret = try_to_unmap_one(page, vma, address, flags); + if (ret != SWAP_AGAIN || !page_mapped(page)) + break; } page_unlock_anon_vma(anon_vma); - - if (mlocked) - ret = SWAP_MLOCK; /* actually mlocked the page */ - else if (ret == SWAP_MLOCK) - ret = SWAP_AGAIN; /* saw VM_LOCKED vma */ - return ret; } @@ -1079,48 +1070,30 @@ static int try_to_unmap_file(struct page *page, enum ttu_flags flags) unsigned long max_nl_cursor = 0; unsigned long max_nl_size = 0; unsigned int mapcount; - unsigned int mlocked = 0; - int unlock = TTU_ACTION(flags) == TTU_MUNLOCK; - - if (MLOCK_PAGES && unlikely(unlock)) - ret = SWAP_SUCCESS; /* default for try_to_munlock() */ spin_lock(&mapping->i_mmap_lock); vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) { - if (MLOCK_PAGES && unlikely(unlock)) { - if (!((vma->vm_flags & VM_LOCKED) && - page_mapped_in_vma(page, vma))) - continue; /* must visit all vmas */ - ret = SWAP_MLOCK; - } else { - ret = try_to_unmap_one(page, vma, flags); - if (ret == SWAP_FAIL || !page_mapped(page)) - goto out; - } - if (ret == SWAP_MLOCK) { - mlocked = try_to_mlock_page(page, vma); - if (mlocked) - break; /* stop if actually mlocked page */ - } + unsigned long address = vma_address(page, vma); + if (address == -EFAULT) + continue; + ret = try_to_unmap_one(page, vma, address, flags); + if (ret != SWAP_AGAIN || !page_mapped(page)) + goto out; } - if (mlocked) + if (list_empty(&mapping->i_mmap_nonlinear)) goto out; - if (list_empty(&mapping->i_mmap_nonlinear)) + /* + * We don't bother to try to find the munlocked page in nonlinears. + * It's costly. Instead, later, page reclaim logic may call + * try_to_unmap(TTU_MUNLOCK) and recover PG_mlocked lazily. 
+ */ + if (TTU_ACTION(flags) == TTU_MUNLOCK) goto out; list_for_each_entry(vma, &mapping->i_mmap_nonlinear, shared.vm_set.list) { - if (MLOCK_PAGES && unlikely(unlock)) { - if (!(vma->vm_flags & VM_LOCKED)) - continue; /* must visit all vmas */ - ret = SWAP_MLOCK; /* leave mlocked == 0 */ - goto out; /* no need to look further */ - } - if (!MLOCK_PAGES && !(flags & TTU_IGNORE_MLOCK) && - (vma->vm_flags & VM_LOCKED)) - continue; cursor = (unsigned long) vma->vm_private_data; if (cursor > max_nl_cursor) max_nl_cursor = cursor; @@ -1153,16 +1126,12 @@ static int try_to_unmap_file(struct page *page, enum ttu_flags flags) do { list_for_each_entry(vma, &mapping->i_mmap_nonlinear, shared.vm_set.list) { - if (!MLOCK_PAGES && !(flags & TTU_IGNORE_MLOCK) && - (vma->vm_flags & VM_LOCKED)) - continue; cursor = (unsigned long) vma->vm_private_data; while ( cursor < max_nl_cursor && cursor < vma->vm_end - vma->vm_start) { - ret = try_to_unmap_cluster(cursor, &mapcount, - vma, page); - if (ret == SWAP_MLOCK) - mlocked = 2; /* to return below */ + if (try_to_unmap_cluster(cursor, &mapcount, + vma, page) == SWAP_MLOCK) + ret = SWAP_MLOCK; cursor += CLUSTER_SIZE; vma->vm_private_data = (void *) cursor; if ((int)mapcount <= 0) @@ -1183,10 +1152,6 @@ static int try_to_unmap_file(struct page *page, enum ttu_flags flags) vma->vm_private_data = NULL; out: spin_unlock(&mapping->i_mmap_lock); - if (mlocked) - ret = SWAP_MLOCK; /* actually mlocked the page */ - else if (ret == SWAP_MLOCK) - ret = SWAP_AGAIN; /* saw VM_LOCKED vma */ return ret; } @@ -1210,7 +1175,9 @@ int try_to_unmap(struct page *page, enum ttu_flags flags) BUG_ON(!PageLocked(page)); - if (PageAnon(page)) + if (unlikely(PageKsm(page))) + ret = try_to_unmap_ksm(page, flags); + else if (PageAnon(page)) ret = try_to_unmap_anon(page, flags); else ret = try_to_unmap_file(page, flags); @@ -1229,17 +1196,98 @@ int try_to_unmap(struct page *page, enum ttu_flags flags) * * Return values are: * - * SWAP_SUCCESS - no vma's holding page mlocked. + * SWAP_AGAIN - no vma is holding page mlocked, or, * SWAP_AGAIN - page mapped in mlocked vma -- couldn't acquire mmap sem + * SWAP_FAIL - page cannot be located at present * SWAP_MLOCK - page is now mlocked. */ int try_to_munlock(struct page *page) { VM_BUG_ON(!PageLocked(page) || PageLRU(page)); - if (PageAnon(page)) + if (unlikely(PageKsm(page))) + return try_to_unmap_ksm(page, TTU_MUNLOCK); + else if (PageAnon(page)) return try_to_unmap_anon(page, TTU_MUNLOCK); else return try_to_unmap_file(page, TTU_MUNLOCK); } +#ifdef CONFIG_MIGRATION +/* + * rmap_walk() and its helpers rmap_walk_anon() and rmap_walk_file(): + * Called by migrate.c to remove migration ptes, but might be used more later. + */ +static int rmap_walk_anon(struct page *page, int (*rmap_one)(struct page *, + struct vm_area_struct *, unsigned long, void *), void *arg) +{ + struct anon_vma *anon_vma; + struct vm_area_struct *vma; + int ret = SWAP_AGAIN; + + /* + * Note: remove_migration_ptes() cannot use page_lock_anon_vma() + * because that depends on page_mapped(); but not all its usages + * are holding mmap_sem, which also gave the necessary guarantee + * (that this anon_vma's slab has not already been destroyed). + * This needs to be reviewed later: avoiding page_lock_anon_vma() + * is risky, and currently limits the usefulness of rmap_walk(). 
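The rmap_walk() machinery being added here is a thin dispatcher plus per-type walkers: the page type selects rmap_walk_ksm(), rmap_walk_anon() or rmap_walk_file(), and each walker keeps invoking the caller's rmap_one() callback until it returns something other than SWAP_AGAIN, which is how migration's remove_migration_pte() gets applied to every mapping of a page. A standalone sketch of that callback shape, with simplified invented types:

/* Sketch of a dispatcher-plus-callback reverse-map walk. */
#include <stdio.h>

#define SWAP_AGAIN 1

enum page_kind { PAGE_ANON, PAGE_FILE };

struct page { enum page_kind kind; };
struct vma { int id; };

typedef int (*rmap_one_t)(struct page *, struct vma *, unsigned long, void *);

static int walk_vmas(struct page *page, struct vma *vmas, int n,
                     rmap_one_t rmap_one, void *arg)
{
    int ret = SWAP_AGAIN;

    for (int i = 0; i < n; i++) {
        ret = rmap_one(page, &vmas[i], 0x1000UL * (i + 1), arg);
        if (ret != SWAP_AGAIN)
            break;                      /* callback asked us to stop */
    }
    return ret;
}

static int rmap_walk_anon(struct page *page, struct vma *vmas, int n,
                          rmap_one_t rmap_one, void *arg)
{
    printf("walking the anon_vma list\n");
    return walk_vmas(page, vmas, n, rmap_one, arg);
}

static int rmap_walk_file(struct page *page, struct vma *vmas, int n,
                          rmap_one_t rmap_one, void *arg)
{
    printf("walking the file mapping tree\n");
    return walk_vmas(page, vmas, n, rmap_one, arg);
}

static int rmap_walk(struct page *page, struct vma *vmas, int n,
                     rmap_one_t rmap_one, void *arg)
{
    if (page->kind == PAGE_ANON)
        return rmap_walk_anon(page, vmas, n, rmap_one, arg);
    return rmap_walk_file(page, vmas, n, rmap_one, arg);
}

static int print_one(struct page *page, struct vma *vma,
                     unsigned long addr, void *arg)
{
    (void)page;
    printf("  %s: vma %d, address %#lx\n", (char *)arg, vma->id, addr);
    return SWAP_AGAIN;                  /* keep walking */
}

int main(void)
{
    struct page page = { PAGE_ANON };
    struct vma vmas[] = { {0}, {1}, {2} };

    rmap_walk(&page, vmas, 3, print_one, "restore migration pte");
    return 0;
}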
+ */ + anon_vma = page_anon_vma(page); + if (!anon_vma) + return ret; + spin_lock(&anon_vma->lock); + list_for_each_entry(vma, &anon_vma->head, anon_vma_node) { + unsigned long address = vma_address(page, vma); + if (address == -EFAULT) + continue; + ret = rmap_one(page, vma, address, arg); + if (ret != SWAP_AGAIN) + break; + } + spin_unlock(&anon_vma->lock); + return ret; +} + +static int rmap_walk_file(struct page *page, int (*rmap_one)(struct page *, + struct vm_area_struct *, unsigned long, void *), void *arg) +{ + struct address_space *mapping = page->mapping; + pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT); + struct vm_area_struct *vma; + struct prio_tree_iter iter; + int ret = SWAP_AGAIN; + + if (!mapping) + return ret; + spin_lock(&mapping->i_mmap_lock); + vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) { + unsigned long address = vma_address(page, vma); + if (address == -EFAULT) + continue; + ret = rmap_one(page, vma, address, arg); + if (ret != SWAP_AGAIN) + break; + } + /* + * No nonlinear handling: being always shared, nonlinear vmas + * never contain migration ptes. Decide what to do about this + * limitation to linear when we need rmap_walk() on nonlinear. + */ + spin_unlock(&mapping->i_mmap_lock); + return ret; +} + +int rmap_walk(struct page *page, int (*rmap_one)(struct page *, + struct vm_area_struct *, unsigned long, void *), void *arg) +{ + VM_BUG_ON(!PageLocked(page)); + + if (unlikely(PageKsm(page))) + return rmap_walk_ksm(page, rmap_one, arg); + else if (PageAnon(page)) + return rmap_walk_anon(page, rmap_one, arg); + else + return rmap_walk_file(page, rmap_one, arg); +} +#endif /* CONFIG_MIGRATION */ diff --git a/mm/shmem.c b/mm/shmem.c index 356dd99566e..4fb41c83dac 100644 --- a/mm/shmem.c +++ b/mm/shmem.c @@ -1017,7 +1017,14 @@ int shmem_unuse(swp_entry_t entry, struct page *page) goto out; } mutex_unlock(&shmem_swaplist_mutex); -out: return found; /* 0 or 1 or -ENOMEM */ + /* + * Can some race bring us here? We've been holding page lock, + * so I think not; but would rather try again later than BUG() + */ + unlock_page(page); + page_cache_release(page); +out: + return (found < 0) ? 
found : 0; } /* @@ -1080,7 +1087,7 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc) else inode = NULL; spin_unlock(&info->lock); - swap_duplicate(swap); + swap_shmem_alloc(swap); BUG_ON(page_mapped(page)); page_cache_release(page); /* pagecache ref */ swap_writepage(page, wbc); diff --git a/mm/swapfile.c b/mm/swapfile.c index 9c590eef791..6c0585b1641 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c @@ -22,6 +22,7 @@ #include <linux/seq_file.h> #include <linux/init.h> #include <linux/module.h> +#include <linux/ksm.h> #include <linux/rmap.h> #include <linux/security.h> #include <linux/backing-dev.h> @@ -35,11 +36,15 @@ #include <linux/swapops.h> #include <linux/page_cgroup.h> +static bool swap_count_continued(struct swap_info_struct *, pgoff_t, + unsigned char); +static void free_swap_count_continuations(struct swap_info_struct *); +static sector_t map_swap_entry(swp_entry_t, struct block_device**); + static DEFINE_SPINLOCK(swap_lock); static unsigned int nr_swapfiles; long nr_swap_pages; long total_swap_pages; -static int swap_overflow; static int least_priority; static const char Bad_file[] = "Bad swap file entry "; @@ -49,42 +54,20 @@ static const char Unused_offset[] = "Unused swap offset entry "; static struct swap_list_t swap_list = {-1, -1}; -static struct swap_info_struct swap_info[MAX_SWAPFILES]; +static struct swap_info_struct *swap_info[MAX_SWAPFILES]; static DEFINE_MUTEX(swapon_mutex); -/* For reference count accounting in swap_map */ -/* enum for swap_map[] handling. internal use only */ -enum { - SWAP_MAP = 0, /* ops for reference from swap users */ - SWAP_CACHE, /* ops for reference from swap cache */ -}; - -static inline int swap_count(unsigned short ent) -{ - return ent & SWAP_COUNT_MASK; -} - -static inline bool swap_has_cache(unsigned short ent) +static inline unsigned char swap_count(unsigned char ent) { - return !!(ent & SWAP_HAS_CACHE); + return ent & ~SWAP_HAS_CACHE; /* may include SWAP_HAS_CONT flag */ } -static inline unsigned short encode_swapmap(int count, bool has_cache) -{ - unsigned short ret = count; - - if (has_cache) - return SWAP_HAS_CACHE | ret; - return ret; -} - -/* returnes 1 if swap entry is freed */ +/* returns 1 if swap entry is freed */ static int __try_to_reclaim_swap(struct swap_info_struct *si, unsigned long offset) { - int type = si - swap_info; - swp_entry_t entry = swp_entry(type, offset); + swp_entry_t entry = swp_entry(si->type, offset); struct page *page; int ret = 0; @@ -120,7 +103,7 @@ void swap_unplug_io_fn(struct backing_dev_info *unused_bdi, struct page *page) down_read(&swap_unplug_sem); entry.val = page_private(page); if (PageSwapCache(page)) { - struct block_device *bdev = swap_info[swp_type(entry)].bdev; + struct block_device *bdev = swap_info[swp_type(entry)]->bdev; struct backing_dev_info *bdi; /* @@ -146,23 +129,28 @@ void swap_unplug_io_fn(struct backing_dev_info *unused_bdi, struct page *page) static int discard_swap(struct swap_info_struct *si) { struct swap_extent *se; + sector_t start_block; + sector_t nr_blocks; int err = 0; - list_for_each_entry(se, &si->extent_list, list) { - sector_t start_block = se->start_block << (PAGE_SHIFT - 9); - sector_t nr_blocks = (sector_t)se->nr_pages << (PAGE_SHIFT - 9); + /* Do not discard the swap header page! 
*/ + se = &si->first_swap_extent; + start_block = (se->start_block + 1) << (PAGE_SHIFT - 9); + nr_blocks = ((sector_t)se->nr_pages - 1) << (PAGE_SHIFT - 9); + if (nr_blocks) { + err = blkdev_issue_discard(si->bdev, start_block, + nr_blocks, GFP_KERNEL, DISCARD_FL_BARRIER); + if (err) + return err; + cond_resched(); + } - if (se->start_page == 0) { - /* Do not discard the swap header page! */ - start_block += 1 << (PAGE_SHIFT - 9); - nr_blocks -= 1 << (PAGE_SHIFT - 9); - if (!nr_blocks) - continue; - } + list_for_each_entry(se, &si->first_swap_extent.list, list) { + start_block = se->start_block << (PAGE_SHIFT - 9); + nr_blocks = (sector_t)se->nr_pages << (PAGE_SHIFT - 9); err = blkdev_issue_discard(si->bdev, start_block, - nr_blocks, GFP_KERNEL, - DISCARD_FL_BARRIER); + nr_blocks, GFP_KERNEL, DISCARD_FL_BARRIER); if (err) break; @@ -201,14 +189,11 @@ static void discard_swap_cluster(struct swap_info_struct *si, start_block <<= PAGE_SHIFT - 9; nr_blocks <<= PAGE_SHIFT - 9; if (blkdev_issue_discard(si->bdev, start_block, - nr_blocks, GFP_NOIO, - DISCARD_FL_BARRIER)) + nr_blocks, GFP_NOIO, DISCARD_FL_BARRIER)) break; } lh = se->list.next; - if (lh == &si->extent_list) - lh = lh->next; se = list_entry(lh, struct swap_extent, list); } } @@ -223,7 +208,7 @@ static int wait_for_discard(void *word) #define LATENCY_LIMIT 256 static inline unsigned long scan_swap_map(struct swap_info_struct *si, - int cache) + unsigned char usage) { unsigned long offset; unsigned long scan_base; @@ -354,10 +339,7 @@ checks: si->lowest_bit = si->max; si->highest_bit = 0; } - if (cache == SWAP_CACHE) /* at usual swap-out via vmscan.c */ - si->swap_map[offset] = encode_swapmap(0, true); - else /* at suspend */ - si->swap_map[offset] = encode_swapmap(1, false); + si->swap_map[offset] = usage; si->cluster_next = offset + 1; si->flags -= SWP_SCANNING; @@ -467,10 +449,10 @@ swp_entry_t get_swap_page(void) nr_swap_pages--; for (type = swap_list.next; type >= 0 && wrapped < 2; type = next) { - si = swap_info + type; + si = swap_info[type]; next = si->next; if (next < 0 || - (!wrapped && si->prio != swap_info[next].prio)) { + (!wrapped && si->prio != swap_info[next]->prio)) { next = swap_list.head; wrapped++; } @@ -482,7 +464,7 @@ swp_entry_t get_swap_page(void) swap_list.next = next; /* This is called for allocating swap entry for cache */ - offset = scan_swap_map(si, SWAP_CACHE); + offset = scan_swap_map(si, SWAP_HAS_CACHE); if (offset) { spin_unlock(&swap_lock); return swp_entry(type, offset); @@ -503,11 +485,11 @@ swp_entry_t get_swap_page_of_type(int type) pgoff_t offset; spin_lock(&swap_lock); - si = swap_info + type; - if (si->flags & SWP_WRITEOK) { + si = swap_info[type]; + if (si && (si->flags & SWP_WRITEOK)) { nr_swap_pages--; /* This is called for allocating swap entry, not cache */ - offset = scan_swap_map(si, SWAP_MAP); + offset = scan_swap_map(si, 1); if (offset) { spin_unlock(&swap_lock); return swp_entry(type, offset); @@ -518,9 +500,9 @@ swp_entry_t get_swap_page_of_type(int type) return (swp_entry_t) {0}; } -static struct swap_info_struct * swap_info_get(swp_entry_t entry) +static struct swap_info_struct *swap_info_get(swp_entry_t entry) { - struct swap_info_struct * p; + struct swap_info_struct *p; unsigned long offset, type; if (!entry.val) @@ -528,7 +510,7 @@ static struct swap_info_struct * swap_info_get(swp_entry_t entry) type = swp_type(entry); if (type >= nr_swapfiles) goto bad_nofile; - p = & swap_info[type]; + p = swap_info[type]; if (!(p->flags & SWP_USED)) goto bad_device; offset = 
swp_offset(entry); @@ -554,41 +536,56 @@ out: return NULL; } -static int swap_entry_free(struct swap_info_struct *p, - swp_entry_t ent, int cache) +static unsigned char swap_entry_free(struct swap_info_struct *p, + swp_entry_t entry, unsigned char usage) { - unsigned long offset = swp_offset(ent); - int count = swap_count(p->swap_map[offset]); - bool has_cache; + unsigned long offset = swp_offset(entry); + unsigned char count; + unsigned char has_cache; - has_cache = swap_has_cache(p->swap_map[offset]); + count = p->swap_map[offset]; + has_cache = count & SWAP_HAS_CACHE; + count &= ~SWAP_HAS_CACHE; - if (cache == SWAP_MAP) { /* dropping usage count of swap */ - if (count < SWAP_MAP_MAX) { - count--; - p->swap_map[offset] = encode_swapmap(count, has_cache); - } - } else { /* dropping swap cache flag */ + if (usage == SWAP_HAS_CACHE) { VM_BUG_ON(!has_cache); - p->swap_map[offset] = encode_swapmap(count, false); - + has_cache = 0; + } else if (count == SWAP_MAP_SHMEM) { + /* + * Or we could insist on shmem.c using a special + * swap_shmem_free() and free_shmem_swap_and_cache()... + */ + count = 0; + } else if ((count & ~COUNT_CONTINUED) <= SWAP_MAP_MAX) { + if (count == COUNT_CONTINUED) { + if (swap_count_continued(p, offset, count)) + count = SWAP_MAP_MAX | COUNT_CONTINUED; + else + count = SWAP_MAP_MAX; + } else + count--; } - /* return code. */ - count = p->swap_map[offset]; + + if (!count) + mem_cgroup_uncharge_swap(entry); + + usage = count | has_cache; + p->swap_map[offset] = usage; + /* free if no reference */ - if (!count) { + if (!usage) { if (offset < p->lowest_bit) p->lowest_bit = offset; if (offset > p->highest_bit) p->highest_bit = offset; - if (p->prio > swap_info[swap_list.next].prio) - swap_list.next = p - swap_info; + if (swap_list.next >= 0 && + p->prio > swap_info[swap_list.next]->prio) + swap_list.next = p->type; nr_swap_pages++; p->inuse_pages--; } - if (!swap_count(count)) - mem_cgroup_uncharge_swap(ent); - return count; + + return usage; } /* @@ -597,11 +594,11 @@ static int swap_entry_free(struct swap_info_struct *p, */ void swap_free(swp_entry_t entry) { - struct swap_info_struct * p; + struct swap_info_struct *p; p = swap_info_get(entry); if (p) { - swap_entry_free(p, entry, SWAP_MAP); + swap_entry_free(p, entry, 1); spin_unlock(&swap_lock); } } @@ -612,26 +609,21 @@ void swap_free(swp_entry_t entry) void swapcache_free(swp_entry_t entry, struct page *page) { struct swap_info_struct *p; - int ret; + unsigned char count; p = swap_info_get(entry); if (p) { - ret = swap_entry_free(p, entry, SWAP_CACHE); - if (page) { - bool swapout; - if (ret) - swapout = true; /* the end of swap out */ - else - swapout = false; /* no more swap users! */ - mem_cgroup_uncharge_swapcache(page, entry, swapout); - } + count = swap_entry_free(p, entry, SWAP_HAS_CACHE); + if (page) + mem_cgroup_uncharge_swapcache(page, entry, count != 0); spin_unlock(&swap_lock); } - return; } /* * How many references to page are currently swapped out? + * This does not give an exact answer when swap count is continued, + * but does include the high COUNT_CONTINUED flag to allow for that. 
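/*
 * swap_count() and swap_entry_free() above work on a swap_map[] of single
 * bytes that pack a reference count together with the SWAP_HAS_CACHE and
 * COUNT_CONTINUED flags, plus the SWAP_MAP_SHMEM marker.  Below is a small
 * stand-alone model of that decoding; the numeric values are assumptions
 * mirroring include/linux/swap.h as changed by this series, not definitions
 * taken from this hunk.
 */
#include <stdio.h>

#define SWAP_HAS_CACHE	0x40	/* page is also in the swap cache */
#define COUNT_CONTINUED	0x80	/* count carries on in a continuation page */
#define SWAP_MAP_SHMEM	0xbf	/* entry is owned by shmem/tmpfs */

static unsigned char swap_count(unsigned char ent)
{
	return ent & ~SWAP_HAS_CACHE;	/* may still include COUNT_CONTINUED */
}

int main(void)
{
	unsigned char ent = 3 | SWAP_HAS_CACHE;	/* three owners, page cached */

	printf("count %d, cached %s, continued %s\n",
	       swap_count(ent) & ~COUNT_CONTINUED,
	       (ent & SWAP_HAS_CACHE) ? "yes" : "no",
	       (swap_count(ent) & COUNT_CONTINUED) ? "yes" : "no");

	ent = SWAP_MAP_SHMEM;
	printf("shmem-owned: %s\n",
	       swap_count(ent) == SWAP_MAP_SHMEM ? "yes" : "no");
	return 0;
}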
*/ static inline int page_swapcount(struct page *page) { @@ -659,6 +651,8 @@ int reuse_swap_page(struct page *page) int count; VM_BUG_ON(!PageLocked(page)); + if (unlikely(PageKsm(page))) + return 0; count = page_mapcount(page); if (count <= 1 && PageSwapCache(page)) { count += page_swapcount(page); @@ -667,7 +661,7 @@ int reuse_swap_page(struct page *page) SetPageDirty(page); } } - return count == 1; + return count <= 1; } /* @@ -704,7 +698,7 @@ int free_swap_and_cache(swp_entry_t entry) p = swap_info_get(entry); if (p) { - if (swap_entry_free(p, entry, SWAP_MAP) == SWAP_HAS_CACHE) { + if (swap_entry_free(p, entry, 1) == SWAP_HAS_CACHE) { page = find_get_page(&swapper_space, entry.val); if (page && !trylock_page(page)) { page_cache_release(page); @@ -741,14 +735,14 @@ int free_swap_and_cache(swp_entry_t entry) int swap_type_of(dev_t device, sector_t offset, struct block_device **bdev_p) { struct block_device *bdev = NULL; - int i; + int type; if (device) bdev = bdget(device); spin_lock(&swap_lock); - for (i = 0; i < nr_swapfiles; i++) { - struct swap_info_struct *sis = swap_info + i; + for (type = 0; type < nr_swapfiles; type++) { + struct swap_info_struct *sis = swap_info[type]; if (!(sis->flags & SWP_WRITEOK)) continue; @@ -758,20 +752,18 @@ int swap_type_of(dev_t device, sector_t offset, struct block_device **bdev_p) *bdev_p = bdgrab(sis->bdev); spin_unlock(&swap_lock); - return i; + return type; } if (bdev == sis->bdev) { - struct swap_extent *se; + struct swap_extent *se = &sis->first_swap_extent; - se = list_entry(sis->extent_list.next, - struct swap_extent, list); if (se->start_block == offset) { if (bdev_p) *bdev_p = bdgrab(sis->bdev); spin_unlock(&swap_lock); bdput(bdev); - return i; + return type; } } } @@ -783,6 +775,21 @@ int swap_type_of(dev_t device, sector_t offset, struct block_device **bdev_p) } /* + * Get the (PAGE_SIZE) block corresponding to given offset on the swapdev + * corresponding to given index in swap_info (swap type). 
+ */ +sector_t swapdev_block(int type, pgoff_t offset) +{ + struct block_device *bdev; + + if ((unsigned int)type >= nr_swapfiles) + return 0; + if (!(swap_info[type]->flags & SWP_WRITEOK)) + return 0; + return map_swap_entry(swp_entry(type, offset), &bdev); +} + +/* * Return either the total number of swap pages of given type, or the number * of free pages of that type (depending on @free) * @@ -792,18 +799,20 @@ unsigned int count_swap_pages(int type, int free) { unsigned int n = 0; - if (type < nr_swapfiles) { - spin_lock(&swap_lock); - if (swap_info[type].flags & SWP_WRITEOK) { - n = swap_info[type].pages; + spin_lock(&swap_lock); + if ((unsigned int)type < nr_swapfiles) { + struct swap_info_struct *sis = swap_info[type]; + + if (sis->flags & SWP_WRITEOK) { + n = sis->pages; if (free) - n -= swap_info[type].inuse_pages; + n -= sis->inuse_pages; } - spin_unlock(&swap_lock); } + spin_unlock(&swap_lock); return n; } -#endif +#endif /* CONFIG_HIBERNATION */ /* * No need to decide whether this PTE shares the swap entry with others, @@ -932,7 +941,7 @@ static int unuse_vma(struct vm_area_struct *vma, unsigned long addr, end, next; int ret; - if (page->mapping) { + if (page_anon_vma(page)) { addr = page_address_in_vma(page, vma); if (addr == -EFAULT) return 0; @@ -988,7 +997,7 @@ static unsigned int find_next_to_unuse(struct swap_info_struct *si, { unsigned int max = si->max; unsigned int i = prev; - int count; + unsigned char count; /* * No need for swap_lock here: we're just looking @@ -1024,16 +1033,14 @@ static unsigned int find_next_to_unuse(struct swap_info_struct *si, */ static int try_to_unuse(unsigned int type) { - struct swap_info_struct * si = &swap_info[type]; + struct swap_info_struct *si = swap_info[type]; struct mm_struct *start_mm; - unsigned short *swap_map; - unsigned short swcount; + unsigned char *swap_map; + unsigned char swcount; struct page *page; swp_entry_t entry; unsigned int i = 0; int retval = 0; - int reset_overflow = 0; - int shmem; /* * When searching mms for an entry, a good strategy is to @@ -1047,8 +1054,7 @@ static int try_to_unuse(unsigned int type) * together, child after parent. If we race with dup_mmap(), we * prefer to resolve parent before child, lest we miss entries * duplicated after we scanned child: using last mm would invert - * that. Though it's only a serious concern when an overflowed - * swap count is reset from SWAP_MAP_MAX, preventing a rescan. + * that. */ start_mm = &init_mm; atomic_inc(&init_mm.mm_users); @@ -1110,17 +1116,18 @@ static int try_to_unuse(unsigned int type) /* * Remove all references to entry. - * Whenever we reach init_mm, there's no address space - * to search, but use it as a reminder to search shmem. 
*/ - shmem = 0; swcount = *swap_map; - if (swap_count(swcount)) { - if (start_mm == &init_mm) - shmem = shmem_unuse(entry, page); - else - retval = unuse_mm(start_mm, entry, page); + if (swap_count(swcount) == SWAP_MAP_SHMEM) { + retval = shmem_unuse(entry, page); + /* page has already been unlocked and released */ + if (retval < 0) + break; + continue; } + if (swap_count(swcount) && start_mm != &init_mm) + retval = unuse_mm(start_mm, entry, page); + if (swap_count(*swap_map)) { int set_start_mm = (*swap_map >= swcount); struct list_head *p = &start_mm->mmlist; @@ -1131,7 +1138,7 @@ static int try_to_unuse(unsigned int type) atomic_inc(&new_start_mm->mm_users); atomic_inc(&prev_mm->mm_users); spin_lock(&mmlist_lock); - while (swap_count(*swap_map) && !retval && !shmem && + while (swap_count(*swap_map) && !retval && (p = p->next) != &start_mm->mmlist) { mm = list_entry(p, struct mm_struct, mmlist); if (!atomic_inc_not_zero(&mm->mm_users)) @@ -1145,10 +1152,9 @@ static int try_to_unuse(unsigned int type) swcount = *swap_map; if (!swap_count(swcount)) /* any usage ? */ ; - else if (mm == &init_mm) { + else if (mm == &init_mm) set_start_mm = 1; - shmem = shmem_unuse(entry, page); - } else + else retval = unuse_mm(mm, entry, page); if (set_start_mm && *swap_map < swcount) { @@ -1164,13 +1170,6 @@ static int try_to_unuse(unsigned int type) mmput(start_mm); start_mm = new_start_mm; } - if (shmem) { - /* page has already been unlocked and released */ - if (shmem > 0) - continue; - retval = shmem; - break; - } if (retval) { unlock_page(page); page_cache_release(page); @@ -1178,30 +1177,6 @@ static int try_to_unuse(unsigned int type) } /* - * How could swap count reach 0x7ffe ? - * There's no way to repeat a swap page within an mm - * (except in shmem, where it's the shared object which takes - * the reference count)? - * We believe SWAP_MAP_MAX cannot occur.(if occur, unsigned - * short is too small....) - * If that's wrong, then we should worry more about - * exit_mmap() and do_munmap() cases described above: - * we might be resetting SWAP_MAP_MAX too early here. - * We know "Undead"s can happen, they're okay, so don't - * report them; but do report if we reset SWAP_MAP_MAX. - */ - /* We might release the lock_page() in unuse_mm(). */ - if (!PageSwapCache(page) || page_private(page) != entry.val) - goto retry; - - if (swap_count(*swap_map) == SWAP_MAP_MAX) { - spin_lock(&swap_lock); - *swap_map = encode_swapmap(0, true); - spin_unlock(&swap_lock); - reset_overflow = 1; - } - - /* * If a reference remains (rare), we would like to leave * the page in the swap cache; but try_to_unmap could * then re-duplicate the entry once we drop page lock, @@ -1213,6 +1188,12 @@ static int try_to_unuse(unsigned int type) * read from disk into another page. Splitting into two * pages would be incorrect if swap supported "shared * private" pages, but they are handled by tmpfs files. + * + * Given how unuse_vma() targets one particular offset + * in an anon_vma, once the anon_vma has been determined, + * this splitting happens to be just what is needed to + * handle where KSM pages have been swapped out: re-reading + * is unnecessarily slow, but we can fix that later on. */ if (swap_count(*swap_map) && PageDirty(page) && PageSwapCache(page)) { @@ -1242,7 +1223,6 @@ static int try_to_unuse(unsigned int type) * mark page dirty so shrink_page_list will preserve it. 
*/ SetPageDirty(page); -retry: unlock_page(page); page_cache_release(page); @@ -1254,10 +1234,6 @@ retry: } mmput(start_mm); - if (reset_overflow) { - printk(KERN_WARNING "swapoff: cleared swap entry overflow\n"); - swap_overflow = 0; - } return retval; } @@ -1270,10 +1246,10 @@ retry: static void drain_mmlist(void) { struct list_head *p, *next; - unsigned int i; + unsigned int type; - for (i = 0; i < nr_swapfiles; i++) - if (swap_info[i].inuse_pages) + for (type = 0; type < nr_swapfiles; type++) + if (swap_info[type]->inuse_pages) return; spin_lock(&mmlist_lock); list_for_each_safe(p, next, &init_mm.mmlist) @@ -1283,12 +1259,23 @@ static void drain_mmlist(void) /* * Use this swapdev's extent info to locate the (PAGE_SIZE) block which - * corresponds to page offset `offset'. + * corresponds to page offset for the specified swap entry. + * Note that the type of this function is sector_t, but it returns page offset + * into the bdev, not sector offset. */ -sector_t map_swap_page(struct swap_info_struct *sis, pgoff_t offset) +static sector_t map_swap_entry(swp_entry_t entry, struct block_device **bdev) { - struct swap_extent *se = sis->curr_swap_extent; - struct swap_extent *start_se = se; + struct swap_info_struct *sis; + struct swap_extent *start_se; + struct swap_extent *se; + pgoff_t offset; + + sis = swap_info[swp_type(entry)]; + *bdev = sis->bdev; + + offset = swp_offset(entry); + start_se = sis->curr_swap_extent; + se = start_se; for ( ; ; ) { struct list_head *lh; @@ -1298,40 +1285,31 @@ sector_t map_swap_page(struct swap_info_struct *sis, pgoff_t offset) return se->start_block + (offset - se->start_page); } lh = se->list.next; - if (lh == &sis->extent_list) - lh = lh->next; se = list_entry(lh, struct swap_extent, list); sis->curr_swap_extent = se; BUG_ON(se == start_se); /* It *must* be present */ } } -#ifdef CONFIG_HIBERNATION /* - * Get the (PAGE_SIZE) block corresponding to given offset on the swapdev - * corresponding to given index in swap_info (swap type). + * Returns the page offset into bdev for the specified page's swap entry. */ -sector_t swapdev_block(int swap_type, pgoff_t offset) +sector_t map_swap_page(struct page *page, struct block_device **bdev) { - struct swap_info_struct *sis; - - if (swap_type >= nr_swapfiles) - return 0; - - sis = swap_info + swap_type; - return (sis->flags & SWP_WRITEOK) ? 
map_swap_page(sis, offset) : 0; + swp_entry_t entry; + entry.val = page_private(page); + return map_swap_entry(entry, bdev); } -#endif /* CONFIG_HIBERNATION */ /* * Free all of a swapdev's extent information */ static void destroy_swap_extents(struct swap_info_struct *sis) { - while (!list_empty(&sis->extent_list)) { + while (!list_empty(&sis->first_swap_extent.list)) { struct swap_extent *se; - se = list_entry(sis->extent_list.next, + se = list_entry(sis->first_swap_extent.list.next, struct swap_extent, list); list_del(&se->list); kfree(se); @@ -1352,8 +1330,15 @@ add_swap_extent(struct swap_info_struct *sis, unsigned long start_page, struct swap_extent *new_se; struct list_head *lh; - lh = sis->extent_list.prev; /* The highest page extent */ - if (lh != &sis->extent_list) { + if (start_page == 0) { + se = &sis->first_swap_extent; + sis->curr_swap_extent = se; + se->start_page = 0; + se->nr_pages = nr_pages; + se->start_block = start_block; + return 1; + } else { + lh = sis->first_swap_extent.list.prev; /* Highest extent */ se = list_entry(lh, struct swap_extent, list); BUG_ON(se->start_page + se->nr_pages != start_page); if (se->start_block + se->nr_pages == start_block) { @@ -1373,7 +1358,7 @@ add_swap_extent(struct swap_info_struct *sis, unsigned long start_page, new_se->nr_pages = nr_pages; new_se->start_block = start_block; - list_add_tail(&new_se->list, &sis->extent_list); + list_add_tail(&new_se->list, &sis->first_swap_extent.list); return 1; } @@ -1425,7 +1410,7 @@ static int setup_swap_extents(struct swap_info_struct *sis, sector_t *span) if (S_ISBLK(inode->i_mode)) { ret = add_swap_extent(sis, 0, sis->max, 0); *span = sis->pages; - goto done; + goto out; } blkbits = inode->i_blkbits; @@ -1496,25 +1481,22 @@ reprobe: sis->max = page_no; sis->pages = page_no - 1; sis->highest_bit = page_no - 1; -done: - sis->curr_swap_extent = list_entry(sis->extent_list.prev, - struct swap_extent, list); - goto out; +out: + return ret; bad_bmap: printk(KERN_ERR "swapon: swapfile has holes\n"); ret = -EINVAL; -out: - return ret; + goto out; } SYSCALL_DEFINE1(swapoff, const char __user *, specialfile) { - struct swap_info_struct * p = NULL; - unsigned short *swap_map; + struct swap_info_struct *p = NULL; + unsigned char *swap_map; struct file *swap_file, *victim; struct address_space *mapping; struct inode *inode; - char * pathname; + char *pathname; int i, type, prev; int err; @@ -1535,8 +1517,8 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile) mapping = victim->f_mapping; prev = -1; spin_lock(&swap_lock); - for (type = swap_list.head; type >= 0; type = swap_info[type].next) { - p = swap_info + type; + for (type = swap_list.head; type >= 0; type = swap_info[type]->next) { + p = swap_info[type]; if (p->flags & SWP_WRITEOK) { if (p->swap_file->f_mapping == mapping) break; @@ -1555,18 +1537,17 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile) spin_unlock(&swap_lock); goto out_dput; } - if (prev < 0) { + if (prev < 0) swap_list.head = p->next; - } else { - swap_info[prev].next = p->next; - } + else + swap_info[prev]->next = p->next; if (type == swap_list.next) { /* just pick something that's safe... 
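/*
 * map_swap_entry() above resolves a swap offset to a device block by walking
 * extents of the form (start_page, nr_pages, start_block) and computing
 * start_block + (offset - start_page).  The stand-alone model below performs
 * the same lookup over a plain array; the extent table is made-up sample
 * data, not anything taken from a real swap area.
 */
#include <stdio.h>

struct extent {
	unsigned long start_page;
	unsigned long nr_pages;
	unsigned long start_block;
};

static long offset_to_block(const struct extent *tab, int n, unsigned long offset)
{
	int i;

	for (i = 0; i < n; i++) {
		if (offset >= tab[i].start_page &&
		    offset < tab[i].start_page + tab[i].nr_pages)
			return tab[i].start_block + (offset - tab[i].start_page);
	}
	return -1;	/* not covered by any extent */
}

int main(void)
{
	/* hypothetical swap file split across two runs of disk blocks */
	static const struct extent tab[] = {
		{ .start_page = 0,   .nr_pages = 100, .start_block = 5000 },
		{ .start_page = 100, .nr_pages = 50,  .start_block = 9000 },
	};

	printf("offset 7   -> block %ld\n", offset_to_block(tab, 2, 7));
	printf("offset 120 -> block %ld\n", offset_to_block(tab, 2, 120));
	return 0;
}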
*/ swap_list.next = swap_list.head; } if (p->prio < 0) { - for (i = p->next; i >= 0; i = swap_info[i].next) - swap_info[i].prio = p->prio--; + for (i = p->next; i >= 0; i = swap_info[i]->next) + swap_info[i]->prio = p->prio--; least_priority++; } nr_swap_pages -= p->pages; @@ -1584,16 +1565,16 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile) if (p->prio < 0) p->prio = --least_priority; prev = -1; - for (i = swap_list.head; i >= 0; i = swap_info[i].next) { - if (p->prio >= swap_info[i].prio) + for (i = swap_list.head; i >= 0; i = swap_info[i]->next) { + if (p->prio >= swap_info[i]->prio) break; prev = i; } p->next = i; if (prev < 0) - swap_list.head = swap_list.next = p - swap_info; + swap_list.head = swap_list.next = type; else - swap_info[prev].next = p - swap_info; + swap_info[prev]->next = type; nr_swap_pages += p->pages; total_swap_pages += p->pages; p->flags |= SWP_WRITEOK; @@ -1606,6 +1587,9 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile) up_write(&swap_unplug_sem); destroy_swap_extents(p); + if (p->flags & SWP_CONTINUED) + free_swap_count_continuations(p); + mutex_lock(&swapon_mutex); spin_lock(&swap_lock); drain_mmlist(); @@ -1653,8 +1637,8 @@ out: /* iterator */ static void *swap_start(struct seq_file *swap, loff_t *pos) { - struct swap_info_struct *ptr = swap_info; - int i; + struct swap_info_struct *si; + int type; loff_t l = *pos; mutex_lock(&swapon_mutex); @@ -1662,11 +1646,13 @@ static void *swap_start(struct seq_file *swap, loff_t *pos) if (!l) return SEQ_START_TOKEN; - for (i = 0; i < nr_swapfiles; i++, ptr++) { - if (!(ptr->flags & SWP_USED) || !ptr->swap_map) + for (type = 0; type < nr_swapfiles; type++) { + smp_rmb(); /* read nr_swapfiles before swap_info[type] */ + si = swap_info[type]; + if (!(si->flags & SWP_USED) || !si->swap_map) continue; if (!--l) - return ptr; + return si; } return NULL; @@ -1674,21 +1660,21 @@ static void *swap_start(struct seq_file *swap, loff_t *pos) static void *swap_next(struct seq_file *swap, void *v, loff_t *pos) { - struct swap_info_struct *ptr; - struct swap_info_struct *endptr = swap_info + nr_swapfiles; + struct swap_info_struct *si = v; + int type; if (v == SEQ_START_TOKEN) - ptr = swap_info; - else { - ptr = v; - ptr++; - } + type = 0; + else + type = si->type + 1; - for (; ptr < endptr; ptr++) { - if (!(ptr->flags & SWP_USED) || !ptr->swap_map) + for (; type < nr_swapfiles; type++) { + smp_rmb(); /* read nr_swapfiles before swap_info[type] */ + si = swap_info[type]; + if (!(si->flags & SWP_USED) || !si->swap_map) continue; ++*pos; - return ptr; + return si; } return NULL; @@ -1701,24 +1687,24 @@ static void swap_stop(struct seq_file *swap, void *v) static int swap_show(struct seq_file *swap, void *v) { - struct swap_info_struct *ptr = v; + struct swap_info_struct *si = v; struct file *file; int len; - if (ptr == SEQ_START_TOKEN) { + if (si == SEQ_START_TOKEN) { seq_puts(swap,"Filename\t\t\t\tType\t\tSize\tUsed\tPriority\n"); return 0; } - file = ptr->swap_file; + file = si->swap_file; len = seq_path(swap, &file->f_path, " \t\n\\"); seq_printf(swap, "%*s%s\t%u\t%u\t%d\n", len < 40 ? 40 - len : 1, " ", S_ISBLK(file->f_path.dentry->d_inode->i_mode) ? 
"partition" : "file\t", - ptr->pages << (PAGE_SHIFT - 10), - ptr->inuse_pages << (PAGE_SHIFT - 10), - ptr->prio); + si->pages << (PAGE_SHIFT - 10), + si->inuse_pages << (PAGE_SHIFT - 10), + si->prio); return 0; } @@ -1765,7 +1751,7 @@ late_initcall(max_swapfiles_check); */ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags) { - struct swap_info_struct * p; + struct swap_info_struct *p; char *name = NULL; struct block_device *bdev = NULL; struct file *swap_file = NULL; @@ -1779,30 +1765,52 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags) sector_t span; unsigned long maxpages = 1; unsigned long swapfilepages; - unsigned short *swap_map = NULL; + unsigned char *swap_map = NULL; struct page *page = NULL; struct inode *inode = NULL; int did_down = 0; if (!capable(CAP_SYS_ADMIN)) return -EPERM; + + p = kzalloc(sizeof(*p), GFP_KERNEL); + if (!p) + return -ENOMEM; + spin_lock(&swap_lock); - p = swap_info; - for (type = 0 ; type < nr_swapfiles ; type++,p++) - if (!(p->flags & SWP_USED)) + for (type = 0; type < nr_swapfiles; type++) { + if (!(swap_info[type]->flags & SWP_USED)) break; + } error = -EPERM; if (type >= MAX_SWAPFILES) { spin_unlock(&swap_lock); + kfree(p); goto out; } - if (type >= nr_swapfiles) - nr_swapfiles = type+1; - memset(p, 0, sizeof(*p)); - INIT_LIST_HEAD(&p->extent_list); + if (type >= nr_swapfiles) { + p->type = type; + swap_info[type] = p; + /* + * Write swap_info[type] before nr_swapfiles, in case a + * racing procfs swap_start() or swap_next() is reading them. + * (We never shrink nr_swapfiles, we never free this entry.) + */ + smp_wmb(); + nr_swapfiles++; + } else { + kfree(p); + p = swap_info[type]; + /* + * Do not memset this entry: a racing procfs swap_next() + * would be relying on p->type to remain valid. 
+ */ + } + INIT_LIST_HEAD(&p->first_swap_extent.list); p->flags = SWP_USED; p->next = -1; spin_unlock(&swap_lock); + name = getname(specialfile); error = PTR_ERR(name); if (IS_ERR(name)) { @@ -1822,7 +1830,7 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags) error = -EBUSY; for (i = 0; i < nr_swapfiles; i++) { - struct swap_info_struct *q = &swap_info[i]; + struct swap_info_struct *q = swap_info[i]; if (i == type || !q->swap_file) continue; @@ -1897,6 +1905,7 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags) p->lowest_bit = 1; p->cluster_next = 1; + p->cluster_nr = 0; /* * Find out how many pages are allowed for a single swap @@ -1932,13 +1941,13 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags) goto bad_swap; /* OK, set up the swap map and apply the bad block list */ - swap_map = vmalloc(maxpages * sizeof(short)); + swap_map = vmalloc(maxpages); if (!swap_map) { error = -ENOMEM; goto bad_swap; } - memset(swap_map, 0, maxpages * sizeof(short)); + memset(swap_map, 0, maxpages); for (i = 0; i < swap_header->info.nr_badpages; i++) { int page_nr = swap_header->info.badpages[i]; if (page_nr <= 0 || page_nr >= swap_header->info.last_page) { @@ -2003,18 +2012,16 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags) /* insert swap space into swap_list: */ prev = -1; - for (i = swap_list.head; i >= 0; i = swap_info[i].next) { - if (p->prio >= swap_info[i].prio) { + for (i = swap_list.head; i >= 0; i = swap_info[i]->next) { + if (p->prio >= swap_info[i]->prio) break; - } prev = i; } p->next = i; - if (prev < 0) { - swap_list.head = swap_list.next = p - swap_info; - } else { - swap_info[prev].next = p - swap_info; - } + if (prev < 0) + swap_list.head = swap_list.next = type; + else + swap_info[prev]->next = type; spin_unlock(&swap_lock); mutex_unlock(&swapon_mutex); error = 0; @@ -2051,15 +2058,15 @@ out: void si_swapinfo(struct sysinfo *val) { - unsigned int i; + unsigned int type; unsigned long nr_to_be_unused = 0; spin_lock(&swap_lock); - for (i = 0; i < nr_swapfiles; i++) { - if (!(swap_info[i].flags & SWP_USED) || - (swap_info[i].flags & SWP_WRITEOK)) - continue; - nr_to_be_unused += swap_info[i].inuse_pages; + for (type = 0; type < nr_swapfiles; type++) { + struct swap_info_struct *si = swap_info[type]; + + if ((si->flags & SWP_USED) && !(si->flags & SWP_WRITEOK)) + nr_to_be_unused += si->inuse_pages; } val->freeswap = nr_swap_pages + nr_to_be_unused; val->totalswap = total_swap_pages + nr_to_be_unused; @@ -2069,101 +2076,107 @@ void si_swapinfo(struct sysinfo *val) /* * Verify that a swap entry is valid and increment its swap map count. * - * Note: if swap_map[] reaches SWAP_MAP_MAX the entries are treated as - * "permanent", but will be reclaimed by the next swapoff. * Returns error code in following case. * - success -> 0 * - swp_entry is invalid -> EINVAL * - swp_entry is migration entry -> EINVAL * - swap-cache reference is requested but there is already one. -> EEXIST * - swap-cache reference is requested but the entry is not used. -> ENOENT + * - swap-mapped reference requested but needs continued swap count. 
-> ENOMEM */ -static int __swap_duplicate(swp_entry_t entry, bool cache) +static int __swap_duplicate(swp_entry_t entry, unsigned char usage) { - struct swap_info_struct * p; + struct swap_info_struct *p; unsigned long offset, type; - int result = -EINVAL; - int count; - bool has_cache; + unsigned char count; + unsigned char has_cache; + int err = -EINVAL; if (non_swap_entry(entry)) - return -EINVAL; + goto out; type = swp_type(entry); if (type >= nr_swapfiles) goto bad_file; - p = type + swap_info; + p = swap_info[type]; offset = swp_offset(entry); spin_lock(&swap_lock); - if (unlikely(offset >= p->max)) goto unlock_out; - count = swap_count(p->swap_map[offset]); - has_cache = swap_has_cache(p->swap_map[offset]); + count = p->swap_map[offset]; + has_cache = count & SWAP_HAS_CACHE; + count &= ~SWAP_HAS_CACHE; + err = 0; - if (cache == SWAP_CACHE) { /* called for swapcache/swapin-readahead */ + if (usage == SWAP_HAS_CACHE) { /* set SWAP_HAS_CACHE if there is no cache and entry is used */ - if (!has_cache && count) { - p->swap_map[offset] = encode_swapmap(count, true); - result = 0; - } else if (has_cache) /* someone added cache */ - result = -EEXIST; - else if (!count) /* no users */ - result = -ENOENT; + if (!has_cache && count) + has_cache = SWAP_HAS_CACHE; + else if (has_cache) /* someone else added cache */ + err = -EEXIST; + else /* no users remaining */ + err = -ENOENT; } else if (count || has_cache) { - if (count < SWAP_MAP_MAX - 1) { - p->swap_map[offset] = encode_swapmap(count + 1, - has_cache); - result = 0; - } else if (count <= SWAP_MAP_MAX) { - if (swap_overflow++ < 5) - printk(KERN_WARNING - "swap_dup: swap entry overflow\n"); - p->swap_map[offset] = encode_swapmap(SWAP_MAP_MAX, - has_cache); - result = 0; - } + + if ((count & ~COUNT_CONTINUED) < SWAP_MAP_MAX) + count += usage; + else if ((count & ~COUNT_CONTINUED) > SWAP_MAP_MAX) + err = -EINVAL; + else if (swap_count_continued(p, offset, count)) + count = COUNT_CONTINUED; + else + err = -ENOMEM; } else - result = -ENOENT; /* unused swap entry */ + err = -ENOENT; /* unused swap entry */ + + p->swap_map[offset] = count | has_cache; + unlock_out: spin_unlock(&swap_lock); out: - return result; + return err; bad_file: printk(KERN_ERR "swap_dup: %s%08lx\n", Bad_file, entry.val); goto out; } + +/* + * Help swapoff by noting that swap entry belongs to shmem/tmpfs + * (in which case its reference count is never incremented). + */ +void swap_shmem_alloc(swp_entry_t entry) +{ + __swap_duplicate(entry, SWAP_MAP_SHMEM); +} + /* * increase reference count of swap entry by 1. */ -void swap_duplicate(swp_entry_t entry) +int swap_duplicate(swp_entry_t entry) { - __swap_duplicate(entry, SWAP_MAP); + int err = 0; + + while (!err && __swap_duplicate(entry, 1) == -ENOMEM) + err = add_swap_count_continuation(entry, GFP_ATOMIC); + return err; } /* * @entry: swap entry for which we allocate swap cache. * - * Called when allocating swap cache for exising swap entry, + * Called when allocating swap cache for existing swap entry, * This can return error codes. Returns 0 at success. * -EBUSY means there is a swap cache. * Note: return code is different from swap_duplicate(). 
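/*
 * __swap_duplicate() above decides between success, -EEXIST, -ENOENT and
 * -ENOMEM from a single swap_map byte.  The stand-alone model below
 * reproduces just that decision table; the flag values are assumptions
 * mirroring include/linux/swap.h, and the continuation path is reduced to
 * returning -ENOMEM.
 */
#include <errno.h>
#include <stdio.h>

#define SWAP_HAS_CACHE	0x40
#define COUNT_CONTINUED	0x80
#define SWAP_MAP_MAX	0x3e

static int swap_dup(unsigned char *slot, unsigned char usage)
{
	unsigned char count = *slot & ~SWAP_HAS_CACHE;
	unsigned char has_cache = *slot & SWAP_HAS_CACHE;

	if (usage == SWAP_HAS_CACHE) {		/* swapcache_prepare() path */
		if (!has_cache && count)
			has_cache = SWAP_HAS_CACHE;
		else if (has_cache)
			return -EEXIST;		/* someone else added the cache */
		else
			return -ENOENT;		/* unused swap entry */
	} else if (count || has_cache) {	/* swap_duplicate() path */
		if ((count & ~COUNT_CONTINUED) < SWAP_MAP_MAX)
			count += usage;
		else
			return -ENOMEM;		/* would need a continuation page */
	} else {
		return -ENOENT;
	}
	*slot = count | has_cache;
	return 0;
}

int main(void)
{
	unsigned char slot = 1;		/* one map reference, not in swap cache */
	int ret;

	printf("add cache:     %d\n", swap_dup(&slot, SWAP_HAS_CACHE));
	printf("add cache x2:  %d (-EEXIST is %d)\n",
	       swap_dup(&slot, SWAP_HAS_CACHE), -EEXIST);
	ret = swap_dup(&slot, 1);
	printf("add reference: %d, slot is now %#x\n", ret, (unsigned)slot);
	return 0;
}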
*/ int swapcache_prepare(swp_entry_t entry) { - return __swap_duplicate(entry, SWAP_CACHE); -} - - -struct swap_info_struct * -get_swap_info_struct(unsigned type) -{ - return &swap_info[type]; + return __swap_duplicate(entry, SWAP_HAS_CACHE); } /* @@ -2181,7 +2194,7 @@ int valid_swaphandles(swp_entry_t entry, unsigned long *offset) if (!our_page_cluster) /* no readahead */ return 0; - si = &swap_info[swp_type(entry)]; + si = swap_info[swp_type(entry)]; target = swp_offset(entry); base = (target >> our_page_cluster) << our_page_cluster; end = base + (1 << our_page_cluster); @@ -2217,3 +2230,219 @@ int valid_swaphandles(swp_entry_t entry, unsigned long *offset) *offset = ++toff; return nr_pages? ++nr_pages: 0; } + +/* + * add_swap_count_continuation - called when a swap count is duplicated + * beyond SWAP_MAP_MAX, it allocates a new page and links that to the entry's + * page of the original vmalloc'ed swap_map, to hold the continuation count + * (for that entry and for its neighbouring PAGE_SIZE swap entries). Called + * again when count is duplicated beyond SWAP_MAP_MAX * SWAP_CONT_MAX, etc. + * + * These continuation pages are seldom referenced: the common paths all work + * on the original swap_map, only referring to a continuation page when the + * low "digit" of a count is incremented or decremented through SWAP_MAP_MAX. + * + * add_swap_count_continuation(, GFP_ATOMIC) can be called while holding + * page table locks; if it fails, add_swap_count_continuation(, GFP_KERNEL) + * can be called after dropping locks. + */ +int add_swap_count_continuation(swp_entry_t entry, gfp_t gfp_mask) +{ + struct swap_info_struct *si; + struct page *head; + struct page *page; + struct page *list_page; + pgoff_t offset; + unsigned char count; + + /* + * When debugging, it's easier to use __GFP_ZERO here; but it's better + * for latency not to zero a page while GFP_ATOMIC and holding locks. + */ + page = alloc_page(gfp_mask | __GFP_HIGHMEM); + + si = swap_info_get(entry); + if (!si) { + /* + * An acceptable race has occurred since the failing + * __swap_duplicate(): the swap entry has been freed, + * perhaps even the whole swap_map cleared for swapoff. + */ + goto outer; + } + + offset = swp_offset(entry); + count = si->swap_map[offset] & ~SWAP_HAS_CACHE; + + if ((count & ~COUNT_CONTINUED) != SWAP_MAP_MAX) { + /* + * The higher the swap count, the more likely it is that tasks + * will race to add swap count continuation: we need to avoid + * over-provisioning. + */ + goto out; + } + + if (!page) { + spin_unlock(&swap_lock); + return -ENOMEM; + } + + /* + * We are fortunate that although vmalloc_to_page uses pte_offset_map, + * no architecture is using highmem pages for kernel pagetables: so it + * will not corrupt the GFP_ATOMIC caller's atomic pagetable kmaps. + */ + head = vmalloc_to_page(si->swap_map + offset); + offset &= ~PAGE_MASK; + + /* + * Page allocation does not initialize the page's lru field, + * but it does always reset its private field. + */ + if (!page_private(head)) { + BUG_ON(count & COUNT_CONTINUED); + INIT_LIST_HEAD(&head->lru); + set_page_private(head, SWP_CONTINUED); + si->flags |= SWP_CONTINUED; + } + + list_for_each_entry(list_page, &head->lru, lru) { + unsigned char *map; + + /* + * If the previous map said no continuation, but we've found + * a continuation page, free our allocation and use this one. 
+ */ + if (!(count & COUNT_CONTINUED)) + goto out; + + map = kmap_atomic(list_page, KM_USER0) + offset; + count = *map; + kunmap_atomic(map, KM_USER0); + + /* + * If this continuation count now has some space in it, + * free our allocation and use this one. + */ + if ((count & ~COUNT_CONTINUED) != SWAP_CONT_MAX) + goto out; + } + + list_add_tail(&page->lru, &head->lru); + page = NULL; /* now it's attached, don't free it */ +out: + spin_unlock(&swap_lock); +outer: + if (page) + __free_page(page); + return 0; +} + +/* + * swap_count_continued - when the original swap_map count is incremented + * from SWAP_MAP_MAX, check if there is already a continuation page to carry + * into, carry if so, or else fail until a new continuation page is allocated; + * when the original swap_map count is decremented from 0 with continuation, + * borrow from the continuation and report whether it still holds more. + * Called while __swap_duplicate() or swap_entry_free() holds swap_lock. + */ +static bool swap_count_continued(struct swap_info_struct *si, + pgoff_t offset, unsigned char count) +{ + struct page *head; + struct page *page; + unsigned char *map; + + head = vmalloc_to_page(si->swap_map + offset); + if (page_private(head) != SWP_CONTINUED) { + BUG_ON(count & COUNT_CONTINUED); + return false; /* need to add count continuation */ + } + + offset &= ~PAGE_MASK; + page = list_entry(head->lru.next, struct page, lru); + map = kmap_atomic(page, KM_USER0) + offset; + + if (count == SWAP_MAP_MAX) /* initial increment from swap_map */ + goto init_map; /* jump over SWAP_CONT_MAX checks */ + + if (count == (SWAP_MAP_MAX | COUNT_CONTINUED)) { /* incrementing */ + /* + * Think of how you add 1 to 999 + */ + while (*map == (SWAP_CONT_MAX | COUNT_CONTINUED)) { + kunmap_atomic(map, KM_USER0); + page = list_entry(page->lru.next, struct page, lru); + BUG_ON(page == head); + map = kmap_atomic(page, KM_USER0) + offset; + } + if (*map == SWAP_CONT_MAX) { + kunmap_atomic(map, KM_USER0); + page = list_entry(page->lru.next, struct page, lru); + if (page == head) + return false; /* add count continuation */ + map = kmap_atomic(page, KM_USER0) + offset; +init_map: *map = 0; /* we didn't zero the page */ + } + *map += 1; + kunmap_atomic(map, KM_USER0); + page = list_entry(page->lru.prev, struct page, lru); + while (page != head) { + map = kmap_atomic(page, KM_USER0) + offset; + *map = COUNT_CONTINUED; + kunmap_atomic(map, KM_USER0); + page = list_entry(page->lru.prev, struct page, lru); + } + return true; /* incremented */ + + } else { /* decrementing */ + /* + * Think of how you subtract 1 from 1000 + */ + BUG_ON(count != COUNT_CONTINUED); + while (*map == COUNT_CONTINUED) { + kunmap_atomic(map, KM_USER0); + page = list_entry(page->lru.next, struct page, lru); + BUG_ON(page == head); + map = kmap_atomic(page, KM_USER0) + offset; + } + BUG_ON(*map == 0); + *map -= 1; + if (*map == 0) + count = 0; + kunmap_atomic(map, KM_USER0); + page = list_entry(page->lru.prev, struct page, lru); + while (page != head) { + map = kmap_atomic(page, KM_USER0) + offset; + *map = SWAP_CONT_MAX | count; + count = COUNT_CONTINUED; + kunmap_atomic(map, KM_USER0); + page = list_entry(page->lru.prev, struct page, lru); + } + return count == COUNT_CONTINUED; + } +} + +/* + * free_swap_count_continuations - swapoff free all the continuation pages + * appended to the swap_map, after swap_map is quiesced, before vfree'ing it. 
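/*
 * swap_count_continued() above treats a swap entry's reference count as a
 * multi-digit number: the low digit lives in swap_map[] and tops out at
 * SWAP_MAP_MAX, each further digit lives in a continuation page and tops out
 * at SWAP_CONT_MAX, and COUNT_CONTINUED means "more digits follow".  The
 * model below does the same carry and borrow arithmetic ("add 1 to 999",
 * "subtract 1 from 1000") on a plain digit array; the limits are assumed
 * values mirroring swap.h, and the number of digits is fixed for simplicity.
 */
#include <stdio.h>

#define SWAP_MAP_MAX	0x3e	/* capacity of the first digit, in swap_map[] */
#define SWAP_CONT_MAX	0x7f	/* capacity of each continuation digit */
#define NDIGITS		4	/* first digit plus three continuation pages */

static const unsigned char digit_max[NDIGITS] = {
	SWAP_MAP_MAX, SWAP_CONT_MAX, SWAP_CONT_MAX, SWAP_CONT_MAX
};

/* add one reference; returns 0 when every digit is already saturated */
static int count_inc(unsigned char *d)
{
	int i;

	for (i = 0; i < NDIGITS; i++) {
		if (d[i] < digit_max[i]) {
			d[i]++;
			return 1;
		}
		d[i] = 0;	/* carry into the next digit: "add 1 to 999" */
	}
	return 0;		/* would need yet another continuation page */
}

/* drop one reference; returns 0 if the count was already zero */
static int count_dec(unsigned char *d)
{
	int i;

	for (i = 0; i < NDIGITS; i++) {
		if (d[i]) {
			d[i]--;
			return 1;
		}
		d[i] = digit_max[i];	/* borrow: "subtract 1 from 1000" */
	}
	for (i = 0; i < NDIGITS; i++)	/* it was zero: undo the borrows */
		d[i] = 0;
	return 0;
}

static unsigned long count_value(const unsigned char *d)
{
	unsigned long v = 0, weight = 1;
	int i;

	for (i = 0; i < NDIGITS; i++) {
		v += d[i] * weight;
		weight *= digit_max[i] + 1;
	}
	return v;
}

int main(void)
{
	unsigned char d[NDIGITS] = { 0 };
	int i;

	for (i = 0; i < SWAP_MAP_MAX + 2; i++)	/* one increment past the carry */
		count_inc(d);
	printf("after %d increments: value %lu, digits [%d %d]\n",
	       SWAP_MAP_MAX + 2, count_value(d), d[0], d[1]);

	count_dec(d);
	count_dec(d);
	printf("after two decrements: value %lu, digits [%d %d]\n",
	       count_value(d), d[0], d[1]);
	return 0;
}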
+ */ +static void free_swap_count_continuations(struct swap_info_struct *si) +{ + pgoff_t offset; + + for (offset = 0; offset < si->max; offset += PAGE_SIZE) { + struct page *head; + head = vmalloc_to_page(si->swap_map + offset); + if (page_private(head)) { + struct list_head *this, *next; + list_for_each_safe(this, next, &head->lru) { + struct page *page; + page = list_entry(this, struct page, lru); + list_del(this); + __free_page(page); + } + } + } +} diff --git a/mm/vmalloc.c b/mm/vmalloc.c index 9b08d790df6..37e69295f25 100644 --- a/mm/vmalloc.c +++ b/mm/vmalloc.c @@ -1411,6 +1411,7 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask, { struct page **pages; unsigned int nr_pages, array_size, i; + gfp_t nested_gfp = (gfp_mask & GFP_RECLAIM_MASK) | __GFP_ZERO; nr_pages = (area->size - PAGE_SIZE) >> PAGE_SHIFT; array_size = (nr_pages * sizeof(struct page *)); @@ -1418,13 +1419,11 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask, area->nr_pages = nr_pages; /* Please note that the recursion is strictly bounded. */ if (array_size > PAGE_SIZE) { - pages = __vmalloc_node(array_size, 1, gfp_mask | __GFP_ZERO, + pages = __vmalloc_node(array_size, 1, nested_gfp|__GFP_HIGHMEM, PAGE_KERNEL, node, caller); area->flags |= VM_VPAGES; } else { - pages = kmalloc_node(array_size, - (gfp_mask & GFP_RECLAIM_MASK) | __GFP_ZERO, - node); + pages = kmalloc_node(array_size, nested_gfp, node); } area->pages = pages; area->caller = caller; diff --git a/mm/vmscan.c b/mm/vmscan.c index 777af57fd8c..885207a6b6b 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -55,6 +55,11 @@ struct scan_control { /* Number of pages freed so far during a call to shrink_zones() */ unsigned long nr_reclaimed; + /* How many pages shrink_list() should reclaim */ + unsigned long nr_to_reclaim; + + unsigned long hibernation_mode; + /* This context's GFP mask */ gfp_t gfp_mask; @@ -66,12 +71,6 @@ struct scan_control { /* Can pages be swapped as part of reclaim? */ int may_swap; - /* This context's SWAP_CLUSTER_MAX. If freeing memory for - * suspend, we effectively ignore SWAP_CLUSTER_MAX. - * In this context, it doesn't matter that we scan the - * whole list at once. */ - int swap_cluster_max; - int swappiness; int all_unreclaimable; @@ -358,7 +357,7 @@ static pageout_t pageout(struct page *page, struct address_space *mapping, * stalls if we need to run get_block(). We could test * PagePrivate for that. * - * If this process is currently in generic_file_write() against + * If this process is currently in __generic_file_aio_write() against * this page's queue, we can perform writeback even if that * will block. 
* @@ -1132,7 +1131,7 @@ static unsigned long shrink_inactive_list(unsigned long max_scan, unsigned long nr_anon; unsigned long nr_file; - nr_taken = sc->isolate_pages(sc->swap_cluster_max, + nr_taken = sc->isolate_pages(SWAP_CLUSTER_MAX, &page_list, &nr_scan, sc->order, mode, zone, sc->mem_cgroup, 0, file); @@ -1166,10 +1165,8 @@ static unsigned long shrink_inactive_list(unsigned long max_scan, __mod_zone_page_state(zone, NR_ISOLATED_ANON, nr_anon); __mod_zone_page_state(zone, NR_ISOLATED_FILE, nr_file); - reclaim_stat->recent_scanned[0] += count[LRU_INACTIVE_ANON]; - reclaim_stat->recent_scanned[0] += count[LRU_ACTIVE_ANON]; - reclaim_stat->recent_scanned[1] += count[LRU_INACTIVE_FILE]; - reclaim_stat->recent_scanned[1] += count[LRU_ACTIVE_FILE]; + reclaim_stat->recent_scanned[0] += nr_anon; + reclaim_stat->recent_scanned[1] += nr_file; spin_unlock_irq(&zone->lru_lock); @@ -1464,20 +1461,26 @@ static int inactive_file_is_low(struct zone *zone, struct scan_control *sc) return low; } +static int inactive_list_is_low(struct zone *zone, struct scan_control *sc, + int file) +{ + if (file) + return inactive_file_is_low(zone, sc); + else + return inactive_anon_is_low(zone, sc); +} + static unsigned long shrink_list(enum lru_list lru, unsigned long nr_to_scan, struct zone *zone, struct scan_control *sc, int priority) { int file = is_file_lru(lru); - if (lru == LRU_ACTIVE_FILE && inactive_file_is_low(zone, sc)) { - shrink_active_list(nr_to_scan, zone, sc, priority, file); + if (is_active_lru(lru)) { + if (inactive_list_is_low(zone, sc, file)) + shrink_active_list(nr_to_scan, zone, sc, priority, file); return 0; } - if (lru == LRU_ACTIVE_ANON && inactive_anon_is_low(zone, sc)) { - shrink_active_list(nr_to_scan, zone, sc, priority, file); - return 0; - } return shrink_inactive_list(nr_to_scan, zone, sc, priority, file); } @@ -1567,15 +1570,14 @@ static void get_scan_ratio(struct zone *zone, struct scan_control *sc, * until we collected @swap_cluster_max pages to scan. */ static unsigned long nr_scan_try_batch(unsigned long nr_to_scan, - unsigned long *nr_saved_scan, - unsigned long swap_cluster_max) + unsigned long *nr_saved_scan) { unsigned long nr; *nr_saved_scan += nr_to_scan; nr = *nr_saved_scan; - if (nr >= swap_cluster_max) + if (nr >= SWAP_CLUSTER_MAX) *nr_saved_scan = 0; else nr = 0; @@ -1594,7 +1596,7 @@ static void shrink_zone(int priority, struct zone *zone, unsigned long percent[2]; /* anon @ 0; file @ 1 */ enum lru_list l; unsigned long nr_reclaimed = sc->nr_reclaimed; - unsigned long swap_cluster_max = sc->swap_cluster_max; + unsigned long nr_to_reclaim = sc->nr_to_reclaim; struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(zone, sc); int noswap = 0; @@ -1616,15 +1618,15 @@ static void shrink_zone(int priority, struct zone *zone, scan = (scan * percent[file]) / 100; } nr[l] = nr_scan_try_batch(scan, - &reclaim_stat->nr_saved_scan[l], - swap_cluster_max); + &reclaim_stat->nr_saved_scan[l]); } while (nr[LRU_INACTIVE_ANON] || nr[LRU_ACTIVE_FILE] || nr[LRU_INACTIVE_FILE]) { for_each_evictable_lru(l) { if (nr[l]) { - nr_to_scan = min(nr[l], swap_cluster_max); + nr_to_scan = min_t(unsigned long, + nr[l], SWAP_CLUSTER_MAX); nr[l] -= nr_to_scan; nr_reclaimed += shrink_list(l, nr_to_scan, @@ -1639,8 +1641,7 @@ static void shrink_zone(int priority, struct zone *zone, * with multiple processes reclaiming pages, the total * freeing target can get unreasonably large. 
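/*
 * nr_scan_try_batch() above saves up small scan requests until at least
 * SWAP_CLUSTER_MAX pages have accumulated, so shrink_zone() always scans the
 * LRU lists in worthwhile batches.  A stand-alone model of that accumulator
 * follows; SWAP_CLUSTER_MAX is assumed to be 32 here.
 */
#include <stdio.h>

#define SWAP_CLUSTER_MAX 32UL

static unsigned long nr_scan_try_batch(unsigned long nr_to_scan,
				       unsigned long *nr_saved_scan)
{
	unsigned long nr;

	*nr_saved_scan += nr_to_scan;
	nr = *nr_saved_scan;

	if (nr >= SWAP_CLUSTER_MAX)
		*nr_saved_scan = 0;	/* hand out the whole batch */
	else
		nr = 0;			/* keep saving up */
	return nr;
}

int main(void)
{
	unsigned long saved = 0;
	int round;

	/* ten requests of 5 pages each; nothing is handed out until round 7 */
	for (round = 1; round <= 10; round++)
		printf("round %2d: scan %lu\n", round,
		       nr_scan_try_batch(5, &saved));
	return 0;
}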
*/ - if (nr_reclaimed > swap_cluster_max && - priority < DEF_PRIORITY && !current_is_kswapd()) + if (nr_reclaimed >= nr_to_reclaim && priority < DEF_PRIORITY) break; } @@ -1738,6 +1739,7 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist, struct zoneref *z; struct zone *zone; enum zone_type high_zoneidx = gfp_zone(sc->gfp_mask); + unsigned long writeback_threshold; delayacct_freepages_start(); @@ -1773,7 +1775,7 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist, } } total_scanned += sc->nr_scanned; - if (sc->nr_reclaimed >= sc->swap_cluster_max) { + if (sc->nr_reclaimed >= sc->nr_to_reclaim) { ret = sc->nr_reclaimed; goto out; } @@ -1785,14 +1787,15 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist, * that's undesirable in laptop mode, where we *want* lumpy * writeout. So in laptop mode, write out the whole world. */ - if (total_scanned > sc->swap_cluster_max + - sc->swap_cluster_max / 2) { + writeback_threshold = sc->nr_to_reclaim + sc->nr_to_reclaim / 2; + if (total_scanned > writeback_threshold) { wakeup_flusher_threads(laptop_mode ? 0 : total_scanned); sc->may_writepage = 1; } /* Take a nap, wait for some writeback to complete */ - if (sc->nr_scanned && priority < DEF_PRIORITY - 2) + if (!sc->hibernation_mode && sc->nr_scanned && + priority < DEF_PRIORITY - 2) congestion_wait(BLK_RW_ASYNC, HZ/10); } /* top priority shrink_zones still had more to do? don't OOM, then */ @@ -1831,7 +1834,7 @@ unsigned long try_to_free_pages(struct zonelist *zonelist, int order, struct scan_control sc = { .gfp_mask = gfp_mask, .may_writepage = !laptop_mode, - .swap_cluster_max = SWAP_CLUSTER_MAX, + .nr_to_reclaim = SWAP_CLUSTER_MAX, .may_unmap = 1, .may_swap = 1, .swappiness = vm_swappiness, @@ -1855,7 +1858,6 @@ unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *mem, .may_writepage = !laptop_mode, .may_unmap = 1, .may_swap = !noswap, - .swap_cluster_max = SWAP_CLUSTER_MAX, .swappiness = swappiness, .order = 0, .mem_cgroup = mem, @@ -1889,7 +1891,7 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem_cont, .may_writepage = !laptop_mode, .may_unmap = 1, .may_swap = !noswap, - .swap_cluster_max = SWAP_CLUSTER_MAX, + .nr_to_reclaim = SWAP_CLUSTER_MAX, .swappiness = swappiness, .order = 0, .mem_cgroup = mem_cont, @@ -1904,6 +1906,30 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem_cont, } #endif +/* is kswapd sleeping prematurely? */ +static int sleeping_prematurely(pg_data_t *pgdat, int order, long remaining) +{ + int i; + + /* If a direct reclaimer woke kswapd within HZ/10, it's premature */ + if (remaining) + return 1; + + /* If after HZ/10, a zone is below the high mark, it's premature */ + for (i = 0; i < pgdat->nr_zones; i++) { + struct zone *zone = pgdat->node_zones + i; + + if (!populated_zone(zone)) + continue; + + if (!zone_watermark_ok(zone, order, high_wmark_pages(zone), + 0, 0)) + return 1; + } + + return 0; +} + /* * For kswapd, balance_pgdat() will work across all this node's zones until * they are all at high_wmark_pages(zone). @@ -1936,7 +1962,11 @@ static unsigned long balance_pgdat(pg_data_t *pgdat, int order) .gfp_mask = GFP_KERNEL, .may_unmap = 1, .may_swap = 1, - .swap_cluster_max = SWAP_CLUSTER_MAX, + /* + * kswapd doesn't want to be bailed out while reclaim. because + * we want to put equal scanning pressure on each zone. 
+ */ + .nr_to_reclaim = ULONG_MAX, .swappiness = vm_swappiness, .order = order, .mem_cgroup = NULL, @@ -1961,6 +1991,7 @@ loop_again: for (priority = DEF_PRIORITY; priority >= 0; priority--) { int end_zone = 0; /* Inclusive. 0 = ZONE_DMA */ unsigned long lru_pages = 0; + int has_under_min_watermark_zone = 0; /* The swap token gets in the way of swapout... */ if (!priority) @@ -2067,6 +2098,15 @@ loop_again: if (total_scanned > SWAP_CLUSTER_MAX * 2 && total_scanned > sc.nr_reclaimed + sc.nr_reclaimed / 2) sc.may_writepage = 1; + + /* + * We are still under min water mark. it mean we have + * GFP_ATOMIC allocation failure risk. Hurry up! + */ + if (!zone_watermark_ok(zone, order, min_wmark_pages(zone), + end_zone, 0)) + has_under_min_watermark_zone = 1; + } if (all_zones_ok) break; /* kswapd: all done */ @@ -2074,8 +2114,12 @@ loop_again: * OK, kswapd is getting into trouble. Take a nap, then take * another pass across the zones. */ - if (total_scanned && priority < DEF_PRIORITY - 2) - congestion_wait(BLK_RW_ASYNC, HZ/10); + if (total_scanned && (priority < DEF_PRIORITY - 2)) { + if (has_under_min_watermark_zone) + count_vm_event(KSWAPD_SKIP_CONGESTION_WAIT); + else + congestion_wait(BLK_RW_ASYNC, HZ/10); + } /* * We do this so kswapd doesn't build up large priorities for @@ -2173,6 +2217,7 @@ static int kswapd(void *p) order = 0; for ( ; ; ) { unsigned long new_order; + int ret; prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE); new_order = pgdat->kswapd_max_order; @@ -2184,19 +2229,45 @@ static int kswapd(void *p) */ order = new_order; } else { - if (!freezing(current)) - schedule(); + if (!freezing(current) && !kthread_should_stop()) { + long remaining = 0; + + /* Try to sleep for a short interval */ + if (!sleeping_prematurely(pgdat, order, remaining)) { + remaining = schedule_timeout(HZ/10); + finish_wait(&pgdat->kswapd_wait, &wait); + prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE); + } + + /* + * After a short sleep, check if it was a + * premature sleep. If not, then go fully + * to sleep until explicitly woken up + */ + if (!sleeping_prematurely(pgdat, order, remaining)) + schedule(); + else { + if (remaining) + count_vm_event(KSWAPD_LOW_WMARK_HIT_QUICKLY); + else + count_vm_event(KSWAPD_HIGH_WMARK_HIT_QUICKLY); + } + } order = pgdat->kswapd_max_order; } finish_wait(&pgdat->kswapd_wait, &wait); - if (!try_to_freeze()) { - /* We can speed up thawing tasks if we don't call - * balance_pgdat after returning from the refrigerator - */ + ret = try_to_freeze(); + if (kthread_should_stop()) + break; + + /* + * We can speed up thawing tasks if we don't call balance_pgdat + * after returning from the refrigerator + */ + if (!ret) balance_pgdat(pgdat, order); - } } return 0; } @@ -2260,148 +2331,43 @@ unsigned long zone_reclaimable_pages(struct zone *zone) #ifdef CONFIG_HIBERNATION /* - * Helper function for shrink_all_memory(). Tries to reclaim 'nr_pages' pages - * from LRU lists system-wide, for given pass and priority. 
- * - * For pass > 3 we also try to shrink the LRU lists that contain a few pages - */ -static void shrink_all_zones(unsigned long nr_pages, int prio, - int pass, struct scan_control *sc) -{ - struct zone *zone; - unsigned long nr_reclaimed = 0; - struct zone_reclaim_stat *reclaim_stat; - - for_each_populated_zone(zone) { - enum lru_list l; - - if (zone_is_all_unreclaimable(zone) && prio != DEF_PRIORITY) - continue; - - for_each_evictable_lru(l) { - enum zone_stat_item ls = NR_LRU_BASE + l; - unsigned long lru_pages = zone_page_state(zone, ls); - - /* For pass = 0, we don't shrink the active list */ - if (pass == 0 && (l == LRU_ACTIVE_ANON || - l == LRU_ACTIVE_FILE)) - continue; - - reclaim_stat = get_reclaim_stat(zone, sc); - reclaim_stat->nr_saved_scan[l] += - (lru_pages >> prio) + 1; - if (reclaim_stat->nr_saved_scan[l] - >= nr_pages || pass > 3) { - unsigned long nr_to_scan; - - reclaim_stat->nr_saved_scan[l] = 0; - nr_to_scan = min(nr_pages, lru_pages); - nr_reclaimed += shrink_list(l, nr_to_scan, zone, - sc, prio); - if (nr_reclaimed >= nr_pages) { - sc->nr_reclaimed += nr_reclaimed; - return; - } - } - } - } - sc->nr_reclaimed += nr_reclaimed; -} - -/* - * Try to free `nr_pages' of memory, system-wide, and return the number of + * Try to free `nr_to_reclaim' of memory, system-wide, and return the number of * freed pages. * * Rather than trying to age LRUs the aim is to preserve the overall * LRU order by reclaiming preferentially * inactive > active > active referenced > active mapped */ -unsigned long shrink_all_memory(unsigned long nr_pages) +unsigned long shrink_all_memory(unsigned long nr_to_reclaim) { - unsigned long lru_pages, nr_slab; - int pass; struct reclaim_state reclaim_state; struct scan_control sc = { - .gfp_mask = GFP_KERNEL, - .may_unmap = 0, + .gfp_mask = GFP_HIGHUSER_MOVABLE, + .may_swap = 1, + .may_unmap = 1, .may_writepage = 1, + .nr_to_reclaim = nr_to_reclaim, + .hibernation_mode = 1, + .swappiness = vm_swappiness, + .order = 0, .isolate_pages = isolate_pages_global, - .nr_reclaimed = 0, }; + struct zonelist * zonelist = node_zonelist(numa_node_id(), sc.gfp_mask); + struct task_struct *p = current; + unsigned long nr_reclaimed; - current->reclaim_state = &reclaim_state; - - lru_pages = global_reclaimable_pages(); - nr_slab = global_page_state(NR_SLAB_RECLAIMABLE); - /* If slab caches are huge, it's better to hit them first */ - while (nr_slab >= lru_pages) { - reclaim_state.reclaimed_slab = 0; - shrink_slab(nr_pages, sc.gfp_mask, lru_pages); - if (!reclaim_state.reclaimed_slab) - break; - - sc.nr_reclaimed += reclaim_state.reclaimed_slab; - if (sc.nr_reclaimed >= nr_pages) - goto out; - - nr_slab -= reclaim_state.reclaimed_slab; - } - - /* - * We try to shrink LRUs in 5 passes: - * 0 = Reclaim from inactive_list only - * 1 = Reclaim from active list but don't reclaim mapped - * 2 = 2nd pass of type 1 - * 3 = Reclaim mapped (normal reclaim) - * 4 = 2nd pass of type 3 - */ - for (pass = 0; pass < 5; pass++) { - int prio; - - /* Force reclaiming mapped pages in the passes #3 and #4 */ - if (pass > 2) - sc.may_unmap = 1; - - for (prio = DEF_PRIORITY; prio >= 0; prio--) { - unsigned long nr_to_scan = nr_pages - sc.nr_reclaimed; - - sc.nr_scanned = 0; - sc.swap_cluster_max = nr_to_scan; - shrink_all_zones(nr_to_scan, prio, pass, &sc); - if (sc.nr_reclaimed >= nr_pages) - goto out; - - reclaim_state.reclaimed_slab = 0; - shrink_slab(sc.nr_scanned, sc.gfp_mask, - global_reclaimable_pages()); - sc.nr_reclaimed += reclaim_state.reclaimed_slab; - if (sc.nr_reclaimed >= 
nr_pages) - goto out; - - if (sc.nr_scanned && prio < DEF_PRIORITY - 2) - congestion_wait(BLK_RW_ASYNC, HZ / 10); - } - } - - /* - * If sc.nr_reclaimed = 0, we could not shrink LRUs, but there may be - * something in slab caches - */ - if (!sc.nr_reclaimed) { - do { - reclaim_state.reclaimed_slab = 0; - shrink_slab(nr_pages, sc.gfp_mask, - global_reclaimable_pages()); - sc.nr_reclaimed += reclaim_state.reclaimed_slab; - } while (sc.nr_reclaimed < nr_pages && - reclaim_state.reclaimed_slab > 0); - } + p->flags |= PF_MEMALLOC; + lockdep_set_current_reclaim_state(sc.gfp_mask); + reclaim_state.reclaimed_slab = 0; + p->reclaim_state = &reclaim_state; + nr_reclaimed = do_try_to_free_pages(zonelist, &sc); -out: - current->reclaim_state = NULL; + p->reclaim_state = NULL; + lockdep_clear_current_reclaim_state(); + p->flags &= ~PF_MEMALLOC; - return sc.nr_reclaimed; + return nr_reclaimed; } #endif /* CONFIG_HIBERNATION */ @@ -2451,6 +2417,17 @@ int kswapd_run(int nid) return ret; } +/* + * Called by memory hotplug when all memory in a node is offlined. + */ +void kswapd_stop(int nid) +{ + struct task_struct *kswapd = NODE_DATA(nid)->kswapd; + + if (kswapd) + kthread_stop(kswapd); +} + static int __init kswapd_init(void) { int nid; @@ -2553,8 +2530,8 @@ static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order) .may_writepage = !!(zone_reclaim_mode & RECLAIM_WRITE), .may_unmap = !!(zone_reclaim_mode & RECLAIM_SWAP), .may_swap = 1, - .swap_cluster_max = max_t(unsigned long, nr_pages, - SWAP_CLUSTER_MAX), + .nr_to_reclaim = max_t(unsigned long, nr_pages, + SWAP_CLUSTER_MAX), .gfp_mask = gfp_mask, .swappiness = vm_swappiness, .order = order, diff --git a/mm/vmstat.c b/mm/vmstat.c index dad2327e458..6051fbab67b 100644 --- a/mm/vmstat.c +++ b/mm/vmstat.c @@ -683,6 +683,9 @@ static const char * const vmstat_text[] = { "slabs_scanned", "kswapd_steal", "kswapd_inodesteal", + "kswapd_low_wmark_hit_quickly", + "kswapd_high_wmark_hit_quickly", + "kswapd_skip_congestion_wait", "pageoutrun", "allocstall", diff --git a/net/irda/irnet/irnet.h b/net/irda/irnet/irnet.h index b001c361ad3..4300df35d37 100644 --- a/net/irda/irnet/irnet.h +++ b/net/irda/irnet/irnet.h @@ -249,6 +249,7 @@ #include <linux/poll.h> #include <linux/capability.h> #include <linux/ctype.h> /* isspace() */ +#include <linux/string.h> /* skip_spaces() */ #include <asm/uaccess.h> #include <linux/init.h> diff --git a/net/irda/irnet/irnet_ppp.c b/net/irda/irnet/irnet_ppp.c index 7dea882dbb7..156020d138b 100644 --- a/net/irda/irnet/irnet_ppp.c +++ b/net/irda/irnet/irnet_ppp.c @@ -76,9 +76,8 @@ irnet_ctrl_write(irnet_socket * ap, /* Look at the next command */ start = next; - /* Scrap whitespaces before the command */ - while(isspace(*start)) - start++; + /* Scrap whitespaces before the command */ + start = skip_spaces(start); /* ',' is our command separator */ next = strchr(start, ','); @@ -133,8 +132,7 @@ irnet_ctrl_write(irnet_socket * ap, char * endp; /* Scrap whitespaces before the command */ - while(isspace(*begp)) - begp++; + begp = skip_spaces(begp); /* Convert argument to a number (last arg is the base) */ addr = simple_strtoul(begp, &endp, 16); diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c index 1e428863574..c18286a2167 100644 --- a/net/iucv/af_iucv.c +++ b/net/iucv/af_iucv.c @@ -221,7 +221,7 @@ static int afiucv_pm_restore_thaw(struct device *dev) return 0; } -static struct dev_pm_ops afiucv_pm_ops = { +static const struct dev_pm_ops afiucv_pm_ops = { .prepare = afiucv_pm_prepare, .complete = afiucv_pm_complete, 
.freeze = afiucv_pm_freeze, diff --git a/net/iucv/iucv.c b/net/iucv/iucv.c index 3b1f5f5f8de..fd8b28361a6 100644 --- a/net/iucv/iucv.c +++ b/net/iucv/iucv.c @@ -93,7 +93,7 @@ static int iucv_pm_freeze(struct device *); static int iucv_pm_thaw(struct device *); static int iucv_pm_restore(struct device *); -static struct dev_pm_ops iucv_pm_ops = { +static const struct dev_pm_ops iucv_pm_ops = { .prepare = iucv_pm_prepare, .complete = iucv_pm_complete, .freeze = iucv_pm_freeze, diff --git a/net/netfilter/xt_recent.c b/net/netfilter/xt_recent.c index eb0ceb84652..fc70a49c0af 100644 --- a/net/netfilter/xt_recent.c +++ b/net/netfilter/xt_recent.c @@ -482,8 +482,7 @@ static ssize_t recent_old_proc_write(struct file *file, if (copy_from_user(buf, input, size)) return -EFAULT; - while (isspace(*c)) - c++; + c = skip_spaces(c); if (size - (c - buf) < 5) return c - buf; diff --git a/scripts/get_maintainer.pl b/scripts/get_maintainer.pl index 81a67a458e7..445e8845f0a 100755 --- a/scripts/get_maintainer.pl +++ b/scripts/get_maintainer.pl @@ -13,7 +13,7 @@ use strict; my $P = $0; -my $V = '0.21'; +my $V = '0.23'; use Getopt::Long qw(:config no_auto_abbrev); @@ -23,16 +23,19 @@ my $email_usename = 1; my $email_maintainer = 1; my $email_list = 1; my $email_subscriber_list = 0; -my $email_git = 1; my $email_git_penguin_chiefs = 0; +my $email_git = 1; +my $email_git_blame = 0; my $email_git_min_signatures = 1; my $email_git_max_maintainers = 5; my $email_git_min_percent = 5; my $email_git_since = "1-year-ago"; -my $email_git_blame = 0; +my $email_hg_since = "-365"; my $email_remove_duplicates = 1; my $output_multiline = 1; my $output_separator = ", "; +my $output_roles = 0; +my $output_rolestats = 0; my $scm = 0; my $web = 0; my $subsystem = 0; @@ -64,21 +67,52 @@ my $penguin_chiefs = "\(" . join("|",@penguin_chief_names) . "\)"; my $rfc822_lwsp = "(?:(?:\\r\\n)?[ \\t])"; my $rfc822_char = '[\\000-\\377]'; +# VCS command support: class-like functions and strings + +my %VCS_cmds; + +my %VCS_cmds_git = ( + "execute_cmd" => \&git_execute_cmd, + "available" => '(which("git") ne "") && (-d ".git")', + "find_signers_cmd" => "git log --since=\$email_git_since -- \$file", + "find_commit_signers_cmd" => "git log -1 \$commit", + "blame_range_cmd" => "git blame -l -L \$diff_start,+\$diff_length \$file", + "blame_file_cmd" => "git blame -l \$file", + "commit_pattern" => "^commit [0-9a-f]{40,40}", + "blame_commit_pattern" => "^([0-9a-f]+) " +); + +my %VCS_cmds_hg = ( + "execute_cmd" => \&hg_execute_cmd, + "available" => '(which("hg") ne "") && (-d ".hg")', + "find_signers_cmd" => + "hg log --date=\$email_hg_since" . + " --template='commit {node}\\n{desc}\\n' -- \$file", + "find_commit_signers_cmd" => "hg log --template='{desc}\\n' -r \$commit", + "blame_range_cmd" => "", # not supported + "blame_file_cmd" => "hg blame -c \$file", + "commit_pattern" => "^commit [0-9a-f]{40,40}", + "blame_commit_pattern" => "^([0-9a-f]+):" +); + if (!GetOptions( 'email!' => \$email, 'git!' => \$email_git, + 'git-blame!' => \$email_git_blame, 'git-chief-penguins!' => \$email_git_penguin_chiefs, 'git-min-signatures=i' => \$email_git_min_signatures, 'git-max-maintainers=i' => \$email_git_max_maintainers, 'git-min-percent=i' => \$email_git_min_percent, 'git-since=s' => \$email_git_since, - 'git-blame!' => \$email_git_blame, + 'hg-since=s' => \$email_hg_since, 'remove-duplicates!' => \$email_remove_duplicates, 'm!' => \$email_maintainer, 'n!' => \$email_usename, 'l!' => \$email_list, 's!' => \$email_subscriber_list, 'multiline!' 
=> \$output_multiline, + 'roles!' => \$output_roles, + 'rolestats!' => \$output_rolestats, 'separator=s' => \$output_separator, 'subsystem!' => \$subsystem, 'status!' => \$status, @@ -90,8 +124,7 @@ if (!GetOptions( 'v|version' => \$version, 'h|help' => \$help, )) { - usage(); - die "$P: invalid argument\n"; + die "$P: invalid argument - use --help if necessary\n"; } if ($help != 0) { @@ -113,6 +146,10 @@ if ($output_separator ne ", ") { $output_multiline = 0; } +if ($output_rolestats) { + $output_roles = 1; +} + my $selections = $email + $scm + $status + $subsystem + $web; if ($selections == 0) { usage(); @@ -175,7 +212,7 @@ if ($email_remove_duplicates) { next if ($line =~ m/^\s*$/); my ($name, $address) = parse_email($line); - $line = format_email($name, $address); + $line = format_email($name, $address, $email_usename); next if ($line =~ m/^\s*$/); @@ -207,12 +244,10 @@ foreach my $file (@ARGV) { push(@files, $file); if (-f $file && $keywords) { open(FILE, "<$file") or die "$P: Can't open ${file}\n"; - while (<FILE>) { - my $patch_line = $_; - foreach my $line (keys %keyword_hash) { - if ($patch_line =~ m/^.*$keyword_hash{$line}/x) { - push(@keyword_tvi, $line); - } + my $text = do { local($/) ; <FILE> }; + foreach my $line (keys %keyword_hash) { + if ($text =~ m/$keyword_hash{$line}/x) { + push(@keyword_tvi, $line); } } close(FILE); @@ -304,11 +339,11 @@ foreach my $file (@files) { } if ($email && $email_git) { - recent_git_signoffs($file); + vcs_file_signoffs($file); } if ($email && $email_git_blame) { - git_assign_blame($file); + vcs_file_blame($file); } } @@ -324,11 +359,11 @@ if ($email) { if ($chief =~ m/^(.*):(.*)/) { my $email_address; - $email_address = format_email($1, $2); + $email_address = format_email($1, $2, $email_usename); if ($email_git_penguin_chiefs) { - push(@email_to, $email_address); + push(@email_to, [$email_address, 'chief penguin']); } else { - @email_to = grep(!/${email_address}/, @email_to); + @email_to = grep($_->[0] !~ /${email_address}/, @email_to); } } } @@ -342,7 +377,7 @@ if ($email || $email_list) { if ($email_list) { @to = (@to, @list_to); } - output(uniq(@to)); + output(merge_email(@to)); } if ($scm) { @@ -398,13 +433,16 @@ MAINTAINER field selection options: --git-min-signatures => number of signatures required (default: 1) --git-max-maintainers => maximum maintainers to add (default: 5) --git-min-percent => minimum percentage of commits required (default: 5) - --git-since => git history to use (default: 1-year-ago) --git-blame => use git blame to find modified commits for patch or file + --git-since => git history to use (default: 1-year-ago) + --hg-since => hg history to use (default: -365) --m => include maintainer(s) if any --n => include name 'Full Name <addr\@domain.tld>' --l => include list(s) if any --s => include subscriber only list(s) if any --remove-duplicates => minimize duplicate email names/addresses + --roles => show roles (status:subsystem, git-signer, list, etc...) + --rolestats => show roles and statistics (commits/total_commits, %) --scm => print SCM tree(s) if any --status => print status if any --subsystem => print subsystem name if any @@ -430,11 +468,24 @@ Notes: directory are examined as git recurses directories. Any specified X: (exclude) pattern matches are _not_ ignored. Used with "--nogit", directory is used as a pattern match, - no individual file within the directory or subdirectory - is matched. + no individual file within the directory or subdirectory + is matched. 
Used with "--git-blame", does not iterate all files in directory Using "--git-blame" is slow and may add old committers and authors that are no longer active maintainers to the output. + Using "--roles" or "--rolestats" with git send-email --cc-cmd or any + other automated tools that expect only ["name"] <email address> + may not work because of additional output after <email address>. + Using "--rolestats" and "--git-blame" shows the #/total=% commits, + not the percentage of the entire file authored. # of commits is + not a good measure of amount of code authored. 1 major commit may + contain a thousand lines, 5 trivial commits may modify a single line. + If git is not installed, but mercurial (hg) is installed and an .hg + repository exists, the following options apply to mercurial: + --git, + --git-min-signatures, --git-max-maintainers, --git-min-percent, and + --git-blame + Use --hg-since not --git-since to control date selection EOT } @@ -493,7 +544,7 @@ sub parse_email { } sub format_email { - my ($name, $address) = @_; + my ($name, $address, $usename) = @_; my $formatted_email; @@ -506,11 +557,11 @@ sub format_email { $name = "\"$name\""; } - if ($email_usename) { + if ($usename) { if ("$name" eq "") { $formatted_email = "$address"; } else { - $formatted_email = "$name <${address}>"; + $formatted_email = "$name <$address>"; } } else { $formatted_email = $address; @@ -547,6 +598,71 @@ sub find_ending_index { return $index; } +sub get_maintainer_role { + my ($index) = @_; + + my $i; + my $start = find_starting_index($index); + my $end = find_ending_index($index); + + my $role; + my $subsystem = $typevalue[$start]; + if (length($subsystem) > 20) { + $subsystem = substr($subsystem, 0, 17); + $subsystem =~ s/\s*$//; + $subsystem = $subsystem . "..."; + } + + for ($i = $start + 1; $i < $end; $i++) { + my $tv = $typevalue[$i]; + if ($tv =~ m/^(\C):\s*(.*)/) { + my $ptype = $1; + my $pvalue = $2; + if ($ptype eq "S") { + $role = $pvalue; + } + } + } + + $role = lc($role); + if ($role eq "supported") { + $role = "supporter"; + } elsif ($role eq "maintained") { + $role = "maintainer"; + } elsif ($role eq "odd fixes") { + $role = "odd fixer"; + } elsif ($role eq "orphan") { + $role = "orphan minder"; + } elsif ($role eq "obsolete") { + $role = "obsolete minder"; + } elsif ($role eq "buried alive in reporters") { + $role = "chief penguin"; + } + + return $role . ":" . $subsystem; +} + +sub get_list_role { + my ($index) = @_; + + my $i; + my $start = find_starting_index($index); + my $end = find_ending_index($index); + + my $subsystem = $typevalue[$start]; + if (length($subsystem) > 20) { + $subsystem = substr($subsystem, 0, 17); + $subsystem =~ s/\s*$//; + $subsystem = $subsystem . "..."; + } + + if ($subsystem eq "THE REST") { + $subsystem = ""; + } + + return $subsystem; +} + sub add_categories { my ($index) = @_; @@ -564,17 +680,22 @@ sub add_categories { if ($ptype eq "L") { my $list_address = $pvalue; my $list_additional = ""; + my $list_role = get_list_role($i); + + if ($list_role ne "") { + $list_role = ":" . 
$list_role; + } if ($list_address =~ m/([^\s]+)\s+(.*)$/) { $list_address = $1; $list_additional = $2; } if ($list_additional =~ m/subscribers-only/) { if ($email_subscriber_list) { - push(@list_to, $list_address); + push(@list_to, [$list_address, "subscriber list${list_role}"]); } } else { if ($email_list) { - push(@list_to, $list_address); + push(@list_to, [$list_address, "open list${list_role}"]); } } } elsif ($ptype eq "M") { @@ -585,13 +706,14 @@ sub add_categories { if ($tv =~ m/^(\C):\s*(.*)/) { if ($1 eq "P") { $name = $2; - $pvalue = format_email($name, $address); + $pvalue = format_email($name, $address, $email_usename); } } } } if ($email_maintainer) { - push_email_addresses($pvalue); + my $role = get_maintainer_role($i); + push_email_addresses($pvalue, $role); } } elsif ($ptype eq "T") { push(@scm, $pvalue); @@ -618,7 +740,7 @@ sub email_inuse { } sub push_email_address { - my ($line) = @_; + my ($line, $role) = @_; my ($name, $address) = parse_email($line); @@ -627,9 +749,9 @@ sub push_email_address { } if (!$email_remove_duplicates) { - push(@email_to, format_email($name, $address)); + push(@email_to, [format_email($name, $address, $email_usename), $role]); } elsif (!email_inuse($name, $address)) { - push(@email_to, format_email($name, $address)); + push(@email_to, [format_email($name, $address, $email_usename), $role]); $email_hash_name{$name}++; $email_hash_address{$address}++; } @@ -638,24 +760,52 @@ sub push_email_address { } sub push_email_addresses { - my ($address) = @_; + my ($address, $role) = @_; my @address_list = (); if (rfc822_valid($address)) { - push_email_address($address); + push_email_address($address, $role); } elsif (@address_list = rfc822_validlist($address)) { my $array_count = shift(@address_list); while (my $entry = shift(@address_list)) { - push_email_address($entry); + push_email_address($entry, $role); } } else { - if (!push_email_address($address)) { + if (!push_email_address($address, $role)) { warn("Invalid MAINTAINERS address: '" . $address . 
"'\n"); } } } +sub add_role { + my ($line, $role) = @_; + + my ($name, $address) = parse_email($line); + my $email = format_email($name, $address, $email_usename); + + foreach my $entry (@email_to) { + if ($email_remove_duplicates) { + my ($entry_name, $entry_address) = parse_email($entry->[0]); + if ($name eq $entry_name || $address eq $entry_address) { + if ($entry->[1] eq "") { + $entry->[1] = "$role"; + } else { + $entry->[1] = "$entry->[1],$role"; + } + } + } else { + if ($email eq $entry->[0]) { + if ($entry->[1] eq "") { + $entry->[1] = "$role"; + } else { + $entry->[1] = "$entry->[1],$role"; + } + } + } + } +} + sub which { my ($bin) = @_; @@ -669,7 +819,7 @@ sub which { } sub mailmap { - my @lines = @_; + my (@lines) = @_; my %hash; foreach my $line (@lines) { @@ -678,14 +828,14 @@ sub mailmap { $hash{$name} = $address; } elsif ($address ne $hash{$name}) { $address = $hash{$name}; - $line = format_email($name, $address); + $line = format_email($name, $address, $email_usename); } if (exists($mailmap{$name})) { my $obj = $mailmap{$name}; foreach my $map_address (@$obj) { if (($map_address eq $address) && ($map_address ne $hash{$name})) { - $line = format_email($name, $hash{$name}); + $line = format_email($name, $hash{$name}, $email_usename); } } } @@ -694,34 +844,38 @@ sub mailmap { return @lines; } -sub recent_git_signoffs { - my ($file) = @_; - - my $sign_offs = ""; - my $cmd = ""; - my $output = ""; - my $count = 0; +sub git_execute_cmd { + my ($cmd) = @_; my @lines = (); - my %hash; - my $total_sign_offs; - if (which("git") eq "") { - warn("$P: git not found. Add --nogit to options?\n"); - return; - } - if (!(-d ".git")) { - warn("$P: .git directory not found. Use a git repository for better results.\n"); - warn("$P: perhaps 'git clone git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6.git'\n"); - return; - } + my $output = `$cmd`; + $output =~ s/^\s*//gm; + @lines = split("\n", $output); - $cmd = "git log --since=${email_git_since} -- ${file}"; + return @lines; +} - $output = `${cmd}`; - $output =~ s/^\s*//gm; +sub hg_execute_cmd { + my ($cmd) = @_; + my @lines = (); + my $output = `$cmd`; @lines = split("\n", $output); + return @lines; +} + +sub vcs_find_signers { + my ($cmd) = @_; + my @lines = (); + my $commits; + + @lines = &{$VCS_cmds{"execute_cmd"}}($cmd); + + my $pattern = $VCS_cmds{"commit_pattern"}; + + $commits = grep(/$pattern/, @lines); # of commits + @lines = grep(/^[-_ a-z]+by:.*\@.*$/i, @lines); if (!$email_git_penguin_chiefs) { @lines = grep(!/${penguin_chiefs}/i, @lines); @@ -729,111 +883,183 @@ sub recent_git_signoffs { # cut -f2- -d":" s/.*:\s*(.+)\s*/$1/ for (@lines); - $total_sign_offs = @lines; +## Reformat email addresses (with names) to avoid badly written signatures - if ($email_remove_duplicates) { - @lines = mailmap(@lines); + foreach my $line (@lines) { + my ($name, $address) = parse_email($line); + $line = format_email($name, $address, 1); } - @lines = sort(@lines); - - # uniq -c - $hash{$_}++ for @lines; - - # sort -rn - foreach my $line (sort {$hash{$b} <=> $hash{$a}} keys %hash) { - my $sign_offs = $hash{$line}; - $count++; - last if ($sign_offs < $email_git_min_signatures || - $count > $email_git_max_maintainers || - $sign_offs * 100 / $total_sign_offs < $email_git_min_percent); - push_email_address($line); - } + return ($commits, @lines); } -sub save_commits { - my ($cmd, @commits) = @_; - my $output; +sub vcs_save_commits { + my ($cmd) = @_; my @lines = (); + my @commits = (); - $output = `${cmd}`; + @lines = 
&{$VCS_cmds{"execute_cmd"}}($cmd); - @lines = split("\n", $output); foreach my $line (@lines) { - if ($line =~ m/^(\w+) /) { - push (@commits, $1); + if ($line =~ m/$VCS_cmds{"blame_commit_pattern"}/) { + push(@commits, $1); } } + return @commits; } -sub git_assign_blame { +sub vcs_blame { my ($file) = @_; - - my @lines = (); - my @commits = (); my $cmd; - my $output; - my %hash; - my $total_sign_offs; - my $count; + my @commits = (); + + return @commits if (!(-f $file)); + + if (@range && $VCS_cmds{"blame_range_cmd"} eq "") { + my @all_commits = (); + + $cmd = $VCS_cmds{"blame_file_cmd"}; + $cmd =~ s/(\$\w+)/$1/eeg; #interpolate $cmd + @all_commits = vcs_save_commits($cmd); - if (@range) { foreach my $file_range_diff (@range) { next if (!($file_range_diff =~ m/(.+):(.+):(.+)/)); my $diff_file = $1; my $diff_start = $2; my $diff_length = $3; - next if (!("$file" eq "$diff_file")); - $cmd = "git blame -l -L $diff_start,+$diff_length $file"; - @commits = save_commits($cmd, @commits); + next if ("$file" ne "$diff_file"); + for (my $i = $diff_start; $i < $diff_start + $diff_length; $i++) { + push(@commits, $all_commits[$i]); + } } - } else { - if (-f $file) { - $cmd = "git blame -l $file"; - @commits = save_commits($cmd, @commits); + } elsif (@range) { + foreach my $file_range_diff (@range) { + next if (!($file_range_diff =~ m/(.+):(.+):(.+)/)); + my $diff_file = $1; + my $diff_start = $2; + my $diff_length = $3; + next if ("$file" ne "$diff_file"); + $cmd = $VCS_cmds{"blame_range_cmd"}; + $cmd =~ s/(\$\w+)/$1/eeg; #interpolate $cmd + push(@commits, vcs_save_commits($cmd)); } + } else { + $cmd = $VCS_cmds{"blame_file_cmd"}; + $cmd =~ s/(\$\w+)/$1/eeg; #interpolate $cmd + @commits = vcs_save_commits($cmd); } - $total_sign_offs = 0; - @commits = uniq(@commits); - foreach my $commit (@commits) { - $cmd = "git log -1 ${commit}"; + return @commits; +} - $output = `${cmd}`; - $output =~ s/^\s*//gm; - @lines = split("\n", $output); +my $printed_novcs = 0; +sub vcs_exists { + %VCS_cmds = %VCS_cmds_git; + return 1 if eval $VCS_cmds{"available"}; + %VCS_cmds = %VCS_cmds_hg; + return 1 if eval $VCS_cmds{"available"}; + %VCS_cmds = (); + if (!$printed_novcs) { + warn("$P: No supported VCS found. Add --nogit to options?\n"); + warn("Using a git repository produces better results.\n"); + warn("Try Linus Torvalds' latest git repository using:\n"); + warn("git clone git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6.git\n"); + $printed_novcs = 1; + } + return 0; +} - @lines = grep(/^[-_ a-z]+by:.*\@.*$/i, @lines); - if (!$email_git_penguin_chiefs) { - @lines = grep(!/${penguin_chiefs}/i, @lines); - } +sub vcs_assign { + my ($role, $divisor, @lines) = @_; - # cut -f2- -d":" - s/.*:\s*(.+)\s*/$1/ for (@lines); + my %hash; + my $count = 0; - $total_sign_offs += @lines; + return if (@lines <= 0); - if ($email_remove_duplicates) { - @lines = mailmap(@lines); - } + if ($divisor <= 0) { + warn("Bad divisor in " . (caller(0))[3] . 
": $divisor\n"); + $divisor = 1; + } - $hash{$_}++ for @lines; + if ($email_remove_duplicates) { + @lines = mailmap(@lines); } - $count = 0; + @lines = sort(@lines); + + # uniq -c + $hash{$_}++ for @lines; + + # sort -rn foreach my $line (sort {$hash{$b} <=> $hash{$a}} keys %hash) { my $sign_offs = $hash{$line}; + my $percent = $sign_offs * 100 / $divisor; + + $percent = 100 if ($percent > 100); $count++; last if ($sign_offs < $email_git_min_signatures || $count > $email_git_max_maintainers || - $sign_offs * 100 / $total_sign_offs < $email_git_min_percent); - push_email_address($line); + $percent < $email_git_min_percent); + push_email_address($line, ''); + if ($output_rolestats) { + my $fmt_percent = sprintf("%.0f", $percent); + add_role($line, "$role:$sign_offs/$divisor=$fmt_percent%"); + } else { + add_role($line, $role); + } + } +} + +sub vcs_file_signoffs { + my ($file) = @_; + + my @signers = (); + my $commits; + + return if (!vcs_exists()); + + my $cmd = $VCS_cmds{"find_signers_cmd"}; + $cmd =~ s/(\$\w+)/$1/eeg; # interpolate $cmd + + ($commits, @signers) = vcs_find_signers($cmd); + vcs_assign("commit_signer", $commits, @signers); +} + +sub vcs_file_blame { + my ($file) = @_; + + my @signers = (); + my @commits = (); + my $total_commits; + + return if (!vcs_exists()); + + @commits = vcs_blame($file); + @commits = uniq(@commits); + $total_commits = @commits; + + foreach my $commit (@commits) { + my $commit_count; + my @commit_signers = (); + + my $cmd = $VCS_cmds{"find_commit_signers_cmd"}; + $cmd =~ s/(\$\w+)/$1/eeg; #interpolate $cmd + + ($commit_count, @commit_signers) = vcs_find_signers($cmd); + push(@signers, @commit_signers); + } + + if ($from_filename) { + vcs_assign("commits", $total_commits, @signers); + } else { + vcs_assign("modified commits", $total_commits, @signers); } } sub uniq { - my @parms = @_; + my (@parms) = @_; my %saw; @parms = grep(!$saw{$_}++, @parms); @@ -841,7 +1067,7 @@ sub uniq { } sub sort_and_uniq { - my @parms = @_; + my (@parms) = @_; my %saw; @parms = sort @parms; @@ -849,8 +1075,27 @@ sub sort_and_uniq { return @parms; } +sub merge_email { + my @lines; + my %saw; + + for (@_) { + my ($address, $role) = @$_; + if (!$saw{$address}) { + if ($output_roles) { + push(@lines, "$address ($role)"); + } else { + push(@lines, $address); + } + $saw{$address} = 1; + } + } + + return @lines; +} + sub output { - my @parms = @_; + my (@parms) = @_; if ($output_multiline) { foreach my $line (@parms) { @@ -947,11 +1192,9 @@ sub rfc822_validlist ($) { if ($s =~ m/^(?:$rfc822re)?(?:,(?:$rfc822re)?)*$/so && $s =~ m/^$rfc822_char*$/) { while ($s =~ m/(?:^|,$rfc822_lwsp*)($rfc822re)/gos) { - push @r, $1; + push(@r, $1); } return wantarray ? (scalar(@r), @r) : 1; } - else { - return wantarray ? () : 0; - } + return wantarray ? 
() : 0; } diff --git a/sound/arm/pxa2xx-ac97.c b/sound/arm/pxa2xx-ac97.c index b4b48afb6de..5d9411839cd 100644 --- a/sound/arm/pxa2xx-ac97.c +++ b/sound/arm/pxa2xx-ac97.c @@ -159,7 +159,7 @@ static int pxa2xx_ac97_resume(struct device *dev) return ret; } -static struct dev_pm_ops pxa2xx_ac97_pm_ops = { +static const struct dev_pm_ops pxa2xx_ac97_pm_ops = { .suspend = pxa2xx_ac97_suspend, .resume = pxa2xx_ac97_resume, }; diff --git a/sound/isa/gus/gus_mem.c b/sound/isa/gus/gus_mem.c index 661205c4dce..af888a022fc 100644 --- a/sound/isa/gus/gus_mem.c +++ b/sound/isa/gus/gus_mem.c @@ -127,7 +127,8 @@ static struct snd_gf1_mem_block *snd_gf1_mem_share(struct snd_gf1_mem * alloc, !share_id[2] && !share_id[3]) return NULL; for (block = alloc->first; block; block = block->next) - if (!memcmp(share_id, block->share_id, sizeof(share_id))) + if (!memcmp(share_id, block->share_id, + sizeof(block->share_id))) return block; return NULL; } diff --git a/sound/pci/ac97/ac97_codec.c b/sound/pci/ac97/ac97_codec.c index 20cb60afb20..c1192062300 100644 --- a/sound/pci/ac97/ac97_codec.c +++ b/sound/pci/ac97/ac97_codec.c @@ -2122,7 +2122,7 @@ int snd_ac97_mixer(struct snd_ac97_bus *bus, struct snd_ac97_template *template, } /* nothing should be in powerdown mode */ snd_ac97_write_cache(ac97, AC97_GENERAL_PURPOSE, 0); - end_time = jiffies + msecs_to_jiffies(120); + end_time = jiffies + msecs_to_jiffies(5000); do { if ((snd_ac97_read(ac97, AC97_POWERDOWN) & 0x0f) == 0x0f) goto __ready_ok; diff --git a/sound/pci/cs5535audio/Makefile b/sound/pci/cs5535audio/Makefile index fda7a94c992..ccc642269b9 100644 --- a/sound/pci/cs5535audio/Makefile +++ b/sound/pci/cs5535audio/Makefile @@ -4,9 +4,7 @@ snd-cs5535audio-y := cs5535audio.o cs5535audio_pcm.o snd-cs5535audio-$(CONFIG_PM) += cs5535audio_pm.o -ifdef CONFIG_MGEODE_LX snd-cs5535audio-$(CONFIG_OLPC) += cs5535audio_olpc.o -endif # Toplevel Module Dependency obj-$(CONFIG_SND_CS5535AUDIO) += snd-cs5535audio.o diff --git a/sound/pci/cs5535audio/cs5535audio.c b/sound/pci/cs5535audio/cs5535audio.c index 05f56e04849..91e7faf69bb 100644 --- a/sound/pci/cs5535audio/cs5535audio.c +++ b/sound/pci/cs5535audio/cs5535audio.c @@ -389,6 +389,7 @@ probefail_out: static void __devexit snd_cs5535audio_remove(struct pci_dev *pci) { + olpc_quirks_cleanup(); snd_card_free(pci_get_drvdata(pci)); pci_set_drvdata(pci, NULL); } diff --git a/sound/pci/cs5535audio/cs5535audio.h b/sound/pci/cs5535audio/cs5535audio.h index 7a298ac662e..51966d782a3 100644 --- a/sound/pci/cs5535audio/cs5535audio.h +++ b/sound/pci/cs5535audio/cs5535audio.h @@ -99,10 +99,11 @@ int snd_cs5535audio_suspend(struct pci_dev *pci, pm_message_t state); int snd_cs5535audio_resume(struct pci_dev *pci); #endif -#if defined(CONFIG_OLPC) && defined(CONFIG_MGEODE_LX) +#ifdef CONFIG_OLPC void __devinit olpc_prequirks(struct snd_card *card, struct snd_ac97_template *ac97); int __devinit olpc_quirks(struct snd_card *card, struct snd_ac97 *ac97); +void __devexit olpc_quirks_cleanup(void); void olpc_analog_input(struct snd_ac97 *ac97, int on); void olpc_mic_bias(struct snd_ac97 *ac97, int on); @@ -128,6 +129,7 @@ static inline int olpc_quirks(struct snd_card *card, struct snd_ac97 *ac97) { return 0; } +static inline void olpc_quirks_cleanup(void) { } static inline void olpc_analog_input(struct snd_ac97 *ac97, int on) { } static inline void olpc_mic_bias(struct snd_ac97 *ac97, int on) { } static inline void olpc_capture_open(struct snd_ac97 *ac97) { } diff --git a/sound/pci/cs5535audio/cs5535audio_olpc.c 
b/sound/pci/cs5535audio/cs5535audio_olpc.c index 5c6814335cd..50da49be9ae 100644 --- a/sound/pci/cs5535audio/cs5535audio_olpc.c +++ b/sound/pci/cs5535audio/cs5535audio_olpc.c @@ -13,10 +13,13 @@ #include <sound/info.h> #include <sound/control.h> #include <sound/ac97_codec.h> +#include <linux/gpio.h> #include <asm/olpc.h> #include "cs5535audio.h" +#define DRV_NAME "cs5535audio-olpc" + /* * OLPC has an additional feature on top of the regular AD1888 codec features. * It has an Analog Input mode that is switched into (after disabling the @@ -38,10 +41,7 @@ void olpc_analog_input(struct snd_ac97 *ac97, int on) } /* set Analog Input through GPIO */ - if (on) - geode_gpio_set(OLPC_GPIO_MIC_AC, GPIO_OUTPUT_VAL); - else - geode_gpio_clear(OLPC_GPIO_MIC_AC, GPIO_OUTPUT_VAL); + gpio_set_value(OLPC_GPIO_MIC_AC, on); } /* @@ -73,8 +73,7 @@ static int olpc_dc_info(struct snd_kcontrol *kctl, static int olpc_dc_get(struct snd_kcontrol *kctl, struct snd_ctl_elem_value *v) { - v->value.integer.value[0] = geode_gpio_isset(OLPC_GPIO_MIC_AC, - GPIO_OUTPUT_VAL); + v->value.integer.value[0] = gpio_get_value(OLPC_GPIO_MIC_AC); return 0; } @@ -153,6 +152,12 @@ int __devinit olpc_quirks(struct snd_card *card, struct snd_ac97 *ac97) if (!machine_is_olpc()) return 0; + if (gpio_request(OLPC_GPIO_MIC_AC, DRV_NAME)) { + printk(KERN_ERR DRV_NAME ": unable to allocate MIC GPIO\n"); + return -EIO; + } + gpio_direction_output(OLPC_GPIO_MIC_AC, 0); + /* drop the original AD1888 HPF control */ memset(&elem, 0, sizeof(elem)); elem.iface = SNDRV_CTL_ELEM_IFACE_MIXER; @@ -169,11 +174,18 @@ int __devinit olpc_quirks(struct snd_card *card, struct snd_ac97 *ac97) for (i = 0; i < ARRAY_SIZE(olpc_cs5535audio_ctls); i++) { err = snd_ctl_add(card, snd_ctl_new1(&olpc_cs5535audio_ctls[i], ac97->private_data)); - if (err < 0) + if (err < 0) { + gpio_free(OLPC_GPIO_MIC_AC); return err; + } } /* turn off the mic by default */ olpc_mic_bias(ac97, 0); return 0; } + +void __devexit olpc_quirks_cleanup(void) +{ + gpio_free(OLPC_GPIO_MIC_AC); +} diff --git a/sound/pci/hda/hda_hwdep.c b/sound/pci/hda/hda_hwdep.c index d24328661c6..40ccb419b6e 100644 --- a/sound/pci/hda/hda_hwdep.c +++ b/sound/pci/hda/hda_hwdep.c @@ -24,6 +24,7 @@ #include <linux/compat.h> #include <linux/mutex.h> #include <linux/ctype.h> +#include <linux/string.h> #include <linux/firmware.h> #include <sound/core.h> #include "hda_codec.h" @@ -428,8 +429,7 @@ static int parse_hints(struct hda_codec *codec, const char *buf) char *key, *val; struct hda_hint *hint; - while (isspace(*buf)) - buf++; + buf = skip_spaces(buf); if (!*buf || *buf == '#' || *buf == '\n') return 0; if (*buf == '=') @@ -444,8 +444,7 @@ static int parse_hints(struct hda_codec *codec, const char *buf) return -EINVAL; } *val++ = 0; - while (isspace(*val)) - val++; + val = skip_spaces(val); remove_trail_spaces(key); remove_trail_spaces(val); hint = get_hint(codec, key); diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c index e54420e691a..9b56f937913 100644 --- a/sound/pci/hda/hda_intel.c +++ b/sound/pci/hda/hda_intel.c @@ -2713,6 +2713,9 @@ static struct pci_device_id azx_ids[] = { { PCI_DEVICE(0x10de, 0x0ac1), .driver_data = AZX_DRIVER_NVIDIA }, { PCI_DEVICE(0x10de, 0x0ac2), .driver_data = AZX_DRIVER_NVIDIA }, { PCI_DEVICE(0x10de, 0x0ac3), .driver_data = AZX_DRIVER_NVIDIA }, + { PCI_DEVICE(0x10de, 0x0be2), .driver_data = AZX_DRIVER_NVIDIA }, + { PCI_DEVICE(0x10de, 0x0be3), .driver_data = AZX_DRIVER_NVIDIA }, + { PCI_DEVICE(0x10de, 0x0be4), .driver_data = AZX_DRIVER_NVIDIA }, { 
PCI_DEVICE(0x10de, 0x0d94), .driver_data = AZX_DRIVER_NVIDIA }, { PCI_DEVICE(0x10de, 0x0d95), .driver_data = AZX_DRIVER_NVIDIA }, { PCI_DEVICE(0x10de, 0x0d96), .driver_data = AZX_DRIVER_NVIDIA }, diff --git a/sound/pci/hda/patch_analog.c b/sound/pci/hda/patch_analog.c index 447eda1f677..1a36137e13e 100644 --- a/sound/pci/hda/patch_analog.c +++ b/sound/pci/hda/patch_analog.c @@ -1789,6 +1789,14 @@ static int patch_ad1981(struct hda_codec *codec) codec->patch_ops.init = ad1981_hp_init; codec->patch_ops.unsol_event = ad1981_hp_unsol_event; + /* set the upper-limit for mixer amp to 0dB for avoiding the + * possible damage by overloading + */ + snd_hda_override_amp_caps(codec, 0x11, HDA_INPUT, + (0x17 << AC_AMPCAP_OFFSET_SHIFT) | + (0x17 << AC_AMPCAP_NUM_STEPS_SHIFT) | + (0x05 << AC_AMPCAP_STEP_SIZE_SHIFT) | + (1 << AC_AMPCAP_MUTE_SHIFT)); break; case AD1981_THINKPAD: spec->mixers[0] = ad1981_thinkpad_mixers; diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index 888b6313eec..aeed4cc5aa7 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c @@ -6248,6 +6248,7 @@ static const char *alc260_models[ALC260_MODEL_LAST] = { static struct snd_pci_quirk alc260_cfg_tbl[] = { SND_PCI_QUIRK(0x1025, 0x007b, "Acer C20x", ALC260_ACER), + SND_PCI_QUIRK(0x1025, 0x007f, "Acer", ALC260_WILL), SND_PCI_QUIRK(0x1025, 0x008f, "Acer", ALC260_ACER), SND_PCI_QUIRK(0x1509, 0x4540, "Favorit 100XS", ALC260_FAVORIT100), SND_PCI_QUIRK(0x103c, 0x2808, "HP d5700", ALC260_HP_3013), diff --git a/sound/soc/codecs/wm8900.c b/sound/soc/codecs/wm8900.c index c9438dd62df..dbc368c0826 100644 --- a/sound/soc/codecs/wm8900.c +++ b/sound/soc/codecs/wm8900.c @@ -199,7 +199,7 @@ static void wm8900_reset(struct snd_soc_codec *codec) snd_soc_write(codec, WM8900_REG_RESET, 0); memcpy(codec->reg_cache, wm8900_reg_defaults, - sizeof(codec->reg_cache)); + sizeof(wm8900_reg_defaults)); } static int wm8900_hp_event(struct snd_soc_dapm_widget *w, diff --git a/sound/soc/s3c24xx/s3c24xx_simtec.c b/sound/soc/s3c24xx/s3c24xx_simtec.c index d441c3b6463..4984754f329 100644 --- a/sound/soc/s3c24xx/s3c24xx_simtec.c +++ b/sound/soc/s3c24xx/s3c24xx_simtec.c @@ -312,7 +312,7 @@ int simtec_audio_resume(struct device *dev) return 0; } -struct dev_pm_ops simtec_audio_pmops = { +const struct dev_pm_ops simtec_audio_pmops = { .resume = simtec_audio_resume, }; EXPORT_SYMBOL_GPL(simtec_audio_pmops); diff --git a/sound/soc/s3c24xx/s3c24xx_simtec.h b/sound/soc/s3c24xx/s3c24xx_simtec.h index 2714203af16..e18faee30cc 100644 --- a/sound/soc/s3c24xx/s3c24xx_simtec.h +++ b/sound/soc/s3c24xx/s3c24xx_simtec.h @@ -15,7 +15,7 @@ extern int simtec_audio_core_probe(struct platform_device *pdev, extern int simtec_audio_remove(struct platform_device *pdev); #ifdef CONFIG_PM -extern struct dev_pm_ops simtec_audio_pmops; +extern const struct dev_pm_ops simtec_audio_pmops; #define simtec_audio_pm &simtec_audio_pmops #else #define simtec_audio_pm NULL diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c index ef8f28284cb..0a6440c6f54 100644 --- a/sound/soc/soc-core.c +++ b/sound/soc/soc-core.c @@ -1236,7 +1236,7 @@ static int soc_poweroff(struct device *dev) return 0; } -static struct dev_pm_ops soc_pm_ops = { +static const struct dev_pm_ops soc_pm_ops = { .suspend = soc_suspend, .resume = soc_resume, .poweroff = soc_poweroff, |
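Several hunks above (net/irda/irnet/irnet_ppp.c, net/netfilter/xt_recent.c and sound/pci/hda/hda_hwdep.c) replace open-coded "while (isspace(*p)) p++;" loops with a shared skip_spaces() helper, which is why those files also gain #include <linux/string.h>. The userspace sketch below only illustrates that pattern; it is not the kernel's lib/string.c code, and the local skip_spaces() definition and the sample "cmd" string are stand-ins chosen for this example.

#include <ctype.h>
#include <stdio.h>

/* Minimal stand-in for the kernel helper: return a pointer to the
 * first non-whitespace character of str; the string is not modified. */
static char *skip_spaces(const char *str)
{
	while (isspace((unsigned char)*str))
		++str;
	return (char *)str;
}

int main(void)
{
	const char *cmd = "   connect  10.0.0.1";

	/* Old style: each caller advanced the pointer by hand in a loop.
	 * New style: a single call states the intent. */
	printf("parsed command: \"%s\"\n", skip_spaces(cmd));
	return 0;
}

Folding the loop into one helper removes duplicated pointer arithmetic from every call site and leaves a single place to handle details such as casting to unsigned char before isspace().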