-rw-r--r--  Documentation/DocBook/Makefile | 4
-rw-r--r--  Documentation/DocBook/deviceiobook.tmpl | 2
-rw-r--r--  Documentation/DocBook/mcabook.tmpl | 2
-rw-r--r--  Documentation/DocBook/wanbook.tmpl | 2
-rw-r--r--  Documentation/DocBook/z8530book.tmpl | 2
-rw-r--r--  Documentation/ManagementStyle | 2
-rw-r--r--  Documentation/accounting/.gitignore | 1
-rw-r--r--  Documentation/auxdisplay/.gitignore | 1
-rw-r--r--  Documentation/connector/.gitignore | 1
-rw-r--r--  Documentation/filesystems/Locking | 12
-rw-r--r--  Documentation/filesystems/vfs.txt | 39
-rw-r--r--  Documentation/ia64/.gitignore | 1
-rw-r--r--  Documentation/isdn/CREDITS | 2
-rw-r--r--  Documentation/networking/.gitignore | 1
-rw-r--r--  Documentation/networking/dmfe.txt | 2
-rw-r--r--  Documentation/pcmcia/.gitignore | 1
-rw-r--r--  Documentation/scsi/aacraid.txt | 2
-rw-r--r--  Documentation/spi/.gitignore | 2
-rw-r--r--  Documentation/video4linux/.gitignore | 1
-rw-r--r--  Documentation/video4linux/bttv/CONTRIBUTORS | 2
-rw-r--r--  Documentation/vm/.gitignore | 1
-rw-r--r--  Documentation/watchdog/src/.gitignore | 2
-rw-r--r--  MAINTAINERS | 10
-rw-r--r--  arch/x86/boot/compressed/.gitignore | 2
-rw-r--r--  drivers/block/loop.c | 5
-rw-r--r--  drivers/char/ipmi/ipmi_devintf.c | 1
-rw-r--r--  drivers/edac/Kconfig | 7
-rw-r--r--  drivers/edac/Makefile | 1
-rw-r--r--  drivers/edac/cell_edac.c | 3
-rw-r--r--  drivers/edac/x38_edac.c | 524
-rw-r--r--  drivers/message/i2o/i2o_block.c | 12
-rw-r--r--  drivers/misc/hdpuftrs/hdpu_nexus.c | 1
-rw-r--r--  drivers/misc/sgi-xp/Makefile | 4
-rw-r--r--  drivers/misc/sgi-xp/xp.h | 4
-rw-r--r--  drivers/misc/sgi-xp/xpc_main.c | 4
-rw-r--r--  drivers/misc/sony-laptop.c | 6
-rw-r--r--  drivers/rtc/rtc-ds3234.c | 4
-rw-r--r--  drivers/rtc/rtc-s3c.c | 8
-rw-r--r--  drivers/video/console/fbcon.c | 2
-rw-r--r--  drivers/video/fbmem.c | 4
-rw-r--r--  drivers/video/via/global.h | 3
-rw-r--r--  fs/ecryptfs/crypto.c | 15
-rw-r--r--  fs/fat/inode.c | 2
-rw-r--r--  fs/jbd/transaction.c | 1
-rw-r--r--  fs/libfs.c | 2
-rw-r--r--  fs/ocfs2/file.c | 3
-rw-r--r--  fs/splice.c | 4
-rw-r--r--  include/linux/cgroup.h | 4
-rw-r--r--  include/linux/freezer.h | 5
-rw-r--r--  include/linux/fs.h | 7
-rw-r--r--  include/linux/kernel.h | 4
-rw-r--r--  include/linux/resource.h | 4
-rw-r--r--  include/linux/security.h | 6
-rw-r--r--  include/linux/spi/spi_bitbang.h | 3
-rw-r--r--  init/Kconfig | 16
-rw-r--r--  init/do_mounts_md.c | 2
-rw-r--r--  kernel/cgroup_freezer.c | 51
-rw-r--r--  kernel/freezer.c | 20
-rw-r--r--  kernel/profile.c | 2
-rw-r--r--  kernel/signal.c | 3
-rw-r--r--  mm/filemap.c | 242
-rw-r--r--  mm/mmap.c | 3
-rw-r--r--  mm/nommu.c | 3
-rw-r--r--  mm/shmem.c | 8
-rw-r--r--  mm/vmalloc.c | 3
-rw-r--r--  security/security.c | 9
66 files changed, 705 insertions, 407 deletions
diff --git a/Documentation/DocBook/Makefile b/Documentation/DocBook/Makefile
index fabc06466b9..9b1f6ca100d 100644
--- a/Documentation/DocBook/Makefile
+++ b/Documentation/DocBook/Makefile
@@ -136,7 +136,7 @@ quiet_cmd_db2ps = PS $@
%.ps : %.xml
$(call cmd,db2ps)
-quiet_cmd_db2pdf = PDF $@
+quiet_cmd_db2pdf = PDF $@
cmd_db2pdf = $(subst TYPE,pdf, $($(PDF_METHOD)template))
%.pdf : %.xml
$(call cmd,db2pdf)
@@ -148,7 +148,7 @@ build_main_index = rm -rf $(main_idx) && \
echo '<h2>Kernel Version: $(KERNELVERSION)</h2>' >> $(main_idx) && \
cat $(HTML) >> $(main_idx)
-quiet_cmd_db2html = HTML $@
+quiet_cmd_db2html = HTML $@
cmd_db2html = xmlto xhtml $(XMLTOFLAGS) -o $(patsubst %.html,%,$@) $< && \
echo '<a HREF="$(patsubst %.html,%,$(notdir $@))/index.html"> \
$(patsubst %.html,%,$(notdir $@))</a><p>' > $@
diff --git a/Documentation/DocBook/deviceiobook.tmpl b/Documentation/DocBook/deviceiobook.tmpl
index 60d6e0b81bd..3ed88126ab8 100644
--- a/Documentation/DocBook/deviceiobook.tmpl
+++ b/Documentation/DocBook/deviceiobook.tmpl
@@ -24,7 +24,7 @@
<surname>Cox</surname>
<affiliation>
<address>
- <email>alan@redhat.com</email>
+ <email>alan@lxorguk.ukuu.org.uk</email>
</address>
</affiliation>
</author>
diff --git a/Documentation/DocBook/mcabook.tmpl b/Documentation/DocBook/mcabook.tmpl
index 499eddc2d07..467ccac6ec5 100644
--- a/Documentation/DocBook/mcabook.tmpl
+++ b/Documentation/DocBook/mcabook.tmpl
@@ -12,7 +12,7 @@
<surname>Cox</surname>
<affiliation>
<address>
- <email>alan@redhat.com</email>
+ <email>alan@lxorguk.ukuu.org.uk</email>
</address>
</affiliation>
</author>
diff --git a/Documentation/DocBook/wanbook.tmpl b/Documentation/DocBook/wanbook.tmpl
index 9eebcc304de..8c93db122f0 100644
--- a/Documentation/DocBook/wanbook.tmpl
+++ b/Documentation/DocBook/wanbook.tmpl
@@ -12,7 +12,7 @@
<surname>Cox</surname>
<affiliation>
<address>
- <email>alan@redhat.com</email>
+ <email>alan@lxorguk.ukuu.org.uk</email>
</address>
</affiliation>
</author>
diff --git a/Documentation/DocBook/z8530book.tmpl b/Documentation/DocBook/z8530book.tmpl
index a42a8a4c768..6f3883be877 100644
--- a/Documentation/DocBook/z8530book.tmpl
+++ b/Documentation/DocBook/z8530book.tmpl
@@ -12,7 +12,7 @@
<surname>Cox</surname>
<affiliation>
<address>
- <email>alan@redhat.com</email>
+ <email>alan@lxorguk.ukuu.org.uk</email>
</address>
</affiliation>
</author>
diff --git a/Documentation/ManagementStyle b/Documentation/ManagementStyle
index 49a8efa5afe..a5f0ea58c78 100644
--- a/Documentation/ManagementStyle
+++ b/Documentation/ManagementStyle
@@ -17,7 +17,7 @@ companies. If you sign purchase orders or you have any clue about the
budget of your group, you're almost certainly not a kernel manager.
These suggestions may or may not apply to you.
-First off, I'd suggest buying "Seven Habits of Highly Successful
+First off, I'd suggest buying "Seven Habits of Highly Effective
People", and NOT read it. Burn it, it's a great symbolic gesture.
(*) This document does so not so much by answering the question, but by
diff --git a/Documentation/accounting/.gitignore b/Documentation/accounting/.gitignore
new file mode 100644
index 00000000000..86485203c4a
--- /dev/null
+++ b/Documentation/accounting/.gitignore
@@ -0,0 +1 @@
+getdelays
diff --git a/Documentation/auxdisplay/.gitignore b/Documentation/auxdisplay/.gitignore
new file mode 100644
index 00000000000..7af222860a9
--- /dev/null
+++ b/Documentation/auxdisplay/.gitignore
@@ -0,0 +1 @@
+cfag12864b-example
diff --git a/Documentation/connector/.gitignore b/Documentation/connector/.gitignore
new file mode 100644
index 00000000000..d2b9c32accd
--- /dev/null
+++ b/Documentation/connector/.gitignore
@@ -0,0 +1 @@
+ucon
diff --git a/Documentation/filesystems/Locking b/Documentation/filesystems/Locking
index 8362860e21a..23d2f4460de 100644
--- a/Documentation/filesystems/Locking
+++ b/Documentation/filesystems/Locking
@@ -161,8 +161,12 @@ prototypes:
int (*set_page_dirty)(struct page *page);
int (*readpages)(struct file *filp, struct address_space *mapping,
struct list_head *pages, unsigned nr_pages);
- int (*prepare_write)(struct file *, struct page *, unsigned, unsigned);
- int (*commit_write)(struct file *, struct page *, unsigned, unsigned);
+ int (*write_begin)(struct file *, struct address_space *mapping,
+ loff_t pos, unsigned len, unsigned flags,
+ struct page **pagep, void **fsdata);
+ int (*write_end)(struct file *, struct address_space *mapping,
+ loff_t pos, unsigned len, unsigned copied,
+ struct page *page, void *fsdata);
sector_t (*bmap)(struct address_space *, sector_t);
int (*invalidatepage) (struct page *, unsigned long);
int (*releasepage) (struct page *, int);
@@ -180,8 +184,6 @@ sync_page: no maybe
writepages: no
set_page_dirty no no
readpages: no
-prepare_write: no yes yes
-commit_write: no yes yes
write_begin: no locks the page yes
write_end: no yes, unlocks yes
perform_write: no n/a yes
@@ -191,7 +193,7 @@ releasepage: no yes
direct_IO: no
launder_page: no yes
- ->prepare_write(), ->commit_write(), ->sync_page() and ->readpage()
+ ->write_begin(), ->write_end(), ->sync_page() and ->readpage()
may be called from the request handler (/dev/loop).
->readpage() unlocks the page, either synchronously or via I/O
diff --git a/Documentation/filesystems/vfs.txt b/Documentation/filesystems/vfs.txt
index c4d348dabe9..5579bda58a6 100644
--- a/Documentation/filesystems/vfs.txt
+++ b/Documentation/filesystems/vfs.txt
@@ -492,7 +492,7 @@ written-back to storage typically in whole pages, however the
address_space has finer control of write sizes.
The read process essentially only requires 'readpage'. The write
-process is more complicated and uses prepare_write/commit_write or
+process is more complicated and uses write_begin/write_end or
set_page_dirty to write data into the address_space, and writepage,
sync_page, and writepages to writeback data to storage.
@@ -521,8 +521,6 @@ struct address_space_operations {
int (*set_page_dirty)(struct page *page);
int (*readpages)(struct file *filp, struct address_space *mapping,
struct list_head *pages, unsigned nr_pages);
- int (*prepare_write)(struct file *, struct page *, unsigned, unsigned);
- int (*commit_write)(struct file *, struct page *, unsigned, unsigned);
int (*write_begin)(struct file *, struct address_space *mapping,
loff_t pos, unsigned len, unsigned flags,
struct page **pagep, void **fsdata);
@@ -598,37 +596,7 @@ struct address_space_operations {
readpages is only used for read-ahead, so read errors are
ignored. If anything goes wrong, feel free to give up.
- prepare_write: called by the generic write path in VM to set up a write
- request for a page. This indicates to the address space that
- the given range of bytes is about to be written. The
- address_space should check that the write will be able to
- complete, by allocating space if necessary and doing any other
- internal housekeeping. If the write will update parts of
- any basic-blocks on storage, then those blocks should be
- pre-read (if they haven't been read already) so that the
- updated blocks can be written out properly.
- The page will be locked.
-
- Note: the page _must not_ be marked uptodate in this function
- (or anywhere else) unless it actually is uptodate right now. As
- soon as a page is marked uptodate, it is possible for a concurrent
- read(2) to copy it to userspace.
-
- commit_write: If prepare_write succeeds, new data will be copied
- into the page and then commit_write will be called. It will
- typically update the size of the file (if appropriate) and
- mark the inode as dirty, and do any other related housekeeping
- operations. It should avoid returning an error if possible -
- errors should have been handled by prepare_write.
-
- write_begin: This is intended as a replacement for prepare_write. The
- key differences being that:
- - it returns a locked page (in *pagep) rather than being
- given a pre locked page;
- - it must be able to cope with short writes (where the
- length passed to write_begin is greater than the number
- of bytes copied into the page).
-
+ write_begin:
Called by the generic buffered write code to ask the filesystem to
prepare to write len bytes at the given offset in the file. The
address_space should check that the write will be able to complete,
@@ -640,6 +608,9 @@ struct address_space_operations {
The filesystem must return the locked pagecache page for the specified
offset, in *pagep, for the caller to write into.
+ It must be able to cope with short writes (where the length passed to
+ write_begin is greater than the number of bytes copied into the page).
+
flags is a field for AOP_FLAG_xxx flags, described in
include/linux/fs.h.
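As a concrete illustration of the write_begin/write_end interface documented above: a simple RAM-backed filesystem can wire the hooks straight to the generic helpers in fs/libfs.c. The sketch below mirrors what ramfs does in this kernel; the example_aops name is illustrative only, and anything beyond the library helpers is omitted.

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/pagemap.h>

/*
 * Minimal address_space_operations built on the generic library helpers:
 * simple_write_begin returns a locked pagecache page in *pagep, and
 * simple_write_end updates i_size if needed, dirties the page and unlocks
 * it, matching the locking rules in the table above.
 */
static const struct address_space_operations example_aops = {
	.readpage	= simple_readpage,
	.write_begin	= simple_write_begin,
	.write_end	= simple_write_end,
	.set_page_dirty	= __set_page_dirty_no_writeback,
};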
diff --git a/Documentation/ia64/.gitignore b/Documentation/ia64/.gitignore
new file mode 100644
index 00000000000..ab806edc873
--- /dev/null
+++ b/Documentation/ia64/.gitignore
@@ -0,0 +1 @@
+aliasing-test
diff --git a/Documentation/isdn/CREDITS b/Documentation/isdn/CREDITS
index 8cac6c2f23e..c1679e913fc 100644
--- a/Documentation/isdn/CREDITS
+++ b/Documentation/isdn/CREDITS
@@ -5,7 +5,7 @@ I want to thank all who contributed to this project and especially to:
Thomas Bogendörfer (tsbogend@bigbug.franken.de)
Tester, lots of bugfixes and hints.
-Alan Cox (alan@redhat.com)
+Alan Cox (alan@lxorguk.ukuu.org.uk)
For help getting into standard-kernel.
Henner Eisen (eis@baty.hanse.de)
diff --git a/Documentation/networking/.gitignore b/Documentation/networking/.gitignore
new file mode 100644
index 00000000000..286a5680f49
--- /dev/null
+++ b/Documentation/networking/.gitignore
@@ -0,0 +1 @@
+ifenslave
diff --git a/Documentation/networking/dmfe.txt b/Documentation/networking/dmfe.txt
index b1b7499dd9d..8006c227fda 100644
--- a/Documentation/networking/dmfe.txt
+++ b/Documentation/networking/dmfe.txt
@@ -60,6 +60,6 @@ Tobias Ringstrom <tori@unhappy.mine.nu> : Current Maintainer
Contributors:
Marcelo Tosatti <marcelo@conectiva.com.br>
-Alan Cox <alan@redhat.com>
+Alan Cox <alan@lxorguk.ukuu.org.uk>
Jeff Garzik <jgarzik@pobox.com>
Vojtech Pavlik <vojtech@suse.cz>
diff --git a/Documentation/pcmcia/.gitignore b/Documentation/pcmcia/.gitignore
new file mode 100644
index 00000000000..53d08133675
--- /dev/null
+++ b/Documentation/pcmcia/.gitignore
@@ -0,0 +1 @@
+crc32hash
diff --git a/Documentation/scsi/aacraid.txt b/Documentation/scsi/aacraid.txt
index 709ca991a45..ddace3afc83 100644
--- a/Documentation/scsi/aacraid.txt
+++ b/Documentation/scsi/aacraid.txt
@@ -128,7 +128,7 @@ Supported Cards/Chipsets
People
-------------------------
-Alan Cox <alan@redhat.com>
+Alan Cox <alan@lxorguk.ukuu.org.uk>
Christoph Hellwig <hch@infradead.org> (updates for new-style PCI probing and SCSI host registration,
small cleanups/fixes)
Matt Domsch <matt_domsch@dell.com> (revision ioctl, adapter messages)
diff --git a/Documentation/spi/.gitignore b/Documentation/spi/.gitignore
new file mode 100644
index 00000000000..4280576397e
--- /dev/null
+++ b/Documentation/spi/.gitignore
@@ -0,0 +1,2 @@
+spidev_fdx
+spidev_test
diff --git a/Documentation/video4linux/.gitignore b/Documentation/video4linux/.gitignore
new file mode 100644
index 00000000000..952703943e8
--- /dev/null
+++ b/Documentation/video4linux/.gitignore
@@ -0,0 +1 @@
+v4lgrab
diff --git a/Documentation/video4linux/bttv/CONTRIBUTORS b/Documentation/video4linux/bttv/CONTRIBUTORS
index 8aad6dd93d6..eb41b265086 100644
--- a/Documentation/video4linux/bttv/CONTRIBUTORS
+++ b/Documentation/video4linux/bttv/CONTRIBUTORS
@@ -3,7 +3,7 @@ Contributors to bttv:
Michael Chu <mmchu@pobox.com>
AverMedia fix and more flexible card recognition
-Alan Cox <alan@redhat.com>
+Alan Cox <alan@lxorguk.ukuu.org.uk>
Video4Linux interface and 2.1.x kernel adaptation
Chris Kleitsch
diff --git a/Documentation/vm/.gitignore b/Documentation/vm/.gitignore
new file mode 100644
index 00000000000..33e8a023df0
--- /dev/null
+++ b/Documentation/vm/.gitignore
@@ -0,0 +1 @@
+slabinfo
diff --git a/Documentation/watchdog/src/.gitignore b/Documentation/watchdog/src/.gitignore
new file mode 100644
index 00000000000..ac90997dba9
--- /dev/null
+++ b/Documentation/watchdog/src/.gitignore
@@ -0,0 +1,2 @@
+watchdog-simple
+watchdog-test
diff --git a/MAINTAINERS b/MAINTAINERS
index 16202c8ac68..ce521d25ce3 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -2701,6 +2701,16 @@ M: matthew@wil.cx
L: linux-scsi@vger.kernel.org
S: Maintained
+LTP (Linux Test Project)
+P: Subrata Modak
+M: subrata@linux.vnet.ibm.com
+P: Mike Frysinger
+M: vapier@gentoo.org
+L: ltp-list@lists.sourceforge.net (subscribers-only)
+W: http://ltp.sourceforge.net/
+T: git kernel.org/pub/scm/linux/kernel/git/galak/ltp.git
+S: Maintained
+
M32R ARCHITECTURE
P: Hirokazu Takata
M: takata@linux-m32r.org
diff --git a/arch/x86/boot/compressed/.gitignore b/arch/x86/boot/compressed/.gitignore
index be0ed065249..63eff3b04d0 100644
--- a/arch/x86/boot/compressed/.gitignore
+++ b/arch/x86/boot/compressed/.gitignore
@@ -1 +1,3 @@
relocs
+vmlinux.bin.all
+vmlinux.relocs
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index 3f09cd8bcc3..5c4ee70d5cf 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -40,8 +40,7 @@
* Heinz Mauelshagen <mge@sistina.com>, Feb 2002
*
* Support for falling back on the write file operation when the address space
- * operations prepare_write and/or commit_write are not available on the
- * backing filesystem.
+ * operations write_begin is not available on the backing filesystem.
* Anton Altaparmakov, 16 Feb 2005
*
* Still To Fix:
@@ -765,7 +764,7 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode,
*/
if (!file->f_op->splice_read)
goto out_putf;
- if (aops->prepare_write || aops->write_begin)
+ if (aops->write_begin)
lo_flags |= LO_FLAGS_USE_AOPS;
if (!(lo_flags & LO_FLAGS_USE_AOPS) && !file->f_op->write)
lo_flags |= LO_FLAGS_READ_ONLY;
diff --git a/drivers/char/ipmi/ipmi_devintf.c b/drivers/char/ipmi/ipmi_devintf.c
index 835a33c8d5f..1d7b429f7ff 100644
--- a/drivers/char/ipmi/ipmi_devintf.c
+++ b/drivers/char/ipmi/ipmi_devintf.c
@@ -957,3 +957,4 @@ module_exit(cleanup_ipmi);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Corey Minyard <minyard@mvista.com>");
MODULE_DESCRIPTION("Linux device interface for the IPMI message handler.");
+MODULE_ALIAS("platform:ipmi_si");
diff --git a/drivers/edac/Kconfig b/drivers/edac/Kconfig
index 5a11e3cbcae..e0dbd388757 100644
--- a/drivers/edac/Kconfig
+++ b/drivers/edac/Kconfig
@@ -102,6 +102,13 @@ config EDAC_I3000
Support for error detection and correction on the Intel
3000 and 3010 server chipsets.
+config EDAC_X38
+ tristate "Intel X38"
+ depends on EDAC_MM_EDAC && PCI && X86
+ help
+ Support for error detection and correction on the Intel
+ X38 server chipsets.
+
config EDAC_I82860
tristate "Intel 82860"
depends on EDAC_MM_EDAC && PCI && X86_32
diff --git a/drivers/edac/Makefile b/drivers/edac/Makefile
index e5e9104b552..62c2d9bad8d 100644
--- a/drivers/edac/Makefile
+++ b/drivers/edac/Makefile
@@ -26,6 +26,7 @@ obj-$(CONFIG_EDAC_I82443BXGX) += i82443bxgx_edac.o
obj-$(CONFIG_EDAC_I82875P) += i82875p_edac.o
obj-$(CONFIG_EDAC_I82975X) += i82975x_edac.o
obj-$(CONFIG_EDAC_I3000) += i3000_edac.o
+obj-$(CONFIG_EDAC_X38) += x38_edac.o
obj-$(CONFIG_EDAC_I82860) += i82860_edac.o
obj-$(CONFIG_EDAC_R82600) += r82600_edac.o
obj-$(CONFIG_EDAC_PASEMI) += pasemi_edac.o
diff --git a/drivers/edac/cell_edac.c b/drivers/edac/cell_edac.c
index 887072f5dc8..cd2e3b8087e 100644
--- a/drivers/edac/cell_edac.c
+++ b/drivers/edac/cell_edac.c
@@ -9,6 +9,7 @@
*/
#undef DEBUG
+#include <linux/edac.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/platform_device.h>
@@ -164,6 +165,8 @@ static int __devinit cell_edac_probe(struct platform_device *pdev)
if (regs == NULL)
return -ENODEV;
+ edac_op_state = EDAC_OPSTATE_POLL;
+
/* Get channel population */
reg = in_be64(&regs->mic_mnt_cfg);
dev_dbg(&pdev->dev, "MIC_MNT_CFG = 0x%016lx\n", reg);
diff --git a/drivers/edac/x38_edac.c b/drivers/edac/x38_edac.c
new file mode 100644
index 00000000000..2406c2ce284
--- /dev/null
+++ b/drivers/edac/x38_edac.c
@@ -0,0 +1,524 @@
+/*
+ * Intel X38 Memory Controller kernel module
+ * Copyright (C) 2008 Cluster Computing, Inc.
+ *
+ * This file may be distributed under the terms of the
+ * GNU General Public License.
+ *
+ * This file is based on i3200_edac.c
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/pci_ids.h>
+#include <linux/slab.h>
+#include <linux/edac.h>
+#include "edac_core.h"
+
+#define X38_REVISION "1.1"
+
+#define EDAC_MOD_STR "x38_edac"
+
+#define PCI_DEVICE_ID_INTEL_X38_HB 0x29e0
+
+#define X38_RANKS 8
+#define X38_RANKS_PER_CHANNEL 4
+#define X38_CHANNELS 2
+
+/* Intel X38 register addresses - device 0 function 0 - DRAM Controller */
+
+#define X38_MCHBAR_LOW 0x48 /* MCH Memory Mapped Register BAR */
+#define X38_MCHBAR_HIGH 0x4b
+#define X38_MCHBAR_MASK 0xfffffc000ULL /* bits 35:14 */
+#define X38_MMR_WINDOW_SIZE 16384
+
+#define X38_TOM 0xa0 /* Top of Memory (16b)
+ *
+ * 15:10 reserved
+ * 9:0 total populated physical memory
+ */
+#define X38_TOM_MASK 0x3ff /* bits 9:0 */
+#define X38_TOM_SHIFT 26 /* 64MiB grain */
+
+#define X38_ERRSTS 0xc8 /* Error Status Register (16b)
+ *
+ * 15 reserved
+ * 14 Isochronous TBWRR Run Behind FIFO Full
+ * (ITCV)
+ * 13 Isochronous TBWRR Run Behind FIFO Put
+ * (ITSTV)
+ * 12 reserved
+ * 11 MCH Thermal Sensor Event
+ * for SMI/SCI/SERR (GTSE)
+ * 10 reserved
+ * 9 LOCK to non-DRAM Memory Flag (LCKF)
+ * 8 reserved
+ * 7 DRAM Throttle Flag (DTF)
+ * 6:2 reserved
+ * 1 Multi-bit DRAM ECC Error Flag (DMERR)
+ * 0 Single-bit DRAM ECC Error Flag (DSERR)
+ */
+#define X38_ERRSTS_UE 0x0002
+#define X38_ERRSTS_CE 0x0001
+#define X38_ERRSTS_BITS (X38_ERRSTS_UE | X38_ERRSTS_CE)
+
+
+/* Intel MMIO register space - device 0 function 0 - MMR space */
+
+#define X38_C0DRB 0x200 /* Channel 0 DRAM Rank Boundary (16b x 4)
+ *
+ * 15:10 reserved
+ * 9:0 Channel 0 DRAM Rank Boundary Address
+ */
+#define X38_C1DRB 0x600 /* Channel 1 DRAM Rank Boundary (16b x 4) */
+#define X38_DRB_MASK 0x3ff /* bits 9:0 */
+#define X38_DRB_SHIFT 26 /* 64MiB grain */
+
+#define X38_C0ECCERRLOG 0x280 /* Channel 0 ECC Error Log (64b)
+ *
+ * 63:48 Error Column Address (ERRCOL)
+ * 47:32 Error Row Address (ERRROW)
+ * 31:29 Error Bank Address (ERRBANK)
+ * 28:27 Error Rank Address (ERRRANK)
+ * 26:24 reserved
+ * 23:16 Error Syndrome (ERRSYND)
+ * 15: 2 reserved
+ * 1 Multiple Bit Error Status (MERRSTS)
+ * 0 Correctable Error Status (CERRSTS)
+ */
+#define X38_C1ECCERRLOG 0x680 /* Channel 1 ECC Error Log (64b) */
+#define X38_ECCERRLOG_CE 0x1
+#define X38_ECCERRLOG_UE 0x2
+#define X38_ECCERRLOG_RANK_BITS 0x18000000
+#define X38_ECCERRLOG_SYNDROME_BITS 0xff0000
+
+#define X38_CAPID0 0xe0 /* see P.94 of spec for details */
+
+static int x38_channel_num;
+
+static int how_many_channel(struct pci_dev *pdev)
+{
+ unsigned char capid0_8b; /* 8th byte of CAPID0 */
+
+ pci_read_config_byte(pdev, X38_CAPID0 + 8, &capid0_8b);
+ if (capid0_8b & 0x20) { /* check DCD: Dual Channel Disable */
+ debugf0("In single channel mode.\n");
+ x38_channel_num = 1;
+ } else {
+ debugf0("In dual channel mode.\n");
+ x38_channel_num = 2;
+ }
+
+ return x38_channel_num;
+}
+
+static unsigned long eccerrlog_syndrome(u64 log)
+{
+ return (log & X38_ECCERRLOG_SYNDROME_BITS) >> 16;
+}
+
+static int eccerrlog_row(int channel, u64 log)
+{
+ return ((log & X38_ECCERRLOG_RANK_BITS) >> 27) |
+ (channel * X38_RANKS_PER_CHANNEL);
+}
+
+enum x38_chips {
+ X38 = 0,
+};
+
+struct x38_dev_info {
+ const char *ctl_name;
+};
+
+struct x38_error_info {
+ u16 errsts;
+ u16 errsts2;
+ u64 eccerrlog[X38_CHANNELS];
+};
+
+static const struct x38_dev_info x38_devs[] = {
+ [X38] = {
+ .ctl_name = "x38"},
+};
+
+static struct pci_dev *mci_pdev;
+static int x38_registered = 1;
+
+
+static void x38_clear_error_info(struct mem_ctl_info *mci)
+{
+ struct pci_dev *pdev;
+
+ pdev = to_pci_dev(mci->dev);
+
+ /*
+ * Clear any error bits.
+ * (Yes, we really clear bits by writing 1 to them.)
+ */
+ pci_write_bits16(pdev, X38_ERRSTS, X38_ERRSTS_BITS,
+ X38_ERRSTS_BITS);
+}
+
+static u64 x38_readq(const void __iomem *addr)
+{
+ return readl(addr) | (((u64)readl(addr + 4)) << 32);
+}
+
+static void x38_get_and_clear_error_info(struct mem_ctl_info *mci,
+ struct x38_error_info *info)
+{
+ struct pci_dev *pdev;
+ void __iomem *window = mci->pvt_info;
+
+ pdev = to_pci_dev(mci->dev);
+
+ /*
+ * This is a mess because there is no atomic way to read all the
+ * registers at once and the registers can transition from CE being
+ * overwritten by UE.
+ */
+ pci_read_config_word(pdev, X38_ERRSTS, &info->errsts);
+ if (!(info->errsts & X38_ERRSTS_BITS))
+ return;
+
+ info->eccerrlog[0] = x38_readq(window + X38_C0ECCERRLOG);
+ if (x38_channel_num == 2)
+ info->eccerrlog[1] = x38_readq(window + X38_C1ECCERRLOG);
+
+ pci_read_config_word(pdev, X38_ERRSTS, &info->errsts2);
+
+ /*
+ * If the error is the same for both reads then the first set
+ * of reads is valid. If there is a change then there is a CE
+ * with no info and the second set of reads is valid and
+ * should be UE info.
+ */
+ if ((info->errsts ^ info->errsts2) & X38_ERRSTS_BITS) {
+ info->eccerrlog[0] = x38_readq(window + X38_C0ECCERRLOG);
+ if (x38_channel_num == 2)
+ info->eccerrlog[1] =
+ x38_readq(window + X38_C1ECCERRLOG);
+ }
+
+ x38_clear_error_info(mci);
+}
+
+static void x38_process_error_info(struct mem_ctl_info *mci,
+ struct x38_error_info *info)
+{
+ int channel;
+ u64 log;
+
+ if (!(info->errsts & X38_ERRSTS_BITS))
+ return;
+
+ if ((info->errsts ^ info->errsts2) & X38_ERRSTS_BITS) {
+ edac_mc_handle_ce_no_info(mci, "UE overwrote CE");
+ info->errsts = info->errsts2;
+ }
+
+ for (channel = 0; channel < x38_channel_num; channel++) {
+ log = info->eccerrlog[channel];
+ if (log & X38_ECCERRLOG_UE) {
+ edac_mc_handle_ue(mci, 0, 0,
+ eccerrlog_row(channel, log), "x38 UE");
+ } else if (log & X38_ECCERRLOG_CE) {
+ edac_mc_handle_ce(mci, 0, 0,
+ eccerrlog_syndrome(log),
+ eccerrlog_row(channel, log), 0, "x38 CE");
+ }
+ }
+}
+
+static void x38_check(struct mem_ctl_info *mci)
+{
+ struct x38_error_info info;
+
+ debugf1("MC%d: %s()\n", mci->mc_idx, __func__);
+ x38_get_and_clear_error_info(mci, &info);
+ x38_process_error_info(mci, &info);
+}
+
+
+void __iomem *x38_map_mchbar(struct pci_dev *pdev)
+{
+ union {
+ u64 mchbar;
+ struct {
+ u32 mchbar_low;
+ u32 mchbar_high;
+ };
+ } u;
+ void __iomem *window;
+
+ pci_read_config_dword(pdev, X38_MCHBAR_LOW, &u.mchbar_low);
+ pci_write_config_dword(pdev, X38_MCHBAR_LOW, u.mchbar_low | 0x1);
+ pci_read_config_dword(pdev, X38_MCHBAR_HIGH, &u.mchbar_high);
+ u.mchbar &= X38_MCHBAR_MASK;
+
+ if (u.mchbar != (resource_size_t)u.mchbar) {
+ printk(KERN_ERR
+ "x38: mmio space beyond accessible range (0x%llx)\n",
+ (unsigned long long)u.mchbar);
+ return NULL;
+ }
+
+ window = ioremap_nocache(u.mchbar, X38_MMR_WINDOW_SIZE);
+ if (!window)
+ printk(KERN_ERR "x38: cannot map mmio space at 0x%llx\n",
+ (unsigned long long)u.mchbar);
+
+ return window;
+}
+
+
+static void x38_get_drbs(void __iomem *window,
+ u16 drbs[X38_CHANNELS][X38_RANKS_PER_CHANNEL])
+{
+ int i;
+
+ for (i = 0; i < X38_RANKS_PER_CHANNEL; i++) {
+ drbs[0][i] = readw(window + X38_C0DRB + 2*i) & X38_DRB_MASK;
+ drbs[1][i] = readw(window + X38_C1DRB + 2*i) & X38_DRB_MASK;
+ }
+}
+
+static bool x38_is_stacked(struct pci_dev *pdev,
+ u16 drbs[X38_CHANNELS][X38_RANKS_PER_CHANNEL])
+{
+ u16 tom;
+
+ pci_read_config_word(pdev, X38_TOM, &tom);
+ tom &= X38_TOM_MASK;
+
+ return drbs[X38_CHANNELS - 1][X38_RANKS_PER_CHANNEL - 1] == tom;
+}
+
+static unsigned long drb_to_nr_pages(
+ u16 drbs[X38_CHANNELS][X38_RANKS_PER_CHANNEL],
+ bool stacked, int channel, int rank)
+{
+ int n;
+
+ n = drbs[channel][rank];
+ if (rank > 0)
+ n -= drbs[channel][rank - 1];
+ if (stacked && (channel == 1) && drbs[channel][rank] ==
+ drbs[channel][X38_RANKS_PER_CHANNEL - 1]) {
+ n -= drbs[0][X38_RANKS_PER_CHANNEL - 1];
+ }
+
+ n <<= (X38_DRB_SHIFT - PAGE_SHIFT);
+ return n;
+}
+
+static int x38_probe1(struct pci_dev *pdev, int dev_idx)
+{
+ int rc;
+ int i;
+ struct mem_ctl_info *mci = NULL;
+ unsigned long last_page;
+ u16 drbs[X38_CHANNELS][X38_RANKS_PER_CHANNEL];
+ bool stacked;
+ void __iomem *window;
+
+ debugf0("MC: %s()\n", __func__);
+
+ window = x38_map_mchbar(pdev);
+ if (!window)
+ return -ENODEV;
+
+ x38_get_drbs(window, drbs);
+
+ how_many_channel(pdev);
+
+ /* FIXME: unconventional pvt_info usage */
+ mci = edac_mc_alloc(0, X38_RANKS, x38_channel_num, 0);
+ if (!mci)
+ return -ENOMEM;
+
+ debugf3("MC: %s(): init mci\n", __func__);
+
+ mci->dev = &pdev->dev;
+ mci->mtype_cap = MEM_FLAG_DDR2;
+
+ mci->edac_ctl_cap = EDAC_FLAG_SECDED;
+ mci->edac_cap = EDAC_FLAG_SECDED;
+
+ mci->mod_name = EDAC_MOD_STR;
+ mci->mod_ver = X38_REVISION;
+ mci->ctl_name = x38_devs[dev_idx].ctl_name;
+ mci->dev_name = pci_name(pdev);
+ mci->edac_check = x38_check;
+ mci->ctl_page_to_phys = NULL;
+ mci->pvt_info = window;
+
+ stacked = x38_is_stacked(pdev, drbs);
+
+ /*
+ * The dram rank boundary (DRB) reg values are boundary addresses
+ * for each DRAM rank with a granularity of 64MB. DRB regs are
+ * cumulative; the last one will contain the total memory
+ * contained in all ranks.
+ */
+ last_page = -1UL;
+ for (i = 0; i < mci->nr_csrows; i++) {
+ unsigned long nr_pages;
+ struct csrow_info *csrow = &mci->csrows[i];
+
+ nr_pages = drb_to_nr_pages(drbs, stacked,
+ i / X38_RANKS_PER_CHANNEL,
+ i % X38_RANKS_PER_CHANNEL);
+
+ if (nr_pages == 0) {
+ csrow->mtype = MEM_EMPTY;
+ continue;
+ }
+
+ csrow->first_page = last_page + 1;
+ last_page += nr_pages;
+ csrow->last_page = last_page;
+ csrow->nr_pages = nr_pages;
+
+ csrow->grain = nr_pages << PAGE_SHIFT;
+ csrow->mtype = MEM_DDR2;
+ csrow->dtype = DEV_UNKNOWN;
+ csrow->edac_mode = EDAC_UNKNOWN;
+ }
+
+ x38_clear_error_info(mci);
+
+ rc = -ENODEV;
+ if (edac_mc_add_mc(mci)) {
+ debugf3("MC: %s(): failed edac_mc_add_mc()\n", __func__);
+ goto fail;
+ }
+
+ /* get this far and it's successful */
+ debugf3("MC: %s(): success\n", __func__);
+ return 0;
+
+fail:
+ iounmap(window);
+ if (mci)
+ edac_mc_free(mci);
+
+ return rc;
+}
+
+static int __devinit x38_init_one(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ int rc;
+
+ debugf0("MC: %s()\n", __func__);
+
+ if (pci_enable_device(pdev) < 0)
+ return -EIO;
+
+ rc = x38_probe1(pdev, ent->driver_data);
+ if (!mci_pdev)
+ mci_pdev = pci_dev_get(pdev);
+
+ return rc;
+}
+
+static void __devexit x38_remove_one(struct pci_dev *pdev)
+{
+ struct mem_ctl_info *mci;
+
+ debugf0("%s()\n", __func__);
+
+ mci = edac_mc_del_mc(&pdev->dev);
+ if (!mci)
+ return;
+
+ iounmap(mci->pvt_info);
+
+ edac_mc_free(mci);
+}
+
+static const struct pci_device_id x38_pci_tbl[] __devinitdata = {
+ {
+ PCI_VEND_DEV(INTEL, X38_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ X38},
+ {
+ 0,
+ } /* 0 terminated list. */
+};
+
+MODULE_DEVICE_TABLE(pci, x38_pci_tbl);
+
+static struct pci_driver x38_driver = {
+ .name = EDAC_MOD_STR,
+ .probe = x38_init_one,
+ .remove = __devexit_p(x38_remove_one),
+ .id_table = x38_pci_tbl,
+};
+
+static int __init x38_init(void)
+{
+ int pci_rc;
+
+ debugf3("MC: %s()\n", __func__);
+
+ /* Ensure that the OPSTATE is set correctly for POLL or NMI */
+ opstate_init();
+
+ pci_rc = pci_register_driver(&x38_driver);
+ if (pci_rc < 0)
+ goto fail0;
+
+ if (!mci_pdev) {
+ x38_registered = 0;
+ mci_pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
+ PCI_DEVICE_ID_INTEL_X38_HB, NULL);
+ if (!mci_pdev) {
+ debugf0("x38 pci_get_device fail\n");
+ pci_rc = -ENODEV;
+ goto fail1;
+ }
+
+ pci_rc = x38_init_one(mci_pdev, x38_pci_tbl);
+ if (pci_rc < 0) {
+ debugf0("x38 init fail\n");
+ pci_rc = -ENODEV;
+ goto fail1;
+ }
+ }
+
+ return 0;
+
+fail1:
+ pci_unregister_driver(&x38_driver);
+
+fail0:
+ if (mci_pdev)
+ pci_dev_put(mci_pdev);
+
+ return pci_rc;
+}
+
+static void __exit x38_exit(void)
+{
+ debugf3("MC: %s()\n", __func__);
+
+ pci_unregister_driver(&x38_driver);
+ if (!x38_registered) {
+ x38_remove_one(mci_pdev);
+ pci_dev_put(mci_pdev);
+ }
+}
+
+module_init(x38_init);
+module_exit(x38_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Cluster Computing, Inc. Hitoshi Mitake");
+MODULE_DESCRIPTION("MC support for Intel X38 memory hub controllers");
+
+module_param(edac_op_state, int, 0444);
+MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");
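The rank sizing done by x38_probe1() via drb_to_nr_pages() above is easy to verify in isolation: the DRAM Rank Boundary registers hold cumulative boundaries in 64 MiB units, so a rank's size is its boundary minus the previous rank's boundary, shifted down to pages. A self-contained userspace sketch with hypothetical register values (the stacked-channel correction from the driver is omitted for brevity):

#include <stdio.h>

#define DRB_SHIFT		26	/* 64 MiB grain, as X38_DRB_SHIFT */
#define EXAMPLE_PAGE_SHIFT	12	/* assume 4 KiB pages for the example */
#define RANKS_PER_CHANNEL	4

static unsigned long drb_to_nr_pages(const unsigned int drbs[RANKS_PER_CHANNEL],
				     int rank)
{
	unsigned int n = drbs[rank];

	if (rank > 0)
		n -= drbs[rank - 1];	/* boundaries are cumulative */

	return (unsigned long)n << (DRB_SHIFT - EXAMPLE_PAGE_SHIFT);
}

int main(void)
{
	/* Hypothetical DRBs: 1 GiB in rank 0, another 1 GiB in rank 1. */
	const unsigned int drbs[RANKS_PER_CHANNEL] = { 16, 32, 32, 32 };
	int rank;

	for (rank = 0; rank < RANKS_PER_CHANNEL; rank++)
		printf("rank %d: %lu pages\n", rank,
		       drb_to_nr_pages(drbs, rank));
	return 0;
}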
diff --git a/drivers/message/i2o/i2o_block.c b/drivers/message/i2o/i2o_block.c
index 11a617ab424..84bdc2ee69e 100644
--- a/drivers/message/i2o/i2o_block.c
+++ b/drivers/message/i2o/i2o_block.c
@@ -567,8 +567,8 @@ static void i2o_block_biosparam(unsigned long capacity, unsigned short *cyls,
/**
* i2o_block_open - Open the block device
- * @inode: inode for block device being opened
- * @file: file to open
+ * @bdev: block device being opened
+ * @mode: file open mode
*
* Power up the device, mount and lock the media. This function is called,
* if the block device is opened for access.
@@ -596,8 +596,8 @@ static int i2o_block_open(struct block_device *bdev, fmode_t mode)
/**
* i2o_block_release - Release the I2O block device
- * @inode: inode for block device being released
- * @file: file to close
+ * @disk: gendisk device being released
+ * @mode: file open mode
*
* Unlock and unmount the media, and power down the device. Gets called if
* the block device is closed.
@@ -643,8 +643,8 @@ static int i2o_block_getgeo(struct block_device *bdev, struct hd_geometry *geo)
/**
* i2o_block_ioctl - Issue device specific ioctl calls.
- * @inode: inode for block device ioctl
- * @file: file for ioctl
+ * @bdev: block device being opened
+ * @mode: file open mode
* @cmd: ioctl command
* @arg: arg
*
diff --git a/drivers/misc/hdpuftrs/hdpu_nexus.c b/drivers/misc/hdpuftrs/hdpu_nexus.c
index 08e26beefe6..ce39fa54949 100644
--- a/drivers/misc/hdpuftrs/hdpu_nexus.c
+++ b/drivers/misc/hdpuftrs/hdpu_nexus.c
@@ -113,7 +113,6 @@ static int hdpu_nexus_probe(struct platform_device *pdev)
if (!hdpu_chassis_id)
printk(KERN_WARNING "sky_nexus: "
"Unable to create proc dir entry: sky_chassis_id\n");
- }
return 0;
}
diff --git a/drivers/misc/sgi-xp/Makefile b/drivers/misc/sgi-xp/Makefile
index 35ce2857807..4fc40d8e1bc 100644
--- a/drivers/misc/sgi-xp/Makefile
+++ b/drivers/misc/sgi-xp/Makefile
@@ -5,14 +5,14 @@
obj-$(CONFIG_SGI_XP) += xp.o
xp-y := xp_main.o
xp-$(CONFIG_IA64_SGI_SN2) += xp_sn2.o xp_nofault.o
-xp-$(CONFIG_IA64_GENERIC) += xp_sn2.o xp_nofault.o xp_uv.o
+xp-$(CONFIG_IA64_GENERIC) += xp_sn2.o xp_nofault.o
xp-$(CONFIG_IA64_SGI_UV) += xp_uv.o
xp-$(CONFIG_X86_64) += xp_uv.o
obj-$(CONFIG_SGI_XP) += xpc.o
xpc-y := xpc_main.o xpc_channel.o xpc_partition.o
xpc-$(CONFIG_IA64_SGI_SN2) += xpc_sn2.o
-xpc-$(CONFIG_IA64_GENERIC) += xpc_sn2.o xpc_uv.o
+xpc-$(CONFIG_IA64_GENERIC) += xpc_sn2.o
xpc-$(CONFIG_IA64_SGI_UV) += xpc_uv.o
xpc-$(CONFIG_X86_64) += xpc_uv.o
diff --git a/drivers/misc/sgi-xp/xp.h b/drivers/misc/sgi-xp/xp.h
index 859a5281c61..ed1722e5004 100644
--- a/drivers/misc/sgi-xp/xp.h
+++ b/drivers/misc/sgi-xp/xp.h
@@ -19,7 +19,11 @@
#include <asm/system.h>
#include <asm/sn/arch.h> /* defines is_shub1() and is_shub2() */
#define is_shub() ia64_platform_is("sn2")
+#ifdef CONFIG_IA64_SGI_UV
#define is_uv() ia64_platform_is("uv")
+#else
+#define is_uv() 0
+#endif
#endif
#ifdef CONFIG_X86_64
#include <asm/genapic.h>
diff --git a/drivers/misc/sgi-xp/xpc_main.c b/drivers/misc/sgi-xp/xpc_main.c
index 46325fc8481..e8d5cfbd32c 100644
--- a/drivers/misc/sgi-xp/xpc_main.c
+++ b/drivers/misc/sgi-xp/xpc_main.c
@@ -1104,7 +1104,7 @@ xpc_do_exit(enum xp_retval reason)
if (is_shub())
xpc_exit_sn2();
- else
+ else if (is_uv())
xpc_exit_uv();
}
@@ -1363,7 +1363,7 @@ out_2:
out_1:
if (is_shub())
xpc_exit_sn2();
- else
+ else if (is_uv())
xpc_exit_uv();
return ret;
}
diff --git a/drivers/misc/sony-laptop.c b/drivers/misc/sony-laptop.c
index 5a97d3a9d74..f483c4221f7 100644
--- a/drivers/misc/sony-laptop.c
+++ b/drivers/misc/sony-laptop.c
@@ -2315,8 +2315,10 @@ end:
*/
static int sony_pic_disable(struct acpi_device *device)
{
- if (ACPI_FAILURE(acpi_evaluate_object(device->handle,
- "_DIS", NULL, NULL)))
+ acpi_status ret = acpi_evaluate_object(device->handle, "_DIS", NULL,
+ NULL);
+
+ if (ACPI_FAILURE(ret) && ret != AE_NOT_FOUND)
return -ENXIO;
dprintk("Device disabled\n");
diff --git a/drivers/rtc/rtc-ds3234.c b/drivers/rtc/rtc-ds3234.c
index 37d131d03f3..45e5b106af7 100644
--- a/drivers/rtc/rtc-ds3234.c
+++ b/drivers/rtc/rtc-ds3234.c
@@ -189,7 +189,7 @@ static const struct rtc_class_ops ds3234_rtc_ops = {
.set_time = ds3234_set_time,
};
-static int ds3234_probe(struct spi_device *spi)
+static int __devinit ds3234_probe(struct spi_device *spi)
{
struct rtc_device *rtc;
unsigned char tmp;
@@ -249,7 +249,7 @@ static int ds3234_probe(struct spi_device *spi)
return 0;
}
-static int __exit ds3234_remove(struct spi_device *spi)
+static int __devexit ds3234_remove(struct spi_device *spi)
{
struct ds3234 *chip = platform_get_drvdata(spi);
struct rtc_device *rtc = chip->rtc;
diff --git a/drivers/rtc/rtc-s3c.c b/drivers/rtc/rtc-s3c.c
index 910bc704939..f59277bbeda 100644
--- a/drivers/rtc/rtc-s3c.c
+++ b/drivers/rtc/rtc-s3c.c
@@ -455,6 +455,8 @@ static int __devinit s3c_rtc_probe(struct platform_device *pdev)
s3c_rtc_setfreq(&pdev->dev, 1);
+ device_init_wakeup(&pdev->dev, 1);
+
/* register RTC and exit */
rtc = rtc_device_register("s3c", &pdev->dev, &s3c_rtcops,
@@ -507,7 +509,7 @@ static int s3c_rtc_resume(struct platform_device *pdev)
#define s3c_rtc_resume NULL
#endif
-static struct platform_driver s3c2410_rtcdrv = {
+static struct platform_driver s3c2410_rtc_driver = {
.probe = s3c_rtc_probe,
.remove = __devexit_p(s3c_rtc_remove),
.suspend = s3c_rtc_suspend,
@@ -523,12 +525,12 @@ static char __initdata banner[] = "S3C24XX RTC, (c) 2004,2006 Simtec Electronics
static int __init s3c_rtc_init(void)
{
printk(banner);
- return platform_driver_register(&s3c2410_rtcdrv);
+ return platform_driver_register(&s3c2410_rtc_driver);
}
static void __exit s3c_rtc_exit(void)
{
- platform_driver_unregister(&s3c2410_rtcdrv);
+ platform_driver_unregister(&s3c2410_rtc_driver);
}
module_init(s3c_rtc_init);
diff --git a/drivers/video/console/fbcon.c b/drivers/video/console/fbcon.c
index 64b3d30027b..b92947d62ad 100644
--- a/drivers/video/console/fbcon.c
+++ b/drivers/video/console/fbcon.c
@@ -2118,7 +2118,7 @@ static void fbcon_bmove_rec(struct vc_data *vc, struct display *p, int sy, int s
height, width);
}
-static __inline__ void updatescrollmode(struct display *p,
+static void updatescrollmode(struct display *p,
struct fb_info *info,
struct vc_data *vc)
{
diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c
index cd5f20da738..6048b55f287 100644
--- a/drivers/video/fbmem.c
+++ b/drivers/video/fbmem.c
@@ -1262,8 +1262,8 @@ fb_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
case FBIOPUT_CON2FBMAP:
arg = (unsigned long) compat_ptr(arg);
case FBIOBLANK:
- ret = fb_ioctl(file, cmd, arg);
- break;
+ mutex_unlock(&info->lock);
+ return fb_ioctl(file, cmd, arg);
case FBIOGET_FSCREENINFO:
ret = fb_get_fscreeninfo(inode, file, cmd, arg);
diff --git a/drivers/video/via/global.h b/drivers/video/via/global.h
index 8e5263c5b81..7543d5f7e30 100644
--- a/drivers/video/via/global.h
+++ b/drivers/video/via/global.h
@@ -38,7 +38,6 @@
#include "iface.h"
#include "viafbdev.h"
#include "chip.h"
-#include "debug.h"
#include "accel.h"
#include "share.h"
#include "dvi.h"
@@ -48,12 +47,10 @@
#include "lcd.h"
#include "ioctl.h"
-#include "viamode.h"
#include "via_utility.h"
#include "vt1636.h"
#include "tblDPASetting.h"
#include "tbl1636.h"
-#include "viafbdev.h"
/* External struct*/
diff --git a/fs/ecryptfs/crypto.c b/fs/ecryptfs/crypto.c
index 06db79d05c1..6046239465a 100644
--- a/fs/ecryptfs/crypto.c
+++ b/fs/ecryptfs/crypto.c
@@ -1251,6 +1251,7 @@ struct kmem_cache *ecryptfs_header_cache_2;
/**
* ecryptfs_write_headers_virt
* @page_virt: The virtual address to write the headers to
+ * @max: The size of memory allocated at page_virt
* @size: Set to the number of bytes written by this function
* @crypt_stat: The cryptographic context
* @ecryptfs_dentry: The eCryptfs dentry
@@ -1278,7 +1279,8 @@ struct kmem_cache *ecryptfs_header_cache_2;
*
* Returns zero on success
*/
-static int ecryptfs_write_headers_virt(char *page_virt, size_t *size,
+static int ecryptfs_write_headers_virt(char *page_virt, size_t max,
+ size_t *size,
struct ecryptfs_crypt_stat *crypt_stat,
struct dentry *ecryptfs_dentry)
{
@@ -1296,7 +1298,7 @@ static int ecryptfs_write_headers_virt(char *page_virt, size_t *size,
offset += written;
rc = ecryptfs_generate_key_packet_set((page_virt + offset), crypt_stat,
ecryptfs_dentry, &written,
- PAGE_CACHE_SIZE - offset);
+ max - offset);
if (rc)
ecryptfs_printk(KERN_WARNING, "Error generating key packet "
"set; rc = [%d]\n", rc);
@@ -1368,14 +1370,14 @@ int ecryptfs_write_metadata(struct dentry *ecryptfs_dentry)
goto out;
}
/* Released in this function */
- virt = kzalloc(crypt_stat->num_header_bytes_at_front, GFP_KERNEL);
+ virt = (char *)get_zeroed_page(GFP_KERNEL);
if (!virt) {
printk(KERN_ERR "%s: Out of memory\n", __func__);
rc = -ENOMEM;
goto out;
}
- rc = ecryptfs_write_headers_virt(virt, &size, crypt_stat,
- ecryptfs_dentry);
+ rc = ecryptfs_write_headers_virt(virt, PAGE_CACHE_SIZE, &size,
+ crypt_stat, ecryptfs_dentry);
if (unlikely(rc)) {
printk(KERN_ERR "%s: Error whilst writing headers; rc = [%d]\n",
__func__, rc);
@@ -1393,8 +1395,7 @@ int ecryptfs_write_metadata(struct dentry *ecryptfs_dentry)
goto out_free;
}
out_free:
- memset(virt, 0, crypt_stat->num_header_bytes_at_front);
- kfree(virt);
+ free_page((unsigned long)virt);
out:
return rc;
}
diff --git a/fs/fat/inode.c b/fs/fat/inode.c
index 19eafbe3c37..2b2eec1283b 100644
--- a/fs/fat/inode.c
+++ b/fs/fat/inode.c
@@ -175,7 +175,7 @@ static ssize_t fat_direct_IO(int rw, struct kiocb *iocb,
if (rw == WRITE) {
/*
- * FIXME: blockdev_direct_IO() doesn't use ->prepare_write(),
+ * FIXME: blockdev_direct_IO() doesn't use ->write_begin(),
* so we need to update the ->mmu_private to block boundary.
*
* But we must fill the remaining area or hole by nul for
diff --git a/fs/jbd/transaction.c b/fs/jbd/transaction.c
index d15cd6e7251..60d4c32c880 100644
--- a/fs/jbd/transaction.c
+++ b/fs/jbd/transaction.c
@@ -860,7 +860,6 @@ out:
* int journal_get_undo_access() - Notify intent to modify metadata with non-rewindable consequences
* @handle: transaction
* @bh: buffer to undo
- * @credits: store the number of taken credits here (if not NULL)
*
* Sometimes there is a need to distinguish between metadata which has
* been committed to disk and that which has not. The ext3fs code uses
diff --git a/fs/libfs.c b/fs/libfs.c
index 74688598bcf..e960a832190 100644
--- a/fs/libfs.c
+++ b/fs/libfs.c
@@ -814,7 +814,7 @@ EXPORT_SYMBOL(simple_getattr);
EXPORT_SYMBOL(simple_link);
EXPORT_SYMBOL(simple_lookup);
EXPORT_SYMBOL(simple_pin_fs);
-EXPORT_SYMBOL(simple_prepare_write);
+EXPORT_UNUSED_SYMBOL(simple_prepare_write);
EXPORT_SYMBOL(simple_readpage);
EXPORT_SYMBOL(simple_release_fs);
EXPORT_SYMBOL(simple_rename);
diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
index 8d3225a7807..7efe937a415 100644
--- a/fs/ocfs2/file.c
+++ b/fs/ocfs2/file.c
@@ -679,8 +679,7 @@ leave:
/* Some parts of this taken from generic_cont_expand, which turned out
* to be too fragile to do exactly what we need without us having to
- * worry about recursive locking in ->prepare_write() and
- * ->commit_write(). */
+ * worry about recursive locking in ->write_begin() and ->write_end(). */
static int ocfs2_write_zero_page(struct inode *inode,
u64 size)
{
diff --git a/fs/splice.c b/fs/splice.c
index a1e701c2715..1abab5cee4b 100644
--- a/fs/splice.c
+++ b/fs/splice.c
@@ -731,8 +731,8 @@ ssize_t splice_from_pipe(struct pipe_inode_info *pipe, struct file *out,
};
/*
- * The actor worker might be calling ->prepare_write and
- * ->commit_write. Most of the time, these expect i_mutex to
+ * The actor worker might be calling ->write_begin and
+ * ->write_end. Most of the time, these expect i_mutex to
* be held. Since this may result in an ABBA deadlock with
* pipe->inode, we have to order lock acquiry here.
*/
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index 8b00f6643e9..1164963c3a8 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -25,7 +25,6 @@ struct cgroup;
extern int cgroup_init_early(void);
extern int cgroup_init(void);
-extern void cgroup_init_smp(void);
extern void cgroup_lock(void);
extern bool cgroup_lock_live_group(struct cgroup *cgrp);
extern void cgroup_unlock(void);
@@ -348,8 +347,6 @@ struct cgroup_subsys {
struct cgroupfs_root *root;
struct list_head sibling;
-
- void *private;
};
#define SUBSYS(_x) extern struct cgroup_subsys _x ## _subsys;
@@ -410,7 +407,6 @@ void cgroup_mm_owner_callbacks(struct task_struct *old,
static inline int cgroup_init_early(void) { return 0; }
static inline int cgroup_init(void) { return 0; }
-static inline void cgroup_init_smp(void) {}
static inline void cgroup_fork(struct task_struct *p) {}
static inline void cgroup_fork_callbacks(struct task_struct *p) {}
static inline void cgroup_post_fork(struct task_struct *p) {}
diff --git a/include/linux/freezer.h b/include/linux/freezer.h
index 8f225339eee..5a361f85cfe 100644
--- a/include/linux/freezer.h
+++ b/include/linux/freezer.h
@@ -44,11 +44,6 @@ static inline bool should_send_signal(struct task_struct *p)
return !(p->flags & PF_FREEZER_NOSIG);
}
-/*
- * Wake up a frozen process
- */
-extern int __thaw_process(struct task_struct *p);
-
/* Takes and releases task alloc lock using task_lock() */
extern int thaw_process(struct task_struct *p);
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 5b248d61430..0dcdd9458f4 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -489,13 +489,6 @@ struct address_space_operations {
int (*readpages)(struct file *filp, struct address_space *mapping,
struct list_head *pages, unsigned nr_pages);
- /*
- * ext3 requires that a successful prepare_write() call be followed
- * by a commit_write() call - they must be balanced
- */
- int (*prepare_write)(struct file *, struct page *, unsigned, unsigned);
- int (*commit_write)(struct file *, struct page *, unsigned, unsigned);
-
int (*write_begin)(struct file *, struct address_space *mapping,
loff_t pos, unsigned len, unsigned flags,
struct page **pagep, void **fsdata);
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index 396a350b87a..fba141d3ca0 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -116,6 +116,8 @@ extern int _cond_resched(void);
# define might_resched() do { } while (0)
#endif
+#ifdef CONFIG_DEBUG_SPINLOCK_SLEEP
+ void __might_sleep(char *file, int line);
/**
* might_sleep - annotation for functions that can sleep
*
@@ -126,8 +128,6 @@ extern int _cond_resched(void);
* be bitten later when the calling function happens to sleep when it is not
* supposed to.
*/
-#ifdef CONFIG_DEBUG_SPINLOCK_SLEEP
- void __might_sleep(char *file, int line);
# define might_sleep() \
do { __might_sleep(__FILE__, __LINE__); might_resched(); } while (0)
#else
diff --git a/include/linux/resource.h b/include/linux/resource.h
index aaa423a6f3d..40fc7e62608 100644
--- a/include/linux/resource.h
+++ b/include/linux/resource.h
@@ -59,10 +59,10 @@ struct rlimit {
#define _STK_LIM (8*1024*1024)
/*
- * GPG wants 32kB of mlocked memory, to make sure pass phrases
+ * GPG2 wants 64kB of mlocked memory, to make sure pass phrases
* and other sensitive information are never written to disk.
*/
-#define MLOCK_LIMIT (8 * PAGE_SIZE)
+#define MLOCK_LIMIT ((PAGE_SIZE > 64*1024) ? PAGE_SIZE : 64*1024)
/*
* Due to binary compatibility, the actual resource numbers
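The effect of the new MLOCK_LIMIT definition above is simply max(PAGE_SIZE, 64 kB). A quick standalone check with hypothetical page sizes:

#include <stdio.h>

/* Same expression as the header, with the page size made a parameter. */
#define MLOCK_LIMIT(page_size) (((page_size) > 64*1024) ? (page_size) : 64*1024)

int main(void)
{
	printf("4 KiB pages   -> %d bytes\n", MLOCK_LIMIT(4 * 1024));	/* 65536 */
	printf("256 KiB pages -> %d bytes\n", MLOCK_LIMIT(256 * 1024));	/* 262144 */
	return 0;
}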
diff --git a/include/linux/security.h b/include/linux/security.h
index f5c4a51eb42..c13f1cec9ab 100644
--- a/include/linux/security.h
+++ b/include/linux/security.h
@@ -1585,6 +1585,7 @@ int security_syslog(int type);
int security_settime(struct timespec *ts, struct timezone *tz);
int security_vm_enough_memory(long pages);
int security_vm_enough_memory_mm(struct mm_struct *mm, long pages);
+int security_vm_enough_memory_kern(long pages);
int security_bprm_alloc(struct linux_binprm *bprm);
void security_bprm_free(struct linux_binprm *bprm);
void security_bprm_apply_creds(struct linux_binprm *bprm, int unsafe);
@@ -1820,6 +1821,11 @@ static inline int security_vm_enough_memory(long pages)
return cap_vm_enough_memory(current->mm, pages);
}
+static inline int security_vm_enough_memory_kern(long pages)
+{
+ return cap_vm_enough_memory(current->mm, pages);
+}
+
static inline int security_vm_enough_memory_mm(struct mm_struct *mm, long pages)
{
return cap_vm_enough_memory(mm, pages);
diff --git a/include/linux/spi/spi_bitbang.h b/include/linux/spi/spi_bitbang.h
index b8db32cea1d..bf8de281b4e 100644
--- a/include/linux/spi/spi_bitbang.h
+++ b/include/linux/spi/spi_bitbang.h
@@ -18,6 +18,9 @@
* duplex (MicroWire) controllers. Provide chipslect() and txrx_bufs(),
* and custom setup()/cleanup() methods.
*/
+
+#include <linux/workqueue.h>
+
struct spi_bitbang {
struct workqueue_struct *workqueue;
struct work_struct work;
diff --git a/init/Kconfig b/init/Kconfig
index 44e9208f9c7..86b00c53fad 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -401,16 +401,20 @@ config CGROUP_MEM_RES_CTLR
depends on CGROUPS && RESOURCE_COUNTERS
select MM_OWNER
help
- Provides a memory resource controller that manages both page cache and
- RSS memory.
+ Provides a memory resource controller that manages both anonymous
+ memory and page cache. (See Documentation/controllers/memory.txt)
Note that setting this option increases fixed memory overhead
- associated with each page of memory in the system by 4/8 bytes
- and also increases cache misses because struct page on many 64bit
- systems will not fit into a single cache line anymore.
+ associated with each page of memory in the system. By this,
+ 20(40)bytes/PAGE_SIZE on 32(64)bit system will be occupied by memory
+ usage tracking struct at boot. Total amount of this is printed out
+ at boot.
Only enable when you're ok with these trade offs and really
- sure you need the memory resource controller.
+ sure you need the memory resource controller. Even when you enable
+ this, you can set "cgroup_disable=memory" at your boot option to
+ disable memory resource controller and you can avoid overheads.
+ (and lose benefits of memory resource contoller)
This config option also selects MM_OWNER config option, which
could in turn add some fork/exit overhead.
diff --git a/init/do_mounts_md.c b/init/do_mounts_md.c
index 4c87ee1fe5d..4d42f450b59 100644
--- a/init/do_mounts_md.c
+++ b/init/do_mounts_md.c
@@ -1,4 +1,4 @@
-
+#include <linux/delay.h>
#include <linux/raid/md.h>
#include <linux/delay.h>
diff --git a/kernel/cgroup_freezer.c b/kernel/cgroup_freezer.c
index e9505695449..7fa476f01d0 100644
--- a/kernel/cgroup_freezer.c
+++ b/kernel/cgroup_freezer.c
@@ -162,9 +162,13 @@ static int freezer_can_attach(struct cgroup_subsys *ss,
struct task_struct *task)
{
struct freezer *freezer;
- int retval;
- /* Anything frozen can't move or be moved to/from */
+ /*
+ * Anything frozen can't move or be moved to/from.
+ *
+ * Since orig_freezer->state == FROZEN means that @task has been
+ * frozen, so it's sufficient to check the latter condition.
+ */
if (is_task_frozen_enough(task))
return -EBUSY;
@@ -173,13 +177,7 @@ static int freezer_can_attach(struct cgroup_subsys *ss,
if (freezer->state == CGROUP_FROZEN)
return -EBUSY;
- retval = 0;
- task_lock(task);
- freezer = task_freezer(task);
- if (freezer->state == CGROUP_FROZEN)
- retval = -EBUSY;
- task_unlock(task);
- return retval;
+ return 0;
}
static void freezer_fork(struct cgroup_subsys *ss, struct task_struct *task)
@@ -190,8 +188,9 @@ static void freezer_fork(struct cgroup_subsys *ss, struct task_struct *task)
freezer = task_freezer(task);
task_unlock(task);
- BUG_ON(freezer->state == CGROUP_FROZEN);
spin_lock_irq(&freezer->lock);
+ BUG_ON(freezer->state == CGROUP_FROZEN);
+
/* Locking avoids race with FREEZING -> THAWED transitions. */
if (freezer->state == CGROUP_FREEZING)
freeze_task(task, true);
@@ -276,25 +275,18 @@ static int try_to_freeze_cgroup(struct cgroup *cgroup, struct freezer *freezer)
return num_cant_freeze_now ? -EBUSY : 0;
}
-static int unfreeze_cgroup(struct cgroup *cgroup, struct freezer *freezer)
+static void unfreeze_cgroup(struct cgroup *cgroup, struct freezer *freezer)
{
struct cgroup_iter it;
struct task_struct *task;
cgroup_iter_start(cgroup, &it);
while ((task = cgroup_iter_next(cgroup, &it))) {
- int do_wake;
-
- task_lock(task);
- do_wake = __thaw_process(task);
- task_unlock(task);
- if (do_wake)
- wake_up_process(task);
+ thaw_process(task);
}
cgroup_iter_end(cgroup, &it);
- freezer->state = CGROUP_THAWED;
- return 0;
+ freezer->state = CGROUP_THAWED;
}
static int freezer_change_state(struct cgroup *cgroup,
@@ -304,27 +296,22 @@ static int freezer_change_state(struct cgroup *cgroup,
int retval = 0;
freezer = cgroup_freezer(cgroup);
+
spin_lock_irq(&freezer->lock);
+
update_freezer_state(cgroup, freezer);
if (goal_state == freezer->state)
goto out;
- switch (freezer->state) {
+
+ switch (goal_state) {
case CGROUP_THAWED:
- retval = try_to_freeze_cgroup(cgroup, freezer);
+ unfreeze_cgroup(cgroup, freezer);
break;
- case CGROUP_FREEZING:
- if (goal_state == CGROUP_FROZEN) {
- /* Userspace is retrying after
- * "/bin/echo FROZEN > freezer.state" returned -EBUSY */
- retval = try_to_freeze_cgroup(cgroup, freezer);
- break;
- }
- /* state == FREEZING and goal_state == THAWED, so unfreeze */
case CGROUP_FROZEN:
- retval = unfreeze_cgroup(cgroup, freezer);
+ retval = try_to_freeze_cgroup(cgroup, freezer);
break;
default:
- break;
+ BUG();
}
out:
spin_unlock_irq(&freezer->lock);
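The heart of the cgroup_freezer rework above is that freezer_change_state() now dispatches on the requested goal state instead of the current state: asking for THAWED always thaws, asking for FROZEN always (re)tries to freeze, and FREEZING is never a valid goal. A standalone sketch of that control flow, with hypothetical helpers standing in for try_to_freeze_cgroup()/unfreeze_cgroup() and the per-cgroup bookkeeping simplified:

#include <stdio.h>

enum freezer_state { CG_THAWED, CG_FREEZING, CG_FROZEN };

/* Stand-ins for the real per-task freeze/thaw loops. */
static int try_freeze_all(void)	{ puts("freezing tasks"); return 0; }
static void unfreeze_all(void)	{ puts("thawing tasks"); }

static int change_state(enum freezer_state *state, enum freezer_state goal)
{
	int retval = 0;

	if (goal == *state)
		return 0;

	switch (goal) {
	case CG_THAWED:
		unfreeze_all();
		*state = CG_THAWED;
		break;
	case CG_FROZEN:
		retval = try_freeze_all();	/* may fail while tasks are busy */
		*state = retval ? CG_FREEZING : CG_FROZEN;
		break;
	default:
		retval = -1;	/* FREEZING is only an intermediate state */
	}
	return retval;
}

int main(void)
{
	enum freezer_state state = CG_THAWED;

	change_state(&state, CG_FROZEN);
	change_state(&state, CG_THAWED);
	return 0;
}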
diff --git a/kernel/freezer.c b/kernel/freezer.c
index ba6248b323e..2f4936cf708 100644
--- a/kernel/freezer.c
+++ b/kernel/freezer.c
@@ -121,16 +121,7 @@ void cancel_freezing(struct task_struct *p)
}
}
-/*
- * Wake up a frozen process
- *
- * task_lock() is needed to prevent the race with refrigerator() which may
- * occur if the freezing of tasks fails. Namely, without the lock, if the
- * freezing of tasks failed, thaw_tasks() might have run before a task in
- * refrigerator() could call frozen_process(), in which case the task would be
- * frozen and no one would thaw it.
- */
-int __thaw_process(struct task_struct *p)
+static int __thaw_process(struct task_struct *p)
{
if (frozen(p)) {
p->flags &= ~PF_FROZEN;
@@ -140,6 +131,15 @@ int __thaw_process(struct task_struct *p)
return 0;
}
+/*
+ * Wake up a frozen process
+ *
+ * task_lock() is needed to prevent the race with refrigerator() which may
+ * occur if the freezing of tasks fails. Namely, without the lock, if the
+ * freezing of tasks failed, thaw_tasks() might have run before a task in
+ * refrigerator() could call frozen_process(), in which case the task would be
+ * frozen and no one would thaw it.
+ */
int thaw_process(struct task_struct *p)
{
task_lock(p);
diff --git a/kernel/profile.c b/kernel/profile.c
index a9e422df6bf..9830a037d8d 100644
--- a/kernel/profile.c
+++ b/kernel/profile.c
@@ -102,7 +102,7 @@ int profile_setup(char *str)
__setup("profile=", profile_setup);
-int profile_init(void)
+int __ref profile_init(void)
{
int buffer_bytes;
if (!prof_on)
diff --git a/kernel/signal.c b/kernel/signal.c
index 105217da5c8..4530fc65445 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -1144,7 +1144,8 @@ static int kill_something_info(int sig, struct siginfo *info, pid_t pid)
struct task_struct * p;
for_each_process(p) {
- if (p->pid > 1 && !same_thread_group(p, current)) {
+ if (task_pid_vnr(p) > 1 &&
+ !same_thread_group(p, current)) {
int err = group_send_sig_info(sig, info, p);
++count;
if (err != -EPERM)
diff --git a/mm/filemap.c b/mm/filemap.c
index ab8553658af..f3e5f8944d1 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -2029,48 +2029,8 @@ int pagecache_write_begin(struct file *file, struct address_space *mapping,
{
const struct address_space_operations *aops = mapping->a_ops;
- if (aops->write_begin) {
- return aops->write_begin(file, mapping, pos, len, flags,
+ return aops->write_begin(file, mapping, pos, len, flags,
pagep, fsdata);
- } else {
- int ret;
- pgoff_t index = pos >> PAGE_CACHE_SHIFT;
- unsigned offset = pos & (PAGE_CACHE_SIZE - 1);
- struct inode *inode = mapping->host;
- struct page *page;
-again:
- page = __grab_cache_page(mapping, index);
- *pagep = page;
- if (!page)
- return -ENOMEM;
-
- if (flags & AOP_FLAG_UNINTERRUPTIBLE && !PageUptodate(page)) {
- /*
- * There is no way to resolve a short write situation
- * for a !Uptodate page (except by double copying in
- * the caller done by generic_perform_write_2copy).
- *
- * Instead, we have to bring it uptodate here.
- */
- ret = aops->readpage(file, page);
- page_cache_release(page);
- if (ret) {
- if (ret == AOP_TRUNCATED_PAGE)
- goto again;
- return ret;
- }
- goto again;
- }
-
- ret = aops->prepare_write(file, page, offset, offset+len);
- if (ret) {
- unlock_page(page);
- page_cache_release(page);
- if (pos + len > inode->i_size)
- vmtruncate(inode, inode->i_size);
- }
- return ret;
- }
}
EXPORT_SYMBOL(pagecache_write_begin);
@@ -2079,32 +2039,9 @@ int pagecache_write_end(struct file *file, struct address_space *mapping,
struct page *page, void *fsdata)
{
const struct address_space_operations *aops = mapping->a_ops;
- int ret;
-
- if (aops->write_end) {
- mark_page_accessed(page);
- ret = aops->write_end(file, mapping, pos, len, copied,
- page, fsdata);
- } else {
- unsigned offset = pos & (PAGE_CACHE_SIZE - 1);
- struct inode *inode = mapping->host;
-
- flush_dcache_page(page);
- ret = aops->commit_write(file, page, offset, offset+len);
- unlock_page(page);
- mark_page_accessed(page);
- page_cache_release(page);
-
- if (ret < 0) {
- if (pos + len > inode->i_size)
- vmtruncate(inode, inode->i_size);
- } else if (ret > 0)
- ret = min_t(size_t, copied, ret);
- else
- ret = copied;
- }
- return ret;
+ mark_page_accessed(page);
+ return aops->write_end(file, mapping, pos, len, copied, page, fsdata);
}
EXPORT_SYMBOL(pagecache_write_end);
@@ -2226,174 +2163,6 @@ repeat:
}
EXPORT_SYMBOL(__grab_cache_page);
-static ssize_t generic_perform_write_2copy(struct file *file,
- struct iov_iter *i, loff_t pos)
-{
- struct address_space *mapping = file->f_mapping;
- const struct address_space_operations *a_ops = mapping->a_ops;
- struct inode *inode = mapping->host;
- long status = 0;
- ssize_t written = 0;
-
- do {
- struct page *src_page;
- struct page *page;
- pgoff_t index; /* Pagecache index for current page */
- unsigned long offset; /* Offset into pagecache page */
- unsigned long bytes; /* Bytes to write to page */
- size_t copied; /* Bytes copied from user */
-
- offset = (pos & (PAGE_CACHE_SIZE - 1));
- index = pos >> PAGE_CACHE_SHIFT;
- bytes = min_t(unsigned long, PAGE_CACHE_SIZE - offset,
- iov_iter_count(i));
-
- /*
- * a non-NULL src_page indicates that we're doing the
- * copy via get_user_pages and kmap.
- */
- src_page = NULL;
-
- /*
- * Bring in the user page that we will copy from _first_.
- * Otherwise there's a nasty deadlock on copying from the
- * same page as we're writing to, without it being marked
- * up-to-date.
- *
- * Not only is this an optimisation, but it is also required
- * to check that the address is actually valid, when atomic
- * usercopies are used, below.
- */
- if (unlikely(iov_iter_fault_in_readable(i, bytes))) {
- status = -EFAULT;
- break;
- }
-
- page = __grab_cache_page(mapping, index);
- if (!page) {
- status = -ENOMEM;
- break;
- }
-
- /*
- * non-uptodate pages cannot cope with short copies, and we
- * cannot take a pagefault with the destination page locked.
- * So pin the source page to copy it.
- */
- if (!PageUptodate(page) && !segment_eq(get_fs(), KERNEL_DS)) {
- unlock_page(page);
-
- src_page = alloc_page(GFP_KERNEL);
- if (!src_page) {
- page_cache_release(page);
- status = -ENOMEM;
- break;
- }
-
- /*
- * Cannot get_user_pages with a page locked for the
- * same reason as we can't take a page fault with a
- * page locked (as explained below).
- */
- copied = iov_iter_copy_from_user(src_page, i,
- offset, bytes);
- if (unlikely(copied == 0)) {
- status = -EFAULT;
- page_cache_release(page);
- page_cache_release(src_page);
- break;
- }
- bytes = copied;
-
- lock_page(page);
- /*
- * Can't handle the page going uptodate here, because
- * that means we would use non-atomic usercopies, which
- * zero out the tail of the page, which can cause
- * zeroes to become transiently visible. We could just
- * use a non-zeroing copy, but the APIs aren't too
- * consistent.
- */
- if (unlikely(!page->mapping || PageUptodate(page))) {
- unlock_page(page);
- page_cache_release(page);
- page_cache_release(src_page);
- continue;
- }
- }
-
- status = a_ops->prepare_write(file, page, offset, offset+bytes);
- if (unlikely(status))
- goto fs_write_aop_error;
-
- if (!src_page) {
- /*
- * Must not enter the pagefault handler here, because
- * we hold the page lock, so we might recursively
- * deadlock on the same lock, or get an ABBA deadlock
- * against a different lock, or against the mmap_sem
- * (which nests outside the page lock). So increment
- * preempt count, and use _atomic usercopies.
- *
- * The page is uptodate so we are OK to encounter a
- * short copy: if unmodified parts of the page are
- * marked dirty and written out to disk, it doesn't
- * really matter.
- */
- pagefault_disable();
- copied = iov_iter_copy_from_user_atomic(page, i,
- offset, bytes);
- pagefault_enable();
- } else {
- void *src, *dst;
- src = kmap_atomic(src_page, KM_USER0);
- dst = kmap_atomic(page, KM_USER1);
- memcpy(dst + offset, src + offset, bytes);
- kunmap_atomic(dst, KM_USER1);
- kunmap_atomic(src, KM_USER0);
- copied = bytes;
- }
- flush_dcache_page(page);
-
- status = a_ops->commit_write(file, page, offset, offset+bytes);
- if (unlikely(status < 0))
- goto fs_write_aop_error;
- if (unlikely(status > 0)) /* filesystem did partial write */
- copied = min_t(size_t, copied, status);
-
- unlock_page(page);
- mark_page_accessed(page);
- page_cache_release(page);
- if (src_page)
- page_cache_release(src_page);
-
- iov_iter_advance(i, copied);
- pos += copied;
- written += copied;
-
- balance_dirty_pages_ratelimited(mapping);
- cond_resched();
- continue;
-
-fs_write_aop_error:
- unlock_page(page);
- page_cache_release(page);
- if (src_page)
- page_cache_release(src_page);
-
- /*
- * prepare_write() may have instantiated a few blocks
- * outside i_size. Trim these off again. Don't need
- * i_size_read because we hold i_mutex.
- */
- if (pos + bytes > inode->i_size)
- vmtruncate(inode, inode->i_size);
- break;
- } while (iov_iter_count(i));
-
- return written ? written : status;
-}
-
static ssize_t generic_perform_write(struct file *file,
struct iov_iter *i, loff_t pos)
{
@@ -2494,10 +2263,7 @@ generic_file_buffered_write(struct kiocb *iocb, const struct iovec *iov,
struct iov_iter i;
iov_iter_init(&i, iov, nr_segs, count, written);
- if (a_ops->write_begin)
- status = generic_perform_write(file, &i, pos);
- else
- status = generic_perform_write_2copy(file, &i, pos);
+ status = generic_perform_write(file, &i, pos);
if (likely(status >= 0)) {
written += status;
diff --git a/mm/mmap.c b/mm/mmap.c
index 74f4d158022..de14ac21e5b 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -175,7 +175,8 @@ int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin)
/* Don't let a single process grow too big:
leave 3% of the size of this process for other processes */
- allowed -= mm->total_vm / 32;
+ if (mm)
+ allowed -= mm->total_vm / 32;
/*
* cast `allowed' as a signed long because vm_committed_space
diff --git a/mm/nommu.c b/mm/nommu.c
index 2696b24f2bb..7695dc85078 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -1454,7 +1454,8 @@ int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin)
/* Don't let a single process grow too big:
leave 3% of the size of this process for other processes */
- allowed -= current->mm->total_vm / 32;
+ if (mm)
+ allowed -= mm->total_vm / 32;
/*
* cast `allowed' as a signed long because vm_committed_space
diff --git a/mm/shmem.c b/mm/shmem.c
index d38d7e61fcd..0ed075215e5 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -161,8 +161,8 @@ static inline struct shmem_sb_info *SHMEM_SB(struct super_block *sb)
*/
static inline int shmem_acct_size(unsigned long flags, loff_t size)
{
- return (flags & VM_ACCOUNT)?
- security_vm_enough_memory(VM_ACCT(size)): 0;
+ return (flags & VM_ACCOUNT) ?
+ security_vm_enough_memory_kern(VM_ACCT(size)) : 0;
}
static inline void shmem_unacct_size(unsigned long flags, loff_t size)
@@ -179,8 +179,8 @@ static inline void shmem_unacct_size(unsigned long flags, loff_t size)
*/
static inline int shmem_acct_block(unsigned long flags)
{
- return (flags & VM_ACCOUNT)?
- 0: security_vm_enough_memory(VM_ACCT(PAGE_CACHE_SIZE));
+ return (flags & VM_ACCOUNT) ?
+ 0 : security_vm_enough_memory_kern(VM_ACCT(PAGE_CACHE_SIZE));
}
static inline void shmem_unacct_blocks(unsigned long flags, long pages)
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 036536945dd..f1cc03bbf6a 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -897,7 +897,8 @@ EXPORT_SYMBOL(vm_unmap_ram);
* @count: number of pages
* @node: prefer to allocate data structures on this node
* @prot: memory protection to use. PAGE_KERNEL for regular RAM
- * @returns: a pointer to the address that has been mapped, or NULL on failure
+ *
+ * Returns: a pointer to the address that has been mapped, or %NULL on failure
*/
void *vm_map_ram(struct page **pages, unsigned int count, int node, pgprot_t prot)
{
diff --git a/security/security.c b/security/security.c
index 255b08559b2..c0acfa7177e 100644
--- a/security/security.c
+++ b/security/security.c
@@ -198,14 +198,23 @@ int security_settime(struct timespec *ts, struct timezone *tz)
int security_vm_enough_memory(long pages)
{
+ WARN_ON(current->mm == NULL);
return security_ops->vm_enough_memory(current->mm, pages);
}
int security_vm_enough_memory_mm(struct mm_struct *mm, long pages)
{
+ WARN_ON(mm == NULL);
return security_ops->vm_enough_memory(mm, pages);
}
+int security_vm_enough_memory_kern(long pages)
+{
+ /* If current->mm is a kernel thread then we will pass NULL,
+ for this specific case that is fine */
+ return security_ops->vm_enough_memory(current->mm, pages);
+}
+
int security_bprm_alloc(struct linux_binprm *bprm)
{
return security_ops->bprm_alloc_security(bprm);