Diffstat (limited to 'mm/filemap.c'):
 mm/filemap.c | 119 lines changed
 1 file changed, 76 insertions(+), 43 deletions(-)
diff --git a/mm/filemap.c b/mm/filemap.c
index f4d0cded0e1..07e9d9258b4 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -28,11 +28,11 @@
#include <linux/backing-dev.h>
#include <linux/pagevec.h>
#include <linux/blkdev.h>
-#include <linux/backing-dev.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/cpuset.h>
#include <linux/hardirq.h> /* for BUG_ON(!in_atomic()) only */
+#include <linux/memcontrol.h>
#include "internal.h"
/*
@@ -65,7 +65,6 @@ generic_file_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
* ->private_lock (__free_pte->__set_page_dirty_buffers)
* ->swap_lock (exclusive_swap_page, others)
* ->mapping->tree_lock
- * ->zone.lock
*
* ->i_mutex
* ->i_mmap_lock (truncate->unmap_mapping_range)
@@ -119,6 +118,7 @@ void __remove_from_page_cache(struct page *page)
{
struct address_space *mapping = page->mapping;
+ mem_cgroup_uncharge_page(page);
radix_tree_delete(&mapping->page_tree, page->index);
page->mapping = NULL;
mapping->nrpages--;
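
For context, __remove_from_page_cache() is only called with the mapping's tree_lock held, so the new mem_cgroup_uncharge_page() call runs under that lock as well. The locked wrapper looked roughly like this at the time (a sketch from the surrounding tree, not part of this patch):

void remove_from_page_cache(struct page *page)
{
	struct address_space *mapping = page->mapping;

	BUG_ON(!PageLocked(page));

	write_lock_irq(&mapping->tree_lock);
	__remove_from_page_cache(page);	/* uncharges the cgroup, then deletes */
	write_unlock_irq(&mapping->tree_lock);
}
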
@@ -185,6 +185,12 @@ static int sync_page(void *word)
return 0;
}
+static int sync_page_killable(void *word)
+{
+ sync_page(word);
+ return fatal_signal_pending(current) ? -EINTR : 0;
+}
+
/**
* __filemap_fdatawrite_range - start writeback on mapping dirty pages in range
* @mapping: address space structure to write
@@ -337,7 +343,7 @@ int sync_page_range(struct inode *inode, struct address_space *mapping,
EXPORT_SYMBOL(sync_page_range);
/**
- * sync_page_range_nolock
+ * sync_page_range_nolock - write & wait on all pages in the passed range without locking
* @inode: target inode
* @mapping: target address_space
* @pos: beginning offset in pages to write
@@ -453,8 +459,12 @@ int filemap_write_and_wait_range(struct address_space *mapping,
int add_to_page_cache(struct page *page, struct address_space *mapping,
pgoff_t offset, gfp_t gfp_mask)
{
- int error = radix_tree_preload(gfp_mask & ~__GFP_HIGHMEM);
+ int error = mem_cgroup_cache_charge(page, current->mm,
+ gfp_mask & ~__GFP_HIGHMEM);
+ if (error)
+ goto out;
+ error = radix_tree_preload(gfp_mask & ~__GFP_HIGHMEM);
if (error == 0) {
write_lock_irq(&mapping->tree_lock);
error = radix_tree_insert(&mapping->page_tree, offset, page);
@@ -465,10 +475,14 @@ int add_to_page_cache(struct page *page, struct address_space *mapping,
page->index = offset;
mapping->nrpages++;
__inc_zone_page_state(page, NR_FILE_PAGES);
- }
+ } else
+ mem_cgroup_uncharge_page(page);
+
write_unlock_irq(&mapping->tree_lock);
radix_tree_preload_end();
- }
+ } else
+ mem_cgroup_uncharge_page(page);
+out:
return error;
}
EXPORT_SYMBOL(add_to_page_cache);
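
Pieced together, the new add_to_page_cache() charges the memory cgroup first and rolls the charge back on either failure path. A sketch of the resulting control flow, reconstructed from the hunks above with the untouched setup lines elided:

int add_to_page_cache(struct page *page, struct address_space *mapping,
		      pgoff_t offset, gfp_t gfp_mask)
{
	int error = mem_cgroup_cache_charge(page, current->mm,
					    gfp_mask & ~__GFP_HIGHMEM);
	if (error)
		goto out;		/* charge failed: nothing to undo yet */

	error = radix_tree_preload(gfp_mask & ~__GFP_HIGHMEM);
	if (error == 0) {
		write_lock_irq(&mapping->tree_lock);
		error = radix_tree_insert(&mapping->page_tree, offset, page);
		if (!error) {
			/* ... unchanged setup of page->mapping, page->index,
			 *     nrpages and NR_FILE_PAGES ... */
		} else
			mem_cgroup_uncharge_page(page);	/* insert failed */

		write_unlock_irq(&mapping->tree_lock);
		radix_tree_preload_end();
	} else
		mem_cgroup_uncharge_page(page);		/* preload failed */
out:
	return error;
}
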
@@ -522,7 +536,7 @@ static inline void wake_up_page(struct page *page, int bit)
__wake_up_bit(page_waitqueue(page), &page->flags, bit);
}
-void fastcall wait_on_page_bit(struct page *page, int bit_nr)
+void wait_on_page_bit(struct page *page, int bit_nr)
{
DEFINE_WAIT_BIT(wait, &page->flags, bit_nr);
@@ -546,7 +560,7 @@ EXPORT_SYMBOL(wait_on_page_bit);
* the clear_bit and the read of the waitqueue (to avoid SMP races with a
* parallel wait_on_page_locked()).
*/
-void fastcall unlock_page(struct page *page)
+void unlock_page(struct page *page)
{
smp_mb__before_clear_bit();
if (!TestClearPageLocked(page))
@@ -580,7 +594,7 @@ EXPORT_SYMBOL(end_page_writeback);
* chances are that on the second loop, the block layer's plug list is empty,
* so sync_page() will then return in state TASK_UNINTERRUPTIBLE.
*/
-void fastcall __lock_page(struct page *page)
+void __lock_page(struct page *page)
{
DEFINE_WAIT_BIT(wait, &page->flags, PG_locked);
@@ -589,11 +603,22 @@ void fastcall __lock_page(struct page *page)
}
EXPORT_SYMBOL(__lock_page);
-/*
+int __lock_page_killable(struct page *page)
+{
+ DEFINE_WAIT_BIT(wait, &page->flags, PG_locked);
+
+ return __wait_on_bit_lock(page_waitqueue(page), &wait,
+ sync_page_killable, TASK_KILLABLE);
+}
+
+/**
+ * __lock_page_nosync - get a lock on the page, without calling sync_page()
+ * @page: the page to lock
+ *
* Variant of lock_page that does not require the caller to hold a reference
* on the page's mapping.
*/
-void fastcall __lock_page_nosync(struct page *page)
+void __lock_page_nosync(struct page *page)
{
DEFINE_WAIT_BIT(wait, &page->flags, PG_locked);
__wait_on_bit_lock(page_waitqueue(page), &wait, __sleep_on_page_lock,
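
The new __lock_page_killable() is the slow path behind lock_page_killable(), added to include/linux/pagemap.h by a companion change. That wrapper presumably follows the existing lock_page() pattern, roughly (sketch, not part of this file):

/*
 * Take the lock bit directly if it is free, otherwise sleep in
 * TASK_KILLABLE state via __lock_page_killable().  Returns 0 on
 * success or -EINTR if a fatal signal arrived while waiting.
 */
static inline int lock_page_killable(struct page *page)
{
	might_sleep();
	if (TestSetPageLocked(page))
		return __lock_page_killable(page);
	return 0;
}
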
@@ -852,9 +877,7 @@ static void shrink_readahead_size_eio(struct file *filp,
}
/**
- * do_generic_mapping_read - generic file read routine
- * @mapping: address_space to be read
- * @ra: file's readahead state
+ * do_generic_file_read - generic file read routine
* @filp: the file to read
* @ppos: current file position
* @desc: read_descriptor
@@ -865,18 +888,13 @@ static void shrink_readahead_size_eio(struct file *filp,
*
* This is really ugly. But the goto's actually try to clarify some
* of the logic when it comes to error handling etc.
- *
- * Note the struct file* is only passed for the use of readpage.
- * It may be NULL.
*/
-void do_generic_mapping_read(struct address_space *mapping,
- struct file_ra_state *ra,
- struct file *filp,
- loff_t *ppos,
- read_descriptor_t *desc,
- read_actor_t actor)
+static void do_generic_file_read(struct file *filp, loff_t *ppos,
+ read_descriptor_t *desc, read_actor_t actor)
{
+ struct address_space *mapping = filp->f_mapping;
struct inode *inode = mapping->host;
+ struct file_ra_state *ra = &filp->f_ra;
pgoff_t index;
pgoff_t last_index;
pgoff_t prev_index;
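
This folds the old do_generic_file_read() wrapper into the function itself: the mapping and readahead state, which callers used to pass explicitly, are now derived from the struct file. The inline being replaced lived in include/linux/fs.h and looked roughly like this (sketch for reference, not part of this diff):

static inline void do_generic_file_read(struct file *filp, loff_t *ppos,
					read_descriptor_t *desc,
					read_actor_t actor)
{
	/* Forwarded straight to the mapping-based routine. */
	do_generic_mapping_read(filp->f_mapping, &filp->f_ra, filp,
				ppos, desc, actor);
}
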
@@ -980,7 +998,8 @@ page_ok:
page_not_up_to_date:
/* Get exclusive access to the page ... */
- lock_page(page);
+ if (lock_page_killable(page))
+ goto readpage_eio;
/* Did it get truncated before we got the lock? */
if (!page->mapping) {
@@ -1008,7 +1027,8 @@ readpage:
}
if (!PageUptodate(page)) {
- lock_page(page);
+ if (lock_page_killable(page))
+ goto readpage_eio;
if (!PageUptodate(page)) {
if (page->mapping == NULL) {
/*
@@ -1019,15 +1039,16 @@ readpage:
goto find_page;
}
unlock_page(page);
- error = -EIO;
shrink_readahead_size_eio(filp, ra);
- goto readpage_error;
+ goto readpage_eio;
}
unlock_page(page);
}
goto page_ok;
+readpage_eio:
+ error = -EIO;
readpage_error:
/* UHHUH! A synchronous read error occurred. Report it */
desc->error = error;
@@ -1065,7 +1086,6 @@ out:
if (filp)
file_accessed(filp);
}
-EXPORT_SYMBOL(do_generic_mapping_read);
int file_read_actor(read_descriptor_t *desc, struct page *page,
unsigned long offset, unsigned long size)
@@ -1260,7 +1280,7 @@ asmlinkage ssize_t sys_readahead(int fd, loff_t offset, size_t count)
* This adds the requested page to the page cache if it isn't already there,
* and schedules an I/O to read in its contents from disk.
*/
-static int fastcall page_cache_read(struct file * file, pgoff_t offset)
+static int page_cache_read(struct file *file, pgoff_t offset)
{
struct address_space *mapping = file->f_mapping;
struct page *page;
@@ -1306,7 +1326,7 @@ int filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
struct file_ra_state *ra = &file->f_ra;
struct inode *inode = mapping->host;
struct page *page;
- unsigned long size;
+ pgoff_t size;
int did_readaround = 0;
int ret = 0;
@@ -1521,9 +1541,20 @@ repeat:
return page;
}
-/*
+/**
+ * read_cache_page_async - read into page cache, fill it if needed
+ * @mapping: the page's address_space
+ * @index: the page index
+ * @filler: function to perform the read
+ * @data: destination for read data
+ *
* Same as read_cache_page, but don't wait for page to become unlocked
* after submitting it to the filler.
+ *
+ * Read into the page cache. If a page already exists, and PageUptodate() is
+ * not set, try to fill the page but don't wait for it to become unlocked.
+ *
+ * If the page does not get brought uptodate, return -EIO.
*/
struct page *read_cache_page_async(struct address_space *mapping,
pgoff_t index,
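
The "don't wait" part of the new comment is easiest to see next to the synchronous wrapper: read_cache_page() is essentially the async variant plus a wait and an uptodate check, roughly (sketch from the surrounding tree, not part of this patch):

struct page *read_cache_page(struct address_space *mapping, pgoff_t index,
			     int (*filler)(void *, struct page *), void *data)
{
	struct page *page = read_cache_page_async(mapping, index, filler, data);

	if (IS_ERR(page))
		return page;
	/* Unlike the async variant, wait for the read to finish ... */
	wait_on_page_locked(page);
	/* ... and turn a failed read into -EIO here. */
	if (!PageUptodate(page)) {
		page_cache_release(page);
		page = ERR_PTR(-EIO);
	}
	return page;
}
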
@@ -1725,17 +1756,27 @@ size_t iov_iter_copy_from_user(struct page *page,
}
EXPORT_SYMBOL(iov_iter_copy_from_user);
-static void __iov_iter_advance_iov(struct iov_iter *i, size_t bytes)
+void iov_iter_advance(struct iov_iter *i, size_t bytes)
{
+ BUG_ON(i->count < bytes);
+
if (likely(i->nr_segs == 1)) {
i->iov_offset += bytes;
+ i->count -= bytes;
} else {
const struct iovec *iov = i->iov;
size_t base = i->iov_offset;
- while (bytes) {
- int copy = min(bytes, iov->iov_len - base);
+ /*
+ * The !iov->iov_len check ensures we skip over unlikely
+ * zero-length segments (without overrunning the iovec).
+ */
+ while (bytes || unlikely(!iov->iov_len && i->count)) {
+ int copy;
+ copy = min(bytes, iov->iov_len - base);
+ BUG_ON(!i->count || i->count < copy);
+ i->count -= copy;
bytes -= copy;
base += copy;
if (iov->iov_len == base) {
@@ -1747,14 +1788,6 @@ static void __iov_iter_advance_iov(struct iov_iter *i, size_t bytes)
i->iov_offset = base;
}
}
-
-void iov_iter_advance(struct iov_iter *i, size_t bytes)
-{
- BUG_ON(i->count < bytes);
-
- __iov_iter_advance_iov(i, bytes);
- i->count -= bytes;
-}
EXPORT_SYMBOL(iov_iter_advance);
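
The effect of the new loop condition is that the iterator never stops on a zero-length segment while data remains, which matters for callers that only look at the current segment (iov_iter_single_seg_count() and the fault-in helpers). A worked trace with assumed example values:

/*
 * Assumed example: segments of length {0, 8, 0, 8}, i->count = 16,
 * then iov_iter_advance(i, 8):
 *
 *   seg0: copy = min(8, 0) = 0; base == iov_len == 0, so step to seg1
 *   seg1: copy = min(8, 8) = 8; count = 8, bytes = 0, step to seg2
 *   seg2: bytes is 0, but "!iov->iov_len && i->count" keeps the loop
 *         going, so this zero-length segment is skipped as well
 *   seg3: bytes == 0 and iov_len != 0, so the loop exits
 *
 * Result: iov points at seg3 with iov_offset = 0 and count = 8, so the
 * next copy starts on a segment that can actually make progress.
 */
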
/*
@@ -2251,6 +2284,7 @@ again:
cond_resched();
+ iov_iter_advance(i, copied);
if (unlikely(copied == 0)) {
/*
* If we were unable to copy any data at all, we must
@@ -2264,7 +2298,6 @@ again:
iov_iter_single_seg_count(i));
goto again;
}
- iov_iter_advance(i, copied);
pos += copied;
written += copied;
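
Moving iov_iter_advance() in front of the copied == 0 check is the behavioural half of the fix: even when nothing was copied, advancing by zero now steps past any leading zero-length segments (see the loop change above), so the single-segment fallback measures a segment that can fault in data instead of retrying a zero-length one forever. A condensed view of the retry loop around the again: label; lines not shown in the hunks above are paraphrased assumptions:

again:
	/* ... prepare the page cache page and copy up to 'bytes' bytes from
	 * the iterator into it, recording the amount actually copied ... */

	cond_resched();

	iov_iter_advance(i, copied);	/* now happens even when copied == 0 */
	if (unlikely(copied == 0)) {
		/*
		 * Nothing could be copied (e.g. the source user page was not
		 * resident).  Retry with at most one segment's worth of data;
		 * because the iterator has already stepped past zero-length
		 * segments, this fallback length can no longer be stuck at 0.
		 */
		bytes = min_t(unsigned long, PAGE_CACHE_SIZE - offset,
			      iov_iter_single_seg_count(i));
		goto again;
	}
	/* otherwise fall through and account 'copied' bytes as written */
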