Diffstat (limited to 'fs/nfs/file.c')
-rw-r--r--  fs/nfs/file.c  172
1 file changed, 133 insertions(+), 39 deletions(-)
diff --git a/fs/nfs/file.c b/fs/nfs/file.c
index 579cf8a7d4a..d29f90d00aa 100644
--- a/fs/nfs/file.c
+++ b/fs/nfs/file.c
@@ -33,6 +33,7 @@
#include <asm/system.h>
#include "delegation.h"
+#include "internal.h"
#include "iostat.h"
#define NFSDBG_FACILITY NFSDBG_FILE
@@ -55,6 +56,8 @@ static int nfs_lock(struct file *filp, int cmd, struct file_lock *fl);
static int nfs_flock(struct file *filp, int cmd, struct file_lock *fl);
static int nfs_setlease(struct file *file, long arg, struct file_lock **fl);
+static struct vm_operations_struct nfs_file_vm_ops;
+
const struct file_operations nfs_file_operations = {
.llseek = nfs_file_llseek,
.read = do_sync_read,
@@ -174,13 +177,38 @@ static loff_t nfs_file_llseek(struct file *filp, loff_t offset, int origin)
}
/*
+ * Helper for nfs_file_flush() and nfs_fsync()
+ *
+ * Notice that it clears the NFS_CONTEXT_ERROR_WRITE before syncing to
+ * disk, but it retrieves and clears ctx->error after syncing, despite
+ * the two being set at the same time in nfs_context_set_write_error().
+ * This is because the former is used to notify the _next_ call to
+ * nfs_file_write() that a write error occurred, and hence cause it to
+ * fall back to doing a synchronous write.
+ */
+static int nfs_do_fsync(struct nfs_open_context *ctx, struct inode *inode)
+{
+ int have_error, status;
+ int ret = 0;
+
+ have_error = test_and_clear_bit(NFS_CONTEXT_ERROR_WRITE, &ctx->flags);
+ status = nfs_wb_all(inode);
+ have_error |= test_bit(NFS_CONTEXT_ERROR_WRITE, &ctx->flags);
+ if (have_error)
+ ret = xchg(&ctx->error, 0);
+ if (!ret)
+ ret = status;
+ return ret;
+}
+
+/*
* Flush all dirty pages, and check for write errors.
*
*/
static int
nfs_file_flush(struct file *file, fl_owner_t id)
{
- struct nfs_open_context *ctx = (struct nfs_open_context *)file->private_data;
+ struct nfs_open_context *ctx = nfs_file_open_context(file);
struct inode *inode = file->f_path.dentry->d_inode;
int status;
@@ -189,16 +217,11 @@ nfs_file_flush(struct file *file, fl_owner_t id)
if ((file->f_mode & FMODE_WRITE) == 0)
return 0;
nfs_inc_stats(inode, NFSIOS_VFSFLUSH);
- lock_kernel();
+
/* Ensure that data+attribute caches are up to date after close() */
- status = nfs_wb_all(inode);
- if (!status) {
- status = ctx->error;
- ctx->error = 0;
- if (!status)
- nfs_revalidate_inode(NFS_SERVER(inode), inode);
- }
- unlock_kernel();
+ status = nfs_do_fsync(ctx, inode);
+ if (!status)
+ nfs_revalidate_inode(NFS_SERVER(inode), inode);
return status;
}
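For context on the comment above: the NFS_CONTEXT_ERROR_WRITE flag and ctx->error that nfs_do_fsync() inspects are set together by nfs_context_set_write_error() in fs/nfs/write.c. A minimal sketch of that setter, reconstructed from the same era of the kernel tree (not part of this diff), looks roughly like:

static void nfs_context_set_write_error(struct nfs_open_context *ctx, int error)
{
	ctx->error = error;
	smp_wmb();	/* make ctx->error visible before the flag is set */
	set_bit(NFS_CONTEXT_ERROR_WRITE, &ctx->flags);
}

The write barrier matches the order used in nfs_do_fsync(): the flag is tested first, and only then is ctx->error exchanged for 0.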
@@ -257,8 +280,11 @@ nfs_file_mmap(struct file * file, struct vm_area_struct * vma)
dentry->d_parent->d_name.name, dentry->d_name.name);
status = nfs_revalidate_mapping(inode, file->f_mapping);
- if (!status)
- status = generic_file_mmap(file, vma);
+ if (!status) {
+ vma->vm_ops = &nfs_file_vm_ops;
+ vma->vm_flags |= VM_CAN_NONLINEAR;
+ file_accessed(file);
+ }
return status;
}
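The hunk above stops calling generic_file_mmap() so that the NFS-specific vm_ops (added later in this diff) can be installed in place of generic_file_vm_ops; the file_accessed() call and VM_CAN_NONLINEAR flag are open-coded instead. For comparison, a rough sketch of what the generic helper does in this kernel version (not part of this diff):

int generic_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct address_space *mapping = file->f_mapping;

	/* refuse to mmap if the filesystem cannot read pages */
	if (!mapping->a_ops->readpage)
		return -ENOEXEC;
	file_accessed(file);
	vma->vm_ops = &generic_file_vm_ops;
	vma->vm_flags |= VM_CAN_NONLINEAR;
	return 0;
}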
@@ -270,45 +296,60 @@ nfs_file_mmap(struct file * file, struct vm_area_struct * vma)
static int
nfs_fsync(struct file *file, struct dentry *dentry, int datasync)
{
- struct nfs_open_context *ctx = (struct nfs_open_context *)file->private_data;
+ struct nfs_open_context *ctx = nfs_file_open_context(file);
struct inode *inode = dentry->d_inode;
- int status;
dfprintk(VFS, "nfs: fsync(%s/%ld)\n", inode->i_sb->s_id, inode->i_ino);
nfs_inc_stats(inode, NFSIOS_VFSFSYNC);
- lock_kernel();
- status = nfs_wb_all(inode);
- if (!status) {
- status = ctx->error;
- ctx->error = 0;
- }
- unlock_kernel();
- return status;
+ return nfs_do_fsync(ctx, inode);
}
/*
- * This does the "real" work of the write. The generic routine has
- * allocated the page, locked it, done all the page alignment stuff
- * calculations etc. Now we should just copy the data from user
- * space and write it back to the real medium..
+ * This does the "real" work of the write. We must allocate and lock the
+ * page to be sent back to the generic routine, which then copies the
+ * data from user space.
*
* If the writer ends up delaying the write, the writer needs to
* increment the page use counts until it is done with the page.
*/
-static int nfs_prepare_write(struct file *file, struct page *page, unsigned offset, unsigned to)
+static int nfs_write_begin(struct file *file, struct address_space *mapping,
+ loff_t pos, unsigned len, unsigned flags,
+ struct page **pagep, void **fsdata)
{
- return nfs_flush_incompatible(file, page);
+ int ret;
+ pgoff_t index;
+ struct page *page;
+ index = pos >> PAGE_CACHE_SHIFT;
+
+ page = __grab_cache_page(mapping, index);
+ if (!page)
+ return -ENOMEM;
+ *pagep = page;
+
+ ret = nfs_flush_incompatible(file, page);
+ if (ret) {
+ unlock_page(page);
+ page_cache_release(page);
+ }
+ return ret;
}
-static int nfs_commit_write(struct file *file, struct page *page, unsigned offset, unsigned to)
+static int nfs_write_end(struct file *file, struct address_space *mapping,
+ loff_t pos, unsigned len, unsigned copied,
+ struct page *page, void *fsdata)
{
- long status;
+ unsigned offset = pos & (PAGE_CACHE_SIZE - 1);
+ int status;
lock_kernel();
- status = nfs_updatepage(file, page, offset, to-offset);
+ status = nfs_updatepage(file, page, offset, copied);
unlock_kernel();
- return status;
+
+ unlock_page(page);
+ page_cache_release(page);
+
+ return status < 0 ? status : copied;
}
static void nfs_invalidate_page(struct page *page, unsigned long offset)
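For readers who know the old prepare_write/commit_write pair but not the write_begin/write_end API used above: the generic buffered-write path calls the two hooks around its copy from user space, and ->write_end (not the caller) is responsible for unlocking and releasing the page, which is why nfs_write_end() calls unlock_page() and page_cache_release() itself. A heavily simplified sketch of the calling sequence, assumed from the generic VFS code of this era (not part of this diff):

/* inside the per-page loop of the generic buffered write path */
status = a_ops->write_begin(file, mapping, pos, bytes, flags,
				&page, &fsdata);
if (status == 0) {
	copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes);
	status = a_ops->write_end(file, mapping, pos, bytes, copied,
				page, fsdata);
}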
@@ -333,11 +374,11 @@ static int nfs_launder_page(struct page *page)
const struct address_space_operations nfs_file_aops = {
.readpage = nfs_readpage,
.readpages = nfs_readpages,
- .set_page_dirty = nfs_set_page_dirty,
+ .set_page_dirty = __set_page_dirty_nobuffers,
.writepage = nfs_writepage,
.writepages = nfs_writepages,
- .prepare_write = nfs_prepare_write,
- .commit_write = nfs_commit_write,
+ .write_begin = nfs_write_begin,
+ .write_end = nfs_write_end,
.invalidatepage = nfs_invalidate_page,
.releasepage = nfs_release_page,
#ifdef CONFIG_NFS_DIRECTIO
@@ -346,6 +387,60 @@ const struct address_space_operations nfs_file_aops = {
.launder_page = nfs_launder_page,
};
+static int nfs_vm_page_mkwrite(struct vm_area_struct *vma, struct page *page)
+{
+ struct file *filp = vma->vm_file;
+ unsigned pagelen;
+ int ret = -EINVAL;
+ void *fsdata;
+ struct address_space *mapping;
+ loff_t offset;
+
+ lock_page(page);
+ mapping = page->mapping;
+ if (mapping != vma->vm_file->f_path.dentry->d_inode->i_mapping) {
+ unlock_page(page);
+ return -EINVAL;
+ }
+ pagelen = nfs_page_length(page);
+ offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
+ unlock_page(page);
+
+ /*
+ * We can use mapping after releasing the page lock because:
+ * we hold mmap_sem on the fault path, which should pin the vma,
+ * which should pin the file, which pins the dentry, which should
+ * hold a reference on the inode.
+ */
+
+ if (pagelen) {
+ struct page *page2 = NULL;
+ ret = nfs_write_begin(filp, mapping, offset, pagelen,
+ 0, &page2, &fsdata);
+ if (!ret)
+ ret = nfs_write_end(filp, mapping, offset, pagelen,
+ pagelen, page2, fsdata);
+ }
+ return ret;
+}
+
+static struct vm_operations_struct nfs_file_vm_ops = {
+ .fault = filemap_fault,
+ .page_mkwrite = nfs_vm_page_mkwrite,
+};
+
+static int nfs_need_sync_write(struct file *filp, struct inode *inode)
+{
+ struct nfs_open_context *ctx;
+
+ if (IS_SYNC(inode) || (filp->f_flags & O_SYNC))
+ return 1;
+ ctx = nfs_file_open_context(filp);
+ if (test_bit(NFS_CONTEXT_ERROR_WRITE, &ctx->flags))
+ return 1;
+ return 0;
+}
+
static ssize_t nfs_file_write(struct kiocb *iocb, const struct iovec *iov,
unsigned long nr_segs, loff_t pos)
{
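The page_mkwrite handler above uses nfs_page_length() to work out how much of the faulting page lies inside i_size, which is what the new #include "internal.h" near the top of this diff is for. A rough sketch of that helper as defined in fs/nfs/internal.h (not part of this diff):

static inline unsigned int nfs_page_length(struct page *page)
{
	loff_t i_size = i_size_read(page->mapping->host);

	if (i_size > 0) {
		pgoff_t end_index = (i_size - 1) >> PAGE_CACHE_SHIFT;

		/* pages wholly below i_size are fully valid */
		if (page->index < end_index)
			return PAGE_CACHE_SIZE;
		/* the last page is only valid up to i_size */
		if (page->index == end_index)
			return ((i_size - 1) & ~PAGE_CACHE_MASK) + 1;
	}
	return 0;
}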
@@ -382,8 +477,8 @@ static ssize_t nfs_file_write(struct kiocb *iocb, const struct iovec *iov,
nfs_add_stats(inode, NFSIOS_NORMALWRITTENBYTES, count);
result = generic_file_aio_write(iocb, iov, nr_segs, pos);
/* Return error values for O_SYNC and IS_SYNC() */
- if (result >= 0 && (IS_SYNC(inode) || (iocb->ki_filp->f_flags & O_SYNC))) {
- int err = nfs_fsync(iocb->ki_filp, dentry, 1);
+ if (result >= 0 && nfs_need_sync_write(iocb->ki_filp, inode)) {
+ int err = nfs_do_fsync(nfs_file_open_context(iocb->ki_filp), inode);
if (err < 0)
result = err;
}
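The final hunk below replaces the open-coded mode test for mandatory locking with the __mandatory_lock() helper. Its definition in include/linux/fs.h of this era is, roughly (shown for reference, not part of this diff):

static inline int __mandatory_lock(struct inode *ino)
{
	/* setgid bit set with group-execute clear marks mandatory locking */
	return (ino->i_mode & (S_ISGID | S_IXGRP)) == S_ISGID;
}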
@@ -522,8 +617,7 @@ static int nfs_lock(struct file *filp, int cmd, struct file_lock *fl)
nfs_inc_stats(inode, NFSIOS_VFSLOCK);
/* No mandatory locks over NFS */
- if ((inode->i_mode & (S_ISGID | S_IXGRP)) == S_ISGID &&
- fl->fl_type != F_UNLCK)
+ if (__mandatory_lock(inode) && fl->fl_type != F_UNLCK)
return -ENOLCK;
if (IS_GETLK(cmd))