Diffstat (limited to 'fs/nfs')
-rw-r--r--  fs/nfs/client.c      |   2
-rw-r--r--  fs/nfs/direct.c      |  10
-rw-r--r--  fs/nfs/file.c        |  28
-rw-r--r--  fs/nfs/inode.c       |   8
-rw-r--r--  fs/nfs/internal.h    |  18
-rw-r--r--  fs/nfs/namespace.c   |   8
-rw-r--r--  fs/nfs/nfs3proc.c    |  52
-rw-r--r--  fs/nfs/nfs4_fs.h     |   2
-rw-r--r--  fs/nfs/nfs4proc.c    |  99
-rw-r--r--  fs/nfs/nfs4renewd.c  |   5
-rw-r--r--  fs/nfs/nfsroot.c     |  13
-rw-r--r--  fs/nfs/pagelist.c    |  71
-rw-r--r--  fs/nfs/proc.c        |  31
-rw-r--r--  fs/nfs/read.c        | 183
-rw-r--r--  fs/nfs/symlink.c     |   2
-rw-r--r--  fs/nfs/sysctl.c      |   5
-rw-r--r--  fs/nfs/write.c       | 604
17 files changed, 498 insertions, 643 deletions
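
Several of the hunks below (client.c, namespace.c, nfs4renewd.c) are the 2.6.20 workqueue API conversion: work callbacks no longer receive a caller-supplied data pointer, so delayed work items are set up with INIT_DELAYED_WORK() and the callback recovers its containing object with container_of(). A minimal sketch of that pattern, using hypothetical names (my_client, my_renew) as stand-ins for cl_renewd/nfs4_renew_state rather than anything taken verbatim from this patch:

#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/workqueue.h>

struct my_client {
	struct delayed_work renewd;	/* hypothetical stand-in for cl_renewd */
	/* ... other per-client state ... */
};

static void my_renew(struct work_struct *work)
{
	/* The callback only gets the work_struct; recover the container. */
	struct my_client *clp =
		container_of(work, struct my_client, renewd.work);

	/* ... do the periodic work for clp, then requeue it ... */
	schedule_delayed_work(&clp->renewd, 60 * HZ);
}

static void my_client_setup(struct my_client *clp)
{
	/* Old API: INIT_WORK(&clp->renewd, my_renew, clp); */
	INIT_DELAYED_WORK(&clp->renewd, my_renew);
	schedule_delayed_work(&clp->renewd, 60 * HZ);
}
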
diff --git a/fs/nfs/client.c b/fs/nfs/client.c index 5fea638743e..23ab145daa2 100644 --- a/fs/nfs/client.c +++ b/fs/nfs/client.c @@ -143,7 +143,7 @@ static struct nfs_client *nfs_alloc_client(const char *hostname, INIT_LIST_HEAD(&clp->cl_state_owners); INIT_LIST_HEAD(&clp->cl_unused); spin_lock_init(&clp->cl_lock); - INIT_WORK(&clp->cl_renewd, nfs4_renew_state, clp); + INIT_DELAYED_WORK(&clp->cl_renewd, nfs4_renew_state); rpc_init_wait_queue(&clp->cl_rpcwaitq, "NFS client"); clp->cl_boot_time = CURRENT_TIME; clp->cl_state = 1 << NFS4CLNT_LEASE_EXPIRED; diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c index bdfabf854a5..f9d678f4ae0 100644 --- a/fs/nfs/direct.c +++ b/fs/nfs/direct.c @@ -58,7 +58,7 @@ #define NFSDBG_FACILITY NFSDBG_VFS -static kmem_cache_t *nfs_direct_cachep; +static struct kmem_cache *nfs_direct_cachep; /* * This represents a set of asynchronous requests that we're waiting on @@ -143,7 +143,7 @@ static inline struct nfs_direct_req *nfs_direct_req_alloc(void) { struct nfs_direct_req *dreq; - dreq = kmem_cache_alloc(nfs_direct_cachep, SLAB_KERNEL); + dreq = kmem_cache_alloc(nfs_direct_cachep, GFP_KERNEL); if (!dreq) return NULL; @@ -307,9 +307,7 @@ static ssize_t nfs_direct_read_schedule(struct nfs_direct_req *dreq, unsigned lo data->task.tk_cookie = (unsigned long) inode; - lock_kernel(); rpc_execute(&data->task); - unlock_kernel(); dfprintk(VFS, "NFS: %5u initiated direct read call (req %s/%Ld, %zu bytes @ offset %Lu)\n", data->task.tk_pid, @@ -475,9 +473,7 @@ static void nfs_direct_commit_schedule(struct nfs_direct_req *dreq) dprintk("NFS: %5u initiated commit call\n", data->task.tk_pid); - lock_kernel(); rpc_execute(&data->task); - unlock_kernel(); } static void nfs_direct_write_complete(struct nfs_direct_req *dreq, struct inode *inode) @@ -641,9 +637,7 @@ static ssize_t nfs_direct_write_schedule(struct nfs_direct_req *dreq, unsigned l data->task.tk_priority = RPC_PRIORITY_NORMAL; data->task.tk_cookie = (unsigned long) inode; - lock_kernel(); rpc_execute(&data->task); - unlock_kernel(); dfprintk(VFS, "NFS: %5u initiated direct write call (req %s/%Ld, %zu bytes @ offset %Lu)\n", data->task.tk_pid, diff --git a/fs/nfs/file.c b/fs/nfs/file.c index cc93865cea9..8e28bffc35a 100644 --- a/fs/nfs/file.c +++ b/fs/nfs/file.c @@ -307,28 +307,28 @@ static int nfs_commit_write(struct file *file, struct page *page, unsigned offse static void nfs_invalidate_page(struct page *page, unsigned long offset) { - struct inode *inode = page->mapping->host; - + if (offset != 0) + return; /* Cancel any unstarted writes on this page */ - if (offset == 0) - nfs_sync_inode_wait(inode, page->index, 1, FLUSH_INVALIDATE); + nfs_wb_page_priority(page->mapping->host, page, FLUSH_INVALIDATE); } static int nfs_release_page(struct page *page, gfp_t gfp) { - if (gfp & __GFP_FS) - return !nfs_wb_page(page->mapping->host, page); - else - /* - * Avoid deadlock on nfs_wait_on_request(). - */ + /* + * Avoid deadlock on nfs_wait_on_request(). + */ + if (!(gfp & __GFP_FS)) return 0; + /* Hack... 
Force nfs_wb_page() to write out the page */ + SetPageDirty(page); + return !nfs_wb_page(page->mapping->host, page); } const struct address_space_operations nfs_file_aops = { .readpage = nfs_readpage, .readpages = nfs_readpages, - .set_page_dirty = __set_page_dirty_nobuffers, + .set_page_dirty = nfs_set_page_dirty, .writepage = nfs_writepage, .writepages = nfs_writepages, .prepare_write = nfs_prepare_write, @@ -375,6 +375,12 @@ static ssize_t nfs_file_write(struct kiocb *iocb, const struct iovec *iov, nfs_add_stats(inode, NFSIOS_NORMALWRITTENBYTES, count); result = generic_file_aio_write(iocb, iov, nr_segs, pos); + /* Return error values for O_SYNC and IS_SYNC() */ + if (result >= 0 && (IS_SYNC(inode) || (iocb->ki_filp->f_flags & O_SYNC))) { + int err = nfs_fsync(iocb->ki_filp, dentry, 1); + if (err < 0) + result = err; + } out: return result; diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c index 08cc4c5919a..36680d1061b 100644 --- a/fs/nfs/inode.c +++ b/fs/nfs/inode.c @@ -55,7 +55,7 @@ static int nfs_update_inode(struct inode *, struct nfs_fattr *); static void nfs_zap_acl_cache(struct inode *); -static kmem_cache_t * nfs_inode_cachep; +static struct kmem_cache * nfs_inode_cachep; static inline unsigned long nfs_fattr_to_ino_t(struct nfs_fattr *fattr) @@ -422,7 +422,7 @@ int nfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat) int err; /* Flush out writes to the server in order to update c/mtime */ - nfs_sync_inode_wait(inode, 0, 0, FLUSH_NOCOMMIT); + nfs_sync_mapping_range(inode->i_mapping, 0, 0, FLUSH_NOCOMMIT); /* * We may force a getattr if the user cares about atime. @@ -1080,7 +1080,7 @@ void nfs4_clear_inode(struct inode *inode) struct inode *nfs_alloc_inode(struct super_block *sb) { struct nfs_inode *nfsi; - nfsi = (struct nfs_inode *)kmem_cache_alloc(nfs_inode_cachep, SLAB_KERNEL); + nfsi = (struct nfs_inode *)kmem_cache_alloc(nfs_inode_cachep, GFP_KERNEL); if (!nfsi) return NULL; nfsi->flags = 0UL; @@ -1111,7 +1111,7 @@ static inline void nfs4_init_once(struct nfs_inode *nfsi) #endif } -static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags) +static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flags) { struct nfs_inode *nfsi = (struct nfs_inode *) foo; diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h index d205466233f..a28f6ce2e13 100644 --- a/fs/nfs/internal.h +++ b/fs/nfs/internal.h @@ -217,3 +217,21 @@ void nfs_super_set_maxbytes(struct super_block *sb, __u64 maxfilesize) if (sb->s_maxbytes > MAX_LFS_FILESIZE || sb->s_maxbytes <= 0) sb->s_maxbytes = MAX_LFS_FILESIZE; } + +/* + * Determine the number of bytes of data the page contains + */ +static inline +unsigned int nfs_page_length(struct page *page) +{ + loff_t i_size = i_size_read(page->mapping->host); + + if (i_size > 0) { + pgoff_t end_index = (i_size - 1) >> PAGE_CACHE_SHIFT; + if (page->index < end_index) + return PAGE_CACHE_SIZE; + if (page->index == end_index) + return ((i_size - 1) & ~PAGE_CACHE_MASK) + 1; + } + return 0; +} diff --git a/fs/nfs/namespace.c b/fs/nfs/namespace.c index ec1114b33d8..371b804e7cc 100644 --- a/fs/nfs/namespace.c +++ b/fs/nfs/namespace.c @@ -18,10 +18,10 @@ #define NFSDBG_FACILITY NFSDBG_VFS -static void nfs_expire_automounts(void *list); +static void nfs_expire_automounts(struct work_struct *work); LIST_HEAD(nfs_automount_list); -static DECLARE_WORK(nfs_automount_task, nfs_expire_automounts, &nfs_automount_list); +static DECLARE_DELAYED_WORK(nfs_automount_task, nfs_expire_automounts); int nfs_mountpoint_expiry_timeout = 
500 * HZ; static struct vfsmount *nfs_do_submount(const struct vfsmount *mnt_parent, @@ -164,9 +164,9 @@ struct inode_operations nfs_referral_inode_operations = { .follow_link = nfs_follow_mountpoint, }; -static void nfs_expire_automounts(void *data) +static void nfs_expire_automounts(struct work_struct *work) { - struct list_head *list = (struct list_head *)data; + struct list_head *list = &nfs_automount_list; mark_mounts_for_expiry(list); if (!list_empty(list)) diff --git a/fs/nfs/nfs3proc.c b/fs/nfs/nfs3proc.c index e5f128ffc32..510ae524f3f 100644 --- a/fs/nfs/nfs3proc.c +++ b/fs/nfs/nfs3proc.c @@ -276,51 +276,6 @@ static int nfs3_proc_read(struct nfs_read_data *rdata) return status; } -static int nfs3_proc_write(struct nfs_write_data *wdata) -{ - int rpcflags = wdata->flags; - struct inode * inode = wdata->inode; - struct nfs_fattr * fattr = wdata->res.fattr; - struct rpc_message msg = { - .rpc_proc = &nfs3_procedures[NFS3PROC_WRITE], - .rpc_argp = &wdata->args, - .rpc_resp = &wdata->res, - .rpc_cred = wdata->cred, - }; - int status; - - dprintk("NFS call write %d @ %Ld\n", wdata->args.count, - (long long) wdata->args.offset); - nfs_fattr_init(fattr); - status = rpc_call_sync(NFS_CLIENT(inode), &msg, rpcflags); - if (status >= 0) - nfs_post_op_update_inode(inode, fattr); - dprintk("NFS reply write: %d\n", status); - return status < 0? status : wdata->res.count; -} - -static int nfs3_proc_commit(struct nfs_write_data *cdata) -{ - struct inode * inode = cdata->inode; - struct nfs_fattr * fattr = cdata->res.fattr; - struct rpc_message msg = { - .rpc_proc = &nfs3_procedures[NFS3PROC_COMMIT], - .rpc_argp = &cdata->args, - .rpc_resp = &cdata->res, - .rpc_cred = cdata->cred, - }; - int status; - - dprintk("NFS call commit %d @ %Ld\n", cdata->args.count, - (long long) cdata->args.offset); - nfs_fattr_init(fattr); - status = rpc_call_sync(NFS_CLIENT(inode), &msg, 0); - if (status >= 0) - nfs_post_op_update_inode(inode, fattr); - dprintk("NFS reply commit: %d\n", status); - return status; -} - /* * Create a regular file. * For now, we don't implement O_EXCL. @@ -369,7 +324,7 @@ again: /* If the server doesn't support the exclusive creation semantics, * try again with simple 'guarded' mode. 
*/ - if (status == NFSERR_NOTSUPP) { + if (status == -ENOTSUPP) { switch (arg.createmode) { case NFS3_CREATE_EXCLUSIVE: arg.createmode = NFS3_CREATE_GUARDED; @@ -690,8 +645,6 @@ nfs3_proc_readdir(struct dentry *dentry, struct rpc_cred *cred, }; int status; - lock_kernel(); - if (plus) msg.rpc_proc = &nfs3_procedures[NFS3PROC_READDIRPLUS]; @@ -702,7 +655,6 @@ nfs3_proc_readdir(struct dentry *dentry, struct rpc_cred *cred, status = rpc_call_sync(NFS_CLIENT(dir), &msg, 0); nfs_refresh_inode(dir, &dir_attr); dprintk("NFS reply readdir: %d\n", status); - unlock_kernel(); return status; } @@ -904,8 +856,6 @@ const struct nfs_rpc_ops nfs_v3_clientops = { .access = nfs3_proc_access, .readlink = nfs3_proc_readlink, .read = nfs3_proc_read, - .write = nfs3_proc_write, - .commit = nfs3_proc_commit, .create = nfs3_proc_create, .remove = nfs3_proc_remove, .unlink_setup = nfs3_proc_unlink_setup, diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h index 6f346677332..c26cd978c7c 100644 --- a/fs/nfs/nfs4_fs.h +++ b/fs/nfs/nfs4_fs.h @@ -185,7 +185,7 @@ extern const u32 nfs4_fs_locations_bitmap[2]; extern void nfs4_schedule_state_renewal(struct nfs_client *); extern void nfs4_renewd_prepare_shutdown(struct nfs_server *); extern void nfs4_kill_renewd(struct nfs_client *); -extern void nfs4_renew_state(void *); +extern void nfs4_renew_state(struct work_struct *); /* nfs4state.c */ struct rpc_cred *nfs4_get_renew_cred(struct nfs_client *clp); diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index 8118036cc44..ee458aeab24 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -636,7 +636,7 @@ static int _nfs4_proc_open_confirm(struct nfs4_opendata *data) smp_wmb(); } else status = data->rpc_status; - rpc_release_task(task); + rpc_put_task(task); return status; } @@ -742,7 +742,7 @@ static int _nfs4_proc_open(struct nfs4_opendata *data) smp_wmb(); } else status = data->rpc_status; - rpc_release_task(task); + rpc_put_task(task); if (status != 0) return status; @@ -1775,89 +1775,6 @@ static int nfs4_proc_read(struct nfs_read_data *rdata) return err; } -static int _nfs4_proc_write(struct nfs_write_data *wdata) -{ - int rpcflags = wdata->flags; - struct inode *inode = wdata->inode; - struct nfs_fattr *fattr = wdata->res.fattr; - struct nfs_server *server = NFS_SERVER(inode); - struct rpc_message msg = { - .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_WRITE], - .rpc_argp = &wdata->args, - .rpc_resp = &wdata->res, - .rpc_cred = wdata->cred, - }; - int status; - - dprintk("NFS call write %d @ %Ld\n", wdata->args.count, - (long long) wdata->args.offset); - - wdata->args.bitmask = server->attr_bitmask; - wdata->res.server = server; - wdata->timestamp = jiffies; - nfs_fattr_init(fattr); - status = rpc_call_sync(server->client, &msg, rpcflags); - dprintk("NFS reply write: %d\n", status); - if (status < 0) - return status; - renew_lease(server, wdata->timestamp); - nfs_post_op_update_inode(inode, fattr); - return wdata->res.count; -} - -static int nfs4_proc_write(struct nfs_write_data *wdata) -{ - struct nfs4_exception exception = { }; - int err; - do { - err = nfs4_handle_exception(NFS_SERVER(wdata->inode), - _nfs4_proc_write(wdata), - &exception); - } while (exception.retry); - return err; -} - -static int _nfs4_proc_commit(struct nfs_write_data *cdata) -{ - struct inode *inode = cdata->inode; - struct nfs_fattr *fattr = cdata->res.fattr; - struct nfs_server *server = NFS_SERVER(inode); - struct rpc_message msg = { - .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_COMMIT], - .rpc_argp = &cdata->args, - .rpc_resp = &cdata->res, - 
.rpc_cred = cdata->cred, - }; - int status; - - dprintk("NFS call commit %d @ %Ld\n", cdata->args.count, - (long long) cdata->args.offset); - - cdata->args.bitmask = server->attr_bitmask; - cdata->res.server = server; - cdata->timestamp = jiffies; - nfs_fattr_init(fattr); - status = rpc_call_sync(server->client, &msg, 0); - if (status >= 0) - renew_lease(server, cdata->timestamp); - dprintk("NFS reply commit: %d\n", status); - if (status >= 0) - nfs_post_op_update_inode(inode, fattr); - return status; -} - -static int nfs4_proc_commit(struct nfs_write_data *cdata) -{ - struct nfs4_exception exception = { }; - int err; - do { - err = nfs4_handle_exception(NFS_SERVER(cdata->inode), - _nfs4_proc_commit(cdata), - &exception); - } while (exception.retry); - return err; -} - /* * Got race? * We will need to arrange for the VFS layer to provide an atomic open. @@ -2223,13 +2140,11 @@ static int _nfs4_proc_readdir(struct dentry *dentry, struct rpc_cred *cred, dentry->d_parent->d_name.name, dentry->d_name.name, (unsigned long long)cookie); - lock_kernel(); nfs4_setup_readdir(cookie, NFS_COOKIEVERF(dir), dentry, &args); res.pgbase = args.pgbase; status = rpc_call_sync(NFS_CLIENT(dir), &msg, 0); if (status == 0) memcpy(NFS_COOKIEVERF(dir), res.verifier.data, NFS4_VERIFIER_SIZE); - unlock_kernel(); dprintk("%s: returns %d\n", __FUNCTION__, status); return status; } @@ -3067,7 +2982,7 @@ static int _nfs4_proc_delegreturn(struct inode *inode, struct rpc_cred *cred, co if (status == 0) nfs_post_op_update_inode(inode, &data->fattr); } - rpc_release_task(task); + rpc_put_task(task); return status; } @@ -3314,7 +3229,7 @@ static int nfs4_proc_unlck(struct nfs4_state *state, int cmd, struct file_lock * if (IS_ERR(task)) goto out; status = nfs4_wait_for_completion_rpc_task(task); - rpc_release_task(task); + rpc_put_task(task); out: return status; } @@ -3430,7 +3345,7 @@ static void nfs4_lock_release(void *calldata) task = nfs4_do_unlck(&data->fl, data->ctx, data->lsp, data->arg.lock_seqid); if (!IS_ERR(task)) - rpc_release_task(task); + rpc_put_task(task); dprintk("%s: cancelling lock!\n", __FUNCTION__); } else nfs_free_seqid(data->arg.lock_seqid); @@ -3472,7 +3387,7 @@ static int _nfs4_do_setlk(struct nfs4_state *state, int cmd, struct file_lock *f ret = -EAGAIN; } else data->cancelled = 1; - rpc_release_task(task); + rpc_put_task(task); dprintk("%s: done, ret = %d!\n", __FUNCTION__, ret); return ret; } @@ -3732,8 +3647,6 @@ const struct nfs_rpc_ops nfs_v4_clientops = { .access = nfs4_proc_access, .readlink = nfs4_proc_readlink, .read = nfs4_proc_read, - .write = nfs4_proc_write, - .commit = nfs4_proc_commit, .create = nfs4_proc_create, .remove = nfs4_proc_remove, .unlink_setup = nfs4_proc_unlink_setup, diff --git a/fs/nfs/nfs4renewd.c b/fs/nfs/nfs4renewd.c index 7b6df1852e7..823298561c0 100644 --- a/fs/nfs/nfs4renewd.c +++ b/fs/nfs/nfs4renewd.c @@ -59,9 +59,10 @@ #define NFSDBG_FACILITY NFSDBG_PROC void -nfs4_renew_state(void *data) +nfs4_renew_state(struct work_struct *work) { - struct nfs_client *clp = (struct nfs_client *)data; + struct nfs_client *clp = + container_of(work, struct nfs_client, cl_renewd.work); struct rpc_cred *cred; long lease, timeout; unsigned long last, now; diff --git a/fs/nfs/nfsroot.c b/fs/nfs/nfsroot.c index 8dfefe41a8d..75f819dc025 100644 --- a/fs/nfs/nfsroot.c +++ b/fs/nfs/nfsroot.c @@ -98,7 +98,7 @@ static char nfs_root_name[256] __initdata = ""; /* Address of NFS server */ -static __u32 servaddr __initdata = 0; +static __be32 servaddr __initdata = 0; /* Name of directory to 
mount */ static char nfs_path[NFS_MAXPATHLEN] __initdata = { 0, }; @@ -327,7 +327,7 @@ static int __init root_nfs_name(char *name) */ static int __init root_nfs_addr(void) { - if ((servaddr = root_server_addr) == INADDR_NONE) { + if ((servaddr = root_server_addr) == htonl(INADDR_NONE)) { printk(KERN_ERR "Root-NFS: No NFS server available, giving up.\n"); return -1; } @@ -411,7 +411,7 @@ __setup("nfsroot=", nfs_root_setup); * Construct sockaddr_in from address and port number. */ static inline void -set_sockaddr(struct sockaddr_in *sin, __u32 addr, __u16 port) +set_sockaddr(struct sockaddr_in *sin, __be32 addr, __be16 port) { sin->sin_family = AF_INET; sin->sin_addr.s_addr = addr; @@ -468,14 +468,13 @@ static int __init root_nfs_ports(void) dprintk("Root-NFS: Portmapper on server returned %d " "as nfsd port\n", port); } - nfs_port = htons(nfs_port); if ((port = root_nfs_getport(NFS_MNT_PROGRAM, mountd_ver, proto)) < 0) { printk(KERN_ERR "Root-NFS: Unable to get mountd port " "number from server, using default\n"); port = mountd_port; } - mount_port = htons(port); + mount_port = port; dprintk("Root-NFS: mountd port is %d\n", port); return 0; @@ -496,7 +495,7 @@ static int __init root_nfs_get_handle(void) int version = (nfs_data.flags & NFS_MOUNT_VER3) ? NFS_MNT3_VERSION : NFS_MNT_VERSION; - set_sockaddr(&sin, servaddr, mount_port); + set_sockaddr(&sin, servaddr, htons(mount_port)); status = nfsroot_mount(&sin, nfs_path, &fh, version, protocol); if (status < 0) printk(KERN_ERR "Root-NFS: Server returned error %d " @@ -519,6 +518,6 @@ void * __init nfs_root_data(void) || root_nfs_ports() < 0 || root_nfs_get_handle() < 0) return NULL; - set_sockaddr((struct sockaddr_in *) &nfs_data.addr, servaddr, nfs_port); + set_sockaddr((struct sockaddr_in *) &nfs_data.addr, servaddr, htons(nfs_port)); return (void*)&nfs_data; } diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c index 829af323f28..ca4b1d4ff42 100644 --- a/fs/nfs/pagelist.c +++ b/fs/nfs/pagelist.c @@ -17,16 +17,17 @@ #include <linux/nfs_page.h> #include <linux/nfs_fs.h> #include <linux/nfs_mount.h> +#include <linux/writeback.h> #define NFS_PARANOIA 1 -static kmem_cache_t *nfs_page_cachep; +static struct kmem_cache *nfs_page_cachep; static inline struct nfs_page * nfs_page_alloc(void) { struct nfs_page *p; - p = kmem_cache_alloc(nfs_page_cachep, SLAB_KERNEL); + p = kmem_cache_alloc(nfs_page_cachep, GFP_KERNEL); if (p) { memset(p, 0, sizeof(*p)); INIT_LIST_HEAD(&p->wb_list); @@ -268,11 +269,10 @@ nfs_coalesce_requests(struct list_head *head, struct list_head *dst, #define NFS_SCAN_MAXENTRIES 16 /** - * nfs_scan_lock_dirty - Scan the radix tree for dirty requests - * @nfsi: NFS inode + * nfs_scan_dirty - Scan the radix tree for dirty requests + * @mapping: pointer to address space + * @wbc: writeback_control structure * @dst: Destination list - * @idx_start: lower bound of page->index to scan - * @npages: idx_start + npages sets the upper bound to scan. * * Moves elements from one of the inode request lists. * If the number of requests is set to 0, the entire address_space @@ -280,46 +280,63 @@ nfs_coalesce_requests(struct list_head *head, struct list_head *dst, * The requests are *not* checked to ensure that they form a contiguous set. 
* You must be holding the inode's req_lock when calling this function */ -int -nfs_scan_lock_dirty(struct nfs_inode *nfsi, struct list_head *dst, - unsigned long idx_start, unsigned int npages) +long nfs_scan_dirty(struct address_space *mapping, + struct writeback_control *wbc, + struct list_head *dst) { + struct nfs_inode *nfsi = NFS_I(mapping->host); struct nfs_page *pgvec[NFS_SCAN_MAXENTRIES]; struct nfs_page *req; - unsigned long idx_end; + pgoff_t idx_start, idx_end; + long res = 0; int found, i; - int res; - res = 0; - if (npages == 0) - idx_end = ~0; - else - idx_end = idx_start + npages - 1; + if (nfsi->ndirty == 0) + return 0; + if (wbc->range_cyclic) { + idx_start = 0; + idx_end = ULONG_MAX; + } else if (wbc->range_end == 0) { + idx_start = wbc->range_start >> PAGE_CACHE_SHIFT; + idx_end = ULONG_MAX; + } else { + idx_start = wbc->range_start >> PAGE_CACHE_SHIFT; + idx_end = wbc->range_end >> PAGE_CACHE_SHIFT; + } for (;;) { + unsigned int toscan = NFS_SCAN_MAXENTRIES; + found = radix_tree_gang_lookup_tag(&nfsi->nfs_page_tree, - (void **)&pgvec[0], idx_start, NFS_SCAN_MAXENTRIES, + (void **)&pgvec[0], idx_start, toscan, NFS_PAGE_TAG_DIRTY); + + /* Did we make progress? */ if (found <= 0) break; + for (i = 0; i < found; i++) { req = pgvec[i]; - if (req->wb_index > idx_end) + if (!wbc->range_cyclic && req->wb_index > idx_end) goto out; + /* Try to lock request and mark it for writeback */ + if (!nfs_set_page_writeback_locked(req)) + goto next; + radix_tree_tag_clear(&nfsi->nfs_page_tree, + req->wb_index, NFS_PAGE_TAG_DIRTY); + nfsi->ndirty--; + nfs_list_remove_request(req); + nfs_list_add_request(req, dst); + res++; + if (res == LONG_MAX) + goto out; +next: idx_start = req->wb_index + 1; - - if (nfs_set_page_writeback_locked(req)) { - radix_tree_tag_clear(&nfsi->nfs_page_tree, - req->wb_index, NFS_PAGE_TAG_DIRTY); - nfs_list_remove_request(req); - nfs_list_add_request(req, dst); - dec_zone_page_state(req->wb_page, NR_FILE_DIRTY); - res++; - } } } out: + WARN_ON ((nfsi->ndirty == 0) != list_empty(&nfsi->dirty)); return res; } diff --git a/fs/nfs/proc.c b/fs/nfs/proc.c index 4529cc4f3f8..10f5e80ca15 100644 --- a/fs/nfs/proc.c +++ b/fs/nfs/proc.c @@ -215,32 +215,6 @@ static int nfs_proc_read(struct nfs_read_data *rdata) return status; } -static int nfs_proc_write(struct nfs_write_data *wdata) -{ - int flags = wdata->flags; - struct inode * inode = wdata->inode; - struct nfs_fattr * fattr = wdata->res.fattr; - struct rpc_message msg = { - .rpc_proc = &nfs_procedures[NFSPROC_WRITE], - .rpc_argp = &wdata->args, - .rpc_resp = &wdata->res, - .rpc_cred = wdata->cred, - }; - int status; - - dprintk("NFS call write %d @ %Ld\n", wdata->args.count, - (long long) wdata->args.offset); - nfs_fattr_init(fattr); - status = rpc_call_sync(NFS_CLIENT(inode), &msg, flags); - if (status >= 0) { - nfs_post_op_update_inode(inode, fattr); - wdata->res.count = wdata->args.count; - wdata->verf.committed = NFS_FILE_SYNC; - } - dprintk("NFS reply write: %d\n", status); - return status < 0? 
status : wdata->res.count; -} - static int nfs_proc_create(struct inode *dir, struct dentry *dentry, struct iattr *sattr, int flags, struct nameidata *nd) @@ -545,13 +519,10 @@ nfs_proc_readdir(struct dentry *dentry, struct rpc_cred *cred, }; int status; - lock_kernel(); - dprintk("NFS call readdir %d\n", (unsigned int)cookie); status = rpc_call_sync(NFS_CLIENT(dir), &msg, 0); dprintk("NFS reply readdir: %d\n", status); - unlock_kernel(); return status; } @@ -696,8 +667,6 @@ const struct nfs_rpc_ops nfs_v2_clientops = { .access = NULL, /* access */ .readlink = nfs_proc_readlink, .read = nfs_proc_read, - .write = nfs_proc_write, - .commit = NULL, /* commit */ .create = nfs_proc_create, .remove = nfs_proc_remove, .unlink_setup = nfs_proc_unlink_setup, diff --git a/fs/nfs/read.c b/fs/nfs/read.c index c2e49c397a2..a9c26521a9e 100644 --- a/fs/nfs/read.c +++ b/fs/nfs/read.c @@ -30,6 +30,7 @@ #include <asm/system.h> +#include "internal.h" #include "iostat.h" #define NFSDBG_FACILITY NFSDBG_PAGECACHE @@ -38,7 +39,7 @@ static int nfs_pagein_one(struct list_head *, struct inode *); static const struct rpc_call_ops nfs_read_partial_ops; static const struct rpc_call_ops nfs_read_full_ops; -static kmem_cache_t *nfs_rdata_cachep; +static struct kmem_cache *nfs_rdata_cachep; static mempool_t *nfs_rdata_mempool; #define MIN_POOL_READ (32) @@ -46,7 +47,7 @@ static mempool_t *nfs_rdata_mempool; struct nfs_read_data *nfs_readdata_alloc(size_t len) { unsigned int pagecount = (len + PAGE_SIZE - 1) >> PAGE_SHIFT; - struct nfs_read_data *p = mempool_alloc(nfs_rdata_mempool, SLAB_NOFS); + struct nfs_read_data *p = mempool_alloc(nfs_rdata_mempool, GFP_NOFS); if (p) { memset(p, 0, sizeof(*p)); @@ -65,32 +66,22 @@ struct nfs_read_data *nfs_readdata_alloc(size_t len) return p; } -static void nfs_readdata_free(struct nfs_read_data *p) +static void nfs_readdata_rcu_free(struct rcu_head *head) { + struct nfs_read_data *p = container_of(head, struct nfs_read_data, task.u.tk_rcu); if (p && (p->pagevec != &p->page_array[0])) kfree(p->pagevec); mempool_free(p, nfs_rdata_mempool); } -void nfs_readdata_release(void *data) +static void nfs_readdata_free(struct nfs_read_data *rdata) { - nfs_readdata_free(data); + call_rcu_bh(&rdata->task.u.tk_rcu, nfs_readdata_rcu_free); } -static -unsigned int nfs_page_length(struct inode *inode, struct page *page) +void nfs_readdata_release(void *data) { - loff_t i_size = i_size_read(inode); - unsigned long idx; - - if (i_size <= 0) - return 0; - idx = (i_size - 1) >> PAGE_CACHE_SHIFT; - if (page->index > idx) - return 0; - if (page->index != idx) - return PAGE_CACHE_SIZE; - return 1 + ((i_size - 1) & (PAGE_CACHE_SIZE - 1)); + nfs_readdata_free(data); } static @@ -139,12 +130,12 @@ static int nfs_readpage_sync(struct nfs_open_context *ctx, struct inode *inode, { unsigned int rsize = NFS_SERVER(inode)->rsize; unsigned int count = PAGE_CACHE_SIZE; - int result; + int result = -ENOMEM; struct nfs_read_data *rdata; rdata = nfs_readdata_alloc(count); if (!rdata) - return -ENOMEM; + goto out_unlock; memset(rdata, 0, sizeof(*rdata)); rdata->flags = (IS_SWAPFILE(inode)? 
NFS_RPC_SWAPFLAGS : 0); @@ -212,8 +203,9 @@ static int nfs_readpage_sync(struct nfs_open_context *ctx, struct inode *inode, result = 0; io_error: - unlock_page(page); nfs_readdata_free(rdata); +out_unlock: + unlock_page(page); return result; } @@ -224,7 +216,7 @@ static int nfs_readpage_async(struct nfs_open_context *ctx, struct inode *inode, struct nfs_page *new; unsigned int len; - len = nfs_page_length(inode, page); + len = nfs_page_length(page); if (len == 0) return nfs_return_empty_page(page); new = nfs_create_request(ctx, inode, page, 0, len); @@ -316,9 +308,7 @@ static void nfs_execute_read(struct nfs_read_data *data) sigset_t oldset; rpc_clnt_sigmask(clnt, &oldset); - lock_kernel(); rpc_execute(&data->task); - unlock_kernel(); rpc_clnt_sigunmask(clnt, &oldset); } @@ -455,6 +445,55 @@ nfs_pagein_list(struct list_head *head, int rpages) } /* + * This is the callback from RPC telling us whether a reply was + * received or some error occurred (timeout or socket shutdown). + */ +int nfs_readpage_result(struct rpc_task *task, struct nfs_read_data *data) +{ + int status; + + dprintk("%s: %4d, (status %d)\n", __FUNCTION__, task->tk_pid, + task->tk_status); + + status = NFS_PROTO(data->inode)->read_done(task, data); + if (status != 0) + return status; + + nfs_add_stats(data->inode, NFSIOS_SERVERREADBYTES, data->res.count); + + if (task->tk_status == -ESTALE) { + set_bit(NFS_INO_STALE, &NFS_FLAGS(data->inode)); + nfs_mark_for_revalidate(data->inode); + } + spin_lock(&data->inode->i_lock); + NFS_I(data->inode)->cache_validity |= NFS_INO_INVALID_ATIME; + spin_unlock(&data->inode->i_lock); + return 0; +} + +static int nfs_readpage_retry(struct rpc_task *task, struct nfs_read_data *data) +{ + struct nfs_readargs *argp = &data->args; + struct nfs_readres *resp = &data->res; + + if (resp->eof || resp->count == argp->count) + return 0; + + /* This is a short read! */ + nfs_inc_stats(data->inode, NFSIOS_SHORTREAD); + /* Has the server at least made some progress? */ + if (resp->count == 0) + return 0; + + /* Yes, so retry the read at the end of the data */ + argp->offset += resp->count; + argp->pgbase += resp->count; + argp->count -= resp->count; + rpc_restart_call(task); + return -EAGAIN; +} + +/* * Handle a read reply that fills part of a page. */ static void nfs_readpage_result_partial(struct rpc_task *task, void *calldata) @@ -463,12 +502,16 @@ static void nfs_readpage_result_partial(struct rpc_task *task, void *calldata) struct nfs_page *req = data->req; struct page *page = req->wb_page; - if (likely(task->tk_status >= 0)) - nfs_readpage_truncate_uninitialised_page(data); - else - SetPageError(page); if (nfs_readpage_result(task, data) != 0) return; + + if (likely(task->tk_status >= 0)) { + nfs_readpage_truncate_uninitialised_page(data); + if (nfs_readpage_retry(task, data) != 0) + return; + } + if (unlikely(task->tk_status < 0)) + SetPageError(page); if (atomic_dec_and_test(&req->wb_complete)) { if (!PageError(page)) SetPageUptodate(page); @@ -496,25 +539,13 @@ static void nfs_readpage_set_pages_uptodate(struct nfs_read_data *data) count += base; for (;count >= PAGE_CACHE_SIZE; count -= PAGE_CACHE_SIZE, pages++) SetPageUptodate(*pages); - if (count != 0) + if (count == 0) + return; + /* Was this a short read? 
*/ + if (data->res.eof || data->res.count == data->args.count) SetPageUptodate(*pages); } -static void nfs_readpage_set_pages_error(struct nfs_read_data *data) -{ - unsigned int count = data->args.count; - unsigned int base = data->args.pgbase; - struct page **pages; - - pages = &data->args.pages[base >> PAGE_CACHE_SHIFT]; - base &= ~PAGE_CACHE_MASK; - count += base; - for (;count >= PAGE_CACHE_SIZE; count -= PAGE_CACHE_SIZE, pages++) - SetPageError(*pages); - if (count != 0) - SetPageError(*pages); -} - /* * This is the callback from RPC telling us whether a reply was * received or some error occurred (timeout or socket shutdown). @@ -523,19 +554,20 @@ static void nfs_readpage_result_full(struct rpc_task *task, void *calldata) { struct nfs_read_data *data = calldata; + if (nfs_readpage_result(task, data) != 0) + return; /* - * Note: nfs_readpage_result may change the values of + * Note: nfs_readpage_retry may change the values of * data->args. In the multi-page case, we therefore need - * to ensure that we call the next nfs_readpage_set_page_uptodate() - * first in the multi-page case. + * to ensure that we call nfs_readpage_set_pages_uptodate() + * first. */ if (likely(task->tk_status >= 0)) { nfs_readpage_truncate_uninitialised_page(data); nfs_readpage_set_pages_uptodate(data); - } else - nfs_readpage_set_pages_error(data); - if (nfs_readpage_result(task, data) != 0) - return; + if (nfs_readpage_retry(task, data) != 0) + return; + } while (!list_empty(&data->pages)) { struct nfs_page *req = nfs_list_entry(data->pages.next); @@ -550,50 +582,6 @@ static const struct rpc_call_ops nfs_read_full_ops = { }; /* - * This is the callback from RPC telling us whether a reply was - * received or some error occurred (timeout or socket shutdown). - */ -int nfs_readpage_result(struct rpc_task *task, struct nfs_read_data *data) -{ - struct nfs_readargs *argp = &data->args; - struct nfs_readres *resp = &data->res; - int status; - - dprintk("NFS: %4d nfs_readpage_result, (status %d)\n", - task->tk_pid, task->tk_status); - - status = NFS_PROTO(data->inode)->read_done(task, data); - if (status != 0) - return status; - - nfs_add_stats(data->inode, NFSIOS_SERVERREADBYTES, resp->count); - - if (task->tk_status < 0) { - if (task->tk_status == -ESTALE) { - set_bit(NFS_INO_STALE, &NFS_FLAGS(data->inode)); - nfs_mark_for_revalidate(data->inode); - } - } else if (resp->count < argp->count && !resp->eof) { - /* This is a short read! */ - nfs_inc_stats(data->inode, NFSIOS_SHORTREAD); - /* Has the server at least made some progress? */ - if (resp->count != 0) { - /* Yes, so retry the read at the end of the data */ - argp->offset += resp->count; - argp->pgbase += resp->count; - argp->count -= resp->count; - rpc_restart_call(task); - return -EAGAIN; - } - task->tk_status = -EIO; - } - spin_lock(&data->inode->i_lock); - NFS_I(data->inode)->cache_validity |= NFS_INO_INVALID_ATIME; - spin_unlock(&data->inode->i_lock); - return 0; -} - -/* * Read a page over NFS. * We read the page synchronously in the following case: * - The error flag is set for this page. 
This happens only when a @@ -626,9 +614,10 @@ int nfs_readpage(struct file *file, struct page *page) goto out_error; if (file == NULL) { + error = -EBADF; ctx = nfs_find_open_context(inode, NULL, FMODE_READ); if (ctx == NULL) - return -EBADF; + goto out_error; } else ctx = get_nfs_open_context((struct nfs_open_context *) file->private_data); @@ -663,7 +652,7 @@ readpage_async_filler(void *data, struct page *page) unsigned int len; nfs_wb_page(inode, page); - len = nfs_page_length(inode, page); + len = nfs_page_length(page); if (len == 0) return nfs_return_empty_page(page); new = nfs_create_request(desc->ctx, inode, page, 0, len); diff --git a/fs/nfs/symlink.c b/fs/nfs/symlink.c index 600bbe630ab..6c686112cc0 100644 --- a/fs/nfs/symlink.c +++ b/fs/nfs/symlink.c @@ -33,9 +33,7 @@ static int nfs_symlink_filler(struct inode *inode, struct page *page) { int error; - lock_kernel(); error = NFS_PROTO(inode)->readlink(inode, page, 0, PAGE_SIZE); - unlock_kernel(); if (error < 0) goto error; SetPageUptodate(page); diff --git a/fs/nfs/sysctl.c b/fs/nfs/sysctl.c index 2fe3403c240..3ea50ac6482 100644 --- a/fs/nfs/sysctl.c +++ b/fs/nfs/sysctl.c @@ -18,11 +18,6 @@ static const int nfs_set_port_min = 0; static const int nfs_set_port_max = 65535; static struct ctl_table_header *nfs_callback_sysctl_table; -/* - * Something that isn't CTL_ANY, CTL_NONE or a value that may clash. - * Use the same values as fs/lockd/svc.c - */ -#define CTL_UNNUMBERED -2 static ctl_table nfs_cb_sysctls[] = { #ifdef CONFIG_NFS_V4 diff --git a/fs/nfs/write.c b/fs/nfs/write.c index 883dd4a1c15..594eb16879e 100644 --- a/fs/nfs/write.c +++ b/fs/nfs/write.c @@ -63,6 +63,7 @@ #include <linux/smp_lock.h> #include "delegation.h" +#include "internal.h" #include "iostat.h" #define NFSDBG_FACILITY NFSDBG_PAGECACHE @@ -74,18 +75,17 @@ * Local function declarations */ static struct nfs_page * nfs_update_request(struct nfs_open_context*, - struct inode *, struct page *, unsigned int, unsigned int); +static void nfs_mark_request_dirty(struct nfs_page *req); static int nfs_wait_on_write_congestion(struct address_space *, int); static int nfs_wait_on_requests(struct inode *, unsigned long, unsigned int); -static int nfs_flush_inode(struct inode *inode, unsigned long idx_start, - unsigned int npages, int how); +static long nfs_flush_mapping(struct address_space *mapping, struct writeback_control *wbc, int how); static const struct rpc_call_ops nfs_write_partial_ops; static const struct rpc_call_ops nfs_write_full_ops; static const struct rpc_call_ops nfs_commit_ops; -static kmem_cache_t *nfs_wdata_cachep; +static struct kmem_cache *nfs_wdata_cachep; static mempool_t *nfs_wdata_mempool; static mempool_t *nfs_commit_mempool; @@ -93,7 +93,7 @@ static DECLARE_WAIT_QUEUE_HEAD(nfs_write_congestion); struct nfs_write_data *nfs_commit_alloc(void) { - struct nfs_write_data *p = mempool_alloc(nfs_commit_mempool, SLAB_NOFS); + struct nfs_write_data *p = mempool_alloc(nfs_commit_mempool, GFP_NOFS); if (p) { memset(p, 0, sizeof(*p)); @@ -102,17 +102,23 @@ struct nfs_write_data *nfs_commit_alloc(void) return p; } -void nfs_commit_free(struct nfs_write_data *p) +void nfs_commit_rcu_free(struct rcu_head *head) { + struct nfs_write_data *p = container_of(head, struct nfs_write_data, task.u.tk_rcu); if (p && (p->pagevec != &p->page_array[0])) kfree(p->pagevec); mempool_free(p, nfs_commit_mempool); } +void nfs_commit_free(struct nfs_write_data *wdata) +{ + call_rcu_bh(&wdata->task.u.tk_rcu, nfs_commit_rcu_free); +} + struct nfs_write_data 
*nfs_writedata_alloc(size_t len) { unsigned int pagecount = (len + PAGE_SIZE - 1) >> PAGE_SHIFT; - struct nfs_write_data *p = mempool_alloc(nfs_wdata_mempool, SLAB_NOFS); + struct nfs_write_data *p = mempool_alloc(nfs_wdata_mempool, GFP_NOFS); if (p) { memset(p, 0, sizeof(*p)); @@ -131,18 +137,47 @@ struct nfs_write_data *nfs_writedata_alloc(size_t len) return p; } -static void nfs_writedata_free(struct nfs_write_data *p) +static void nfs_writedata_rcu_free(struct rcu_head *head) { + struct nfs_write_data *p = container_of(head, struct nfs_write_data, task.u.tk_rcu); if (p && (p->pagevec != &p->page_array[0])) kfree(p->pagevec); mempool_free(p, nfs_wdata_mempool); } +static void nfs_writedata_free(struct nfs_write_data *wdata) +{ + call_rcu_bh(&wdata->task.u.tk_rcu, nfs_writedata_rcu_free); +} + void nfs_writedata_release(void *wdata) { nfs_writedata_free(wdata); } +static struct nfs_page *nfs_page_find_request_locked(struct page *page) +{ + struct nfs_page *req = NULL; + + if (PagePrivate(page)) { + req = (struct nfs_page *)page_private(page); + if (req != NULL) + atomic_inc(&req->wb_count); + } + return req; +} + +static struct nfs_page *nfs_page_find_request(struct page *page) +{ + struct nfs_page *req = NULL; + spinlock_t *req_lock = &NFS_I(page->mapping->host)->req_lock; + + spin_lock(req_lock); + req = nfs_page_find_request_locked(page); + spin_unlock(req_lock); + return req; +} + /* Adjust the file length if we're writing beyond the end */ static void nfs_grow_file(struct page *page, unsigned int offset, unsigned int count) { @@ -164,113 +199,34 @@ static void nfs_grow_file(struct page *page, unsigned int offset, unsigned int c */ static void nfs_mark_uptodate(struct page *page, unsigned int base, unsigned int count) { - loff_t end_offs; - if (PageUptodate(page)) return; if (base != 0) return; - if (count == PAGE_CACHE_SIZE) { - SetPageUptodate(page); - return; - } - - end_offs = i_size_read(page->mapping->host) - 1; - if (end_offs < 0) + if (count != nfs_page_length(page)) return; - /* Is this the last page? */ - if (page->index != (unsigned long)(end_offs >> PAGE_CACHE_SHIFT)) - return; - /* This is the last page: set PG_uptodate if we cover the entire - * extent of the data, then zero the rest of the page. - */ - if (count == (unsigned int)(end_offs & (PAGE_CACHE_SIZE - 1)) + 1) { + if (count != PAGE_CACHE_SIZE) memclear_highpage_flush(page, count, PAGE_CACHE_SIZE - count); - SetPageUptodate(page); - } + SetPageUptodate(page); } -/* - * Write a page synchronously. - * Offset is the data offset within the page. 
- */ -static int nfs_writepage_sync(struct nfs_open_context *ctx, struct inode *inode, - struct page *page, unsigned int offset, unsigned int count, - int how) -{ - unsigned int wsize = NFS_SERVER(inode)->wsize; - int result, written = 0; - struct nfs_write_data *wdata; - - wdata = nfs_writedata_alloc(wsize); - if (!wdata) - return -ENOMEM; - - wdata->flags = how; - wdata->cred = ctx->cred; - wdata->inode = inode; - wdata->args.fh = NFS_FH(inode); - wdata->args.context = ctx; - wdata->args.pages = &page; - wdata->args.stable = NFS_FILE_SYNC; - wdata->args.pgbase = offset; - wdata->args.count = wsize; - wdata->res.fattr = &wdata->fattr; - wdata->res.verf = &wdata->verf; - - dprintk("NFS: nfs_writepage_sync(%s/%Ld %d@%Ld)\n", - inode->i_sb->s_id, - (long long)NFS_FILEID(inode), - count, (long long)(page_offset(page) + offset)); - - set_page_writeback(page); - nfs_begin_data_update(inode); - do { - if (count < wsize) - wdata->args.count = count; - wdata->args.offset = page_offset(page) + wdata->args.pgbase; - - result = NFS_PROTO(inode)->write(wdata); - - if (result < 0) { - /* Must mark the page invalid after I/O error */ - ClearPageUptodate(page); - goto io_error; - } - if (result < wdata->args.count) - printk(KERN_WARNING "NFS: short write, count=%u, result=%d\n", - wdata->args.count, result); - - wdata->args.offset += result; - wdata->args.pgbase += result; - written += result; - count -= result; - nfs_add_stats(inode, NFSIOS_SERVERWRITTENBYTES, result); - } while (count); - /* Update file length */ - nfs_grow_file(page, offset, written); - /* Set the PG_uptodate flag? */ - nfs_mark_uptodate(page, offset, written); - - if (PageError(page)) - ClearPageError(page); - -io_error: - nfs_end_data_update(inode); - end_page_writeback(page); - nfs_writedata_free(wdata); - return written ? written : result; -} - -static int nfs_writepage_async(struct nfs_open_context *ctx, - struct inode *inode, struct page *page, +static int nfs_writepage_setup(struct nfs_open_context *ctx, struct page *page, unsigned int offset, unsigned int count) { struct nfs_page *req; + int ret; - req = nfs_update_request(ctx, inode, page, offset, count); - if (IS_ERR(req)) - return PTR_ERR(req); + for (;;) { + req = nfs_update_request(ctx, page, offset, count); + if (!IS_ERR(req)) + break; + ret = PTR_ERR(req); + if (ret != -EBUSY) + return ret; + ret = nfs_wb_page(page->mapping->host, page); + if (ret != 0) + return ret; + } /* Update file length */ nfs_grow_file(page, offset, count); /* Set the PG_uptodate flag? */ @@ -289,73 +245,94 @@ static int wb_priority(struct writeback_control *wbc) } /* + * Find an associated nfs write request, and prepare to flush it out + * Returns 1 if there was no write request, or if the request was + * already tagged by nfs_set_page_dirty.Returns 0 if the request + * was not tagged. + * May also return an error if the user signalled nfs_wait_on_request(). + */ +static int nfs_page_mark_flush(struct page *page) +{ + struct nfs_page *req; + spinlock_t *req_lock = &NFS_I(page->mapping->host)->req_lock; + int ret; + + spin_lock(req_lock); + for(;;) { + req = nfs_page_find_request_locked(page); + if (req == NULL) { + spin_unlock(req_lock); + return 1; + } + if (nfs_lock_request_dontget(req)) + break; + /* Note: If we hold the page lock, as is the case in nfs_writepage, + * then the call to nfs_lock_request_dontget() will always + * succeed provided that someone hasn't already marked the + * request as dirty (in which case we don't care). 
+ */ + spin_unlock(req_lock); + ret = nfs_wait_on_request(req); + nfs_release_request(req); + if (ret != 0) + return ret; + spin_lock(req_lock); + } + spin_unlock(req_lock); + if (test_and_set_bit(PG_FLUSHING, &req->wb_flags) == 0) { + nfs_mark_request_dirty(req); + set_page_writeback(page); + } + ret = test_bit(PG_NEED_FLUSH, &req->wb_flags); + nfs_unlock_request(req); + return ret; +} + +/* * Write an mmapped page to the server. */ -int nfs_writepage(struct page *page, struct writeback_control *wbc) +static int nfs_writepage_locked(struct page *page, struct writeback_control *wbc) { struct nfs_open_context *ctx; struct inode *inode = page->mapping->host; - unsigned long end_index; - unsigned offset = PAGE_CACHE_SIZE; - loff_t i_size = i_size_read(inode); - int inode_referenced = 0; - int priority = wb_priority(wbc); + unsigned offset; int err; nfs_inc_stats(inode, NFSIOS_VFSWRITEPAGE); nfs_add_stats(inode, NFSIOS_WRITEPAGES, 1); - /* - * Note: We need to ensure that we have a reference to the inode - * if we are to do asynchronous writes. If not, waiting - * in nfs_wait_on_request() may deadlock with clear_inode(). - * - * If igrab() fails here, then it is in any case safe to - * call nfs_wb_page(), since there will be no pending writes. - */ - if (igrab(inode) != 0) - inode_referenced = 1; - end_index = i_size >> PAGE_CACHE_SHIFT; - - /* Ensure we've flushed out any previous writes */ - nfs_wb_page_priority(inode, page, priority); - - /* easy case */ - if (page->index < end_index) - goto do_it; - /* things got complicated... */ - offset = i_size & (PAGE_CACHE_SIZE-1); - - /* OK, are we completely out? */ - err = 0; /* potential race with truncate - ignore */ - if (page->index >= end_index+1 || !offset) + err = nfs_page_mark_flush(page); + if (err <= 0) + goto out; + err = 0; + offset = nfs_page_length(page); + if (!offset) goto out; -do_it: + ctx = nfs_find_open_context(inode, NULL, FMODE_WRITE); if (ctx == NULL) { err = -EBADF; goto out; } - lock_kernel(); - if (!IS_SYNC(inode) && inode_referenced) { - err = nfs_writepage_async(ctx, inode, page, 0, offset); - if (!wbc->for_writepages) - nfs_flush_inode(inode, 0, 0, wb_priority(wbc)); - } else { - err = nfs_writepage_sync(ctx, inode, page, 0, - offset, priority); - if (err >= 0) { - if (err != offset) - redirty_page_for_writepage(wbc, page); - err = 0; - } - } - unlock_kernel(); + err = nfs_writepage_setup(ctx, page, 0, offset); put_nfs_open_context(ctx); + if (err != 0) + goto out; + err = nfs_page_mark_flush(page); + if (err > 0) + err = 0; out: + if (!wbc->for_writepages) + nfs_flush_mapping(page->mapping, wbc, wb_priority(wbc)); + return err; +} + +int nfs_writepage(struct page *page, struct writeback_control *wbc) +{ + int err; + + err = nfs_writepage_locked(page, wbc); unlock_page(page); - if (inode_referenced) - iput(inode); return err; } @@ -379,21 +356,18 @@ int nfs_writepages(struct address_space *mapping, struct writeback_control *wbc) return 0; nfs_wait_on_write_congestion(mapping, 0); } - err = nfs_flush_inode(inode, 0, 0, wb_priority(wbc)); + err = nfs_flush_mapping(mapping, wbc, wb_priority(wbc)); if (err < 0) goto out; nfs_add_stats(inode, NFSIOS_WRITEPAGES, err); - wbc->nr_to_write -= err; if (!wbc->nonblocking && wbc->sync_mode == WB_SYNC_ALL) { err = nfs_wait_on_requests(inode, 0, 0); if (err < 0) goto out; } err = nfs_commit_inode(inode, wb_priority(wbc)); - if (err > 0) { - wbc->nr_to_write -= err; + if (err > 0) err = 0; - } out: clear_bit(BDI_write_congested, &bdi->state); wake_up_all(&nfs_write_congestion); @@ 
-420,6 +394,7 @@ static int nfs_inode_add_request(struct inode *inode, struct nfs_page *req) nfsi->change_attr++; } SetPagePrivate(req->wb_page); + set_page_private(req->wb_page, (unsigned long)req); nfsi->npages++; atomic_inc(&req->wb_count); return 0; @@ -436,6 +411,7 @@ static void nfs_inode_remove_request(struct nfs_page *req) BUG_ON (!NFS_WBACK_BUSY(req)); spin_lock(&nfsi->req_lock); + set_page_private(req->wb_page, 0); ClearPagePrivate(req->wb_page); radix_tree_delete(&nfsi->nfs_page_tree, req->wb_index); nfsi->npages--; @@ -450,33 +426,6 @@ static void nfs_inode_remove_request(struct nfs_page *req) } /* - * Find a request - */ -static inline struct nfs_page * -_nfs_find_request(struct inode *inode, unsigned long index) -{ - struct nfs_inode *nfsi = NFS_I(inode); - struct nfs_page *req; - - req = (struct nfs_page*)radix_tree_lookup(&nfsi->nfs_page_tree, index); - if (req) - atomic_inc(&req->wb_count); - return req; -} - -static struct nfs_page * -nfs_find_request(struct inode *inode, unsigned long index) -{ - struct nfs_page *req; - struct nfs_inode *nfsi = NFS_I(inode); - - spin_lock(&nfsi->req_lock); - req = _nfs_find_request(inode, index); - spin_unlock(&nfsi->req_lock); - return req; -} - -/* * Add a request to the inode's dirty list. */ static void @@ -491,8 +440,14 @@ nfs_mark_request_dirty(struct nfs_page *req) nfs_list_add_request(req, &nfsi->dirty); nfsi->ndirty++; spin_unlock(&nfsi->req_lock); - inc_zone_page_state(req->wb_page, NR_FILE_DIRTY); - mark_inode_dirty(inode); + __mark_inode_dirty(inode, I_DIRTY_PAGES); +} + +static void +nfs_redirty_request(struct nfs_page *req) +{ + clear_bit(PG_FLUSHING, &req->wb_flags); + __set_page_dirty_nobuffers(req->wb_page); } /* @@ -501,8 +456,7 @@ nfs_mark_request_dirty(struct nfs_page *req) static inline int nfs_dirty_request(struct nfs_page *req) { - struct nfs_inode *nfsi = NFS_I(req->wb_context->dentry->d_inode); - return !list_empty(&req->wb_list) && req->wb_list_head == &nfsi->dirty; + return test_bit(PG_FLUSHING, &req->wb_flags) == 0; } #if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4) @@ -520,7 +474,7 @@ nfs_mark_request_commit(struct nfs_page *req) nfsi->ncommit++; spin_unlock(&nfsi->req_lock); inc_zone_page_state(req->wb_page, NR_UNSTABLE_NFS); - mark_inode_dirty(inode); + __mark_inode_dirty(inode, I_DIRTY_DATASYNC); } #endif @@ -597,31 +551,6 @@ static void nfs_cancel_commit_list(struct list_head *head) } } -/* - * nfs_scan_dirty - Scan an inode for dirty requests - * @inode: NFS inode to scan - * @dst: destination list - * @idx_start: lower bound of page->index to scan. - * @npages: idx_start + npages sets the upper bound to scan. - * - * Moves requests from the inode's dirty page list. - * The requests are *not* checked to ensure that they form a contiguous set. - */ -static int -nfs_scan_dirty(struct inode *inode, struct list_head *dst, unsigned long idx_start, unsigned int npages) -{ - struct nfs_inode *nfsi = NFS_I(inode); - int res = 0; - - if (nfsi->ndirty != 0) { - res = nfs_scan_lock_dirty(nfsi, dst, idx_start, npages); - nfsi->ndirty -= res; - if ((nfsi->ndirty == 0) != list_empty(&nfsi->dirty)) - printk(KERN_ERR "NFS: desynchronized value of nfs_i.ndirty.\n"); - } - return res; -} - #if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4) /* * nfs_scan_commit - Scan an inode for commit requests @@ -698,27 +627,27 @@ static int nfs_wait_on_write_congestion(struct address_space *mapping, int intr) * Note: Should always be called with the Page Lock held! 
*/ static struct nfs_page * nfs_update_request(struct nfs_open_context* ctx, - struct inode *inode, struct page *page, - unsigned int offset, unsigned int bytes) + struct page *page, unsigned int offset, unsigned int bytes) { - struct nfs_server *server = NFS_SERVER(inode); + struct inode *inode = page->mapping->host; struct nfs_inode *nfsi = NFS_I(inode); struct nfs_page *req, *new = NULL; unsigned long rqend, end; end = offset + bytes; - if (nfs_wait_on_write_congestion(page->mapping, server->flags & NFS_MOUNT_INTR)) + if (nfs_wait_on_write_congestion(page->mapping, NFS_SERVER(inode)->flags & NFS_MOUNT_INTR)) return ERR_PTR(-ERESTARTSYS); for (;;) { /* Loop over all inode entries and see if we find * A request for the page we wish to update */ spin_lock(&nfsi->req_lock); - req = _nfs_find_request(inode, page->index); + req = nfs_page_find_request_locked(page); if (req) { if (!nfs_lock_request_dontget(req)) { int error; + spin_unlock(&nfsi->req_lock); error = nfs_wait_on_request(req); nfs_release_request(req); @@ -745,7 +674,6 @@ static struct nfs_page * nfs_update_request(struct nfs_open_context* ctx, return ERR_PTR(error); } spin_unlock(&nfsi->req_lock); - nfs_mark_request_dirty(new); return new; } spin_unlock(&nfsi->req_lock); @@ -786,9 +714,8 @@ static struct nfs_page * nfs_update_request(struct nfs_open_context* ctx, int nfs_flush_incompatible(struct file *file, struct page *page) { struct nfs_open_context *ctx = (struct nfs_open_context *)file->private_data; - struct inode *inode = page->mapping->host; struct nfs_page *req; - int status = 0; + int do_flush, status; /* * Look for a request corresponding to this page. If there * is one, and it belongs to another file, we flush it out @@ -797,13 +724,18 @@ int nfs_flush_incompatible(struct file *file, struct page *page) * Also do the same if we find a request from an existing * dropped page. */ - req = nfs_find_request(inode, page->index); - if (req) { - if (req->wb_page != page || ctx != req->wb_context) - status = nfs_wb_page(inode, page); + do { + req = nfs_page_find_request(page); + if (req == NULL) + return 0; + do_flush = req->wb_page != page || req->wb_context != ctx + || !nfs_dirty_request(req); nfs_release_request(req); - } - return (status < 0) ? status : 0; + if (!do_flush) + return 0; + status = nfs_wb_page(page->mapping->host, page); + } while (status == 0); + return status; } /* @@ -817,7 +749,6 @@ int nfs_updatepage(struct file *file, struct page *page, { struct nfs_open_context *ctx = (struct nfs_open_context *)file->private_data; struct inode *inode = page->mapping->host; - struct nfs_page *req; int status = 0; nfs_inc_stats(inode, NFSIOS_VFSUPDATEPAGE); @@ -827,62 +758,18 @@ int nfs_updatepage(struct file *file, struct page *page, file->f_dentry->d_name.name, count, (long long)(page_offset(page) +offset)); - if (IS_SYNC(inode)) { - status = nfs_writepage_sync(ctx, inode, page, offset, count, 0); - if (status > 0) { - if (offset == 0 && status == PAGE_CACHE_SIZE) - SetPageUptodate(page); - return 0; - } - return status; - } - /* If we're not using byte range locks, and we know the page * is entirely in cache, it may be more efficient to avoid * fragmenting write requests. 
*/ if (PageUptodate(page) && inode->i_flock == NULL && !(file->f_mode & O_SYNC)) { - loff_t end_offs = i_size_read(inode) - 1; - unsigned long end_index = end_offs >> PAGE_CACHE_SHIFT; - - count += offset; + count = max(count + offset, nfs_page_length(page)); offset = 0; - if (unlikely(end_offs < 0)) { - /* Do nothing */ - } else if (page->index == end_index) { - unsigned int pglen; - pglen = (unsigned int)(end_offs & (PAGE_CACHE_SIZE-1)) + 1; - if (count < pglen) - count = pglen; - } else if (page->index < end_index) - count = PAGE_CACHE_SIZE; } - /* - * Try to find an NFS request corresponding to this page - * and update it. - * If the existing request cannot be updated, we must flush - * it out now. - */ - do { - req = nfs_update_request(ctx, inode, page, offset, count); - status = (IS_ERR(req)) ? PTR_ERR(req) : 0; - if (status != -EBUSY) - break; - /* Request could not be updated. Flush it out and try again */ - status = nfs_wb_page(inode, page); - } while (status >= 0); - if (status < 0) - goto done; - - status = 0; + status = nfs_writepage_setup(ctx, page, offset, count); + __set_page_dirty_nobuffers(page); - /* Update file length */ - nfs_grow_file(page, offset, count); - /* Set the PG_uptodate flag? */ - nfs_mark_uptodate(page, req->wb_pgbase, req->wb_bytes); - nfs_unlock_request(req); -done: dprintk("NFS: nfs_updatepage returns %d (isize %Ld)\n", status, (long long)i_size_read(inode)); if (status < 0) @@ -897,7 +784,7 @@ static void nfs_writepage_release(struct nfs_page *req) #if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4) if (!PageError(req->wb_page)) { if (NFS_NEED_RESCHED(req)) { - nfs_mark_request_dirty(req); + nfs_redirty_request(req); goto out; } else if (NFS_NEED_COMMIT(req)) { nfs_mark_request_commit(req); @@ -979,9 +866,7 @@ static void nfs_execute_write(struct nfs_write_data *data) sigset_t oldset; rpc_clnt_sigmask(clnt, &oldset); - lock_kernel(); rpc_execute(&data->task); - unlock_kernel(); rpc_clnt_sigunmask(clnt, &oldset); } @@ -1015,7 +900,6 @@ static int nfs_flush_multi(struct inode *inode, struct list_head *head, int how) atomic_set(&req->wb_complete, requests); ClearPageError(page); - set_page_writeback(page); offset = 0; nbytes = req->wb_bytes; do { @@ -1043,9 +927,9 @@ out_bad: while (!list_empty(&list)) { data = list_entry(list.next, struct nfs_write_data, pages); list_del(&data->pages); - nfs_writedata_free(data); + nfs_writedata_release(data); } - nfs_mark_request_dirty(req); + nfs_redirty_request(req); nfs_clear_page_writeback(req); return -ENOMEM; } @@ -1076,7 +960,6 @@ static int nfs_flush_one(struct inode *inode, struct list_head *head, int how) nfs_list_remove_request(req); nfs_list_add_request(req, &data->pages); ClearPageError(req->wb_page); - set_page_writeback(req->wb_page); *pages++ = req->wb_page; count += req->wb_bytes; } @@ -1091,7 +974,7 @@ static int nfs_flush_one(struct inode *inode, struct list_head *head, int how) while (!list_empty(head)) { struct nfs_page *req = nfs_list_entry(head->next); nfs_list_remove_request(req); - nfs_mark_request_dirty(req); + nfs_redirty_request(req); nfs_clear_page_writeback(req); } return -ENOMEM; @@ -1126,7 +1009,7 @@ out_err: while (!list_empty(head)) { req = nfs_list_entry(head->next); nfs_list_remove_request(req); - nfs_mark_request_dirty(req); + nfs_redirty_request(req); nfs_clear_page_writeback(req); } return error; @@ -1442,7 +1325,7 @@ static void nfs_commit_done(struct rpc_task *task, void *calldata) } /* We have a mismatch. 
Write the page again */ dprintk(" mismatch\n"); - nfs_mark_request_dirty(req); + nfs_redirty_request(req); next: nfs_clear_page_writeback(req); } @@ -1459,18 +1342,17 @@ static inline int nfs_commit_list(struct inode *inode, struct list_head *head, i } #endif -static int nfs_flush_inode(struct inode *inode, unsigned long idx_start, - unsigned int npages, int how) +static long nfs_flush_mapping(struct address_space *mapping, struct writeback_control *wbc, int how) { - struct nfs_inode *nfsi = NFS_I(inode); + struct nfs_inode *nfsi = NFS_I(mapping->host); LIST_HEAD(head); - int res; + long res; spin_lock(&nfsi->req_lock); - res = nfs_scan_dirty(inode, &head, idx_start, npages); + res = nfs_scan_dirty(mapping, wbc, &head); spin_unlock(&nfsi->req_lock); if (res) { - int error = nfs_flush_list(inode, &head, res, how); + int error = nfs_flush_list(mapping->host, &head, res, how); if (error < 0) return error; } @@ -1496,38 +1378,62 @@ int nfs_commit_inode(struct inode *inode, int how) } #endif -int nfs_sync_inode_wait(struct inode *inode, unsigned long idx_start, - unsigned int npages, int how) +long nfs_sync_mapping_wait(struct address_space *mapping, struct writeback_control *wbc, int how) { + struct inode *inode = mapping->host; struct nfs_inode *nfsi = NFS_I(inode); + unsigned long idx_start, idx_end; + unsigned int npages = 0; LIST_HEAD(head); int nocommit = how & FLUSH_NOCOMMIT; - int pages, ret; - + long pages, ret; + + /* FIXME */ + if (wbc->range_cyclic) + idx_start = 0; + else { + idx_start = wbc->range_start >> PAGE_CACHE_SHIFT; + idx_end = wbc->range_end >> PAGE_CACHE_SHIFT; + if (idx_end > idx_start) { + unsigned long l_npages = 1 + idx_end - idx_start; + npages = l_npages; + if (sizeof(npages) != sizeof(l_npages) && + (unsigned long)npages != l_npages) + npages = 0; + } + } how &= ~FLUSH_NOCOMMIT; spin_lock(&nfsi->req_lock); do { + wbc->pages_skipped = 0; ret = nfs_wait_on_requests_locked(inode, idx_start, npages); if (ret != 0) continue; - pages = nfs_scan_dirty(inode, &head, idx_start, npages); + pages = nfs_scan_dirty(mapping, wbc, &head); if (pages != 0) { spin_unlock(&nfsi->req_lock); - if (how & FLUSH_INVALIDATE) + if (how & FLUSH_INVALIDATE) { nfs_cancel_dirty_list(&head); - else + ret = pages; + } else ret = nfs_flush_list(inode, &head, pages, how); spin_lock(&nfsi->req_lock); continue; } + if (wbc->pages_skipped != 0) + continue; if (nocommit) break; pages = nfs_scan_commit(inode, &head, idx_start, npages); - if (pages == 0) + if (pages == 0) { + if (wbc->pages_skipped != 0) + continue; break; + } if (how & FLUSH_INVALIDATE) { spin_unlock(&nfsi->req_lock); nfs_cancel_commit_list(&head); + ret = pages; spin_lock(&nfsi->req_lock); continue; } @@ -1540,6 +1446,106 @@ int nfs_sync_inode_wait(struct inode *inode, unsigned long idx_start, return ret; } +/* + * flush the inode to disk. 
+ */ +int nfs_wb_all(struct inode *inode) +{ + struct address_space *mapping = inode->i_mapping; + struct writeback_control wbc = { + .bdi = mapping->backing_dev_info, + .sync_mode = WB_SYNC_ALL, + .nr_to_write = LONG_MAX, + .for_writepages = 1, + .range_cyclic = 1, + }; + int ret; + + ret = generic_writepages(mapping, &wbc); + if (ret < 0) + goto out; + ret = nfs_sync_mapping_wait(mapping, &wbc, 0); + if (ret >= 0) + return 0; +out: + __mark_inode_dirty(mapping->host, I_DIRTY_PAGES); + return ret; +} + +int nfs_sync_mapping_range(struct address_space *mapping, loff_t range_start, loff_t range_end, int how) +{ + struct writeback_control wbc = { + .bdi = mapping->backing_dev_info, + .sync_mode = WB_SYNC_ALL, + .nr_to_write = LONG_MAX, + .range_start = range_start, + .range_end = range_end, + .for_writepages = 1, + }; + int ret; + + if (!(how & FLUSH_NOWRITEPAGE)) { + ret = generic_writepages(mapping, &wbc); + if (ret < 0) + goto out; + } + ret = nfs_sync_mapping_wait(mapping, &wbc, how); + if (ret >= 0) + return 0; +out: + __mark_inode_dirty(mapping->host, I_DIRTY_PAGES); + return ret; +} + +int nfs_wb_page_priority(struct inode *inode, struct page *page, int how) +{ + loff_t range_start = page_offset(page); + loff_t range_end = range_start + (loff_t)(PAGE_CACHE_SIZE - 1); + struct writeback_control wbc = { + .bdi = page->mapping->backing_dev_info, + .sync_mode = WB_SYNC_ALL, + .nr_to_write = LONG_MAX, + .range_start = range_start, + .range_end = range_end, + }; + int ret; + + BUG_ON(!PageLocked(page)); + if (!(how & FLUSH_NOWRITEPAGE) && clear_page_dirty_for_io(page)) { + ret = nfs_writepage_locked(page, &wbc); + if (ret < 0) + goto out; + } + ret = nfs_sync_mapping_wait(page->mapping, &wbc, how); + if (ret >= 0) + return 0; +out: + __mark_inode_dirty(inode, I_DIRTY_PAGES); + return ret; +} + +/* + * Write back all requests on one page - we do this before reading it. + */ +int nfs_wb_page(struct inode *inode, struct page* page) +{ + return nfs_wb_page_priority(inode, page, FLUSH_STABLE); +} + +int nfs_set_page_dirty(struct page *page) +{ + struct nfs_page *req; + + req = nfs_page_find_request(page); + if (req != NULL) { + /* Mark any existing write requests for flushing */ + set_bit(PG_NEED_FLUSH, &req->wb_flags); + nfs_release_request(req); + } + return __set_page_dirty_nobuffers(page); +} + + int __init nfs_init_writepagecache(void) { nfs_wdata_cachep = kmem_cache_create("nfs_write_data", |
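
The read.c and write.c hunks above also convert nfs_readdata_free(), nfs_writedata_free() and nfs_commit_free() to an RCU-deferred free, queuing call_rcu_bh() on the rcu_head embedded in the RPC task (task.u.tk_rcu). A generic sketch of that deferred-free pattern, with made-up names (my_data, my_data_free), its own embedded rcu_head, and plain call_rcu() instead of the _bh variant used in the patch:

#include <linux/kernel.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct my_data {
	struct rcu_head rcu;	/* hypothetical; the patch reuses task.u.tk_rcu */
	/* ... payload that RCU readers may still be dereferencing ... */
};

static void my_data_rcu_free(struct rcu_head *head)
{
	struct my_data *p = container_of(head, struct my_data, rcu);

	kfree(p);
}

/* Defer the free until concurrent rcu_read_lock() readers have finished. */
static void my_data_free(struct my_data *p)
{
	call_rcu(&p->rcu, my_data_rcu_free);
}
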